Mirror of https://github.com/ssciwr/AMMICO.git, synced 2025-10-29 13:06:04 +02:00
Deploying to gh-pages from @ ssciwr/AMMICO@c28937b373 🚀
This commit is contained in:
Parent: 485b14a6f6
Commit: 5bb96ddb20
Binary data: build/doctrees/ammico.doctree (binary file not shown)
Binary data: build/doctrees/create_API_key_link.doctree (binary file not shown)
Binary data: build/doctrees/environment.pickle (binary file not shown)
Binary data: build/doctrees/faq_link.doctree (binary file not shown)
Binary data: build/doctrees/index.doctree (binary file not shown)
Binary data: build/doctrees/license_link.doctree (binary file not shown)
Binary data: build/doctrees/modules.doctree (binary file not shown)
@ -22,7 +22,7 @@
|
||||
"source": [
|
||||
"# if running on google colab\\\n",
|
||||
"# PLEASE RUN THIS ONLY AS CPU RUNTIME\n",
|
||||
"# for a GPU runtime, there are conflicts with pre-installed packages - \n",
|
||||
"# for a GPU runtime, there are conflicts with pre-installed packages -\n",
|
||||
"# you first need to uninstall them (prepare a clean environment with no pre-installs) and then install ammico\n",
|
||||
"# flake8-noqa-cell\n",
|
||||
"\n",
|
||||
@ -103,11 +103,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"# jax also sometimes leads to problems on google colab\n",
|
||||
"# if this is the case, try restarting the kernel and executing this \n",
|
||||
"# if this is the case, try restarting the kernel and executing this\n",
|
||||
"# and the above two code cells again\n",
|
||||
"import ammico\n",
|
||||
"\n",
|
||||
"# for displaying a progress bar\n",
|
||||
"from tqdm import tqdm"
|
||||
]
|
||||
@ -276,7 +276,7 @@
|
||||
"source": [
|
||||
"# dump file name\n",
|
||||
"dump_file = \"dump_file.csv\"\n",
|
||||
"# dump every N images \n",
|
||||
"# dump every N images\n",
|
||||
"dump_every = 10"
|
||||
]
|
||||
},
|
||||
@ -299,12 +299,18 @@
|
||||
"# the highest possible value is 100\n",
|
||||
"race_threshold = 50\n",
|
||||
"gender_threshold = 50\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(image_dict[key],\n",
|
||||
" emotion_threshold=emotion_threshold,\n",
|
||||
" race_threshold=race_threshold,\n",
|
||||
" gender_threshold=gender_threshold).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" if num % dump_every == 0 or num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(\n",
|
||||
" image_dict[key],\n",
|
||||
" emotion_threshold=emotion_threshold,\n",
|
||||
" race_threshold=race_threshold,\n",
|
||||
" gender_threshold=gender_threshold,\n",
|
||||
" ).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 or num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -322,10 +328,16 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = ammico.TextDetector(image_dict[key], analyse_text=True).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = ammico.TextDetector(\n",
|
||||
" image_dict[key], analyse_text=True\n",
|
||||
" ).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
"\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -344,13 +356,21 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# initialize the models\n",
|
||||
"image_summary_detector = ammico.SummaryDetector(subdict = image_dict, analysis_type=\"summary\", model_type=\"base\")\n",
|
||||
"image_summary_detector = ammico.SummaryDetector(\n",
|
||||
" subdict=image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# run the analysis without having to re-iniatialize the model\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\") # analyse image with SummaryDetector and update dict\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key], analysis_type=\"summary\"\n",
|
||||
" ) # analyse image with SummaryDetector and update dict\n",
|
||||
"\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -369,14 +389,26 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# initialize the models\n",
|
||||
"image_summary_detector = ammico.SummaryDetector(subdict = image_dict, analysis_type=\"summary\", model_type=\"base\")\n",
|
||||
"image_summary_detector = ammico.SummaryDetector(\n",
|
||||
" subdict=image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(image_dict[key]).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" image_dict[key] = ammico.TextDetector(image_dict[key], analyse_text=True).analyse_image() # analyse image with TextDetector and update dict\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\") # analyse image with SummaryDetector and update dict\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file \n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(\n",
|
||||
" image_dict[key]\n",
|
||||
" ).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" image_dict[key] = ammico.TextDetector(\n",
|
||||
" image_dict[key], analyse_text=True\n",
|
||||
" ).analyse_image() # analyse image with TextDetector and update dict\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key], analysis_type=\"summary\"\n",
|
||||
" ) # analyse image with SummaryDetector and update dict\n",
|
||||
"\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -477,7 +509,7 @@
|
||||
"# set the dump file\n",
|
||||
"# dump file name\n",
|
||||
"dump_file = \"dump_file.csv\"\n",
|
||||
"# dump every N images \n",
|
||||
"# dump every N images\n",
|
||||
"dump_every = 10"
|
||||
]
|
||||
},
|
||||
@ -488,9 +520,15 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# analyze the csv file\n",
|
||||
"for num, key in tqdm(enumerate(text_dict.keys()), total=len(text_dict)): # loop through all text entries\n",
|
||||
" ammico.TextDetector(text_dict[key], analyse_text=True, skip_extraction=True).analyse_image() # analyse text with TextDetector and update dict\n",
|
||||
" if num % dump_every == 0 | num == len(text_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(text_dict.keys()), total=len(text_dict)\n",
|
||||
"): # loop through all text entries\n",
|
||||
" ammico.TextDetector(\n",
|
||||
" text_dict[key], analyse_text=True, skip_extraction=True\n",
|
||||
" ).analyse_image() # analyse text with TextDetector and update dict\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(text_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(text_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -565,14 +603,23 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = ammico.TextDetector(image_dict[key], # analyse image with TextDetector and update dict\n",
|
||||
" analyse_text=True, model_names=[\"sshleifer/distilbart-cnn-12-6\", \n",
|
||||
" \"distilbert-base-uncased-finetuned-sst-2-english\", \n",
|
||||
" \"dbmdz/bert-large-cased-finetuned-conll03-english\"], \n",
|
||||
" revision_numbers=[\"a4f8f3e\", \"af0f99b\", \"f2482bf\"]).analyse_image()\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = ammico.TextDetector(\n",
|
||||
" image_dict[key], # analyse image with TextDetector and update dict\n",
|
||||
" analyse_text=True,\n",
|
||||
" model_names=[\n",
|
||||
" \"sshleifer/distilbart-cnn-12-6\",\n",
|
||||
" \"distilbert-base-uncased-finetuned-sst-2-english\",\n",
|
||||
" \"dbmdz/bert-large-cased-finetuned-conll03-english\",\n",
|
||||
" ],\n",
|
||||
" revision_numbers=[\"a4f8f3e\", \"af0f99b\", \"f2482bf\"],\n",
|
||||
" ).analyse_image()\n",
|
||||
"\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -654,7 +701,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image_summary_detector = ammico.SummaryDetector(image_dict, analysis_type=\"summary\", model_type=\"base\")"
|
||||
"image_summary_detector = ammico.SummaryDetector(\n",
|
||||
" image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -663,10 +712,12 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\")\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: \n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key], analysis_type=\"summary\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1:\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -704,14 +755,17 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image_summary_vqa_detector = ammico.SummaryDetector(image_dict, analysis_type=\"questions\", \n",
|
||||
" model_type=\"vqa\")\n",
|
||||
"image_summary_vqa_detector = ammico.SummaryDetector(\n",
|
||||
" image_dict, analysis_type=\"questions\", model_type=\"vqa\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_vqa_detector.analyse_image(subdict=image_dict[key], \n",
|
||||
" analysis_type=\"questions\", \n",
|
||||
" list_of_questions = list_of_questions)\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: \n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_vqa_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" )\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1:\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -729,13 +783,16 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image_summary_vqa_detector = ammico.SummaryDetector(image_dict, analysis_type=\"summary_and_questions\", \n",
|
||||
" model_type=\"base\")\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_vqa_detector.analyse_image(subdict=image_dict[key], \n",
|
||||
" analysis_type=\"summary_and_questions\", \n",
|
||||
" list_of_questions = list_of_questions)\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: \n",
|
||||
"image_summary_vqa_detector = ammico.SummaryDetector(\n",
|
||||
" image_dict, analysis_type=\"summary_and_questions\", model_type=\"base\"\n",
|
||||
")\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_vqa_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"summary_and_questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" )\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1:\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -767,7 +824,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"obj = ammico.SummaryDetector(subdict=image_dict, analysis_type = \"summary_and_questions\", model_type = \"blip2_t5_caption_coco_flant5xl\")\n",
|
||||
"obj = ammico.SummaryDetector(\n",
|
||||
" subdict=image_dict,\n",
|
||||
" analysis_type=\"summary_and_questions\",\n",
|
||||
" model_type=\"blip2_t5_caption_coco_flant5xl\",\n",
|
||||
")\n",
|
||||
"# list of the new models that can be used:\n",
|
||||
"# \"blip2_t5_pretrain_flant5xxl\",\n",
|
||||
"# \"blip2_t5_pretrain_flant5xl\",\n",
|
||||
@ -781,7 +842,7 @@
|
||||
"# Or you can use `caption_coco_`` model types to generate coco-style captions.\n",
|
||||
"# `flant5` and `opt` means that the model equipped with FlanT5 and OPT LLMs respectively.\n",
|
||||
"\n",
|
||||
"#also you can perform all calculation on cpu if you set device_type= \"cpu\" or gpu if you set device_type= \"cuda\""
|
||||
"# also you can perform all calculation on cpu if you set device_type= \"cpu\" or gpu if you set device_type= \"cuda\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -812,9 +873,13 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for key in image_dict:\n",
|
||||
" image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions)\n",
|
||||
" image_dict[key] = obj.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"# analysis_type can be \n",
|
||||
"# analysis_type can be\n",
|
||||
"# \"summary\",\n",
|
||||
"# \"questions\",\n",
|
||||
"# \"summary_and_questions\"."
|
||||
@ -848,7 +913,11 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for key in image_dict:\n",
|
||||
" image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions)"
|
||||
" image_dict[key] = obj.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -886,7 +955,12 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for key in image_dict:\n",
|
||||
" image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions, consequential_questions=True)"
|
||||
" image_dict[key] = obj.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" consequential_questions=True,\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -940,9 +1014,13 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for key in image_dict.keys():\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
|
||||
" gender_threshold=50,\n",
|
||||
" accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
|
||||
" image_dict[key] = ammico.EmotionDetector(\n",
|
||||
" image_dict[key],\n",
|
||||
" emotion_threshold=50,\n",
|
||||
" race_threshold=50,\n",
|
||||
" gender_threshold=50,\n",
|
||||
" accept_disclosure=\"DISCLOSURE_AMMICO\",\n",
|
||||
" ).analyse_image()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -1055,9 +1133,9 @@
|
||||
" image_names,\n",
|
||||
" features_image_stacked,\n",
|
||||
") = my_obj.parsing_images(\n",
|
||||
" model_type, \n",
|
||||
" model_type,\n",
|
||||
" path_to_save_tensors=\"/content/drive/MyDrive/misinformation-data/\",\n",
|
||||
" )"
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -1112,14 +1190,19 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import importlib_resources # only require for image query example\n",
|
||||
"image_example_query = str(importlib_resources.files(\"ammico\") / \"data\" / \"test-crop-image.png\") # creating the path to the image for the image query example\n",
|
||||
"import importlib_resources # only require for image query example\n",
|
||||
"\n",
|
||||
"image_example_query = str(\n",
|
||||
" importlib_resources.files(\"ammico\") / \"data\" / \"test-crop-image.png\"\n",
|
||||
") # creating the path to the image for the image query example\n",
|
||||
"\n",
|
||||
"search_query = [\n",
|
||||
" {\"text_input\": \"politician press conference\"}, \n",
|
||||
" {\"text_input\": \"politician press conference\"},\n",
|
||||
" {\"text_input\": \"a world map\"},\n",
|
||||
" {\"text_input\": \"a dog\"}, # This is how looks text query\n",
|
||||
" {\"image\": image_example_query}, # This is how looks image query, here `image_example_path` is the path to query image like \"data/test-crop-image.png\"\n",
|
||||
" {\"text_input\": \"a dog\"}, # This is how looks text query\n",
|
||||
" {\n",
|
||||
" \"image\": image_example_query\n",
|
||||
" }, # This is how looks image query, here `image_example_path` is the path to query image like \"data/test-crop-image.png\"\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
@ -1199,7 +1282,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"my_obj.show_results(\n",
|
||||
" search_query[0], # you can change the index to see the results for other queries\n",
|
||||
" search_query[0], # you can change the index to see the results for other queries\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -1210,7 +1293,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"my_obj.show_results(\n",
|
||||
" search_query[3], # you can change the index to see the results for other queries\n",
|
||||
" search_query[3], # you can change the index to see the results for other queries\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -1349,7 +1432,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"analysis_explorer = ammico.AnalysisExplorer(image_dict)\n",
|
||||
"analysis_explorer.run_server(port = 8057)"
|
||||
"analysis_explorer.run_server(port=8057)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -1447,7 +1530,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.11.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@ -1,183 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Crop posts module"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Crop posts from social media posts images, to keep import text informations from social media posts images.\n",
|
||||
"We can set some manually cropped views from social media posts as reference for cropping the same type social media posts images."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Please ignore this cell: extra install steps that are only executed when running the notebook on Google Colab\n",
|
||||
"# flake8-noqa-cell\n",
|
||||
"import os\n",
|
||||
"if 'google.colab' in str(get_ipython()):\n",
|
||||
" # we're running on colab\n",
|
||||
" # first install pinned version of setuptools (latest version doesn't seem to work with this package on colab)\n",
|
||||
" %pip install setuptools==61 -qqq\n",
|
||||
" # install the moralization package\n",
|
||||
" %pip install git+https://github.com/ssciwr/AMMICO.git -qqq\n",
|
||||
"\n",
|
||||
" # prevent loading of the wrong opencv library\n",
|
||||
" %pip uninstall -y opencv-contrib-python\n",
|
||||
" %pip install opencv-contrib-python\n",
|
||||
"\n",
|
||||
" from google.colab import drive\n",
|
||||
" drive.mount('/content/drive')\n",
|
||||
"\n",
|
||||
" if not os.path.isdir('/content/ref'):\n",
|
||||
" !wget https://github.com/ssciwr/AMMICO/archive/refs/heads/ref-data.zip -q\n",
|
||||
" !unzip -qq ref-data.zip -d . && mv -f AMMICO-ref-data/data/ref . && rm -rf AMMICO-ref-data ref-data.zip"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import ammico.cropposts as crpo\n",
|
||||
"import ammico.utils as utils\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import cv2\n",
|
||||
"import importlib_resources\n",
|
||||
"pkg = importlib_resources.files(\"ammico\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The cropping is carried out by finding reference images on the image to be cropped. If a reference matches a region on the image, then everything below the matched region is removed. Manually look at a reference and an example post with the code below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# load ref view for cropping the same type social media posts images.\n",
|
||||
"# substitute the below paths for your samples\n",
|
||||
"path_ref = pkg / \"data\" / \"ref\" / \"ref-00.png\"\n",
|
||||
"ref_view = cv2.imread(path_ref.as_posix())\n",
|
||||
"RGB_ref_view = cv2.cvtColor(ref_view, cv2.COLOR_BGR2RGB)\n",
|
||||
"plt.figure(figsize=(10, 15))\n",
|
||||
"plt.imshow(RGB_ref_view)\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"path_post = pkg / \"data\" / \"test-crop-image.png\"\n",
|
||||
"view = cv2.imread(path_post.as_posix())\n",
|
||||
"RGB_view = cv2.cvtColor(view, cv2.COLOR_BGR2RGB)\n",
|
||||
"plt.figure(figsize=(10, 15))\n",
|
||||
"plt.imshow(RGB_view)\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can now crop the image and check on the way that everything looks fine. `plt_match` will plot the matches on the image and below which line content will be cropped; `plt_crop` will plot the cropped text part of the social media post with the comments removed; `plt_image` will plot the image part of the social media post if applicable."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# crop a posts from reference view, check the cropping \n",
|
||||
"# this will only plot something if the reference is found on the image\n",
|
||||
"crop_view = crpo.crop_posts_from_refs(\n",
|
||||
" [ref_view], view, \n",
|
||||
" plt_match=True, plt_crop=True, plt_image=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Batch crop images from the image folder given in `crop_dir`. The cropped images will save in `save_crop_dir` folder with the same file name as the original file. The reference images with the items to match are provided in `ref_dir`.\n",
|
||||
"\n",
|
||||
"Sometimes the cropping will be imperfect, due to improper matches on the image. It is sometimes easier to first categorize the social media posts and then set different references in the reference folder `ref_dir`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"crop_dir = \"data/\"\n",
|
||||
"ref_dir = pkg / \"data\" / \"ref\" \n",
|
||||
"save_crop_dir = \"data/crop/\"\n",
|
||||
"\n",
|
||||
"files = utils.find_files(path=crop_dir,limit=10,)\n",
|
||||
"ref_files = utils.find_files(path=ref_dir.as_posix(), limit=100)\n",
|
||||
"\n",
|
||||
"crpo.crop_media_posts(files, ref_files, save_crop_dir, plt_match=True, plt_crop=False, plt_image=False)\n",
|
||||
"print(\"Batch cropping images done\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "10",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
Binary data: build/doctrees/notebooks/DemoNotebook_ammico.doctree (binary file not shown)
Binary data: build/doctrees/notebooks/Example cropposts.doctree (binary file not shown)
Binary data: build/doctrees/readme_link.doctree (binary file not shown)
Binary data: build/doctrees/set_up_credentials.doctree (binary file not shown)
@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 9487e2edbbf95a60cd8fdb622afe617f
# This file records the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 2d7d574ccf2e81bd903f57e595d58511
tags: 645f666f9bcd5a90fca523b33c5a78b7
@ -1,12 +1,5 @@
/*
 * basic.css
 * ~~~~~~~~~
 *
 * Sphinx stylesheet -- basic theme.
 *
 * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
 * :license: BSD, see LICENSE for details.
 *
 */

/* -- main layout ----------------------------------------------------------- */
@ -115,15 +108,11 @@ img {
/* -- search page ----------------------------------------------------------- */

ul.search {
    margin: 10px 0 0 20px;
    padding: 0;
    margin-top: 10px;
}

ul.search li {
    padding: 5px 0 5px 20px;
    background-image: url(file.png);
    background-repeat: no-repeat;
    background-position: 0 7px;
    padding: 5px 0;
}

ul.search li a {
@ -752,14 +741,6 @@ abbr, acronym {
    cursor: help;
}

.translated {
    background-color: rgba(207, 255, 207, 0.2)
}

.untranslated {
    background-color: rgba(255, 207, 207, 0.2)
}

/* -- code displays --------------------------------------------------------- */

pre {

@ -1,12 +1,5 @@
/*
 * doctools.js
 * ~~~~~~~~~~~
 *
 * Base JavaScript utilities for all Sphinx HTML documentation.
 *
 * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
 * :license: BSD, see LICENSE for details.
 *
 */
"use strict";

@ -1,13 +1,6 @@
/*
 * language_data.js
 * ~~~~~~~~~~~~~~~~
 *
 * This script contains the language-specific data used by searchtools.js,
 * namely the list of stopwords, stemmer, scorer and splitter.
 *
 * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
 * :license: BSD, see LICENSE for details.
 *
 */

var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];

@ -1,12 +1,5 @@
/*
 * searchtools.js
 * ~~~~~~~~~~~~~~~~
 *
 * Sphinx JavaScript utilities for the full-text search.
 *
 * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
 * :license: BSD, see LICENSE for details.
 *
 */
"use strict";

@ -20,7 +13,7 @@ if (typeof Scorer === "undefined") {
  // and returns the new score.
  /*
  score: result => {
    const [docname, title, anchor, descr, score, filename] = result
    const [docname, title, anchor, descr, score, filename, kind] = result
    return score
  },
  */
@ -47,6 +40,14 @@
  };
}

// Global search result kind enum, used by themes to style search results.
class SearchResultKind {
  static get index() { return "index"; }
  static get object() { return "object"; }
  static get text() { return "text"; }
  static get title() { return "title"; }
}

const _removeChildren = (element) => {
  while (element && element.lastChild) element.removeChild(element.lastChild);
};
@ -64,9 +65,13 @@ const _displayItem = (item, searchTerms, highlightTerms) => {
  const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY;
  const contentRoot = document.documentElement.dataset.content_root;

  const [docName, title, anchor, descr, score, _filename] = item;
  const [docName, title, anchor, descr, score, _filename, kind] = item;

  let listItem = document.createElement("li");
  // Add a class representing the item's type:
  // can be used by a theme's CSS selector for styling
  // See SearchResultKind for the class names.
  listItem.classList.add(`kind-${kind}`);
  let requestUrl;
  let linkUrl;
  if (docBuilder === "dirhtml") {
@ -115,8 +120,10 @@ const _finishSearch = (resultCount) => {
      "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories."
    );
  else
    Search.status.innerText = _(
      "Search finished, found ${resultCount} page(s) matching the search query."
    Search.status.innerText = Documentation.ngettext(
      "Search finished, found one page matching the search query.",
      "Search finished, found ${resultCount} pages matching the search query.",
      resultCount,
    ).replace('${resultCount}', resultCount);
};
const _displayNextItem = (
@ -138,7 +145,7 @@ const _displayNextItem = (
  else _finishSearch(resultCount);
};
// Helper function used by query() to order search results.
// Each input is an array of [docname, title, anchor, descr, score, filename].
// Each input is an array of [docname, title, anchor, descr, score, filename, kind].
// Order the results by score (in opposite order of appearance, since the
// `_displayNextItem` function uses pop() to retrieve items) and then alphabetically.
|
||||
const _orderResultsByScoreThenName = (a, b) => {
|
||||
@ -248,6 +255,7 @@ const Search = {
|
||||
searchSummary.classList.add("search-summary");
|
||||
searchSummary.innerText = "";
|
||||
const searchList = document.createElement("ul");
|
||||
searchList.setAttribute("role", "list");
|
||||
searchList.classList.add("search");
|
||||
|
||||
const out = document.getElementById("search-results");
|
||||
@ -318,7 +326,7 @@ const Search = {
|
||||
const indexEntries = Search._index.indexentries;
|
||||
|
||||
// Collect multiple result groups to be sorted separately and then ordered.
|
||||
// Each is an array of [docname, title, anchor, descr, score, filename].
|
||||
// Each is an array of [docname, title, anchor, descr, score, filename, kind].
|
||||
const normalResults = [];
|
||||
const nonMainIndexResults = [];
|
||||
|
||||
@ -337,6 +345,7 @@ const Search = {
|
||||
null,
|
||||
score + boost,
|
||||
filenames[file],
|
||||
SearchResultKind.title,
|
||||
]);
|
||||
}
|
||||
}
|
||||
@ -354,6 +363,7 @@ const Search = {
|
||||
null,
|
||||
score,
|
||||
filenames[file],
|
||||
SearchResultKind.index,
|
||||
];
|
||||
if (isMain) {
|
||||
normalResults.push(result);
|
||||
@ -475,6 +485,7 @@ const Search = {
|
||||
descr,
|
||||
score,
|
||||
filenames[match[0]],
|
||||
SearchResultKind.object,
|
||||
]);
|
||||
};
|
||||
Object.keys(objects).forEach((prefix) =>
|
||||
@ -502,9 +513,11 @@ const Search = {
|
||||
// perform the search on the required terms
|
||||
searchTerms.forEach((word) => {
|
||||
const files = [];
|
||||
// find documents, if any, containing the query word in their text/title term indices
|
||||
// use Object.hasOwnProperty to avoid mismatching against prototype properties
|
||||
const arr = [
|
||||
{ files: terms[word], score: Scorer.term },
|
||||
{ files: titleTerms[word], score: Scorer.title },
|
||||
{ files: terms.hasOwnProperty(word) ? terms[word] : undefined, score: Scorer.term },
|
||||
{ files: titleTerms.hasOwnProperty(word) ? titleTerms[word] : undefined, score: Scorer.title },
|
||||
];
|
||||
// add support for partial matches
|
||||
if (word.length > 2) {
|
||||
@ -536,8 +549,9 @@ const Search = {
|
||||
|
||||
// set score for the word in each file
|
||||
recordFiles.forEach((file) => {
|
||||
if (!scoreMap.has(file)) scoreMap.set(file, {});
|
||||
scoreMap.get(file)[word] = record.score;
|
||||
if (!scoreMap.has(file)) scoreMap.set(file, new Map());
|
||||
const fileScores = scoreMap.get(file);
|
||||
fileScores.set(word, record.score);
|
||||
});
|
||||
});
|
||||
|
||||
@ -576,7 +590,7 @@ const Search = {
|
||||
break;
|
||||
|
||||
// select one (max) score for the file.
|
||||
const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w]));
|
||||
const score = Math.max(...wordList.map((w) => scoreMap.get(file).get(w)));
|
||||
// add result to the result list
|
||||
results.push([
|
||||
docNames[file],
|
||||
@ -585,6 +599,7 @@ const Search = {
|
||||
null,
|
||||
score,
|
||||
filenames[file],
|
||||
SearchResultKind.text,
|
||||
]);
|
||||
}
|
||||
return results;
|
||||
|
||||
@ -14,7 +14,7 @@
|
||||
<script src="_static/jquery.js?v=5d32c60e"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js?v=2cd50e6c"></script>
|
||||
<script src="_static/documentation_options.js?v=000c92bf"></script>
|
||||
<script src="_static/doctools.js?v=9a2dae69"></script>
|
||||
<script src="_static/doctools.js?v=9bcbadda"></script>
|
||||
<script src="_static/sphinx_highlight.js?v=dc90522c"></script>
|
||||
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
@ -52,35 +52,24 @@
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-0:-Create-and-set-a-Google-Cloud-Vision-Key">Step 0: Create and set a Google Cloud Vision Key</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-1:-Read-your-data-into-AMMICO">Step 1: Read your data into AMMICO</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#The-detector-modules">The detector modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/Example%20cropposts.html">Crop posts module</a></li>
|
||||
<li class="toctree-l1 current"><a class="reference internal" href="modules.html">AMMICO package modules</a><ul class="current">
|
||||
<li class="toctree-l2 current"><a class="current reference internal" href="#">text module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="#module-summary">summary module</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#summary.SummaryDetector"><code class="docutils literal notranslate"><span class="pre">SummaryDetector</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.all_allowed_model_types"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.all_allowed_model_types</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.allowed_analysis_types"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.allowed_analysis_types</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.allowed_model_types"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.allowed_model_types</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.allowed_new_model_types"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.allowed_new_model_types</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.analyse_image"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.analyse_image()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.analyse_questions"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.analyse_questions()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.analyse_summary"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.analyse_summary()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.check_model"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.check_model()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.load_model"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.load_model_base"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_base()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.load_model_base_blip2_opt_caption_coco_opt67b"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_base_blip2_opt_caption_coco_opt67b()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.load_model_base_blip2_opt_pretrain_opt67b"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_base_blip2_opt_pretrain_opt67b()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.load_model_blip2_opt_caption_coco_opt27b"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_blip2_opt_caption_coco_opt27b()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.load_model_blip2_opt_pretrain_opt27b"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_blip2_opt_pretrain_opt27b()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.load_model_blip2_t5_caption_coco_flant5xl"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_blip2_t5_caption_coco_flant5xl()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.load_model_blip2_t5_pretrain_flant5xl"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_blip2_t5_pretrain_flant5xl()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.load_model_blip2_t5_pretrain_flant5xxl"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_blip2_t5_pretrain_flant5xxl()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.load_model_large"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_large()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.load_new_model"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_new_model()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#summary.SummaryDetector.load_vqa_model"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_vqa_model()</span></code></a></li>
|
||||
<li class="toctree-l2 current"><a class="current reference internal" href="#">text module</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#text.TextAnalyzer"><code class="docutils literal notranslate"><span class="pre">TextAnalyzer</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#text.TextAnalyzer.read_csv"><code class="docutils literal notranslate"><span class="pre">TextAnalyzer.read_csv()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#text.TextDetector"><code class="docutils literal notranslate"><span class="pre">TextDetector</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#text.TextDetector.analyse_image"><code class="docutils literal notranslate"><span class="pre">TextDetector.analyse_image()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#text.TextDetector.get_text_from_image"><code class="docutils literal notranslate"><span class="pre">TextDetector.get_text_from_image()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#text.TextDetector.remove_linebreaks"><code class="docutils literal notranslate"><span class="pre">TextDetector.remove_linebreaks()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#text.TextDetector.set_keys"><code class="docutils literal notranslate"><span class="pre">TextDetector.set_keys()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#text.TextDetector.translate_text"><code class="docutils literal notranslate"><span class="pre">TextDetector.translate_text()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#text.privacy_disclosure"><code class="docutils literal notranslate"><span class="pre">privacy_disclosure()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="#summary-module">summary module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="#multimodal-search-module">multimodal search module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="#module-faces">faces module</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#faces.EmotionDetector"><code class="docutils literal notranslate"><span class="pre">EmotionDetector</span></code></a><ul>
|
||||
@ -105,20 +94,37 @@
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="#module-cropposts">cropposts module</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#cropposts.compute_crop_corner"><code class="docutils literal notranslate"><span class="pre">compute_crop_corner()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#cropposts.crop_image_from_post"><code class="docutils literal notranslate"><span class="pre">crop_image_from_post()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#cropposts.crop_media_posts"><code class="docutils literal notranslate"><span class="pre">crop_media_posts()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#cropposts.crop_posts_from_refs"><code class="docutils literal notranslate"><span class="pre">crop_posts_from_refs()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#cropposts.crop_posts_image"><code class="docutils literal notranslate"><span class="pre">crop_posts_image()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#cropposts.draw_matches"><code class="docutils literal notranslate"><span class="pre">draw_matches()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#cropposts.kp_from_matches"><code class="docutils literal notranslate"><span class="pre">kp_from_matches()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#cropposts.matching_points"><code class="docutils literal notranslate"><span class="pre">matching_points()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#cropposts.paste_image_and_comment"><code class="docutils literal notranslate"><span class="pre">paste_image_and_comment()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="#cropposts-module">cropposts module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="#module-utils">utils module</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#utils.AnalysisMethod"><code class="docutils literal notranslate"><span class="pre">AnalysisMethod</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#utils.AnalysisMethod.analyse_image"><code class="docutils literal notranslate"><span class="pre">AnalysisMethod.analyse_image()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#utils.AnalysisMethod.set_keys"><code class="docutils literal notranslate"><span class="pre">AnalysisMethod.set_keys()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#utils.DownloadResource"><code class="docutils literal notranslate"><span class="pre">DownloadResource</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#utils.DownloadResource.get"><code class="docutils literal notranslate"><span class="pre">DownloadResource.get()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#utils.DownloadResource.resources"><code class="docutils literal notranslate"><span class="pre">DownloadResource.resources</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#utils.ammico_prefetch_models"><code class="docutils literal notranslate"><span class="pre">ammico_prefetch_models()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#utils.append_data_to_dict"><code class="docutils literal notranslate"><span class="pre">append_data_to_dict()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#utils.dump_df"><code class="docutils literal notranslate"><span class="pre">dump_df()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#utils.find_files"><code class="docutils literal notranslate"><span class="pre">find_files()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#utils.get_color_table"><code class="docutils literal notranslate"><span class="pre">get_color_table()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#utils.get_dataframe"><code class="docutils literal notranslate"><span class="pre">get_dataframe()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#utils.initialize_dict"><code class="docutils literal notranslate"><span class="pre">initialize_dict()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#utils.is_interactive"><code class="docutils literal notranslate"><span class="pre">is_interactive()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#utils.iterable"><code class="docutils literal notranslate"><span class="pre">iterable()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="#module-display">display module</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="#display.AnalysisExplorer"><code class="docutils literal notranslate"><span class="pre">AnalysisExplorer</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#display.AnalysisExplorer.run_server"><code class="docutils literal notranslate"><span class="pre">AnalysisExplorer.run_server()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="#display.AnalysisExplorer.update_picture"><code class="docutils literal notranslate"><span class="pre">AnalysisExplorer.update_picture()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="#utils-module">utils module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="#display-module">display module</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="license_link.html">License</a></li>
|
||||
@ -149,297 +155,86 @@
|
||||
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
|
||||
<div itemprop="articleBody">
|
||||
|
||||
<section id="text-module">
|
||||
<h1>text module<a class="headerlink" href="#text-module" title="Link to this heading"></a></h1>
|
||||
</section>
|
||||
<section id="module-summary">
|
||||
<span id="summary-module"></span><h1>summary module<a class="headerlink" href="#module-summary" title="Link to this heading"></a></h1>
|
||||
<section id="module-text">
|
||||
<span id="text-module"></span><h1>text module<a class="headerlink" href="#module-text" title="Link to this heading"></a></h1>
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">summary.</span></span><span class="sig-name descname"><span class="pre">SummaryDetector</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">subdict</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">dict</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">{}</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">model_type</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">'base'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">analysis_type</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">'summary_and_questions'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">list_of_questions</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">list</span><span class="p"><span class="pre">[</span></span><span class="pre">str</span><span class="p"><span class="pre">]</span></span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">summary_model</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">summary_vis_processors</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">summary_vqa_model</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">summary_vqa_vis_processors</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">summary_vqa_txt_processors</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">summary_vqa_model_new</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">summary_vqa_vis_processors_new</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span 
class="n"><span class="pre">summary_vqa_txt_processors_new</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">device_type</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector" title="Link to this definition"></a></dt>
|
||||
<dt class="sig sig-object py" id="text.TextAnalyzer">
|
||||
<em class="property"><span class="k"><span class="pre">class</span></span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">text.</span></span><span class="sig-name descname"><span class="pre">TextAnalyzer</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">csv_path</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">column_key</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">csv_encoding</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">'utf-8'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#text.TextAnalyzer" title="Link to this definition"></a></dt>
|
||||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
|
||||
<p>Used to get text from a csv and then run the TextDetector on it.</p>
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="text.TextAnalyzer.read_csv">
|
||||
<span class="sig-name descname"><span class="pre">read_csv</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">dict</span></span></span><a class="headerlink" href="#text.TextAnalyzer.read_csv" title="Link to this definition"></a></dt>
|
||||
<dd><p>Read the CSV file and return the dictionary with the text entries.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>dict</strong> – The dictionary with the text entries.</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="text.TextDetector">
|
||||
<em class="property"><span class="k"><span class="pre">class</span></span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">text.</span></span><span class="sig-name descname"><span class="pre">TextDetector</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">subdict</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">dict</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">analyse_text</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">skip_extraction</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">accept_privacy</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">'PRIVACY_AMMICO'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#text.TextDetector" title="Link to this definition"></a></dt>
|
||||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">AnalysisMethod</span></code></p>
|
||||
<dl class="py attribute">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.all_allowed_model_types">
|
||||
<span class="sig-name descname"><span class="pre">all_allowed_model_types</span></span><em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="pre">['base',</span> <span class="pre">'large',</span> <span class="pre">'vqa',</span> <span class="pre">'blip2_t5_pretrain_flant5xxl',</span> <span class="pre">'blip2_t5_pretrain_flant5xl',</span> <span class="pre">'blip2_t5_caption_coco_flant5xl',</span> <span class="pre">'blip2_opt_pretrain_opt2.7b',</span> <span class="pre">'blip2_opt_pretrain_opt6.7b',</span> <span class="pre">'blip2_opt_caption_coco_opt2.7b',</span> <span class="pre">'blip2_opt_caption_coco_opt6.7b']</span></em><a class="headerlink" href="#summary.SummaryDetector.all_allowed_model_types" title="Link to this definition"></a></dt>
|
||||
<dd></dd></dl>
|
||||
|
||||
<dl class="py attribute">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.allowed_analysis_types">
|
||||
<span class="sig-name descname"><span class="pre">allowed_analysis_types</span></span><em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="pre">['summary',</span> <span class="pre">'questions',</span> <span class="pre">'summary_and_questions']</span></em><a class="headerlink" href="#summary.SummaryDetector.allowed_analysis_types" title="Link to this definition"></a></dt>
|
||||
<dd></dd></dl>
|
||||
|
||||
<dl class="py attribute">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.allowed_model_types">
|
||||
<span class="sig-name descname"><span class="pre">allowed_model_types</span></span><em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="pre">['base',</span> <span class="pre">'large',</span> <span class="pre">'vqa']</span></em><a class="headerlink" href="#summary.SummaryDetector.allowed_model_types" title="Link to this definition"></a></dt>
|
||||
<dd></dd></dl>
|
||||
|
||||
<dl class="py attribute">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.allowed_new_model_types">
|
||||
<span class="sig-name descname"><span class="pre">allowed_new_model_types</span></span><em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="pre">['blip2_t5_pretrain_flant5xxl',</span> <span class="pre">'blip2_t5_pretrain_flant5xl',</span> <span class="pre">'blip2_t5_caption_coco_flant5xl',</span> <span class="pre">'blip2_opt_pretrain_opt2.7b',</span> <span class="pre">'blip2_opt_pretrain_opt6.7b',</span> <span class="pre">'blip2_opt_caption_coco_opt2.7b',</span> <span class="pre">'blip2_opt_caption_coco_opt6.7b']</span></em><a class="headerlink" href="#summary.SummaryDetector.allowed_new_model_types" title="Link to this definition"></a></dt>
|
||||
<dd></dd></dl>
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="text.TextDetector.analyse_image">
|
||||
<span class="sig-name descname"><span class="pre">analyse_image</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">dict</span></span></span><a class="headerlink" href="#text.TextDetector.analyse_image" title="Link to this definition"></a></dt>
|
||||
<dd><p>Perform text extraction and analysis of the text.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>dict</strong> – The updated dictionary with text analysis results.</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.analyse_image">
|
||||
<span class="sig-name descname"><span class="pre">analyse_image</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">subdict</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">dict</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">analysis_type</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">list_of_questions</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">list</span><span class="p"><span class="pre">[</span></span><span class="pre">str</span><span class="p"><span class="pre">]</span></span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">consequential_questions</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.analyse_image" title="Link to this definition"></a></dt>
|
||||
<dd><p>Analyse image with blip_caption model.</p>
|
||||
<dt class="sig sig-object py" id="text.TextDetector.get_text_from_image">
|
||||
<span class="sig-name descname"><span class="pre">get_text_from_image</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#text.TextDetector.get_text_from_image" title="Link to this definition"></a></dt>
|
||||
<dd><p>Detect text on the image using Google Cloud Vision API.</p>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="text.TextDetector.remove_linebreaks">
|
||||
<span class="sig-name descname"><span class="pre">remove_linebreaks</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#text.TextDetector.remove_linebreaks" title="Link to this definition"></a></dt>
|
||||
<dd><p>Remove linebreaks from original and translated text.</p>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="text.TextDetector.set_keys">
|
||||
<span class="sig-name descname"><span class="pre">set_keys</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">dict</span></span></span><a class="headerlink" href="#text.TextDetector.set_keys" title="Link to this definition"></a></dt>
|
||||
<dd><p>Set the default keys for text analysis.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>dict</strong> – The dictionary with default text keys.</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="text.TextDetector.translate_text">
|
||||
<span class="sig-name descname"><span class="pre">translate_text</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#text.TextDetector.translate_text" title="Link to this definition"></a></dt>
|
||||
<dd><p>Translate the detected text to English using the Translator object.</p>
|
||||
</dd></dl>
|
||||
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="text.privacy_disclosure">
|
||||
<span class="sig-prename descclassname"><span class="pre">text.</span></span><span class="sig-name descname"><span class="pre">privacy_disclosure</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">accept_privacy</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">'PRIVACY_AMMICO'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#text.privacy_disclosure" title="Link to this definition"></a></dt>
|
||||
<dd><p>Asks the user to accept the privacy statement.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>analysis_type</strong> (<em>str</em>) – type of the analysis.</p></li>
|
||||
<li><p><strong>subdict</strong> (<em>dict</em>) – dictionary with the pictures to be analysed.</p></li>
|
||||
<li><p><strong>list_of_questions</strong> (<em>list</em><em>[</em><em>str</em><em>]</em>) – list of questions.</p></li>
|
||||
<li><p><strong>consequential_questions</strong> (<em>bool</em>) – whether to ask consequential questions. Works only for new BLIP2 models.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p><strong>self.subdict</strong> (<em>dict</em>) – dictionary with analysis results.</p>
|
||||
<dd class="field-odd"><p><strong>accept_privacy</strong> (<em>str</em>) – The name of the disclosure variable (default: “PRIVACY_AMMICO”).</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.analyse_questions">
|
||||
<span class="sig-name descname"><span class="pre">analyse_questions</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">list_of_questions</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">list</span><span class="p"><span class="pre">[</span></span><span class="pre">str</span><span class="p"><span class="pre">]</span></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">consequential_questions</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">dict</span></span></span><a class="headerlink" href="#summary.SummaryDetector.analyse_questions" title="Link to this definition"></a></dt>
|
||||
<dd><p>Generate answers to free-form questions about image written in natural language.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>list_of_questions</strong> (<em>list</em><em>[</em><em>str</em><em>]</em>) – list of questions.</p></li>
|
||||
<li><p><strong>consequential_questions</strong> (<em>bool</em>) – whether to ask consequential questions. Works only for new BLIP2 models.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p><strong>self.subdict</strong> (<em>dict</em>) – dictionary with answers to questions.</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.analyse_summary">
|
||||
<span class="sig-name descname"><span class="pre">analyse_summary</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">nondeterministic_summaries</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">True</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.analyse_summary" title="Link to this definition"></a></dt>
|
||||
<dd><p>Create 1 constant and 3 non deterministic captions for image.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>nondeterministic_summaries</strong> (<em>bool</em>) – whether to create 3 non deterministic captions.</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p><strong>self.subdict</strong> (<em>dict</em>) – dictionary with analysis results.</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.check_model">
|
||||
<span class="sig-name descname"><span class="pre">check_model</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.check_model" title="Link to this definition"></a></dt>
|
||||
<dd><p>Check model type and return appropriate model and preprocessors.</p>
|
||||
<p>Args:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>model</strong> (<em>nn.Module</em>) – model.</p></li>
|
||||
<li><p><strong>vis_processors</strong> (<em>dict</em>) – visual preprocessor.</p></li>
|
||||
<li><p><strong>txt_processors</strong> (<em>dict</em>) – text preprocessor.</p></li>
|
||||
<li><p><strong>model_old</strong> (<em>bool</em>) – whether model is old or new.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.load_model">
|
||||
<span class="sig-name descname"><span class="pre">load_model</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">model_type</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.load_model" title="Link to this definition"></a></dt>
|
||||
<dd><p>Load blip_caption model and preprocessors for visual inputs from lavis.models.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>model_type</strong> (<em>str</em>) – type of the model.</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><ul class="simple">
|
||||
<li><p><strong>summary_model</strong> (<em>torch.nn.Module</em>) – model.</p></li>
|
||||
<li><p><strong>summary_vis_processors</strong> (<em>dict</em>) – preprocessors for visual inputs.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.load_model_base">
|
||||
<span class="sig-name descname"><span class="pre">load_model_base</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.load_model_base" title="Link to this definition"></a></dt>
|
||||
<dd><p>Load base_coco blip_caption model and preprocessors for visual inputs from lavis.models.</p>
|
||||
<p>Args:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>summary_model</strong> (<em>torch.nn.Module</em>) – model.</p></li>
|
||||
<li><p><strong>summary_vis_processors</strong> (<em>dict</em>) – preprocessors for visual inputs.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.load_model_base_blip2_opt_caption_coco_opt67b">
|
||||
<span class="sig-name descname"><span class="pre">load_model_base_blip2_opt_caption_coco_opt67b</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.load_model_base_blip2_opt_caption_coco_opt67b" title="Link to this definition"></a></dt>
|
||||
<dd><p>Load BLIP2 model with caption_coco_opt6.7b architecture.</p>
|
||||
<p>Args:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>model</strong> (<em>torch.nn.Module</em>) – model.</p></li>
|
||||
<li><p><strong>vis_processors</strong> (<em>dict</em>) – preprocessors for visual inputs.</p></li>
|
||||
<li><p><strong>txt_processors</strong> (<em>dict</em>) – preprocessors for text inputs.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.load_model_base_blip2_opt_pretrain_opt67b">
|
||||
<span class="sig-name descname"><span class="pre">load_model_base_blip2_opt_pretrain_opt67b</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.load_model_base_blip2_opt_pretrain_opt67b" title="Link to this definition"></a></dt>
|
||||
<dd><p>Load BLIP2 model with pretrain_opt6.7b architecture.</p>
|
||||
<p>Args:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>model</strong> (<em>torch.nn.Module</em>) – model.</p></li>
|
||||
<li><p><strong>vis_processors</strong> (<em>dict</em>) – preprocessors for visual inputs.</p></li>
|
||||
<li><p><strong>txt_processors</strong> (<em>dict</em>) – preprocessors for text inputs.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.load_model_blip2_opt_caption_coco_opt27b">
|
||||
<span class="sig-name descname"><span class="pre">load_model_blip2_opt_caption_coco_opt27b</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.load_model_blip2_opt_caption_coco_opt27b" title="Link to this definition"></a></dt>
|
||||
<dd><p>Load BLIP2 model with caption_coco_opt2.7b architecture.</p>
|
||||
<p>Args:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>model</strong> (<em>torch.nn.Module</em>) – model.</p></li>
|
||||
<li><p><strong>vis_processors</strong> (<em>dict</em>) – preprocessors for visual inputs.</p></li>
|
||||
<li><p><strong>txt_processors</strong> (<em>dict</em>) – preprocessors for text inputs.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.load_model_blip2_opt_pretrain_opt27b">
|
||||
<span class="sig-name descname"><span class="pre">load_model_blip2_opt_pretrain_opt27b</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.load_model_blip2_opt_pretrain_opt27b" title="Link to this definition"></a></dt>
|
||||
<dd><p>Load BLIP2 model with pretrain_opt2 architecture.</p>
|
||||
<p>Args:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>model</strong> (<em>torch.nn.Module</em>) – model.</p></li>
|
||||
<li><p><strong>vis_processors</strong> (<em>dict</em>) – preprocessors for visual inputs.</p></li>
|
||||
<li><p><strong>txt_processors</strong> (<em>dict</em>) – preprocessors for text inputs.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.load_model_blip2_t5_caption_coco_flant5xl">
|
||||
<span class="sig-name descname"><span class="pre">load_model_blip2_t5_caption_coco_flant5xl</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.load_model_blip2_t5_caption_coco_flant5xl" title="Link to this definition"></a></dt>
|
||||
<dd><p>Load BLIP2 model with caption_coco_flant5xl architecture.</p>
|
||||
<p>Args:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>model</strong> (<em>torch.nn.Module</em>) – model.</p></li>
|
||||
<li><p><strong>vis_processors</strong> (<em>dict</em>) – preprocessors for visual inputs.</p></li>
|
||||
<li><p><strong>txt_processors</strong> (<em>dict</em>) – preprocessors for text inputs.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.load_model_blip2_t5_pretrain_flant5xl">
|
||||
<span class="sig-name descname"><span class="pre">load_model_blip2_t5_pretrain_flant5xl</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.load_model_blip2_t5_pretrain_flant5xl" title="Link to this definition"></a></dt>
|
||||
<dd><p>Load BLIP2 model with FLAN-T5 XL architecture.</p>
|
||||
<p>Args:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>model</strong> (<em>torch.nn.Module</em>) – model.</p></li>
|
||||
<li><p><strong>vis_processors</strong> (<em>dict</em>) – preprocessors for visual inputs.</p></li>
|
||||
<li><p><strong>txt_processors</strong> (<em>dict</em>) – preprocessors for text inputs.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.load_model_blip2_t5_pretrain_flant5xxl">
|
||||
<span class="sig-name descname"><span class="pre">load_model_blip2_t5_pretrain_flant5xxl</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.load_model_blip2_t5_pretrain_flant5xxl" title="Link to this definition"></a></dt>
|
||||
<dd><p>Load BLIP2 model with FLAN-T5 XXL architecture.</p>
|
||||
<p>Args:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>model</strong> (<em>torch.nn.Module</em>) – model.</p></li>
|
||||
<li><p><strong>vis_processors</strong> (<em>dict</em>) – preprocessors for visual inputs.</p></li>
|
||||
<li><p><strong>txt_processors</strong> (<em>dict</em>) – preprocessors for text inputs.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.load_model_large">
|
||||
<span class="sig-name descname"><span class="pre">load_model_large</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.load_model_large" title="Link to this definition"></a></dt>
|
||||
<dd><p>Load large_coco blip_caption model and preprocessors for visual inputs from lavis.models.</p>
|
||||
<p>Args:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>summary_model</strong> (<em>torch.nn.Module</em>) – model.</p></li>
|
||||
<li><p><strong>summary_vis_processors</strong> (<em>dict</em>) – preprocessors for visual inputs.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.load_new_model">
|
||||
<span class="sig-name descname"><span class="pre">load_new_model</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">model_type</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.load_new_model" title="Link to this definition"></a></dt>
|
||||
<dd><p>Load new BLIP2 models.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>model_type</strong> (<em>str</em>) – type of the model.</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><ul class="simple">
|
||||
<li><p><strong>model</strong> (<em>torch.nn.Module</em>) – model.</p></li>
|
||||
<li><p><strong>vis_processors</strong> (<em>dict</em>) – preprocessors for visual inputs.</p></li>
|
||||
<li><p><strong>txt_processors</strong> (<em>dict</em>) – preprocessors for text inputs.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
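<p>A minimal sketch, under the assumption that the model_type string matches one of the loader names listed above; the constructor arguments are placeholders.</p>
<div class="highlight"><pre>
# Hypothetical sketch: select a BLIP2 model by name instead of calling a specific loader.
import ammico

detector = ammico.SummaryDetector(subdict={})  # placeholder constructor arguments
model, vis_processors, txt_processors = detector.load_new_model(
    "blip2_t5_pretrain_flant5xl"  # assumed model_type string, derived from the loader names above
)
</pre></div>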
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="summary.SummaryDetector.load_vqa_model">
|
||||
<span class="sig-name descname"><span class="pre">load_vqa_model</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#summary.SummaryDetector.load_vqa_model" title="Link to this definition"></a></dt>
|
||||
<dd><p>Load blip_vqa model and preprocessors for visual and text inputs from lavis.models.</p>
|
||||
<p>Args:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>summary_vqa_model</strong> (<em>torch.nn.Module</em>) – model.</p></li>
|
||||
<li><p><strong>summary_vqa_vis_processors</strong> (<em>dict</em>) – preprocessors for visual inputs.</p></li>
|
||||
<li><p><strong>summary_vqa_txt_processors</strong> (<em>dict</em>) – preprocessors for text inputs.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
</dd></dl>
</section>
|
||||
<section id="summary-module">
|
||||
<h1>summary module<a class="headerlink" href="#summary-module" title="Link to this heading"></a></h1>
|
||||
</section>
|
||||
<section id="multimodal-search-module">
|
||||
<h1>multimodal search module<a class="headerlink" href="#multimodal-search-module" title="Link to this heading"></a></h1>
|
||||
@ -448,7 +243,7 @@
|
||||
<span id="faces-module"></span><h1>faces module<a class="headerlink" href="#module-faces" title="Link to this heading"></a></h1>
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="faces.EmotionDetector">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">faces.</span></span><span class="sig-name descname"><span class="pre">EmotionDetector</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">subdict</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">dict</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">emotion_threshold</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">float</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">50.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">race_threshold</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">float</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">50.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">gender_threshold</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">float</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">50.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">accept_disclosure</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">'DISCLOSURE_AMMICO'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#faces.EmotionDetector" title="Link to this definition"></a></dt>
|
||||
<em class="property"><span class="k"><span class="pre">class</span></span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">faces.</span></span><span class="sig-name descname"><span class="pre">EmotionDetector</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">subdict</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">dict</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">emotion_threshold</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">float</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">50.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">race_threshold</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">float</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">50.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">gender_threshold</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">float</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">50.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">accept_disclosure</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">'DISCLOSURE_AMMICO'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#faces.EmotionDetector" title="Link to this definition"></a></dt>
|
||||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">AnalysisMethod</span></code></p>
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="faces.EmotionDetector.analyse_image">
|
||||
@ -548,7 +343,7 @@
|
||||
<span id="color-analysis-module"></span><h1>color_analysis module<a class="headerlink" href="#module-colors" title="Link to this heading"></a></h1>
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="colors.ColorDetector">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">colors.</span></span><span class="sig-name descname"><span class="pre">ColorDetector</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">subdict</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">dict</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">delta_e_method</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">'CIE</span> <span class="pre">1976'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#colors.ColorDetector" title="Link to this definition"></a></dt>
|
||||
<em class="property"><span class="k"><span class="pre">class</span></span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">colors.</span></span><span class="sig-name descname"><span class="pre">ColorDetector</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">subdict</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">dict</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">delta_e_method</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">'CIE</span> <span class="pre">1976'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#colors.ColorDetector" title="Link to this definition"></a></dt>
|
||||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">AnalysisMethod</span></code></p>
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="colors.ColorDetector.analyse_image">
|
||||
@ -593,190 +388,164 @@ These colors are: “red”, “green”, “blue”, “yellow”,”cyan”,
|
||||
</dd></dl>
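<p>A hedged usage sketch for ColorDetector: the nested dictionary layout, the "filename" key and the module path are assumptions for illustration; the delta_e_method value is the documented default.</p>
<div class="highlight"><pre>
# Hypothetical sketch: run the colour analysis on each entry of an image dictionary.
import ammico

image_dict = {"img0": {"filename": "data/img0.png"}}  # placeholder structure and path
for key in image_dict:
    image_dict[key] = ammico.colors.ColorDetector(
        image_dict[key], delta_e_method="CIE 1976"  # default from the signature above
    ).analyse_image()
</pre></div>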
</section>
|
||||
<section id="module-cropposts">
|
||||
<span id="cropposts-module"></span><h1>cropposts module<a class="headerlink" href="#module-cropposts" title="Link to this heading"></a></h1>
|
||||
<section id="cropposts-module">
|
||||
<h1>cropposts module<a class="headerlink" href="#cropposts-module" title="Link to this heading"></a></h1>
|
||||
</section>
|
||||
<section id="module-utils">
|
||||
<span id="utils-module"></span><h1>utils module<a class="headerlink" href="#module-utils" title="Link to this heading"></a></h1>
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="utils.AnalysisMethod">
|
||||
<em class="property"><span class="k"><span class="pre">class</span></span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">utils.</span></span><span class="sig-name descname"><span class="pre">AnalysisMethod</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">subdict</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">dict</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#utils.AnalysisMethod" title="Link to this definition"></a></dt>
|
||||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
|
||||
<p>Base class to be inherited by all analysis methods.</p>
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="utils.AnalysisMethod.analyse_image">
|
||||
<span class="sig-name descname"><span class="pre">analyse_image</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#utils.AnalysisMethod.analyse_image" title="Link to this definition"></a></dt>
|
||||
<dd></dd></dl>
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="utils.AnalysisMethod.set_keys">
|
||||
<span class="sig-name descname"><span class="pre">set_keys</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#utils.AnalysisMethod.set_keys" title="Link to this definition"></a></dt>
|
||||
<dd></dd></dl>
</dd></dl>
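<p>As a non-authoritative sketch, a new analysis method could follow the same pattern by subclassing AnalysisMethod; the import path, the self.subdict attribute and the "text_english" key are assumptions.</p>
<div class="highlight"><pre>
# Hypothetical sketch of a custom detector built on the AnalysisMethod base class.
from ammico.utils import AnalysisMethod  # import path is an assumption


class WordCountDetector(AnalysisMethod):
    """Toy example: count the words produced by an earlier text analysis step."""

    def set_keys(self) -> dict:
        return {"word_count": 0}

    def analyse_image(self) -> dict:
        text = self.subdict.get("text_english", "")  # attribute and key names are assumptions
        self.subdict["word_count"] = len(text.split())
        return self.subdict
</pre></div>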
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="utils.DownloadResource">
|
||||
<em class="property"><span class="k"><span class="pre">class</span></span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">utils.</span></span><span class="sig-name descname"><span class="pre">DownloadResource</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#utils.DownloadResource" title="Link to this definition"></a></dt>
|
||||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
|
||||
<p>A remote resource that needs on demand downloading.</p>
|
||||
<p>We use this as a wrapper to the pooch library. The wrapper registers
|
||||
each data file and allows prefetching through the CLI entry point
|
||||
ammico_prefetch_models.</p>
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="utils.DownloadResource.get">
|
||||
<span class="sig-name descname"><span class="pre">get</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#utils.DownloadResource.get" title="Link to this definition"></a></dt>
|
||||
<dd></dd></dl>
<dl class="py attribute">
|
||||
<dt class="sig sig-object py" id="utils.DownloadResource.resources">
|
||||
<span class="sig-name descname"><span class="pre">resources</span></span><em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="pre">[]</span></em><a class="headerlink" href="#utils.DownloadResource.resources" title="Link to this definition"></a></dt>
|
||||
<dd></dd></dl>
</dd></dl>
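<p>A speculative sketch of how a remote file might be registered for prefetching; the keyword arguments shown (url, known_hash) are assumptions borrowed from the pooch library mentioned above, and the URL is a placeholder.</p>
<div class="highlight"><pre>
# Hypothetical sketch: register a remote file so it can be fetched on demand or prefetched.
from ammico.utils import DownloadResource  # import path is an assumption

weights = DownloadResource(
    url="https://example.com/model_weights.pt",  # placeholder URL
    known_hash=None,  # pooch-style keyword arguments are assumptions
)
local_path = weights.get()  # downloads the file (or reuses a cached copy)
</pre></div>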
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="cropposts.compute_crop_corner">
|
||||
<span class="sig-prename descclassname"><span class="pre">cropposts.</span></span><span class="sig-name descname"><span class="pre">compute_crop_corner</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">matches</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">DMatch</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kp1</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kp2</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">region</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">30</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">h_margin</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">v_margin</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">5</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">min_match</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">6</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">int</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">int</span><span class="p"><span class="pre">]</span></span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span></span><a class="headerlink" href="#cropposts.compute_crop_corner" title="Link to this definition"></a></dt>
|
||||
<dd><p>Estimate the position on the image from where to crop.</p>
|
||||
<dt class="sig sig-object py" id="utils.ammico_prefetch_models">
|
||||
<span class="sig-prename descclassname"><span class="pre">utils.</span></span><span class="sig-name descname"><span class="pre">ammico_prefetch_models</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#utils.ammico_prefetch_models" title="Link to this definition"></a></dt>
|
||||
<dd><p>Prefetch all the download resources.</p>
|
||||
</dd></dl>
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="utils.append_data_to_dict">
|
||||
<span class="sig-prename descclassname"><span class="pre">utils.</span></span><span class="sig-name descname"><span class="pre">append_data_to_dict</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">mydict</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">dict</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">dict</span></span></span><a class="headerlink" href="#utils.append_data_to_dict" title="Link to this definition"></a></dt>
|
||||
<dd><p>Append entries from nested dictionaries to keys in a global dict.</p>
|
||||
</dd></dl>
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="utils.dump_df">
|
||||
<span class="sig-prename descclassname"><span class="pre">utils.</span></span><span class="sig-name descname"><span class="pre">dump_df</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">mydict</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">dict</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">DataFrame</span></span></span><a class="headerlink" href="#utils.dump_df" title="Link to this definition"></a></dt>
|
||||
<dd><p>Utility to dump the dictionary into a dataframe.</p>
|
||||
</dd></dl>
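<p>A minimal sketch of how the two helpers above could be combined to write results to disk; the image_dict structure and the file name are placeholders.</p>
<div class="highlight"><pre>
# Hypothetical sketch: flatten nested per-image results and dump them to a CSV file.
import ammico

image_dict = {"img0": {"filename": "data/img0.png"}}  # placeholder results dictionary
outdict = ammico.utils.append_data_to_dict(image_dict)
df = ammico.utils.dump_df(outdict)
df.to_csv("dump_file.csv")  # placeholder output file name
</pre></div>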
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="utils.find_files">
|
||||
<span class="sig-prename descclassname"><span class="pre">utils.</span></span><span class="sig-name descname"><span class="pre">find_files</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">path</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pattern</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">['png',</span> <span class="pre">'jpg',</span> <span class="pre">'jpeg',</span> <span class="pre">'gif',</span> <span class="pre">'webp',</span> <span class="pre">'avif',</span> <span class="pre">'tiff']</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">recursive</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">limit</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">20</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">random_seed</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">dict</span></span></span><a class="headerlink" href="#utils.find_files" title="Link to this definition"></a></dt>
|
||||
<dd><p>Find image files on the file system.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>matches</strong> (<em>cv2.DMatch</em>) – The matched objects on the image.</p></li>
|
||||
<li><p><strong>kp1</strong> (<em>np.ndarray</em>) – Key points of the matches for the reference image.</p></li>
|
||||
<li><p><strong>kp2</strong> (<em>np.ndarray</em>) – Key points of the matches for the social media posts.</p></li>
|
||||
<li><p><strong>region</strong> (<em>int</em><em>, </em><em>optional</em>) – Area to consider around the keypoints.
|
||||
Defaults to 30.</p></li>
|
||||
<li><p><strong>h_margin</strong> (<em>int</em><em>, </em><em>optional</em>) – Horizontal margin to subtract from the minimum
|
||||
horizontal position. Defaults to 0.</p></li>
|
||||
<li><p><strong>v_margin</strong> (<em>int</em><em>, </em><em>optional</em>) – Vertical margin to subtract from the minimum
|
||||
vertical position. Defaults to 5.</p></li>
|
||||
<li><p><strong>min_match</strong> – Minimum number of matches required. Defaults to 6.</p></li>
|
||||
<li><p><strong>path</strong> (<em>str</em><em>, </em><em>optional</em>) – The base directory where we are looking for the images. Defaults
|
||||
to None, which uses the ammico data directory if set or the current
|
||||
working directory otherwise.</p></li>
|
||||
<li><p><strong>pattern</strong> (<em>str</em><em>|</em><em>list</em><em>, </em><em>optional</em>) – The naming pattern that the filename should match.
|
||||
Use either ‘.ext’ or just ‘ext’.
|
||||
Defaults to [“png”, “jpg”, “jpeg”, “gif”, “webp”, “avif”, “tiff”]. Can be used to allow other patterns or to only include
|
||||
specific prefixes or suffixes.</p></li>
|
||||
<li><p><strong>recursive</strong> (<em>bool</em><em>, </em><em>optional</em>) – Whether to recurse into subdirectories. Default is set to True.</p></li>
|
||||
<li><p><strong>limit</strong> (<em>int/list</em><em>, </em><em>optional</em>) – The maximum number of images to be found.
|
||||
Provide a list or tuple of length 2 to batch the images.
|
||||
Defaults to 20. To return all images, set to None or -1.</p></li>
|
||||
<li><p><strong>random_seed</strong> (<em>int</em><em>, </em><em>optional</em>) – The random seed to use for shuffling the images.
|
||||
If None is provided the data will not be shuffled. Defaults to None.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p><strong>tuple, optional</strong> – Tuple of vertical and horizontal crop corner coordinates.</p>
|
||||
<dd class="field-even"><p><strong>dict</strong> – A nested dictionary with file ids and all filenames including the path.</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
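<p>A hedged example of calling find_files with the documented parameters; the directory is a placeholder and the ammico.utils module path is an assumption.</p>
<div class="highlight"><pre>
# Hypothetical sketch: collect up to 20 images from a folder into the nested dictionary.
import ammico

image_dict = ammico.utils.find_files(
    path="data/",                      # placeholder directory
    pattern=["png", "jpg", "jpeg"],
    recursive=True,
    limit=20,
    random_seed=42,                    # shuffle reproducibly; None keeps the original order
)
print(len(image_dict), "images found")
</pre></div>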
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="cropposts.crop_image_from_post">
|
||||
<span class="sig-prename descclassname"><span class="pre">cropposts.</span></span><span class="sig-name descname"><span class="pre">crop_image_from_post</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">view</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">final_h</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">ndarray</span></span></span><a class="headerlink" href="#cropposts.crop_image_from_post" title="Link to this definition"></a></dt>
|
||||
<dd><p>Crop the image part from the social media post.</p>
|
||||
<dt class="sig sig-object py" id="utils.get_color_table">
|
||||
<span class="sig-prename descclassname"><span class="pre">utils.</span></span><span class="sig-name descname"><span class="pre">get_color_table</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#utils.get_color_table" title="Link to this definition"></a></dt>
|
||||
<dd></dd></dl>
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="utils.get_dataframe">
|
||||
<span class="sig-prename descclassname"><span class="pre">utils.</span></span><span class="sig-name descname"><span class="pre">get_dataframe</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">mydict</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">dict</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">DataFrame</span></span></span><a class="headerlink" href="#utils.get_dataframe" title="Link to this definition"></a></dt>
|
||||
<dd></dd></dl>
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="utils.initialize_dict">
|
||||
<span class="sig-prename descclassname"><span class="pre">utils.</span></span><span class="sig-name descname"><span class="pre">initialize_dict</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">filelist</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">list</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">dict</span></span></span><a class="headerlink" href="#utils.initialize_dict" title="Link to this definition"></a></dt>
|
||||
<dd><p>Initialize the nested dictionary for all the found images.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>view</strong> (<em>np.ndarray</em>) – The image to be cropped.</p></li>
|
||||
<li><p><strong>final_h</strong> – The horizontal position up to which the image should be cropped.</p></li>
|
||||
</ul>
|
||||
<dd class="field-odd"><p><strong>filelist</strong> (<em>list</em>) – The list of files to be analyzed, including their paths.</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p><strong>np.ndarray</strong> – The cropped image part.</p>
|
||||
<dd class="field-even"><p><strong>dict</strong> – The nested dictionary with all image ids and their paths.</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="cropposts.crop_media_posts">
|
||||
<span class="sig-prename descclassname"><span class="pre">cropposts.</span></span><span class="sig-name descname"><span class="pre">crop_media_posts</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">files</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ref_files</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">save_crop_dir</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">plt_match</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">plt_crop</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">plt_image</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">None</span></span></span><a class="headerlink" href="#cropposts.crop_media_posts" title="Link to this definition"></a></dt>
|
||||
<dd><p>Crop social media posts so that comments beyond the first comment/post are cut off.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>files</strong> (<em>list</em>) – List of all the files to be cropped.</p></li>
|
||||
<li><p><strong>ref_files</strong> (<em>list</em>) – List of all the reference images that signify
|
||||
below which regions should be cropped.</p></li>
|
||||
<li><p><strong>save_crop_dir</strong> (<em>str</em>) – Directory where to write the cropped social media posts to.</p></li>
|
||||
<li><p><strong>plt_match</strong> (<em>Bool</em><em>, </em><em>optional</em>) – Display the matched areas on the social media post.
|
||||
Defaults to False.</p></li>
|
||||
<li><p><strong>plt_crop</strong> (<em>Bool</em><em>, </em><em>optional</em>) – Display the cropped text part of the social media post.
|
||||
Defaults to False.</p></li>
|
||||
<li><p><strong>plt_image</strong> (<em>Bool</em><em>, </em><em>optional</em>) – Display the image part of the social media post.
|
||||
Defaults to False.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
<dt class="sig sig-object py" id="utils.is_interactive">
|
||||
<span class="sig-prename descclassname"><span class="pre">utils.</span></span><span class="sig-name descname"><span class="pre">is_interactive</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#utils.is_interactive" title="Link to this definition"></a></dt>
|
||||
<dd><p>Check if we are running in an interactive environment.</p>
|
||||
</dd></dl>
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="cropposts.crop_posts_from_refs">
|
||||
<span class="sig-prename descclassname"><span class="pre">cropposts.</span></span><span class="sig-name descname"><span class="pre">crop_posts_from_refs</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">ref_views</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">List</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">view</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">plt_match</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">plt_crop</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">plt_image</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">ndarray</span></span></span><a class="headerlink" href="#cropposts.crop_posts_from_refs" title="Link to this definition"></a></dt>
|
||||
<dd><p>Crop the social media post comments from the image.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>ref_views</strong> (<em>list</em>) – List of all the reference images (as numpy arrays) that signify
|
||||
below which regions should be cropped.</p></li>
|
||||
<li><p><strong>view</strong> (<em>np.ndarray</em>) – The image to crop.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p><strong>np.ndarray</strong> – The cropped social media post.</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
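<p>A non-authoritative sketch of the batch entry point crop_media_posts documented above; the file lists and the output directory are placeholders, and passing file paths (rather than loaded arrays) is an assumption.</p>
<div class="highlight"><pre>
# Hypothetical sketch: crop a set of post screenshots below matching reference snippets.
from ammico import cropposts  # import path is an assumption

files = ["data/post1.png", "data/post2.png"]   # placeholder post screenshots
ref_files = ["refs/comment_banner.png"]        # placeholder reference image(s)
cropposts.crop_media_posts(
    files,
    ref_files,
    save_crop_dir="data/cropped/",
    plt_match=False,
    plt_crop=False,
    plt_image=False,
)
</pre></div>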
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="cropposts.crop_posts_image">
|
||||
<span class="sig-prename descclassname"><span class="pre">cropposts.</span></span><span class="sig-name descname"><span class="pre">crop_posts_image</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">ref_view</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">List</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">view</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">None</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">ndarray</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">int</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">int</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">int</span><span class="p"><span class="pre">]</span></span></span></span><a class="headerlink" href="#cropposts.crop_posts_image" title="Link to this definition"></a></dt>
|
||||
<dd><p>Crop the social media post to exclude additional comments. Sometimes also crops the
|
||||
image part of the post - this is put back in later.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>ref_views</strong> (<em>list</em>) – List of all the reference images (as numpy arrays) that signify
|
||||
below which regions should be cropped.</p></li>
|
||||
<li><p><strong>view</strong> (<em>np.ndarray</em>) – The image to crop.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p><strong>np.ndarray</strong> – The cropped social media post.</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="cropposts.draw_matches">
|
||||
<span class="sig-prename descclassname"><span class="pre">cropposts.</span></span><span class="sig-name descname"><span class="pre">draw_matches</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">matches</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">List</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">img1</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">img2</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kp1</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">List</span><span class="p"><span class="pre">[</span></span><span class="pre">KeyPoint</span><span class="p"><span class="pre">]</span></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kp2</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">List</span><span class="p"><span class="pre">[</span></span><span class="pre">KeyPoint</span><span class="p"><span class="pre">]</span></span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">None</span></span></span><a class="headerlink" href="#cropposts.draw_matches" title="Link to this definition"></a></dt>
|
||||
<dd><p>Visualize the matches from SIFT.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>matches</strong> (<em>list</em><em>[</em><em>cv2.DMatch</em><em>]</em>) – List of cv2.DMatch matches on the image.</p></li>
|
||||
<li><p><strong>img1</strong> (<em>np.ndarray</em>) – The reference image.</p></li>
|
||||
<li><p><strong>img2</strong> (<em>np.ndarray</em>) – The social media post.</p></li>
|
||||
<li><p><strong>kp1</strong> (<em>list</em><em>[</em><em>cv2.KeyPoint</em><em>]</em>) – List of keypoints from the first image.</p></li>
|
||||
<li><p><strong>kp2</strong> (<em>list</em><em>[</em><em>cv2.KeyPoint</em><em>]</em>) – List of keypoints from the second image.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="cropposts.kp_from_matches">
|
||||
<span class="sig-prename descclassname"><span class="pre">cropposts.</span></span><span class="sig-name descname"><span class="pre">kp_from_matches</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">matches</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kp1</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kp2</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">Tuple</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">Tuple</span><span class="p"><span class="pre">]</span></span></span></span><a class="headerlink" href="#cropposts.kp_from_matches" title="Link to this definition"></a></dt>
|
||||
<dd><p>Extract the match indices from the keypoints.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>kp1</strong> (<em>np.ndarray</em>) – Key points of the matches for the reference image.</p></li>
|
||||
<li><p><strong>kp2</strong> (<em>np.ndarray</em>) – Key points of the matches for the social media post.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><ul class="simple">
|
||||
<li><p><strong>tuple</strong> – Index of the descriptor in the list of train descriptors.</p></li>
|
||||
<li><p><strong>tuple</strong> – Index of the descriptor in the list of query descriptors.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="cropposts.matching_points">
|
||||
<span class="sig-prename descclassname"><span class="pre">cropposts.</span></span><span class="sig-name descname"><span class="pre">matching_points</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">img1</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">img2</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">DMatch</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">List</span><span class="p"><span class="pre">[</span></span><span class="pre">KeyPoint</span><span class="p"><span class="pre">]</span></span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">List</span><span class="p"><span class="pre">[</span></span><span class="pre">KeyPoint</span><span class="p"><span class="pre">]</span></span><span class="p"><span class="pre">]</span></span></span></span><a class="headerlink" href="#cropposts.matching_points" title="Link to this definition"></a></dt>
|
||||
<dd><p>Compute keypoint matches between two images using the SIFT algorithm.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>img1</strong> (<em>np.ndarray</em>) – The reference image.</p></li>
|
||||
<li><p><strong>img2</strong> (<em>np.ndarray</em>) – The social media post.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><ul class="simple">
|
||||
<li><p><strong>cv2.DMatch</strong> – List of filtered keypoint matches.</p></li>
|
||||
<li><p><strong>cv2.KeyPoint</strong> – List of keypoints from the first image.</p></li>
|
||||
<li><p><strong>cv2.KeyPoint</strong> – List of keypoints from the second image.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
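<p>A hedged sketch chaining matching_points with draw_matches from above to inspect the SIFT matches visually; the image paths are placeholders and reading the images with OpenCV is an assumption.</p>
<div class="highlight"><pre>
# Hypothetical sketch: find and visualise SIFT matches between a reference snippet and a post.
import cv2
from ammico import cropposts  # import path is an assumption

ref = cv2.imread("refs/comment_banner.png")  # placeholder reference snippet
post = cv2.imread("data/post1.png")          # placeholder social media post

filtered_matches, kp1, kp2 = cropposts.matching_points(ref, post)
cropposts.draw_matches(filtered_matches, ref, post, kp1, kp2)  # plots the matched keypoints
</pre></div>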
<dl class="py function">
|
||||
<dt class="sig sig-object py" id="cropposts.paste_image_and_comment">
|
||||
<span class="sig-prename descclassname"><span class="pre">cropposts.</span></span><span class="sig-name descname"><span class="pre">paste_image_and_comment</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">crop_post</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">crop_view</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">ndarray</span></span></span><a class="headerlink" href="#cropposts.paste_image_and_comment" title="Link to this definition"></a></dt>
|
||||
<dd><p>Paste the image part and the text part together without the unnecessary comments.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>crop_post</strong> (<em>np.ndarray</em>) – The cropped image part of the social media post.</p></li>
|
||||
<li><p><strong>crop_view</strong> (<em>np.ndarray</em>) – The cropped text part of the social media post.</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p><strong>np.ndarray</strong> – The image and text part of the social media post in one image.</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
<dt class="sig sig-object py" id="utils.iterable">
|
||||
<span class="sig-prename descclassname"><span class="pre">utils.</span></span><span class="sig-name descname"><span class="pre">iterable</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">arg</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#utils.iterable" title="Link to this definition"></a></dt>
|
||||
<dd></dd></dl>
</section>
|
||||
<section id="utils-module">
|
||||
<h1>utils module<a class="headerlink" href="#utils-module" title="Link to this heading"></a></h1>
|
||||
</section>
|
||||
<section id="display-module">
|
||||
<h1>display module<a class="headerlink" href="#display-module" title="Link to this heading"></a></h1>
|
||||
<section id="module-display">
|
||||
<span id="display-module"></span><h1>display module<a class="headerlink" href="#module-display" title="Link to this heading"></a></h1>
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="display.AnalysisExplorer">
|
||||
<em class="property"><span class="k"><span class="pre">class</span></span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">display.</span></span><span class="sig-name descname"><span class="pre">AnalysisExplorer</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">mydict</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">dict</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#display.AnalysisExplorer" title="Link to this definition"></a></dt>
|
||||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="display.AnalysisExplorer.run_server">
|
||||
<span class="sig-name descname"><span class="pre">run_server</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">port</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">8050</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">None</span></span></span><a class="headerlink" href="#display.AnalysisExplorer.run_server" title="Link to this definition"></a></dt>
|
||||
<dd><p>Run the Dash server to start the analysis explorer.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>port</strong> (<em>int</em><em>, </em><em>optional</em>) – The port number to run the server on (default: 8050).</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
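<p>A minimal sketch of launching the explorer; the package-level export of AnalysisExplorer and the find_files call are assumptions, and the path is a placeholder.</p>
<div class="highlight"><pre>
# Hypothetical sketch: open the interactive explorer for a small set of images.
import ammico

image_dict = ammico.utils.find_files(path="data/", limit=10)  # placeholder directory
explorer = ammico.AnalysisExplorer(image_dict)   # package-level export is an assumption
explorer.run_server(port=8050)                   # then open http://localhost:8050 in a browser
</pre></div>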
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="display.AnalysisExplorer.update_picture">
|
||||
<span class="sig-name descname"><span class="pre">update_picture</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">img_path</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#display.AnalysisExplorer.update_picture" title="Link to this definition"></a></dt>
|
||||
<dd><p>Callback function to update the displayed image.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>img_path</strong> (<em>str</em>) – The path of the selected image.</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p><strong>Union[PIL.PngImagePlugin, None]</strong> – The image object to be displayed
|
||||
or None if the image path is invalid.</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
</dd></dl>
</section>
|
||||
|
||||
|
||||
|
||||
@ -14,7 +14,7 @@
|
||||
<script src="_static/jquery.js?v=5d32c60e"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js?v=2cd50e6c"></script>
|
||||
<script src="_static/documentation_options.js?v=000c92bf"></script>
|
||||
<script src="_static/doctools.js?v=9a2dae69"></script>
|
||||
<script src="_static/doctools.js?v=9bcbadda"></script>
|
||||
<script src="_static/sphinx_highlight.js?v=dc90522c"></script>
|
||||
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
@ -52,7 +52,6 @@
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-0:-Create-and-set-a-Google-Cloud-Vision-Key">Step 0: Create and set a Google Cloud Vision Key</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-1:-Read-your-data-into-AMMICO">Step 1: Read your data into AMMICO</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#The-detector-modules">The detector modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/Example%20cropposts.html">Crop posts module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="modules.html">AMMICO package modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="license_link.html">License</a></li>
|
||||
</ul>
|
||||
|
||||
@ -14,7 +14,7 @@
|
||||
<script src="_static/jquery.js?v=5d32c60e"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js?v=2cd50e6c"></script>
|
||||
<script src="_static/documentation_options.js?v=000c92bf"></script>
|
||||
<script src="_static/doctools.js?v=9a2dae69"></script>
|
||||
<script src="_static/doctools.js?v=9bcbadda"></script>
|
||||
<script src="_static/sphinx_highlight.js?v=dc90522c"></script>
|
||||
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
@ -66,7 +66,6 @@
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-0:-Create-and-set-a-Google-Cloud-Vision-Key">Step 0: Create and set a Google Cloud Vision Key</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-1:-Read-your-data-into-AMMICO">Step 1: Read your data into AMMICO</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#The-detector-modules">The detector modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/Example%20cropposts.html">Crop posts module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="modules.html">AMMICO package modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="license_link.html">License</a></li>
|
||||
</ul>
|
||||
|
||||
@ -13,7 +13,7 @@
|
||||
<script src="_static/jquery.js?v=5d32c60e"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js?v=2cd50e6c"></script>
|
||||
<script src="_static/documentation_options.js?v=000c92bf"></script>
|
||||
<script src="_static/doctools.js?v=9a2dae69"></script>
|
||||
<script src="_static/doctools.js?v=9bcbadda"></script>
|
||||
<script src="_static/sphinx_highlight.js?v=dc90522c"></script>
|
||||
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
@ -49,7 +49,6 @@
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-0:-Create-and-set-a-Google-Cloud-Vision-Key">Step 0: Create and set a Google Cloud Vision Key</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-1:-Read-your-data-into-AMMICO">Step 1: Read your data into AMMICO</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#The-detector-modules">The detector modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/Example%20cropposts.html">Crop posts module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="modules.html">AMMICO package modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="license_link.html">License</a></li>
|
||||
</ul>
|
||||
@ -87,41 +86,41 @@
|
||||
| <a href="#D"><strong>D</strong></a>
|
||||
| <a href="#E"><strong>E</strong></a>
|
||||
| <a href="#F"><strong>F</strong></a>
|
||||
| <a href="#K"><strong>K</strong></a>
|
||||
| <a href="#L"><strong>L</strong></a>
|
||||
| <a href="#G"><strong>G</strong></a>
|
||||
| <a href="#I"><strong>I</strong></a>
|
||||
| <a href="#M"><strong>M</strong></a>
|
||||
| <a href="#P"><strong>P</strong></a>
|
||||
| <a href="#R"><strong>R</strong></a>
|
||||
| <a href="#S"><strong>S</strong></a>
|
||||
| <a href="#T"><strong>T</strong></a>
|
||||
| <a href="#U"><strong>U</strong></a>
|
||||
| <a href="#W"><strong>W</strong></a>
|
||||
|
||||
</div>
|
||||
<h2 id="A">A</h2>
|
||||
<table style="width: 100%" class="indextable genindextable"><tr>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.all_allowed_model_types">all_allowed_model_types (summary.SummaryDetector attribute)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.allowed_analysis_types">allowed_analysis_types (summary.SummaryDetector attribute)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.allowed_model_types">allowed_model_types (summary.SummaryDetector attribute)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.allowed_new_model_types">allowed_new_model_types (summary.SummaryDetector attribute)</a>
|
||||
<li><a href="ammico.html#utils.ammico_prefetch_models">ammico_prefetch_models() (in module utils)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#colors.ColorDetector.analyse_image">analyse_image() (colors.ColorDetector method)</a>
<ul>
|
||||
<li><a href="ammico.html#faces.EmotionDetector.analyse_image">(faces.EmotionDetector method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.analyse_image">(summary.SummaryDetector method)</a>
|
||||
<li><a href="ammico.html#text.TextDetector.analyse_image">(text.TextDetector method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#utils.AnalysisMethod.analyse_image">(utils.AnalysisMethod method)</a>
|
||||
</li>
|
||||
</ul></li>
|
||||
</ul></td>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.analyse_questions">analyse_questions() (summary.SummaryDetector method)</a>
|
||||
<li><a href="ammico.html#display.AnalysisExplorer">AnalysisExplorer (class in display)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.analyse_summary">analyse_summary() (summary.SummaryDetector method)</a>
|
||||
<li><a href="ammico.html#utils.AnalysisMethod">AnalysisMethod (class in utils)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#faces.EmotionDetector.analyze_single_face">analyze_single_face() (faces.EmotionDetector method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#utils.append_data_to_dict">append_data_to_dict() (in module utils)</a>
|
||||
</li>
|
||||
</ul></td>
|
||||
</tr></table>
|
||||
@ -129,36 +128,17 @@
|
||||
<h2 id="C">C</h2>
|
||||
<table style="width: 100%" class="indextable genindextable"><tr>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.check_model">check_model() (summary.SummaryDetector method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#faces.EmotionDetector.clean_subdict">clean_subdict() (faces.EmotionDetector method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#colors.ColorDetector">ColorDetector (class in colors)</a>
|
||||
</li>
|
||||
</ul></td>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li>
|
||||
colors
<ul>
|
||||
<li><a href="ammico.html#module-colors">module</a>
|
||||
</li>
|
||||
</ul></li>
|
||||
<li><a href="ammico.html#cropposts.compute_crop_corner">compute_crop_corner() (in module cropposts)</a>
|
||||
</li>
|
||||
</ul></td>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#cropposts.crop_image_from_post">crop_image_from_post() (in module cropposts)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#cropposts.crop_media_posts">crop_media_posts() (in module cropposts)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#cropposts.crop_posts_from_refs">crop_posts_from_refs() (in module cropposts)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#cropposts.crop_posts_image">crop_posts_image() (in module cropposts)</a>
|
||||
</li>
|
||||
<li>
|
||||
cropposts
<ul>
|
||||
<li><a href="ammico.html#module-cropposts">module</a>
|
||||
</li>
|
||||
</ul></li>
|
||||
</ul></td>
|
||||
@ -169,9 +149,18 @@
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#faces.deepface_symlink_processor">deepface_symlink_processor() (in module faces)</a>
|
||||
</li>
|
||||
<li>
|
||||
display
<ul>
|
||||
<li><a href="ammico.html#module-display">module</a>
|
||||
</li>
|
||||
</ul></li>
|
||||
</ul></td>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#cropposts.draw_matches">draw_matches() (in module cropposts)</a>
|
||||
<li><a href="ammico.html#utils.DownloadResource">DownloadResource (class in utils)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#utils.dump_df">dump_df() (in module utils)</a>
|
||||
</li>
|
||||
</ul></td>
|
||||
</tr></table>
|
||||
@ -202,45 +191,37 @@
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#faces.EmotionDetector.facial_expression_analysis">facial_expression_analysis() (faces.EmotionDetector method)</a>
|
||||
</li>
|
||||
</ul></td>
|
||||
</tr></table>
<h2 id="K">K</h2>
|
||||
<table style="width: 100%" class="indextable genindextable"><tr>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#cropposts.kp_from_matches">kp_from_matches() (in module cropposts)</a>
|
||||
<li><a href="ammico.html#utils.find_files">find_files() (in module utils)</a>
|
||||
</li>
|
||||
</ul></td>
|
||||
</tr></table>
<h2 id="L">L</h2>
|
||||
<h2 id="G">G</h2>
|
||||
<table style="width: 100%" class="indextable genindextable"><tr>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.load_model">load_model() (summary.SummaryDetector method)</a>
|
||||
<li><a href="ammico.html#utils.DownloadResource.get">get() (utils.DownloadResource method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.load_model_base">load_model_base() (summary.SummaryDetector method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.load_model_base_blip2_opt_caption_coco_opt67b">load_model_base_blip2_opt_caption_coco_opt67b() (summary.SummaryDetector method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.load_model_base_blip2_opt_pretrain_opt67b">load_model_base_blip2_opt_pretrain_opt67b() (summary.SummaryDetector method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.load_model_blip2_opt_caption_coco_opt27b">load_model_blip2_opt_caption_coco_opt27b() (summary.SummaryDetector method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.load_model_blip2_opt_pretrain_opt27b">load_model_blip2_opt_pretrain_opt27b() (summary.SummaryDetector method)</a>
|
||||
<li><a href="ammico.html#utils.get_color_table">get_color_table() (in module utils)</a>
|
||||
</li>
|
||||
</ul></td>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.load_model_blip2_t5_caption_coco_flant5xl">load_model_blip2_t5_caption_coco_flant5xl() (summary.SummaryDetector method)</a>
|
||||
<li><a href="ammico.html#utils.get_dataframe">get_dataframe() (in module utils)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.load_model_blip2_t5_pretrain_flant5xl">load_model_blip2_t5_pretrain_flant5xl() (summary.SummaryDetector method)</a>
|
||||
<li><a href="ammico.html#text.TextDetector.get_text_from_image">get_text_from_image() (text.TextDetector method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.load_model_blip2_t5_pretrain_flant5xxl">load_model_blip2_t5_pretrain_flant5xxl() (summary.SummaryDetector method)</a>
|
||||
</ul></td>
|
||||
</tr></table>
<h2 id="I">I</h2>
|
||||
<table style="width: 100%" class="indextable genindextable"><tr>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#utils.initialize_dict">initialize_dict() (in module utils)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.load_model_large">load_model_large() (summary.SummaryDetector method)</a>
|
||||
</ul></td>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#utils.is_interactive">is_interactive() (in module utils)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.load_new_model">load_new_model() (summary.SummaryDetector method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector.load_vqa_model">load_vqa_model() (summary.SummaryDetector method)</a>
|
||||
<li><a href="ammico.html#utils.iterable">iterable() (in module utils)</a>
|
||||
</li>
|
||||
</ul></td>
|
||||
</tr></table>
|
||||
@ -248,19 +229,19 @@
|
||||
<h2 id="M">M</h2>
|
||||
<table style="width: 100%" class="indextable genindextable"><tr>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#cropposts.matching_points">matching_points() (in module cropposts)</a>
|
||||
</li>
|
||||
<li>
|
||||
module
<ul>
|
||||
<li><a href="ammico.html#module-colors">colors</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#module-cropposts">cropposts</a>
|
||||
<li><a href="ammico.html#module-display">display</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#module-faces">faces</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#module-summary">summary</a>
|
||||
<li><a href="ammico.html#module-text">text</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#module-utils">utils</a>
|
||||
</li>
|
||||
</ul></li>
|
||||
</ul></td>
|
||||
@ -269,7 +250,7 @@
|
||||
<h2 id="P">P</h2>
|
||||
<table style="width: 100%" class="indextable genindextable"><tr>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#cropposts.paste_image_and_comment">paste_image_and_comment() (in module cropposts)</a>
|
||||
<li><a href="ammico.html#text.privacy_disclosure">privacy_disclosure() (in module text)</a>
|
||||
</li>
|
||||
</ul></td>
|
||||
</tr></table>
|
||||
@ -277,7 +258,17 @@
|
||||
<h2 id="R">R</h2>
|
||||
<table style="width: 100%" class="indextable genindextable"><tr>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#text.TextAnalyzer.read_csv">read_csv() (text.TextAnalyzer method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#text.TextDetector.remove_linebreaks">remove_linebreaks() (text.TextDetector method)</a>
|
||||
</li>
|
||||
</ul></td>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#utils.DownloadResource.resources">resources (utils.DownloadResource attribute)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#colors.ColorDetector.rgb2name">rgb2name() (colors.ColorDetector method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#display.AnalysisExplorer.run_server">run_server() (display.AnalysisExplorer method)</a>
|
||||
</li>
|
||||
</ul></td>
|
||||
</tr></table>
|
||||
@ -289,19 +280,50 @@
<ul>
|
||||
<li><a href="ammico.html#faces.EmotionDetector.set_keys">(faces.EmotionDetector method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#text.TextDetector.set_keys">(text.TextDetector method)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#utils.AnalysisMethod.set_keys">(utils.AnalysisMethod method)</a>
|
||||
</li>
|
||||
</ul></li>
|
||||
</ul></td>
|
||||
</tr></table>
<h2 id="T">T</h2>
|
||||
<table style="width: 100%" class="indextable genindextable"><tr>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li>
|
||||
text
<ul>
|
||||
<li><a href="ammico.html#module-text">module</a>
|
||||
</li>
|
||||
</ul></li>
|
||||
</ul></td>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#text.TextAnalyzer">TextAnalyzer (class in text)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#text.TextDetector">TextDetector (class in text)</a>
|
||||
</li>
|
||||
<li><a href="ammico.html#text.TextDetector.translate_text">translate_text() (text.TextDetector method)</a>
|
||||
</li>
|
||||
</ul></td>
|
||||
</tr></table>
<h2 id="U">U</h2>
|
||||
<table style="width: 100%" class="indextable genindextable"><tr>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li><a href="ammico.html#display.AnalysisExplorer.update_picture">update_picture() (display.AnalysisExplorer method)</a>
|
||||
</li>
|
||||
</ul></td>
|
||||
<td style="width: 33%; vertical-align: top;"><ul>
|
||||
<li>
|
||||
summary
|
||||
utils
<ul>
|
||||
<li><a href="ammico.html#module-summary">module</a>
|
||||
<li><a href="ammico.html#module-utils">module</a>
|
||||
</li>
|
||||
</ul></li>
|
||||
<li><a href="ammico.html#summary.SummaryDetector">SummaryDetector (class in summary)</a>
|
||||
</li>
|
||||
</ul></td>
|
||||
</tr></table>
@ -14,11 +14,9 @@
|
||||
<script src="_static/jquery.js?v=5d32c60e"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js?v=2cd50e6c"></script>
|
||||
<script src="_static/documentation_options.js?v=000c92bf"></script>
|
||||
<script src="_static/doctools.js?v=9a2dae69"></script>
|
||||
<script src="_static/doctools.js?v=9bcbadda"></script>
|
||||
<script src="_static/sphinx_highlight.js?v=dc90522c"></script>
|
||||
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
|
||||
<script>window.MathJax = {"tex": {"inlineMath": [["$", "$"], ["\\(", "\\)"]], "processEscapes": true}, "options": {"ignoreHtmlClass": "tex2jax_ignore|mathjax_ignore|document", "processHtmlClass": "tex2jax_process|mathjax_process|math|output_area"}}</script>
|
||||
<script defer="defer" src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
<link rel="index" title="Index" href="genindex.html" />
|
||||
<link rel="search" title="Search" href="search.html" />
|
||||
@ -53,7 +51,6 @@
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-0:-Create-and-set-a-Google-Cloud-Vision-Key">Step 0: Create and set a Google Cloud Vision Key</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-1:-Read-your-data-into-AMMICO">Step 1: Read your data into AMMICO</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#The-detector-modules">The detector modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/Example%20cropposts.html">Crop posts module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="modules.html">AMMICO package modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="license_link.html">License</a></li>
|
||||
</ul>
|
||||
@ -123,16 +120,15 @@
|
||||
<li class="toctree-l2"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Further-detector-modules">Further detector modules</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/Example%20cropposts.html">Crop posts module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="modules.html">AMMICO package modules</a><ul>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html">text module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#module-summary">summary module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#summary-module">summary module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#multimodal-search-module">multimodal search module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#module-faces">faces module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#module-colors">color_analysis module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#module-cropposts">cropposts module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#utils-module">utils module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#display-module">display module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#cropposts-module">cropposts module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#module-utils">utils module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#module-display">display module</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="license_link.html">License</a></li>
@ -14,7 +14,7 @@
|
||||
<script src="_static/jquery.js?v=5d32c60e"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js?v=2cd50e6c"></script>
|
||||
<script src="_static/documentation_options.js?v=000c92bf"></script>
|
||||
<script src="_static/doctools.js?v=9a2dae69"></script>
|
||||
<script src="_static/doctools.js?v=9bcbadda"></script>
|
||||
<script src="_static/sphinx_highlight.js?v=dc90522c"></script>
|
||||
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
@ -51,7 +51,6 @@
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-0:-Create-and-set-a-Google-Cloud-Vision-Key">Step 0: Create and set a Google Cloud Vision Key</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-1:-Read-your-data-into-AMMICO">Step 1: Read your data into AMMICO</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#The-detector-modules">The detector modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/Example%20cropposts.html">Crop posts module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="modules.html">AMMICO package modules</a></li>
|
||||
<li class="toctree-l1 current"><a class="current reference internal" href="#">License</a></li>
|
||||
</ul>
@ -14,14 +14,14 @@
|
||||
<script src="_static/jquery.js?v=5d32c60e"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js?v=2cd50e6c"></script>
|
||||
<script src="_static/documentation_options.js?v=000c92bf"></script>
|
||||
<script src="_static/doctools.js?v=9a2dae69"></script>
|
||||
<script src="_static/doctools.js?v=9bcbadda"></script>
|
||||
<script src="_static/sphinx_highlight.js?v=dc90522c"></script>
|
||||
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
<link rel="index" title="Index" href="genindex.html" />
|
||||
<link rel="search" title="Search" href="search.html" />
|
||||
<link rel="next" title="text module" href="ammico.html" />
|
||||
<link rel="prev" title="Crop posts module" href="notebooks/Example%20cropposts.html" />
|
||||
<link rel="prev" title="AMMICO Demonstration Notebook" href="notebooks/DemoNotebook_ammico.html" />
|
||||
</head>
<body class="wy-body-for-nav">
|
||||
@ -52,16 +52,15 @@
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-0:-Create-and-set-a-Google-Cloud-Vision-Key">Step 0: Create and set a Google Cloud Vision Key</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-1:-Read-your-data-into-AMMICO">Step 1: Read your data into AMMICO</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#The-detector-modules">The detector modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/Example%20cropposts.html">Crop posts module</a></li>
|
||||
<li class="toctree-l1 current"><a class="current reference internal" href="#">AMMICO package modules</a><ul>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html">text module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#module-summary">summary module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#summary-module">summary module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#multimodal-search-module">multimodal search module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#module-faces">faces module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#module-colors">color_analysis module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#module-cropposts">cropposts module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#utils-module">utils module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#display-module">display module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#cropposts-module">cropposts module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#module-utils">utils module</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#module-display">display module</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="license_link.html">License</a></li>
|
||||
@ -95,33 +94,23 @@
|
||||
<h1>AMMICO package modules<a class="headerlink" href="#ammico-package-modules" title="Link to this heading"></a></h1>
|
||||
<div class="toctree-wrapper compound">
|
||||
<ul>
|
||||
<li class="toctree-l1"><a class="reference internal" href="ammico.html">text module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="ammico.html#module-summary">summary module</a><ul>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#summary.SummaryDetector"><code class="docutils literal notranslate"><span class="pre">SummaryDetector</span></code></a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.all_allowed_model_types"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.all_allowed_model_types</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.allowed_analysis_types"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.allowed_analysis_types</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.allowed_model_types"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.allowed_model_types</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.allowed_new_model_types"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.allowed_new_model_types</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.analyse_image"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.analyse_image()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.analyse_questions"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.analyse_questions()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.analyse_summary"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.analyse_summary()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.check_model"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.check_model()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.load_model"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.load_model_base"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_base()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.load_model_base_blip2_opt_caption_coco_opt67b"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_base_blip2_opt_caption_coco_opt67b()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.load_model_base_blip2_opt_pretrain_opt67b"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_base_blip2_opt_pretrain_opt67b()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.load_model_blip2_opt_caption_coco_opt27b"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_blip2_opt_caption_coco_opt27b()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.load_model_blip2_opt_pretrain_opt27b"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_blip2_opt_pretrain_opt27b()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.load_model_blip2_t5_caption_coco_flant5xl"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_blip2_t5_caption_coco_flant5xl()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.load_model_blip2_t5_pretrain_flant5xl"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_blip2_t5_pretrain_flant5xl()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.load_model_blip2_t5_pretrain_flant5xxl"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_blip2_t5_pretrain_flant5xxl()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.load_model_large"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_model_large()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.load_new_model"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_new_model()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#summary.SummaryDetector.load_vqa_model"><code class="docutils literal notranslate"><span class="pre">SummaryDetector.load_vqa_model()</span></code></a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="ammico.html">text module</a><ul>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#text.TextAnalyzer"><code class="docutils literal notranslate"><span class="pre">TextAnalyzer</span></code></a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#text.TextAnalyzer.read_csv"><code class="docutils literal notranslate"><span class="pre">TextAnalyzer.read_csv()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#text.TextDetector"><code class="docutils literal notranslate"><span class="pre">TextDetector</span></code></a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#text.TextDetector.analyse_image"><code class="docutils literal notranslate"><span class="pre">TextDetector.analyse_image()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#text.TextDetector.get_text_from_image"><code class="docutils literal notranslate"><span class="pre">TextDetector.get_text_from_image()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#text.TextDetector.remove_linebreaks"><code class="docutils literal notranslate"><span class="pre">TextDetector.remove_linebreaks()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#text.TextDetector.set_keys"><code class="docutils literal notranslate"><span class="pre">TextDetector.set_keys()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#text.TextDetector.translate_text"><code class="docutils literal notranslate"><span class="pre">TextDetector.translate_text()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#text.privacy_disclosure"><code class="docutils literal notranslate"><span class="pre">privacy_disclosure()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="ammico.html#summary-module">summary module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="ammico.html#multimodal-search-module">multimodal search module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="ammico.html#module-faces">faces module</a><ul>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#faces.EmotionDetector"><code class="docutils literal notranslate"><span class="pre">EmotionDetector</span></code></a><ul>
|
||||
@ -146,20 +135,37 @@
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="ammico.html#module-cropposts">cropposts module</a><ul>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#cropposts.compute_crop_corner"><code class="docutils literal notranslate"><span class="pre">compute_crop_corner()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#cropposts.crop_image_from_post"><code class="docutils literal notranslate"><span class="pre">crop_image_from_post()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#cropposts.crop_media_posts"><code class="docutils literal notranslate"><span class="pre">crop_media_posts()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#cropposts.crop_posts_from_refs"><code class="docutils literal notranslate"><span class="pre">crop_posts_from_refs()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#cropposts.crop_posts_image"><code class="docutils literal notranslate"><span class="pre">crop_posts_image()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#cropposts.draw_matches"><code class="docutils literal notranslate"><span class="pre">draw_matches()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#cropposts.kp_from_matches"><code class="docutils literal notranslate"><span class="pre">kp_from_matches()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#cropposts.matching_points"><code class="docutils literal notranslate"><span class="pre">matching_points()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#cropposts.paste_image_and_comment"><code class="docutils literal notranslate"><span class="pre">paste_image_and_comment()</span></code></a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="ammico.html#cropposts-module">cropposts module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="ammico.html#module-utils">utils module</a><ul>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#utils.AnalysisMethod"><code class="docutils literal notranslate"><span class="pre">AnalysisMethod</span></code></a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#utils.AnalysisMethod.analyse_image"><code class="docutils literal notranslate"><span class="pre">AnalysisMethod.analyse_image()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#utils.AnalysisMethod.set_keys"><code class="docutils literal notranslate"><span class="pre">AnalysisMethod.set_keys()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#utils.DownloadResource"><code class="docutils literal notranslate"><span class="pre">DownloadResource</span></code></a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#utils.DownloadResource.get"><code class="docutils literal notranslate"><span class="pre">DownloadResource.get()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#utils.DownloadResource.resources"><code class="docutils literal notranslate"><span class="pre">DownloadResource.resources</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#utils.ammico_prefetch_models"><code class="docutils literal notranslate"><span class="pre">ammico_prefetch_models()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#utils.append_data_to_dict"><code class="docutils literal notranslate"><span class="pre">append_data_to_dict()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#utils.dump_df"><code class="docutils literal notranslate"><span class="pre">dump_df()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#utils.find_files"><code class="docutils literal notranslate"><span class="pre">find_files()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#utils.get_color_table"><code class="docutils literal notranslate"><span class="pre">get_color_table()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#utils.get_dataframe"><code class="docutils literal notranslate"><span class="pre">get_dataframe()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#utils.initialize_dict"><code class="docutils literal notranslate"><span class="pre">initialize_dict()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#utils.is_interactive"><code class="docutils literal notranslate"><span class="pre">is_interactive()</span></code></a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#utils.iterable"><code class="docutils literal notranslate"><span class="pre">iterable()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="ammico.html#module-display">display module</a><ul>
|
||||
<li class="toctree-l2"><a class="reference internal" href="ammico.html#display.AnalysisExplorer"><code class="docutils literal notranslate"><span class="pre">AnalysisExplorer</span></code></a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#display.AnalysisExplorer.run_server"><code class="docutils literal notranslate"><span class="pre">AnalysisExplorer.run_server()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="ammico.html#display.AnalysisExplorer.update_picture"><code class="docutils literal notranslate"><span class="pre">AnalysisExplorer.update_picture()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="ammico.html#utils-module">utils module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="ammico.html#display-module">display module</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</section>
|
||||
@ -168,7 +174,7 @@
|
||||
</div>
|
||||
</div>
|
||||
<footer><div class="rst-footer-buttons" role="navigation" aria-label="Footer">
|
||||
<a href="notebooks/Example%20cropposts.html" class="btn btn-neutral float-left" title="Crop posts module" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
|
||||
<a href="notebooks/DemoNotebook_ammico.html" class="btn btn-neutral float-left" title="AMMICO Demonstration Notebook" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
|
||||
<a href="ammico.html" class="btn btn-neutral float-right" title="text module" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right" aria-hidden="true"></span></a>
|
||||
</div>
The file diff is not shown because of its large size.
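As a minimal sketch of the analysis-and-dump pattern that the DemoNotebook_ammico.ipynb hunks below implement: ammico, image_dict, dump_file, dump_every, EmotionDetector, get_dataframe and the 50-point thresholds are all taken from those notebook cells, while the find_files call and its path/limit arguments are an assumption used only to make the snippet self-contained.

import ammico
from tqdm import tqdm

# assumption: image_dict is built earlier in the demo; a call along these lines is one way to do it
image_dict = ammico.find_files(path="data/", limit=20)

dump_file = "dump_file.csv"  # intermediate results are written here
dump_every = 10              # dump every N images

for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):
    # analyse one image and store the result back into the nested dict
    image_dict[key] = ammico.EmotionDetector(
        image_dict[key],
        emotion_threshold=50,
        race_threshold=50,
        gender_threshold=50,
    ).analyse_image()
    # every dump_every images (and on the last image) convert the dict to a dataframe and save it
    if num % dump_every == 0 or num == len(image_dict) - 1:
        ammico.get_dataframe(image_dict).to_csv(dump_file)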
@ -22,7 +22,7 @@
|
||||
"source": [
|
||||
"# if running on google colab\\\n",
|
||||
"# PLEASE RUN THIS ONLY AS CPU RUNTIME\n",
|
||||
"# for a GPU runtime, there are conflicts with pre-installed packages - \n",
|
||||
"# for a GPU runtime, there are conflicts with pre-installed packages -\n",
|
||||
"# you first need to uninstall them (prepare a clean environment with no pre-installs) and then install ammico\n",
|
||||
"# flake8-noqa-cell\n",
|
||||
"\n",
|
||||
@ -103,11 +103,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"# jax also sometimes leads to problems on google colab\n",
|
||||
"# if this is the case, try restarting the kernel and executing this \n",
|
||||
"# if this is the case, try restarting the kernel and executing this\n",
|
||||
"# and the above two code cells again\n",
|
||||
"import ammico\n",
|
||||
"\n",
|
||||
"# for displaying a progress bar\n",
|
||||
"from tqdm import tqdm"
|
||||
]
|
||||
@ -276,7 +276,7 @@
|
||||
"source": [
|
||||
"# dump file name\n",
|
||||
"dump_file = \"dump_file.csv\"\n",
|
||||
"# dump every N images \n",
|
||||
"# dump every N images\n",
|
||||
"dump_every = 10"
|
||||
]
|
||||
},
|
||||
@ -299,12 +299,18 @@
|
||||
"# the highest possible value is 100\n",
|
||||
"race_threshold = 50\n",
|
||||
"gender_threshold = 50\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(image_dict[key],\n",
|
||||
" emotion_threshold=emotion_threshold,\n",
|
||||
" race_threshold=race_threshold,\n",
|
||||
" gender_threshold=gender_threshold).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" if num % dump_every == 0 or num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(\n",
|
||||
" image_dict[key],\n",
|
||||
" emotion_threshold=emotion_threshold,\n",
|
||||
" race_threshold=race_threshold,\n",
|
||||
" gender_threshold=gender_threshold,\n",
|
||||
" ).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 or num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -322,10 +328,16 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = ammico.TextDetector(image_dict[key], analyse_text=True).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = ammico.TextDetector(\n",
|
||||
" image_dict[key], analyse_text=True\n",
|
||||
" ).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
"\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -344,13 +356,21 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# initialize the models\n",
|
||||
"image_summary_detector = ammico.SummaryDetector(subdict = image_dict, analysis_type=\"summary\", model_type=\"base\")\n",
|
||||
"image_summary_detector = ammico.SummaryDetector(\n",
|
||||
" subdict=image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# run the analysis without having to re-iniatialize the model\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\") # analyse image with SummaryDetector and update dict\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key], analysis_type=\"summary\"\n",
|
||||
" ) # analyse image with SummaryDetector and update dict\n",
|
||||
"\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -369,14 +389,26 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# initialize the models\n",
|
||||
"image_summary_detector = ammico.SummaryDetector(subdict = image_dict, analysis_type=\"summary\", model_type=\"base\")\n",
|
||||
"image_summary_detector = ammico.SummaryDetector(\n",
|
||||
" subdict=image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(image_dict[key]).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" image_dict[key] = ammico.TextDetector(image_dict[key], analyse_text=True).analyse_image() # analyse image with TextDetector and update dict\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\") # analyse image with SummaryDetector and update dict\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file \n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(\n",
|
||||
" image_dict[key]\n",
|
||||
" ).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" image_dict[key] = ammico.TextDetector(\n",
|
||||
" image_dict[key], analyse_text=True\n",
|
||||
" ).analyse_image() # analyse image with TextDetector and update dict\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key], analysis_type=\"summary\"\n",
|
||||
" ) # analyse image with SummaryDetector and update dict\n",
|
||||
"\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -477,7 +509,7 @@
|
||||
"# set the dump file\n",
|
||||
"# dump file name\n",
|
||||
"dump_file = \"dump_file.csv\"\n",
|
||||
"# dump every N images \n",
|
||||
"# dump every N images\n",
|
||||
"dump_every = 10"
|
||||
]
|
||||
},
|
||||
@ -488,9 +520,15 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# analyze the csv file\n",
|
||||
"for num, key in tqdm(enumerate(text_dict.keys()), total=len(text_dict)): # loop through all text entries\n",
|
||||
" ammico.TextDetector(text_dict[key], analyse_text=True, skip_extraction=True).analyse_image() # analyse text with TextDetector and update dict\n",
|
||||
" if num % dump_every == 0 | num == len(text_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(text_dict.keys()), total=len(text_dict)\n",
|
||||
"): # loop through all text entries\n",
|
||||
" ammico.TextDetector(\n",
|
||||
" text_dict[key], analyse_text=True, skip_extraction=True\n",
|
||||
" ).analyse_image() # analyse text with TextDetector and update dict\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(text_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(text_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -565,14 +603,23 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = ammico.TextDetector(image_dict[key], # analyse image with TextDetector and update dict\n",
|
||||
" analyse_text=True, model_names=[\"sshleifer/distilbart-cnn-12-6\", \n",
|
||||
" \"distilbert-base-uncased-finetuned-sst-2-english\", \n",
|
||||
" \"dbmdz/bert-large-cased-finetuned-conll03-english\"], \n",
|
||||
" revision_numbers=[\"a4f8f3e\", \"af0f99b\", \"f2482bf\"]).analyse_image()\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = ammico.TextDetector(\n",
|
||||
" image_dict[key], # analyse image with TextDetector and update dict\n",
|
||||
" analyse_text=True,\n",
|
||||
" model_names=[\n",
|
||||
" \"sshleifer/distilbart-cnn-12-6\",\n",
|
||||
" \"distilbert-base-uncased-finetuned-sst-2-english\",\n",
|
||||
" \"dbmdz/bert-large-cased-finetuned-conll03-english\",\n",
|
||||
" ],\n",
|
||||
" revision_numbers=[\"a4f8f3e\", \"af0f99b\", \"f2482bf\"],\n",
|
||||
" ).analyse_image()\n",
|
||||
"\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -654,7 +701,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image_summary_detector = ammico.SummaryDetector(image_dict, analysis_type=\"summary\", model_type=\"base\")"
|
||||
"image_summary_detector = ammico.SummaryDetector(\n",
|
||||
" image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -663,10 +712,12 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\")\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: \n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key], analysis_type=\"summary\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1:\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -704,14 +755,17 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image_summary_vqa_detector = ammico.SummaryDetector(image_dict, analysis_type=\"questions\", \n",
|
||||
" model_type=\"vqa\")\n",
|
||||
"image_summary_vqa_detector = ammico.SummaryDetector(\n",
|
||||
" image_dict, analysis_type=\"questions\", model_type=\"vqa\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_vqa_detector.analyse_image(subdict=image_dict[key], \n",
|
||||
" analysis_type=\"questions\", \n",
|
||||
" list_of_questions = list_of_questions)\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: \n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_vqa_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" )\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1:\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -729,13 +783,16 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image_summary_vqa_detector = ammico.SummaryDetector(image_dict, analysis_type=\"summary_and_questions\", \n",
|
||||
" model_type=\"base\")\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_vqa_detector.analyse_image(subdict=image_dict[key], \n",
|
||||
" analysis_type=\"summary_and_questions\", \n",
|
||||
" list_of_questions = list_of_questions)\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: \n",
|
||||
"image_summary_vqa_detector = ammico.SummaryDetector(\n",
|
||||
" image_dict, analysis_type=\"summary_and_questions\", model_type=\"base\"\n",
|
||||
")\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_vqa_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"summary_and_questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" )\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1:\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -767,7 +824,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"obj = ammico.SummaryDetector(subdict=image_dict, analysis_type = \"summary_and_questions\", model_type = \"blip2_t5_caption_coco_flant5xl\")\n",
|
||||
"obj = ammico.SummaryDetector(\n",
|
||||
" subdict=image_dict,\n",
|
||||
" analysis_type=\"summary_and_questions\",\n",
|
||||
" model_type=\"blip2_t5_caption_coco_flant5xl\",\n",
|
||||
")\n",
|
||||
"# list of the new models that can be used:\n",
|
||||
"# \"blip2_t5_pretrain_flant5xxl\",\n",
|
||||
"# \"blip2_t5_pretrain_flant5xl\",\n",
|
||||
@ -781,7 +842,7 @@
|
||||
"# Or you can use `caption_coco_`` model types to generate coco-style captions.\n",
|
||||
"# `flant5` and `opt` means that the model equipped with FlanT5 and OPT LLMs respectively.\n",
|
||||
"\n",
|
||||
"#also you can perform all calculation on cpu if you set device_type= \"cpu\" or gpu if you set device_type= \"cuda\""
|
||||
"# also you can perform all calculation on cpu if you set device_type= \"cpu\" or gpu if you set device_type= \"cuda\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -812,9 +873,13 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for key in image_dict:\n",
|
||||
" image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions)\n",
|
||||
" image_dict[key] = obj.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"# analysis_type can be \n",
|
||||
"# analysis_type can be\n",
|
||||
"# \"summary\",\n",
|
||||
"# \"questions\",\n",
|
||||
"# \"summary_and_questions\"."
|
||||
@ -848,7 +913,11 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for key in image_dict:\n",
|
||||
" image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions)"
|
||||
" image_dict[key] = obj.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -886,7 +955,12 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for key in image_dict:\n",
|
||||
" image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions, consequential_questions=True)"
|
||||
" image_dict[key] = obj.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" consequential_questions=True,\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -940,9 +1014,13 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for key in image_dict.keys():\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
|
||||
" gender_threshold=50,\n",
|
||||
" accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
|
||||
" image_dict[key] = ammico.EmotionDetector(\n",
|
||||
" image_dict[key],\n",
|
||||
" emotion_threshold=50,\n",
|
||||
" race_threshold=50,\n",
|
||||
" gender_threshold=50,\n",
|
||||
" accept_disclosure=\"DISCLOSURE_AMMICO\",\n",
|
||||
" ).analyse_image()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -1055,9 +1133,9 @@
|
||||
" image_names,\n",
|
||||
" features_image_stacked,\n",
|
||||
") = my_obj.parsing_images(\n",
|
||||
" model_type, \n",
|
||||
" model_type,\n",
|
||||
" path_to_save_tensors=\"/content/drive/MyDrive/misinformation-data/\",\n",
|
||||
" )"
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -1112,14 +1190,19 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import importlib_resources # only require for image query example\n",
|
||||
"image_example_query = str(importlib_resources.files(\"ammico\") / \"data\" / \"test-crop-image.png\") # creating the path to the image for the image query example\n",
|
||||
"import importlib_resources # only require for image query example\n",
|
||||
"\n",
|
||||
"image_example_query = str(\n",
|
||||
" importlib_resources.files(\"ammico\") / \"data\" / \"test-crop-image.png\"\n",
|
||||
") # creating the path to the image for the image query example\n",
|
||||
"\n",
|
||||
"search_query = [\n",
|
||||
" {\"text_input\": \"politician press conference\"}, \n",
|
||||
" {\"text_input\": \"politician press conference\"},\n",
|
||||
" {\"text_input\": \"a world map\"},\n",
|
||||
" {\"text_input\": \"a dog\"}, # This is how looks text query\n",
|
||||
" {\"image\": image_example_query}, # This is how looks image query, here `image_example_path` is the path to query image like \"data/test-crop-image.png\"\n",
|
||||
" {\"text_input\": \"a dog\"}, # This is how looks text query\n",
|
||||
" {\n",
|
||||
" \"image\": image_example_query\n",
|
||||
" }, # This is how looks image query, here `image_example_path` is the path to query image like \"data/test-crop-image.png\"\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
@ -1199,7 +1282,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"my_obj.show_results(\n",
|
||||
" search_query[0], # you can change the index to see the results for other queries\n",
|
||||
" search_query[0], # you can change the index to see the results for other queries\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -1210,7 +1293,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"my_obj.show_results(\n",
|
||||
" search_query[3], # you can change the index to see the results for other queries\n",
|
||||
" search_query[3], # you can change the index to see the results for other queries\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -1349,7 +1432,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"analysis_explorer = ammico.AnalysisExplorer(image_dict)\n",
|
||||
"analysis_explorer.run_server(port = 8057)"
|
||||
"analysis_explorer.run_server(port=8057)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -1447,7 +1530,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.11.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@ -1,226 +0,0 @@
|
||||
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html class="writer-html5" lang="en" data-content_root="../">
|
||||
<head>
|
||||
<meta charset="utf-8" /><meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>Crop posts module — AMMICO 0.2.2 documentation</title>
|
||||
<link rel="stylesheet" type="text/css" href="../_static/pygments.css?v=b86133f3" />
|
||||
<link rel="stylesheet" type="text/css" href="../_static/css/theme.css?v=e59714d7" />
|
||||
<link rel="stylesheet" type="text/css" href="../_static/nbsphinx-code-cells.css?v=2aa19091" />
|
||||
|
||||
|
||||
<script src="../_static/jquery.js?v=5d32c60e"></script>
|
||||
<script src="../_static/_sphinx_javascript_frameworks_compat.js?v=2cd50e6c"></script>
|
||||
<script src="../_static/documentation_options.js?v=000c92bf"></script>
|
||||
<script src="../_static/doctools.js?v=9a2dae69"></script>
|
||||
<script src="../_static/sphinx_highlight.js?v=dc90522c"></script>
|
||||
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
|
||||
<script>window.MathJax = {"tex": {"inlineMath": [["$", "$"], ["\\(", "\\)"]], "processEscapes": true}, "options": {"ignoreHtmlClass": "tex2jax_ignore|mathjax_ignore|document", "processHtmlClass": "tex2jax_process|mathjax_process|math|output_area"}}</script>
|
||||
<script defer="defer" src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
|
||||
<script src="../_static/js/theme.js"></script>
|
||||
<link rel="index" title="Index" href="../genindex.html" />
|
||||
<link rel="search" title="Search" href="../search.html" />
|
||||
<link rel="next" title="AMMICO package modules" href="../modules.html" />
|
||||
<link rel="prev" title="AMMICO Demonstration Notebook" href="DemoNotebook_ammico.html" />
|
||||
</head>
|
||||
|
||||
<body class="wy-body-for-nav">
|
||||
<div class="wy-grid-for-nav">
|
||||
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
|
||||
<div class="wy-side-scroll">
|
||||
<div class="wy-side-nav-search" >
|
||||
|
||||
|
||||
|
||||
<a href="../index.html" class="icon icon-home">
|
||||
AMMICO
|
||||
</a>
|
||||
<div role="search">
|
||||
<form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
|
||||
<input type="text" name="q" placeholder="Search docs" aria-label="Search docs" />
|
||||
<input type="hidden" name="check_keywords" value="yes" />
|
||||
<input type="hidden" name="area" value="default" />
|
||||
</form>
|
||||
</div>
|
||||
</div><div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="Navigation menu">
|
||||
<p class="caption" role="heading"><span class="caption-text">Contents:</span></p>
|
||||
<ul class="current">
|
||||
<li class="toctree-l1"><a class="reference internal" href="../readme_link.html">AMMICO - AI-based Media and Misinformation Content Analysis Tool</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="../faq_link.html">FAQ</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="../create_API_key_link.html">Instructions how to generate and enable a google Cloud Vision API key</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="DemoNotebook_ammico.html">AMMICO Demonstration Notebook</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="DemoNotebook_ammico.html#Step-0:-Create-and-set-a-Google-Cloud-Vision-Key">Step 0: Create and set a Google Cloud Vision Key</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="DemoNotebook_ammico.html#Step-1:-Read-your-data-into-AMMICO">Step 1: Read your data into AMMICO</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="DemoNotebook_ammico.html#The-detector-modules">The detector modules</a></li>
|
||||
<li class="toctree-l1 current"><a class="current reference internal" href="#">Crop posts module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="../modules.html">AMMICO package modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="../license_link.html">License</a></li>
|
||||
</ul>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap"><nav class="wy-nav-top" aria-label="Mobile navigation menu" >
|
||||
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
|
||||
<a href="../index.html">AMMICO</a>
|
||||
</nav>
|
||||
|
||||
<div class="wy-nav-content">
|
||||
<div class="rst-content">
|
||||
<div role="navigation" aria-label="Page navigation">
|
||||
<ul class="wy-breadcrumbs">
|
||||
<li><a href="../index.html" class="icon icon-home" aria-label="Home"></a></li>
|
||||
<li class="breadcrumb-item active">Crop posts module</li>
|
||||
<li class="wy-breadcrumbs-aside">
|
||||
<a href="https://github.com/ssciwr/AMMICO/blob/main/docs/source/notebooks/Example cropposts.ipynb" class="fa fa-github"> Edit on GitHub</a>
|
||||
</li>
|
||||
</ul>
|
||||
<hr/>
|
||||
</div>
|
||||
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
|
||||
<div itemprop="articleBody">
|
||||
|
||||
<section id="Crop-posts-module">
|
||||
<h1>Crop posts module<a class="headerlink" href="#Crop-posts-module" title="Link to this heading"></a></h1>
|
||||
<p>Crop social media post images to keep the important text information they contain. You can set manually cropped views from social media posts as references for cropping other posts of the same type.</p>
|
||||
<div class="nbinput nblast docutils container">
|
||||
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[ ]:
|
||||
</pre></div>
|
||||
</div>
|
||||
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre><span></span><span class="c1"># Please ignore this cell: extra install steps that are only executed when running the notebook on Google Colab</span>
|
||||
<span class="c1"># flake8-noqa-cell</span>
|
||||
<span class="kn">import</span><span class="w"> </span><span class="nn">os</span>
|
||||
<span class="k">if</span> <span class="s1">'google.colab'</span> <span class="ow">in</span> <span class="nb">str</span><span class="p">(</span><span class="n">get_ipython</span><span class="p">()):</span>
|
||||
<span class="c1"># we're running on colab</span>
|
||||
<span class="c1"># first install pinned version of setuptools (latest version doesn't seem to work with this package on colab)</span>
|
||||
<span class="o">%</span><span class="k">pip</span> install setuptools==61 -qqq
|
||||
<span class="c1"># install the moralization package</span>
|
||||
<span class="o">%</span><span class="k">pip</span> install git+https://github.com/ssciwr/AMMICO.git -qqq
|
||||
|
||||
<span class="c1"># prevent loading of the wrong opencv library</span>
|
||||
<span class="o">%</span><span class="k">pip</span> uninstall -y opencv-contrib-python
|
||||
<span class="o">%</span><span class="k">pip</span> install opencv-contrib-python
|
||||
|
||||
<span class="kn">from</span><span class="w"> </span><span class="nn">google.colab</span><span class="w"> </span><span class="kn">import</span> <span class="n">drive</span>
|
||||
<span class="n">drive</span><span class="o">.</span><span class="n">mount</span><span class="p">(</span><span class="s1">'/content/drive'</span><span class="p">)</span>
|
||||
|
||||
<span class="k">if</span> <span class="ow">not</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">isdir</span><span class="p">(</span><span class="s1">'/content/ref'</span><span class="p">):</span>
|
||||
<span class="o">!</span>wget<span class="w"> </span>https://github.com/ssciwr/AMMICO/archive/refs/heads/ref-data.zip<span class="w"> </span>-q
|
||||
<span class="o">!</span>unzip<span class="w"> </span>-qq<span class="w"> </span>ref-data.zip<span class="w"> </span>-d<span class="w"> </span>.<span class="w"> </span><span class="o">&&</span><span class="w"> </span>mv<span class="w"> </span>-f<span class="w"> </span>AMMICO-ref-data/data/ref<span class="w"> </span>.<span class="w"> </span><span class="o">&&</span><span class="w"> </span>rm<span class="w"> </span>-rf<span class="w"> </span>AMMICO-ref-data<span class="w"> </span>ref-data.zip
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="nbinput nblast docutils container">
|
||||
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[ ]:
|
||||
</pre></div>
|
||||
</div>
|
||||
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span><span class="w"> </span><span class="nn">ammico.cropposts</span><span class="w"> </span><span class="k">as</span><span class="w"> </span><span class="nn">crpo</span>
|
||||
<span class="kn">import</span><span class="w"> </span><span class="nn">ammico.utils</span><span class="w"> </span><span class="k">as</span><span class="w"> </span><span class="nn">utils</span>
|
||||
<span class="kn">import</span><span class="w"> </span><span class="nn">matplotlib.pyplot</span><span class="w"> </span><span class="k">as</span><span class="w"> </span><span class="nn">plt</span>
|
||||
<span class="kn">import</span><span class="w"> </span><span class="nn">cv2</span>
|
||||
<span class="kn">import</span><span class="w"> </span><span class="nn">importlib_resources</span>
|
||||
<span class="n">pkg</span> <span class="o">=</span> <span class="n">importlib_resources</span><span class="o">.</span><span class="n">files</span><span class="p">(</span><span class="s2">"ammico"</span><span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<p>The cropping is carried out by finding reference images on the image to be cropped. If a reference matches a region on the image, then everything below the matched region is removed. Manually look at a reference and an example post with the code below.</p>
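<p>The matching idea can be sketched with plain OpenCV template matching. This is only a hypothetical illustration and not AMMICO's internal implementation; the function name <code class="docutils literal notranslate"><span class="pre">crop_below_match</span></code> and the 0.8 confidence threshold are invented for the example, and it assumes the reference patch is smaller than the post image and that both are BGR arrays loaded with <code class="docutils literal notranslate"><span class="pre">cv2.imread</span></code> as in the cells below.</p>
import cv2

def crop_below_match(post, ref, threshold=0.8):
    # slide the reference over the post and find the best match position
    result = cv2.matchTemplate(post, ref, cv2.TM_CCOEFF_NORMED)
    _, max_val, _, max_loc = cv2.minMaxLoc(result)
    if max_val < threshold:
        return post  # no confident match: keep the post unchanged
    cut_y = max_loc[1] + ref.shape[0]  # bottom edge of the matched region
    return post[:cut_y]  # keep everything above that edge, drop the rest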
|
||||
<div class="nbinput nblast docutils container">
|
||||
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[ ]:
|
||||
</pre></div>
|
||||
</div>
|
||||
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre><span></span><span class="c1"># load ref view for cropping the same type social media posts images.</span>
|
||||
<span class="c1"># substitute the below paths for your samples</span>
|
||||
<span class="n">path_ref</span> <span class="o">=</span> <span class="n">pkg</span> <span class="o">/</span> <span class="s2">"data"</span> <span class="o">/</span> <span class="s2">"ref"</span> <span class="o">/</span> <span class="s2">"ref-00.png"</span>
|
||||
<span class="n">ref_view</span> <span class="o">=</span> <span class="n">cv2</span><span class="o">.</span><span class="n">imread</span><span class="p">(</span><span class="n">path_ref</span><span class="o">.</span><span class="n">as_posix</span><span class="p">())</span>
|
||||
<span class="n">RGB_ref_view</span> <span class="o">=</span> <span class="n">cv2</span><span class="o">.</span><span class="n">cvtColor</span><span class="p">(</span><span class="n">ref_view</span><span class="p">,</span> <span class="n">cv2</span><span class="o">.</span><span class="n">COLOR_BGR2RGB</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">15</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">RGB_ref_view</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
|
||||
<span class="n">path_post</span> <span class="o">=</span> <span class="n">pkg</span> <span class="o">/</span> <span class="s2">"data"</span> <span class="o">/</span> <span class="s2">"test-crop-image.png"</span>
|
||||
<span class="n">view</span> <span class="o">=</span> <span class="n">cv2</span><span class="o">.</span><span class="n">imread</span><span class="p">(</span><span class="n">path_post</span><span class="o">.</span><span class="n">as_posix</span><span class="p">())</span>
|
||||
<span class="n">RGB_view</span> <span class="o">=</span> <span class="n">cv2</span><span class="o">.</span><span class="n">cvtColor</span><span class="p">(</span><span class="n">view</span><span class="p">,</span> <span class="n">cv2</span><span class="o">.</span><span class="n">COLOR_BGR2RGB</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">15</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">RGB_view</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<p>You can now crop the image and check along the way that everything looks fine. <code class="docutils literal notranslate"><span class="pre">plt_match</span></code> will plot the matches on the image and the line below which the content will be cropped; <code class="docutils literal notranslate"><span class="pre">plt_crop</span></code> will plot the cropped text part of the social media post with the comments removed; <code class="docutils literal notranslate"><span class="pre">plt_image</span></code> will plot the image part of the social media post, if applicable.</p>
|
||||
<div class="nbinput nblast docutils container">
|
||||
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[ ]:
|
||||
</pre></div>
|
||||
</div>
|
||||
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre><span></span><span class="c1"># crop a posts from reference view, check the cropping</span>
|
||||
<span class="c1"># this will only plot something if the reference is found on the image</span>
|
||||
<span class="n">crop_view</span> <span class="o">=</span> <span class="n">crpo</span><span class="o">.</span><span class="n">crop_posts_from_refs</span><span class="p">(</span>
|
||||
<span class="p">[</span><span class="n">ref_view</span><span class="p">],</span> <span class="n">view</span><span class="p">,</span>
|
||||
<span class="n">plt_match</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">plt_crop</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">plt_image</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
|
||||
<span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
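<p>If a reference was found, the returned <code class="docutils literal notranslate"><span class="pre">crop_view</span></code> can be written back to disk for later analysis. A minimal sketch, assuming <code class="docutils literal notranslate"><span class="pre">crop_posts_from_refs</span></code> returns the cropped image as an OpenCV/NumPy array and may return <code class="docutils literal notranslate"><span class="pre">None</span></code> when no reference matches; the output path is just an example.</p>
import cv2

if crop_view is not None:
    # persist the cropped post so it can be analysed later
    cv2.imwrite("data/crop/test-crop-image.png", crop_view)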
|
||||
<p>Batch crop images from the image folder given in <code class="docutils literal notranslate"><span class="pre">crop_dir</span></code>. The cropped images will be saved in the <code class="docutils literal notranslate"><span class="pre">save_crop_dir</span></code> folder with the same file name as the original file. The reference images with the items to match are provided in <code class="docutils literal notranslate"><span class="pre">ref_dir</span></code>.</p>
<p>Sometimes the cropping will be imperfect due to improper matches on the image. It is sometimes easier to first categorize the social media posts and then set different references in the reference folder <code class="docutils literal notranslate"><span class="pre">ref_dir</span></code>.</p>
|
||||
<div class="nbinput nblast docutils container">
|
||||
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[ ]:
|
||||
</pre></div>
|
||||
</div>
|
||||
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre><br/><span></span><span class="n">crop_dir</span> <span class="o">=</span> <span class="s2">"data/"</span>
|
||||
<span class="n">ref_dir</span> <span class="o">=</span> <span class="n">pkg</span> <span class="o">/</span> <span class="s2">"data"</span> <span class="o">/</span> <span class="s2">"ref"</span>
|
||||
<span class="n">save_crop_dir</span> <span class="o">=</span> <span class="s2">"data/crop/"</span>
|
||||
|
||||
<span class="n">files</span> <span class="o">=</span> <span class="n">utils</span><span class="o">.</span><span class="n">find_files</span><span class="p">(</span><span class="n">path</span><span class="o">=</span><span class="n">crop_dir</span><span class="p">,</span><span class="n">limit</span><span class="o">=</span><span class="mi">10</span><span class="p">,)</span>
|
||||
<span class="n">ref_files</span> <span class="o">=</span> <span class="n">utils</span><span class="o">.</span><span class="n">find_files</span><span class="p">(</span><span class="n">path</span><span class="o">=</span><span class="n">ref_dir</span><span class="o">.</span><span class="n">as_posix</span><span class="p">(),</span> <span class="n">limit</span><span class="o">=</span><span class="mi">100</span><span class="p">)</span>
|
||||
|
||||
<span class="n">crpo</span><span class="o">.</span><span class="n">crop_media_posts</span><span class="p">(</span><span class="n">files</span><span class="p">,</span> <span class="n">ref_files</span><span class="p">,</span> <span class="n">save_crop_dir</span><span class="p">,</span> <span class="n">plt_match</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">plt_crop</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">plt_image</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
|
||||
<span class="nb">print</span><span class="p">(</span><span class="s2">"Batch cropping images done"</span><span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
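<p>After batch cropping, the files in <code class="docutils literal notranslate"><span class="pre">save_crop_dir</span></code> can be picked up again with the same helper used above and then passed to the AMMICO detectors from the demonstration notebook. A small hedged sketch; it only lists the cropped files and leaves the detector calls to the demo notebook.</p>
# collect the cropped images for further analysis with the AMMICO detectors
cropped_files = utils.find_files(path=save_crop_dir, limit=10)
print(f"{len(cropped_files)} cropped images found in {save_crop_dir}")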
|
||||
<div class="nbinput nblast docutils container">
|
||||
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[ ]:
|
||||
</pre></div>
|
||||
</div>
|
||||
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre><span></span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
<footer><div class="rst-footer-buttons" role="navigation" aria-label="Footer">
|
||||
<a href="DemoNotebook_ammico.html" class="btn btn-neutral float-left" title="AMMICO Demonstration Notebook" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
|
||||
<a href="../modules.html" class="btn btn-neutral float-right" title="AMMICO package modules" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right" aria-hidden="true"></span></a>
|
||||
</div>
|
||||
|
||||
<hr/>
|
||||
|
||||
<div role="contentinfo">
|
||||
<p>© Copyright 2022, Scientific Software Center, Heidelberg University.</p>
|
||||
</div>
|
||||
|
||||
Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
|
||||
<a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
|
||||
provided by <a href="https://readthedocs.org">Read the Docs</a>.
|
||||
|
||||
|
||||
</footer>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
</div>
|
||||
<script>
|
||||
jQuery(function () {
|
||||
SphinxRtdTheme.Navigation.enable(true);
|
||||
});
|
||||
</script>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
@ -1,183 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Crop posts module"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Crop posts from social media posts images, to keep import text informations from social media posts images.\n",
|
||||
"We can set some manually cropped views from social media posts as reference for cropping the same type social media posts images."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Please ignore this cell: extra install steps that are only executed when running the notebook on Google Colab\n",
|
||||
"# flake8-noqa-cell\n",
|
||||
"import os\n",
|
||||
"if 'google.colab' in str(get_ipython()):\n",
|
||||
" # we're running on colab\n",
|
||||
" # first install pinned version of setuptools (latest version doesn't seem to work with this package on colab)\n",
|
||||
" %pip install setuptools==61 -qqq\n",
|
||||
" # install the moralization package\n",
|
||||
" %pip install git+https://github.com/ssciwr/AMMICO.git -qqq\n",
|
||||
"\n",
|
||||
" # prevent loading of the wrong opencv library\n",
|
||||
" %pip uninstall -y opencv-contrib-python\n",
|
||||
" %pip install opencv-contrib-python\n",
|
||||
"\n",
|
||||
" from google.colab import drive\n",
|
||||
" drive.mount('/content/drive')\n",
|
||||
"\n",
|
||||
" if not os.path.isdir('/content/ref'):\n",
|
||||
" !wget https://github.com/ssciwr/AMMICO/archive/refs/heads/ref-data.zip -q\n",
|
||||
" !unzip -qq ref-data.zip -d . && mv -f AMMICO-ref-data/data/ref . && rm -rf AMMICO-ref-data ref-data.zip"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import ammico.cropposts as crpo\n",
|
||||
"import ammico.utils as utils\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import cv2\n",
|
||||
"import importlib_resources\n",
|
||||
"pkg = importlib_resources.files(\"ammico\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The cropping is carried out by finding reference images on the image to be cropped. If a reference matches a region on the image, then everything below the matched region is removed. Manually look at a reference and an example post with the code below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# load ref view for cropping the same type social media posts images.\n",
|
||||
"# substitute the below paths for your samples\n",
|
||||
"path_ref = pkg / \"data\" / \"ref\" / \"ref-00.png\"\n",
|
||||
"ref_view = cv2.imread(path_ref.as_posix())\n",
|
||||
"RGB_ref_view = cv2.cvtColor(ref_view, cv2.COLOR_BGR2RGB)\n",
|
||||
"plt.figure(figsize=(10, 15))\n",
|
||||
"plt.imshow(RGB_ref_view)\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"path_post = pkg / \"data\" / \"test-crop-image.png\"\n",
|
||||
"view = cv2.imread(path_post.as_posix())\n",
|
||||
"RGB_view = cv2.cvtColor(view, cv2.COLOR_BGR2RGB)\n",
|
||||
"plt.figure(figsize=(10, 15))\n",
|
||||
"plt.imshow(RGB_view)\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can now crop the image and check on the way that everything looks fine. `plt_match` will plot the matches on the image and below which line content will be cropped; `plt_crop` will plot the cropped text part of the social media post with the comments removed; `plt_image` will plot the image part of the social media post if applicable."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# crop a posts from reference view, check the cropping \n",
|
||||
"# this will only plot something if the reference is found on the image\n",
|
||||
"crop_view = crpo.crop_posts_from_refs(\n",
|
||||
" [ref_view], view, \n",
|
||||
" plt_match=True, plt_crop=True, plt_image=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Batch crop images from the image folder given in `crop_dir`. The cropped images will save in `save_crop_dir` folder with the same file name as the original file. The reference images with the items to match are provided in `ref_dir`.\n",
|
||||
"\n",
|
||||
"Sometimes the cropping will be imperfect, due to improper matches on the image. It is sometimes easier to first categorize the social media posts and then set different references in the reference folder `ref_dir`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"crop_dir = \"data/\"\n",
|
||||
"ref_dir = pkg / \"data\" / \"ref\" \n",
|
||||
"save_crop_dir = \"data/crop/\"\n",
|
||||
"\n",
|
||||
"files = utils.find_files(path=crop_dir,limit=10,)\n",
|
||||
"ref_files = utils.find_files(path=ref_dir.as_posix(), limit=100)\n",
|
||||
"\n",
|
||||
"crpo.crop_media_posts(files, ref_files, save_crop_dir, plt_match=True, plt_crop=False, plt_image=False)\n",
|
||||
"print(\"Batch cropping images done\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "10",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
Binary data
build/html/objects.inv
Binary data
build/html/objects.inv
Binary file not shown.
@ -13,7 +13,7 @@
|
||||
<script src="_static/jquery.js?v=5d32c60e"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js?v=2cd50e6c"></script>
|
||||
<script src="_static/documentation_options.js?v=000c92bf"></script>
|
||||
<script src="_static/doctools.js?v=9a2dae69"></script>
|
||||
<script src="_static/doctools.js?v=9bcbadda"></script>
|
||||
<script src="_static/sphinx_highlight.js?v=dc90522c"></script>
|
||||
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
@ -56,7 +56,6 @@
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-0:-Create-and-set-a-Google-Cloud-Vision-Key">Step 0: Create and set a Google Cloud Vision Key</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-1:-Read-your-data-into-AMMICO">Step 1: Read your data into AMMICO</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#The-detector-modules">The detector modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/Example%20cropposts.html">Crop posts module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="modules.html">AMMICO package modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="license_link.html">License</a></li>
|
||||
</ul>
|
||||
@ -89,8 +88,10 @@
|
||||
|
||||
<div class="modindex-jumpbox">
|
||||
<a href="#cap-c"><strong>c</strong></a> |
|
||||
<a href="#cap-d"><strong>d</strong></a> |
|
||||
<a href="#cap-f"><strong>f</strong></a> |
|
||||
<a href="#cap-s"><strong>s</strong></a>
|
||||
<a href="#cap-t"><strong>t</strong></a> |
|
||||
<a href="#cap-u"><strong>u</strong></a>
|
||||
</div>
|
||||
|
||||
<table class="indextable modindextable">
|
||||
@ -102,10 +103,13 @@
|
||||
<td>
|
||||
<a href="ammico.html#module-colors"><code class="xref">colors</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="pcap"><td></td><td> </td><td></td></tr>
|
||||
<tr class="cap" id="cap-d"><td></td><td>
|
||||
<strong>d</strong></td><td></td></tr>
|
||||
<tr>
|
||||
<td></td>
|
||||
<td>
|
||||
<a href="ammico.html#module-cropposts"><code class="xref">cropposts</code></a></td><td>
|
||||
<a href="ammico.html#module-display"><code class="xref">display</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="pcap"><td></td><td> </td><td></td></tr>
|
||||
<tr class="cap" id="cap-f"><td></td><td>
|
||||
@ -116,12 +120,20 @@
|
||||
<a href="ammico.html#module-faces"><code class="xref">faces</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="pcap"><td></td><td> </td><td></td></tr>
|
||||
<tr class="cap" id="cap-s"><td></td><td>
|
||||
<strong>s</strong></td><td></td></tr>
|
||||
<tr class="cap" id="cap-t"><td></td><td>
|
||||
<strong>t</strong></td><td></td></tr>
|
||||
<tr>
|
||||
<td></td>
|
||||
<td>
|
||||
<a href="ammico.html#module-summary"><code class="xref">summary</code></a></td><td>
|
||||
<a href="ammico.html#module-text"><code class="xref">text</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="pcap"><td></td><td> </td><td></td></tr>
|
||||
<tr class="cap" id="cap-u"><td></td><td>
|
||||
<strong>u</strong></td><td></td></tr>
|
||||
<tr>
|
||||
<td></td>
|
||||
<td>
|
||||
<a href="ammico.html#module-utils"><code class="xref">utils</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
</table>
|
||||
|
||||
|
||||
@ -14,7 +14,7 @@
|
||||
<script src="_static/jquery.js?v=5d32c60e"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js?v=2cd50e6c"></script>
|
||||
<script src="_static/documentation_options.js?v=000c92bf"></script>
|
||||
<script src="_static/doctools.js?v=9a2dae69"></script>
|
||||
<script src="_static/doctools.js?v=9bcbadda"></script>
|
||||
<script src="_static/sphinx_highlight.js?v=dc90522c"></script>
|
||||
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
@ -64,7 +64,6 @@
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-0:-Create-and-set-a-Google-Cloud-Vision-Key">Step 0: Create and set a Google Cloud Vision Key</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-1:-Read-your-data-into-AMMICO">Step 1: Read your data into AMMICO</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#The-detector-modules">The detector modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/Example%20cropposts.html">Crop posts module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="modules.html">AMMICO package modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="license_link.html">License</a></li>
|
||||
</ul>
|
||||
|
||||
@ -14,7 +14,7 @@
|
||||
<script src="_static/jquery.js?v=5d32c60e"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js?v=2cd50e6c"></script>
|
||||
<script src="_static/documentation_options.js?v=000c92bf"></script>
|
||||
<script src="_static/doctools.js?v=9a2dae69"></script>
|
||||
<script src="_static/doctools.js?v=9bcbadda"></script>
|
||||
<script src="_static/sphinx_highlight.js?v=dc90522c"></script>
|
||||
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
@ -52,7 +52,6 @@
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-0:-Create-and-set-a-Google-Cloud-Vision-Key">Step 0: Create and set a Google Cloud Vision Key</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-1:-Read-your-data-into-AMMICO">Step 1: Read your data into AMMICO</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#The-detector-modules">The detector modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/Example%20cropposts.html">Crop posts module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="modules.html">AMMICO package modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="license_link.html">License</a></li>
|
||||
</ul>
|
||||
|
||||
File differences are hidden because one or more lines are too long.
@ -14,7 +14,7 @@
|
||||
<script src="_static/jquery.js?v=5d32c60e"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js?v=2cd50e6c"></script>
|
||||
<script src="_static/documentation_options.js?v=000c92bf"></script>
|
||||
<script src="_static/doctools.js?v=9a2dae69"></script>
|
||||
<script src="_static/doctools.js?v=9bcbadda"></script>
|
||||
<script src="_static/sphinx_highlight.js?v=dc90522c"></script>
|
||||
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
@ -50,7 +50,6 @@
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-0:-Create-and-set-a-Google-Cloud-Vision-Key">Step 0: Create and set a Google Cloud Vision Key</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#Step-1:-Read-your-data-into-AMMICO">Step 1: Read your data into AMMICO</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/DemoNotebook_ammico.html#The-detector-modules">The detector modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="notebooks/Example%20cropposts.html">Crop posts module</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="modules.html">AMMICO package modules</a></li>
|
||||
<li class="toctree-l1"><a class="reference internal" href="license_link.html">License</a></li>
|
||||
</ul>
|
||||
|
||||
@ -22,7 +22,7 @@
|
||||
"source": [
|
||||
"# if running on google colab\\\n",
|
||||
"# PLEASE RUN THIS ONLY AS CPU RUNTIME\n",
|
||||
"# for a GPU runtime, there are conflicts with pre-installed packages - \n",
|
||||
"# for a GPU runtime, there are conflicts with pre-installed packages -\n",
|
||||
"# you first need to uninstall them (prepare a clean environment with no pre-installs) and then install ammico\n",
|
||||
"# flake8-noqa-cell\n",
|
||||
"\n",
|
||||
@ -103,11 +103,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"# jax also sometimes leads to problems on google colab\n",
|
||||
"# if this is the case, try restarting the kernel and executing this \n",
|
||||
"# if this is the case, try restarting the kernel and executing this\n",
|
||||
"# and the above two code cells again\n",
|
||||
"import ammico\n",
|
||||
"\n",
|
||||
"# for displaying a progress bar\n",
|
||||
"from tqdm import tqdm"
|
||||
]
|
||||
@ -276,7 +276,7 @@
|
||||
"source": [
|
||||
"# dump file name\n",
|
||||
"dump_file = \"dump_file.csv\"\n",
|
||||
"# dump every N images \n",
|
||||
"# dump every N images\n",
|
||||
"dump_every = 10"
|
||||
]
|
||||
},
|
||||
@ -299,12 +299,18 @@
|
||||
"# the highest possible value is 100\n",
|
||||
"race_threshold = 50\n",
|
||||
"gender_threshold = 50\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(image_dict[key],\n",
|
||||
" emotion_threshold=emotion_threshold,\n",
|
||||
" race_threshold=race_threshold,\n",
|
||||
" gender_threshold=gender_threshold).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" if num % dump_every == 0 or num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(\n",
|
||||
" image_dict[key],\n",
|
||||
" emotion_threshold=emotion_threshold,\n",
|
||||
" race_threshold=race_threshold,\n",
|
||||
" gender_threshold=gender_threshold,\n",
|
||||
" ).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 or num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -322,10 +328,16 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = ammico.TextDetector(image_dict[key], analyse_text=True).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = ammico.TextDetector(\n",
|
||||
" image_dict[key], analyse_text=True\n",
|
||||
" ).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
"\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -344,13 +356,21 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# initialize the models\n",
|
||||
"image_summary_detector = ammico.SummaryDetector(subdict = image_dict, analysis_type=\"summary\", model_type=\"base\")\n",
|
||||
"image_summary_detector = ammico.SummaryDetector(\n",
|
||||
" subdict=image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# run the analysis without having to re-iniatialize the model\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\") # analyse image with SummaryDetector and update dict\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key], analysis_type=\"summary\"\n",
|
||||
" ) # analyse image with SummaryDetector and update dict\n",
|
||||
"\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -369,14 +389,26 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# initialize the models\n",
|
||||
"image_summary_detector = ammico.SummaryDetector(subdict = image_dict, analysis_type=\"summary\", model_type=\"base\")\n",
|
||||
"image_summary_detector = ammico.SummaryDetector(\n",
|
||||
" subdict=image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(image_dict[key]).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" image_dict[key] = ammico.TextDetector(image_dict[key], analyse_text=True).analyse_image() # analyse image with TextDetector and update dict\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\") # analyse image with SummaryDetector and update dict\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file \n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(\n",
|
||||
" image_dict[key]\n",
|
||||
" ).analyse_image() # analyse image with EmotionDetector and update dict\n",
|
||||
" image_dict[key] = ammico.TextDetector(\n",
|
||||
" image_dict[key], analyse_text=True\n",
|
||||
" ).analyse_image() # analyse image with TextDetector and update dict\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key], analysis_type=\"summary\"\n",
|
||||
" ) # analyse image with SummaryDetector and update dict\n",
|
||||
"\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -477,7 +509,7 @@
|
||||
"# set the dump file\n",
|
||||
"# dump file name\n",
|
||||
"dump_file = \"dump_file.csv\"\n",
|
||||
"# dump every N images \n",
|
||||
"# dump every N images\n",
|
||||
"dump_every = 10"
|
||||
]
|
||||
},
|
||||
@ -488,9 +520,15 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# analyze the csv file\n",
|
||||
"for num, key in tqdm(enumerate(text_dict.keys()), total=len(text_dict)): # loop through all text entries\n",
|
||||
" ammico.TextDetector(text_dict[key], analyse_text=True, skip_extraction=True).analyse_image() # analyse text with TextDetector and update dict\n",
|
||||
" if num % dump_every == 0 | num == len(text_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(text_dict.keys()), total=len(text_dict)\n",
|
||||
"): # loop through all text entries\n",
|
||||
" ammico.TextDetector(\n",
|
||||
" text_dict[key], analyse_text=True, skip_extraction=True\n",
|
||||
" ).analyse_image() # analyse text with TextDetector and update dict\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(text_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(text_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -565,14 +603,23 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
|
||||
" image_dict[key] = ammico.TextDetector(image_dict[key], # analyse image with TextDetector and update dict\n",
|
||||
" analyse_text=True, model_names=[\"sshleifer/distilbart-cnn-12-6\", \n",
|
||||
" \"distilbert-base-uncased-finetuned-sst-2-english\", \n",
|
||||
" \"dbmdz/bert-large-cased-finetuned-conll03-english\"], \n",
|
||||
" revision_numbers=[\"a4f8f3e\", \"af0f99b\", \"f2482bf\"]).analyse_image()\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
|
||||
"for num, key in tqdm(\n",
|
||||
" enumerate(image_dict.keys()), total=len(image_dict)\n",
|
||||
"): # loop through all images\n",
|
||||
" image_dict[key] = ammico.TextDetector(\n",
|
||||
" image_dict[key], # analyse image with TextDetector and update dict\n",
|
||||
" analyse_text=True,\n",
|
||||
" model_names=[\n",
|
||||
" \"sshleifer/distilbart-cnn-12-6\",\n",
|
||||
" \"distilbert-base-uncased-finetuned-sst-2-english\",\n",
|
||||
" \"dbmdz/bert-large-cased-finetuned-conll03-english\",\n",
|
||||
" ],\n",
|
||||
" revision_numbers=[\"a4f8f3e\", \"af0f99b\", \"f2482bf\"],\n",
|
||||
" ).analyse_image()\n",
|
||||
"\n",
|
||||
" if (\n",
|
||||
" num % dump_every == 0 | num == len(image_dict) - 1\n",
|
||||
" ): # save results every dump_every to dump_file\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -654,7 +701,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image_summary_detector = ammico.SummaryDetector(image_dict, analysis_type=\"summary\", model_type=\"base\")"
|
||||
"image_summary_detector = ammico.SummaryDetector(\n",
|
||||
" image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -663,10 +712,12 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\")\n",
|
||||
" \n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: \n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key], analysis_type=\"summary\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1:\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -704,14 +755,17 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image_summary_vqa_detector = ammico.SummaryDetector(image_dict, analysis_type=\"questions\", \n",
|
||||
" model_type=\"vqa\")\n",
|
||||
"image_summary_vqa_detector = ammico.SummaryDetector(\n",
|
||||
" image_dict, analysis_type=\"questions\", model_type=\"vqa\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_vqa_detector.analyse_image(subdict=image_dict[key], \n",
|
||||
" analysis_type=\"questions\", \n",
|
||||
" list_of_questions = list_of_questions)\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: \n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_vqa_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" )\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1:\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -729,13 +783,16 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image_summary_vqa_detector = ammico.SummaryDetector(image_dict, analysis_type=\"summary_and_questions\", \n",
|
||||
" model_type=\"base\")\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_vqa_detector.analyse_image(subdict=image_dict[key], \n",
|
||||
" analysis_type=\"summary_and_questions\", \n",
|
||||
" list_of_questions = list_of_questions)\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1: \n",
|
||||
"image_summary_vqa_detector = ammico.SummaryDetector(\n",
|
||||
" image_dict, analysis_type=\"summary_and_questions\", model_type=\"base\"\n",
|
||||
")\n",
|
||||
"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
|
||||
" image_dict[key] = image_summary_vqa_detector.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"summary_and_questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" )\n",
|
||||
" if num % dump_every == 0 | num == len(image_dict) - 1:\n",
|
||||
" image_df = ammico.get_dataframe(image_dict)\n",
|
||||
" image_df.to_csv(dump_file)"
|
||||
]
|
||||
@ -767,7 +824,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"obj = ammico.SummaryDetector(subdict=image_dict, analysis_type = \"summary_and_questions\", model_type = \"blip2_t5_caption_coco_flant5xl\")\n",
|
||||
"obj = ammico.SummaryDetector(\n",
|
||||
" subdict=image_dict,\n",
|
||||
" analysis_type=\"summary_and_questions\",\n",
|
||||
" model_type=\"blip2_t5_caption_coco_flant5xl\",\n",
|
||||
")\n",
|
||||
"# list of the new models that can be used:\n",
|
||||
"# \"blip2_t5_pretrain_flant5xxl\",\n",
|
||||
"# \"blip2_t5_pretrain_flant5xl\",\n",
|
||||
@ -781,7 +842,7 @@
|
||||
"# Or you can use `caption_coco_`` model types to generate coco-style captions.\n",
|
||||
"# `flant5` and `opt` means that the model equipped with FlanT5 and OPT LLMs respectively.\n",
|
||||
"\n",
|
||||
"#also you can perform all calculation on cpu if you set device_type= \"cpu\" or gpu if you set device_type= \"cuda\""
|
||||
"# also you can perform all calculation on cpu if you set device_type= \"cpu\" or gpu if you set device_type= \"cuda\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -812,9 +873,13 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for key in image_dict:\n",
|
||||
" image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions)\n",
|
||||
" image_dict[key] = obj.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"# analysis_type can be \n",
|
||||
"# analysis_type can be\n",
|
||||
"# \"summary\",\n",
|
||||
"# \"questions\",\n",
|
||||
"# \"summary_and_questions\"."
|
||||
@ -848,7 +913,11 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for key in image_dict:\n",
|
||||
" image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions)"
|
||||
" image_dict[key] = obj.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -886,7 +955,12 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for key in image_dict:\n",
|
||||
" image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions, consequential_questions=True)"
|
||||
" image_dict[key] = obj.analyse_image(\n",
|
||||
" subdict=image_dict[key],\n",
|
||||
" analysis_type=\"questions\",\n",
|
||||
" list_of_questions=list_of_questions,\n",
|
||||
" consequential_questions=True,\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -940,9 +1014,13 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for key in image_dict.keys():\n",
|
||||
" image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
|
||||
" gender_threshold=50,\n",
|
||||
" accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
|
||||
" image_dict[key] = ammico.EmotionDetector(\n",
|
||||
" image_dict[key],\n",
|
||||
" emotion_threshold=50,\n",
|
||||
" race_threshold=50,\n",
|
||||
" gender_threshold=50,\n",
|
||||
" accept_disclosure=\"DISCLOSURE_AMMICO\",\n",
|
||||
" ).analyse_image()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -1055,9 +1133,9 @@
|
||||
" image_names,\n",
|
||||
" features_image_stacked,\n",
|
||||
") = my_obj.parsing_images(\n",
|
||||
" model_type, \n",
|
||||
" model_type,\n",
|
||||
" path_to_save_tensors=\"/content/drive/MyDrive/misinformation-data/\",\n",
|
||||
" )"
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -1112,14 +1190,19 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import importlib_resources # only require for image query example\n",
|
||||
"image_example_query = str(importlib_resources.files(\"ammico\") / \"data\" / \"test-crop-image.png\") # creating the path to the image for the image query example\n",
|
||||
"import importlib_resources # only require for image query example\n",
|
||||
"\n",
|
||||
"image_example_query = str(\n",
|
||||
" importlib_resources.files(\"ammico\") / \"data\" / \"test-crop-image.png\"\n",
|
||||
") # creating the path to the image for the image query example\n",
|
||||
"\n",
|
||||
"search_query = [\n",
|
||||
" {\"text_input\": \"politician press conference\"}, \n",
|
||||
" {\"text_input\": \"politician press conference\"},\n",
|
||||
" {\"text_input\": \"a world map\"},\n",
|
||||
" {\"text_input\": \"a dog\"}, # This is how looks text query\n",
|
||||
" {\"image\": image_example_query}, # This is how looks image query, here `image_example_path` is the path to query image like \"data/test-crop-image.png\"\n",
|
||||
" {\"text_input\": \"a dog\"}, # This is how looks text query\n",
|
||||
" {\n",
|
||||
" \"image\": image_example_query\n",
|
||||
" }, # This is how looks image query, here `image_example_path` is the path to query image like \"data/test-crop-image.png\"\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
@ -1199,7 +1282,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"my_obj.show_results(\n",
|
||||
" search_query[0], # you can change the index to see the results for other queries\n",
|
||||
" search_query[0], # you can change the index to see the results for other queries\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -1210,7 +1293,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"my_obj.show_results(\n",
|
||||
" search_query[3], # you can change the index to see the results for other queries\n",
|
||||
" search_query[3], # you can change the index to see the results for other queries\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -1349,7 +1432,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"analysis_explorer = ammico.AnalysisExplorer(image_dict)\n",
|
||||
"analysis_explorer.run_server(port = 8057)"
|
||||
"analysis_explorer.run_server(port=8057)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -1447,7 +1530,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.11.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@ -1,183 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Crop posts module"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Crop posts from social media posts images, to keep import text informations from social media posts images.\n",
|
||||
"We can set some manually cropped views from social media posts as reference for cropping the same type social media posts images."
|
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2",
"metadata": {},
"outputs": [],
"source": [
"# Please ignore this cell: extra install steps that are only executed when running the notebook on Google Colab\n",
"# flake8-noqa-cell\n",
"import os\n",
"if 'google.colab' in str(get_ipython()):\n",
" # we're running on colab\n",
" # first install pinned version of setuptools (latest version doesn't seem to work with this package on colab)\n",
" %pip install setuptools==61 -qqq\n",
" # install the moralization package\n",
|
||||
" %pip install git+https://github.com/ssciwr/AMMICO.git -qqq\n",
|
||||
"\n",
|
||||
" # prevent loading of the wrong opencv library\n",
|
||||
" %pip uninstall -y opencv-contrib-python\n",
|
||||
" %pip install opencv-contrib-python\n",
|
||||
"\n",
|
||||
" from google.colab import drive\n",
|
||||
" drive.mount('/content/drive')\n",
|
||||
"\n",
|
||||
" if not os.path.isdir('/content/ref'):\n",
|
||||
" !wget https://github.com/ssciwr/AMMICO/archive/refs/heads/ref-data.zip -q\n",
|
||||
" !unzip -qq ref-data.zip -d . && mv -f AMMICO-ref-data/data/ref . && rm -rf AMMICO-ref-data ref-data.zip"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import ammico.cropposts as crpo\n",
|
||||
"import ammico.utils as utils\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import cv2\n",
|
||||
"import importlib_resources\n",
|
||||
"pkg = importlib_resources.files(\"ammico\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The cropping is carried out by finding reference images on the image to be cropped. If a reference matches a region on the image, then everything below the matched region is removed. Manually look at a reference and an example post with the code below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# load ref view for cropping the same type social media posts images.\n",
|
||||
"# substitute the below paths for your samples\n",
|
||||
"path_ref = pkg / \"data\" / \"ref\" / \"ref-00.png\"\n",
|
||||
"ref_view = cv2.imread(path_ref.as_posix())\n",
|
||||
"RGB_ref_view = cv2.cvtColor(ref_view, cv2.COLOR_BGR2RGB)\n",
|
||||
"plt.figure(figsize=(10, 15))\n",
|
||||
"plt.imshow(RGB_ref_view)\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"path_post = pkg / \"data\" / \"test-crop-image.png\"\n",
|
||||
"view = cv2.imread(path_post.as_posix())\n",
|
||||
"RGB_view = cv2.cvtColor(view, cv2.COLOR_BGR2RGB)\n",
|
||||
"plt.figure(figsize=(10, 15))\n",
|
||||
"plt.imshow(RGB_view)\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can now crop the image and check on the way that everything looks fine. `plt_match` will plot the matches on the image and below which line content will be cropped; `plt_crop` will plot the cropped text part of the social media post with the comments removed; `plt_image` will plot the image part of the social media post if applicable."
|
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7",
"metadata": {},
"outputs": [],
"source": [
"# crop a posts from reference view, check the cropping \n",
|
||||
"# this will only plot something if the reference is found on the image\n",
|
||||
"crop_view = crpo.crop_posts_from_refs(\n",
|
||||
" [ref_view], view, \n",
|
||||
" plt_match=True, plt_crop=True, plt_image=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Batch crop images from the image folder given in `crop_dir`. The cropped images will save in `save_crop_dir` folder with the same file name as the original file. The reference images with the items to match are provided in `ref_dir`.\n",
|
||||
"\n",
|
||||
"Sometimes the cropping will be imperfect, due to improper matches on the image. It is sometimes easier to first categorize the social media posts and then set different references in the reference folder `ref_dir`."
|
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9",
"metadata": {},
"outputs": [],
"source": [
"\n",
"crop_dir = \"data/\"\n",
"ref_dir = pkg / \"data\" / \"ref\" \n",
"save_crop_dir = \"data/crop/\"\n",
"\n",
"files = utils.find_files(path=crop_dir,limit=10,)\n",
"ref_files = utils.find_files(path=ref_dir.as_posix(), limit=100)\n",
"\n",
"crpo.crop_media_posts(files, ref_files, save_crop_dir, plt_match=True, plt_crop=False, plt_image=False)\n",
"print(\"Batch cropping images done\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "10",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
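For reference, the cells of the deleted crop-posts notebook above can be collapsed into one short script. This is a minimal sketch, not part of the commit: it only reuses the `ammico.cropposts` and `ammico.utils` calls shown in the removed cells, and the `data/` and `data/crop/` folder paths are placeholder assumptions you would replace with your own.

import ammico.cropposts as crpo
import ammico.utils as utils
import cv2
import importlib_resources

pkg = importlib_resources.files("ammico")

# inspect a single crop: match one packaged reference against one example post
ref_view = cv2.imread((pkg / "data" / "ref" / "ref-00.png").as_posix())
view = cv2.imread((pkg / "data" / "test-crop-image.png").as_posix())
crop_view = crpo.crop_posts_from_refs(
    [ref_view], view, plt_match=True, plt_crop=True, plt_image=True,
)

# batch-crop a folder of posts against all packaged references
files = utils.find_files(path="data/", limit=10)  # assumed input folder
ref_files = utils.find_files(path=(pkg / "data" / "ref").as_posix(), limit=100)
crpo.crop_media_posts(
    files, ref_files, "data/crop/", plt_match=True, plt_crop=False, plt_image=False
)  # "data/crop/" is the assumed output folder
print("Batch cropping images done")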