Mirror of https://github.com/ssciwr/AMMICO.git
synced 2025-10-29 21:16:06 +02:00

maintain: switch to ruff

This commit is contained in:
parent 183dfb1717
commit cc9b16837f
@@ -4,15 +4,11 @@ repos:
     hooks:
       - id: nbstripout
        files: ".ipynb"
-  - repo: https://github.com/psf/black
-    rev: 25.1.0
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    # Ruff version.
+    rev: v0.12.10
     hooks:
-      - id: black
-  - repo: https://github.com/pycqa/flake8
-    rev: 7.1.2
-    hooks:
-      - id: flake8
-  - repo: https://github.com/s-weigand/flake8-nb
-    rev: v0.5.3
-    hooks:
-      - id: flake8-nb
+      # Run the linter.
+      - id: ruff-check
+      # Run the formatter.
+      - id: ruff-format
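The hunk above swaps the black, flake8, and flake8-nb hooks for the single ruff-pre-commit repo, whose ruff-check and ruff-format hooks cover both linting and formatting; nbstripout stays. A minimal sketch, assuming pre-commit is installed, of running the updated hooks from Python (equivalent to the standard `pre-commit run --all-files` CLI call):

```python
# Minimal sketch, assuming pre-commit is installed in the environment:
# run all configured hooks (nbstripout, ruff-check, ruff-format) once
# against the whole repository, as CI would.
import subprocess

subprocess.run(["pre-commit", "run", "--all-files"], check=True)
```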
@@ -308,7 +308,7 @@ class EmotionDetector(AnalysisMethod):
         # We limit ourselves to identify emotion on max three faces per image
         result = {"number_faces": len(faces) if len(faces) <= 3 else 3}
         for i, face in enumerate(faces[:3]):
-            result[f"person{i+1}"] = self.analyze_single_face(face)
+            result[f"person{i + 1}"] = self.analyze_single_face(face)
         self.clean_subdict(result)
         # release memory
         K.clear_session()
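The only change in this hunk is ruff-format normalizing spacing inside the f-string replacement field (`i+1` becomes `i + 1`); both spellings are equivalent at runtime. A self-contained sketch with hypothetical stand-in data:

```python
# Hypothetical data; the face value itself stands in for
# analyze_single_face(face). f"person{i+1}" and f"person{i + 1}"
# format identically, so the hunk is behavior-preserving.
faces = ["face_a", "face_b", "face_c", "face_d"]
result = {"number_faces": len(faces) if len(faces) <= 3 else 3}
for i, face in enumerate(faces[:3]):
    result[f"person{i + 1}"] = face
print(result)
# {'number_faces': 3, 'person1': 'face_a', 'person2': 'face_b', 'person3': 'face_c'}
```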
@@ -22,7 +22,7 @@
 "source": [
 "# if running on google colab\\\n",
 "# PLEASE RUN THIS ONLY AS CPU RUNTIME\n",
-"# for a GPU runtime, there are conflicts with pre-installed packages - \n",
+"# for a GPU runtime, there are conflicts with pre-installed packages -\n",
 "# you first need to uninstall them (prepare a clean environment with no pre-installs) and then install ammico\n",
 "# flake8-noqa-cell\n",
 "\n",
@@ -98,11 +98,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
 "import os\n",
 "# jax also sometimes leads to problems on google colab\n",
-"# if this is the case, try restarting the kernel and executing this \n",
+"# if this is the case, try restarting the kernel and executing this\n",
 "# and the above two code cells again\n",
 "import ammico\n",
 "\n",
 "# for displaying a progress bar\n",
 "from tqdm import tqdm"
 ]
@@ -271,7 +271,7 @@
 "source": [
 "# dump file name\n",
 "dump_file = \"dump_file.csv\"\n",
-"# dump every N images \n",
+"# dump every N images\n",
 "dump_every = 10\n",
 "print(len(image_dict))"
 ]
@@ -295,12 +295,18 @@
 "# the highest possible value is 100\n",
 "race_threshold = 50\n",
 "gender_threshold = 50\n",
-"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
-"    image_dict[key] = ammico.EmotionDetector(image_dict[key],\n",
-"                                             emotion_threshold=emotion_threshold,\n",
-"                                             race_threshold=race_threshold,\n",
-"                                             gender_threshold=gender_threshold).analyse_image() # analyse image with EmotionDetector and update dict\n",
-"    if num % dump_every == 0 or num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
+"for num, key in tqdm(\n",
+"    enumerate(image_dict.keys()), total=len(image_dict)\n",
+"):  # loop through all images\n",
+"    image_dict[key] = ammico.EmotionDetector(\n",
+"        image_dict[key],\n",
+"        emotion_threshold=emotion_threshold,\n",
+"        race_threshold=race_threshold,\n",
+"        gender_threshold=gender_threshold,\n",
+"    ).analyse_image()  # analyse image with EmotionDetector and update dict\n",
+"    if (\n",
+"        num % dump_every == 0 or num == len(image_dict) - 1\n",
+"    ):  # save results every dump_every to dump_file\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
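This cell writes the dump condition with `or`, but several later cells in both notebooks spell it `num % dump_every == 0 | num == len(image_dict) - 1`. Because `|` binds tighter than `==`, that parses as the chained comparison `num % dump_every == (0 | num) == len(image_dict) - 1`, which is almost certainly not the intent; ruff-format only re-wraps the expression and cannot fix it. A runnable sketch of the intended periodic-dump pattern, with stand-in data instead of ammico calls:

```python
# Stand-in for image_dict; each value collects per-image results.
items = {f"img{i:02d}": {} for i in range(25)}
dump_every = 10

dumps = []
for num, key in enumerate(items):
    items[key]["analysed"] = True  # stand-in for a detector call
    # `or` is the correct spelling; `|` here would parse as
    # num % dump_every == (0 | num) == len(items) - 1, a chained comparison.
    if num % dump_every == 0 or num == len(items) - 1:
        dumps.append(num)  # the notebooks write a CSV here instead

print(dumps)  # [0, 10, 20, 24]
```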
@@ -327,10 +333,16 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
-"    image_dict[key] = ammico.TextDetector(image_dict[key], analyse_text=True).analyse_image() # analyse image with EmotionDetector and update dict\n",
-"    \n",
-"    if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
+"for num, key in tqdm(\n",
+"    enumerate(image_dict.keys()), total=len(image_dict)\n",
+"):  # loop through all images\n",
+"    image_dict[key] = ammico.TextDetector(\n",
+"        image_dict[key], analyse_text=True\n",
+"    ).analyse_image()  # analyse image with EmotionDetector and update dict\n",
+"\n",
+"    if (\n",
+"        num % dump_every == 0 | num == len(image_dict) - 1\n",
+"    ):  # save results every dump_every to dump_file\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -349,13 +361,21 @@
 "outputs": [],
 "source": [
 "# initialize the models\n",
-"image_summary_detector = ammico.SummaryDetector(subdict = image_dict, analysis_type=\"summary\", model_type=\"base\")\n",
+"image_summary_detector = ammico.SummaryDetector(\n",
+"    subdict=image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
+")\n",
 "\n",
 "# run the analysis without having to re-iniatialize the model\n",
-"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)): # loop through all images\n",
-"    image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\") # analyse image with SummaryDetector and update dict\n",
-"    \n",
-"    if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
+"for num, key in tqdm(\n",
+"    enumerate(image_dict.keys()), total=len(image_dict)\n",
+"):  # loop through all images\n",
+"    image_dict[key] = image_summary_detector.analyse_image(\n",
+"        subdict=image_dict[key], analysis_type=\"summary\"\n",
+"    )  # analyse image with SummaryDetector and update dict\n",
+"\n",
+"    if (\n",
+"        num % dump_every == 0 | num == len(image_dict) - 1\n",
+"    ):  # save results every dump_every to dump_file\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
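The pattern this hunk re-wraps (construct `SummaryDetector` once, then call `analyse_image` per image) matters because model weights load at construction time. A minimal sketch of the same shape with a hypothetical stand-in class, since the real detector needs model downloads:

```python
class StubSummaryDetector:
    """Hypothetical stand-in for ammico.SummaryDetector: costly __init__,
    cheap per-image analyse_image, mirroring the cell above."""

    def __init__(self, subdict, analysis_type, model_type):
        self.analysis_type = analysis_type  # pretend weights load here (slow)

    def analyse_image(self, subdict, analysis_type):
        subdict["summary"] = "a caption"  # fake result
        return subdict


image_dict = {"img1": {}, "img2": {}}
detector = StubSummaryDetector(image_dict, "summary", "base")  # built once
for key in image_dict:
    image_dict[key] = detector.analyse_image(image_dict[key], analysis_type="summary")
print(image_dict)
```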
@@ -374,14 +394,26 @@
 "outputs": [],
 "source": [
 "# initialize the models\n",
-"image_summary_detector = ammico.SummaryDetector(subdict = image_dict, analysis_type=\"summary\", model_type=\"base\")\n",
+"image_summary_detector = ammico.SummaryDetector(\n",
+"    subdict=image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
+")\n",
 "\n",
-"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)): # loop through all images\n",
-"    image_dict[key] = ammico.EmotionDetector(image_dict[key]).analyse_image() # analyse image with EmotionDetector and update dict\n",
-"    image_dict[key] = ammico.TextDetector(image_dict[key], analyse_text=True).analyse_image() # analyse image with TextDetector and update dict\n",
-"    image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\") # analyse image with SummaryDetector and update dict\n",
-"    \n",
-"    if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file \n",
+"for num, key in tqdm(\n",
+"    enumerate(image_dict.keys()), total=len(image_dict)\n",
+"):  # loop through all images\n",
+"    image_dict[key] = ammico.EmotionDetector(\n",
+"        image_dict[key]\n",
+"    ).analyse_image()  # analyse image with EmotionDetector and update dict\n",
+"    image_dict[key] = ammico.TextDetector(\n",
+"        image_dict[key], analyse_text=True\n",
+"    ).analyse_image()  # analyse image with TextDetector and update dict\n",
+"    image_dict[key] = image_summary_detector.analyse_image(\n",
+"        subdict=image_dict[key], analysis_type=\"summary\"\n",
+"    )  # analyse image with SummaryDetector and update dict\n",
+"\n",
+"    if (\n",
+"        num % dump_every == 0 | num == len(image_dict) - 1\n",
+"    ):  # save results every dump_every to dump_file\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -482,7 +514,7 @@
 "# set the dump file\n",
 "# dump file name\n",
 "dump_file = \"dump_file.csv\"\n",
-"# dump every N images \n",
+"# dump every N images\n",
 "dump_every = 10"
 ]
 },
@@ -493,9 +525,15 @@
 "outputs": [],
 "source": [
 "# analyze the csv file\n",
-"for num, key in tqdm(enumerate(text_dict.keys()), total=len(text_dict)): # loop through all text entries\n",
-"    ammico.TextDetector(text_dict[key], analyse_text=True, skip_extraction=True).analyse_image() # analyse text with TextDetector and update dict\n",
-"    if num % dump_every == 0 | num == len(text_dict) - 1: # save results every dump_every to dump_file\n",
+"for num, key in tqdm(\n",
+"    enumerate(text_dict.keys()), total=len(text_dict)\n",
+"):  # loop through all text entries\n",
+"    ammico.TextDetector(\n",
+"        text_dict[key], analyse_text=True, skip_extraction=True\n",
+"    ).analyse_image()  # analyse text with TextDetector and update dict\n",
+"    if (\n",
+"        num % dump_every == 0 | num == len(text_dict) - 1\n",
+"    ):  # save results every dump_every to dump_file\n",
 "        image_df = ammico.get_dataframe(text_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -570,14 +608,23 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
-"    image_dict[key] = ammico.TextDetector(image_dict[key], # analyse image with TextDetector and update dict\n",
-"                                          analyse_text=True, model_names=[\"sshleifer/distilbart-cnn-12-6\", \n",
-"                                          \"distilbert-base-uncased-finetuned-sst-2-english\", \n",
-"                                          \"dbmdz/bert-large-cased-finetuned-conll03-english\"], \n",
-"                                          revision_numbers=[\"a4f8f3e\", \"af0f99b\", \"f2482bf\"]).analyse_image()\n",
-"    \n",
-"    if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
+"for num, key in tqdm(\n",
+"    enumerate(image_dict.keys()), total=len(image_dict)\n",
+"):  # loop through all images\n",
+"    image_dict[key] = ammico.TextDetector(\n",
+"        image_dict[key],  # analyse image with TextDetector and update dict\n",
+"        analyse_text=True,\n",
+"        model_names=[\n",
+"            \"sshleifer/distilbart-cnn-12-6\",\n",
+"            \"distilbert-base-uncased-finetuned-sst-2-english\",\n",
+"            \"dbmdz/bert-large-cased-finetuned-conll03-english\",\n",
+"        ],\n",
+"        revision_numbers=[\"a4f8f3e\", \"af0f99b\", \"f2482bf\"],\n",
+"    ).analyse_image()\n",
+"\n",
+"    if (\n",
+"        num % dump_every == 0 | num == len(image_dict) - 1\n",
+"    ):  # save results every dump_every to dump_file\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
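The cell above pins each Hugging Face model to a revision; `model_names[i]` pairs positionally with `revision_numbers[i]`. A short sketch of that pairing, using the exact names and revisions from the cell:

```python
model_names = [
    "sshleifer/distilbart-cnn-12-6",
    "distilbert-base-uncased-finetuned-sst-2-english",
    "dbmdz/bert-large-cased-finetuned-conll03-english",
]
revision_numbers = ["a4f8f3e", "af0f99b", "f2482bf"]

# Each model name resolves at its pinned commit, keeping runs reproducible.
for name, rev in zip(model_names, revision_numbers):
    print(f"{name} @ {rev}")
```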
@@ -659,7 +706,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"image_summary_detector = ammico.SummaryDetector(image_dict, analysis_type=\"summary\", model_type=\"base\")"
+"image_summary_detector = ammico.SummaryDetector(\n",
+"    image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
+")"
 ]
 },
 {
@@ -668,10 +717,12 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
-"    image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\")\n",
-"    \n",
-"    if num % dump_every == 0 | num == len(image_dict) - 1: \n",
+"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
+"    image_dict[key] = image_summary_detector.analyse_image(\n",
+"        subdict=image_dict[key], analysis_type=\"summary\"\n",
+"    )\n",
+"\n",
+"    if num % dump_every == 0 | num == len(image_dict) - 1:\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -709,14 +760,17 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"image_summary_vqa_detector = ammico.SummaryDetector(image_dict, analysis_type=\"questions\", \n",
-"                                                    model_type=\"vqa\")\n",
+"image_summary_vqa_detector = ammico.SummaryDetector(\n",
+"    image_dict, analysis_type=\"questions\", model_type=\"vqa\"\n",
+")\n",
 "\n",
-"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
-"    image_dict[key] = image_summary_vqa_detector.analyse_image(subdict=image_dict[key], \n",
-"                                                               analysis_type=\"questions\", \n",
-"                                                               list_of_questions = list_of_questions)\n",
-"    if num % dump_every == 0 | num == len(image_dict) - 1: \n",
+"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
+"    image_dict[key] = image_summary_vqa_detector.analyse_image(\n",
+"        subdict=image_dict[key],\n",
+"        analysis_type=\"questions\",\n",
+"        list_of_questions=list_of_questions,\n",
+"    )\n",
+"    if num % dump_every == 0 | num == len(image_dict) - 1:\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -734,13 +788,16 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"image_summary_vqa_detector = ammico.SummaryDetector(image_dict, analysis_type=\"summary_and_questions\", \n",
-"                                                    model_type=\"base\")\n",
-"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
-"    image_dict[key] = image_summary_vqa_detector.analyse_image(subdict=image_dict[key], \n",
-"                                                               analysis_type=\"summary_and_questions\", \n",
-"                                                               list_of_questions = list_of_questions)\n",
-"    if num % dump_every == 0 | num == len(image_dict) - 1: \n",
+"image_summary_vqa_detector = ammico.SummaryDetector(\n",
+"    image_dict, analysis_type=\"summary_and_questions\", model_type=\"base\"\n",
+")\n",
+"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
+"    image_dict[key] = image_summary_vqa_detector.analyse_image(\n",
+"        subdict=image_dict[key],\n",
+"        analysis_type=\"summary_and_questions\",\n",
+"        list_of_questions=list_of_questions,\n",
+"    )\n",
+"    if num % dump_every == 0 | num == len(image_dict) - 1:\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -772,7 +829,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"obj = ammico.SummaryDetector(subdict=image_dict, analysis_type = \"summary_and_questions\", model_type = \"blip2_t5_caption_coco_flant5xl\")\n",
+"obj = ammico.SummaryDetector(\n",
+"    subdict=image_dict,\n",
+"    analysis_type=\"summary_and_questions\",\n",
+"    model_type=\"blip2_t5_caption_coco_flant5xl\",\n",
+")\n",
 "# list of the new models that can be used:\n",
 "# \"blip2_t5_pretrain_flant5xxl\",\n",
 "# \"blip2_t5_pretrain_flant5xl\",\n",
@@ -786,7 +847,7 @@
 "# Or you can use `caption_coco_`` model types to generate coco-style captions.\n",
 "# `flant5` and `opt` means that the model equipped with FlanT5 and OPT LLMs respectively.\n",
 "\n",
-"#also you can perform all calculation on cpu if you set device_type= \"cpu\" or gpu if you set device_type= \"cuda\""
+"# also you can perform all calculation on cpu if you set device_type= \"cpu\" or gpu if you set device_type= \"cuda\""
 ]
 },
 {
@@ -817,9 +878,13 @@
 "outputs": [],
 "source": [
 "for key in image_dict:\n",
-"    image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions)\n",
+"    image_dict[key] = obj.analyse_image(\n",
+"        subdict=image_dict[key],\n",
+"        analysis_type=\"questions\",\n",
+"        list_of_questions=list_of_questions,\n",
+"    )\n",
 "\n",
-"# analysis_type can be \n",
+"# analysis_type can be\n",
 "# \"summary\",\n",
 "# \"questions\",\n",
 "# \"summary_and_questions\"."
@@ -853,7 +918,11 @@
 "outputs": [],
 "source": [
 "for key in image_dict:\n",
-"    image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions)"
+"    image_dict[key] = obj.analyse_image(\n",
+"        subdict=image_dict[key],\n",
+"        analysis_type=\"questions\",\n",
+"        list_of_questions=list_of_questions,\n",
+"    )"
 ]
 },
 {
@@ -891,7 +960,12 @@
 "outputs": [],
 "source": [
 "for key in image_dict:\n",
-"    image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions, consequential_questions=True)"
+"    image_dict[key] = obj.analyse_image(\n",
+"        subdict=image_dict[key],\n",
+"        analysis_type=\"questions\",\n",
+"        list_of_questions=list_of_questions,\n",
+"        consequential_questions=True,\n",
+"    )"
 ]
 },
 {
@@ -945,9 +1019,13 @@
 "outputs": [],
 "source": [
 "for key in image_dict.keys():\n",
-"    image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
-"                                             gender_threshold=50,\n",
-"                                             accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
+"    image_dict[key] = ammico.EmotionDetector(\n",
+"        image_dict[key],\n",
+"        emotion_threshold=50,\n",
+"        race_threshold=50,\n",
+"        gender_threshold=50,\n",
+"        accept_disclosure=\"DISCLOSURE_AMMICO\",\n",
+"    ).analyse_image()"
 ]
 },
 {
@@ -1080,9 +1158,9 @@
 "    image_names,\n",
 "    features_image_stacked,\n",
 ") = my_obj.parsing_images(\n",
-"    model_type, \n",
+"    model_type,\n",
 "    path_to_save_tensors=str(data_path),\n",
-"    )"
+")"
 ]
 },
 {
@@ -1137,11 +1215,16 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import importlib_resources # only require for image query example\n",
-"image_example_query = str(importlib_resources.files(\"ammico\") / \"data\" / \"test-crop-image.png\") # creating the path to the image for the image query example\n",
+"import importlib_resources  # only require for image query example\n",
+"\n",
+"image_example_query = str(\n",
+"    importlib_resources.files(\"ammico\") / \"data\" / \"test-crop-image.png\"\n",
+")  # creating the path to the image for the image query example\n",
+"\n",
 "search_query = [\n",
-"    {\"image\": image_example_query}, # This is how looks image query, here `image_example_path` is the path to query image like \"data/test-crop-image.png\"\n",
+"    {\n",
+"        \"image\": image_example_query\n",
+"    },  # This is how looks image query, here `image_example_path` is the path to query image like \"data/test-crop-image.png\"\n",
 "]"
 ]
 },
@@ -1221,7 +1304,7 @@
 "outputs": [],
 "source": [
 "my_obj.show_results(\n",
-"    search_query[0], # you can change the index to see the results for other queries\n",
+"    search_query[0],  # you can change the index to see the results for other queries\n",
 ")"
 ]
 },
@@ -1378,7 +1461,7 @@
 "outputs": [],
 "source": [
 "analysis_explorer = ammico.AnalysisExplorer(image_dict)\n",
-"analysis_explorer.run_server(port = 8057)"
+"analysis_explorer.run_server(port=8057)"
 ]
 },
 {
@@ -1476,7 +1559,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.11"
+"version": "3.11.13"
 }
 },
 "nbformat": 4,
@@ -129,7 +129,8 @@ class TextDetector(AnalysisMethod):
         """Add a space after a full stop. Required by googletrans."""
         # we have found text, now we check for full stops
         index_stop = [
-            i.start() for i in re.finditer("\.", self.subdict["text"])  # noqa
+            i.start()
+            for i in re.finditer("\.", self.subdict["text"])  # noqa
         ]
         if not index_stop:  # no full stops found
             return
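The hunk above keeps the `# noqa`-suppressed pattern `"\."`: `\.` is an invalid escape sequence in a plain string literal (which is what the suppression hides), while a raw string sidesteps the warning entirely. A minimal sketch of the same full-stop search with the idiomatic raw-string spelling:

```python
import re

text = "First sentence.Second sentence."
# r"\." matches a literal dot; the plain "\." used above works too, but
# raises an invalid-escape warning on newer Pythons, hence the # noqa.
index_stop = [m.start() for m in re.finditer(r"\.", text)]
print(index_stop)  # [14, 30]
```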
@@ -22,7 +22,7 @@
 "source": [
 "# if running on google colab\\\n",
 "# PLEASE RUN THIS ONLY AS CPU RUNTIME\n",
-"# for a GPU runtime, there are conflicts with pre-installed packages - \n",
+"# for a GPU runtime, there are conflicts with pre-installed packages -\n",
 "# you first need to uninstall them (prepare a clean environment with no pre-installs) and then install ammico\n",
 "# flake8-noqa-cell\n",
 "\n",
@@ -103,11 +103,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
 "import os\n",
 "# jax also sometimes leads to problems on google colab\n",
-"# if this is the case, try restarting the kernel and executing this \n",
+"# if this is the case, try restarting the kernel and executing this\n",
 "# and the above two code cells again\n",
 "import ammico\n",
 "\n",
 "# for displaying a progress bar\n",
 "from tqdm import tqdm"
 ]
@@ -276,7 +276,7 @@
 "source": [
 "# dump file name\n",
 "dump_file = \"dump_file.csv\"\n",
-"# dump every N images \n",
+"# dump every N images\n",
 "dump_every = 10"
 ]
 },
@@ -299,12 +299,18 @@
 "# the highest possible value is 100\n",
 "race_threshold = 50\n",
 "gender_threshold = 50\n",
-"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
-"    image_dict[key] = ammico.EmotionDetector(image_dict[key],\n",
-"                                             emotion_threshold=emotion_threshold,\n",
-"                                             race_threshold=race_threshold,\n",
-"                                             gender_threshold=gender_threshold).analyse_image() # analyse image with EmotionDetector and update dict\n",
-"    if num % dump_every == 0 or num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
+"for num, key in tqdm(\n",
+"    enumerate(image_dict.keys()), total=len(image_dict)\n",
+"):  # loop through all images\n",
+"    image_dict[key] = ammico.EmotionDetector(\n",
+"        image_dict[key],\n",
+"        emotion_threshold=emotion_threshold,\n",
+"        race_threshold=race_threshold,\n",
+"        gender_threshold=gender_threshold,\n",
+"    ).analyse_image()  # analyse image with EmotionDetector and update dict\n",
+"    if (\n",
+"        num % dump_every == 0 or num == len(image_dict) - 1\n",
+"    ):  # save results every dump_every to dump_file\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -322,10 +328,16 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
-"    image_dict[key] = ammico.TextDetector(image_dict[key], analyse_text=True).analyse_image() # analyse image with EmotionDetector and update dict\n",
-"    \n",
-"    if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
+"for num, key in tqdm(\n",
+"    enumerate(image_dict.keys()), total=len(image_dict)\n",
+"):  # loop through all images\n",
+"    image_dict[key] = ammico.TextDetector(\n",
+"        image_dict[key], analyse_text=True\n",
+"    ).analyse_image()  # analyse image with EmotionDetector and update dict\n",
+"\n",
+"    if (\n",
+"        num % dump_every == 0 | num == len(image_dict) - 1\n",
+"    ):  # save results every dump_every to dump_file\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -344,13 +356,21 @@
 "outputs": [],
 "source": [
 "# initialize the models\n",
-"image_summary_detector = ammico.SummaryDetector(subdict = image_dict, analysis_type=\"summary\", model_type=\"base\")\n",
+"image_summary_detector = ammico.SummaryDetector(\n",
+"    subdict=image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
+")\n",
 "\n",
 "# run the analysis without having to re-iniatialize the model\n",
-"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)): # loop through all images\n",
-"    image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\") # analyse image with SummaryDetector and update dict\n",
-"    \n",
-"    if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
+"for num, key in tqdm(\n",
+"    enumerate(image_dict.keys()), total=len(image_dict)\n",
+"):  # loop through all images\n",
+"    image_dict[key] = image_summary_detector.analyse_image(\n",
+"        subdict=image_dict[key], analysis_type=\"summary\"\n",
+"    )  # analyse image with SummaryDetector and update dict\n",
+"\n",
+"    if (\n",
+"        num % dump_every == 0 | num == len(image_dict) - 1\n",
+"    ):  # save results every dump_every to dump_file\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -369,14 +389,26 @@
 "outputs": [],
 "source": [
 "# initialize the models\n",
-"image_summary_detector = ammico.SummaryDetector(subdict = image_dict, analysis_type=\"summary\", model_type=\"base\")\n",
+"image_summary_detector = ammico.SummaryDetector(\n",
+"    subdict=image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
+")\n",
 "\n",
-"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)): # loop through all images\n",
-"    image_dict[key] = ammico.EmotionDetector(image_dict[key]).analyse_image() # analyse image with EmotionDetector and update dict\n",
-"    image_dict[key] = ammico.TextDetector(image_dict[key], analyse_text=True).analyse_image() # analyse image with TextDetector and update dict\n",
-"    image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\") # analyse image with SummaryDetector and update dict\n",
-"    \n",
-"    if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file \n",
+"for num, key in tqdm(\n",
+"    enumerate(image_dict.keys()), total=len(image_dict)\n",
+"):  # loop through all images\n",
+"    image_dict[key] = ammico.EmotionDetector(\n",
+"        image_dict[key]\n",
+"    ).analyse_image()  # analyse image with EmotionDetector and update dict\n",
+"    image_dict[key] = ammico.TextDetector(\n",
+"        image_dict[key], analyse_text=True\n",
+"    ).analyse_image()  # analyse image with TextDetector and update dict\n",
+"    image_dict[key] = image_summary_detector.analyse_image(\n",
+"        subdict=image_dict[key], analysis_type=\"summary\"\n",
+"    )  # analyse image with SummaryDetector and update dict\n",
+"\n",
+"    if (\n",
+"        num % dump_every == 0 | num == len(image_dict) - 1\n",
+"    ):  # save results every dump_every to dump_file\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -477,7 +509,7 @@
 "# set the dump file\n",
 "# dump file name\n",
 "dump_file = \"dump_file.csv\"\n",
-"# dump every N images \n",
+"# dump every N images\n",
 "dump_every = 10"
 ]
 },
@@ -488,9 +520,15 @@
 "outputs": [],
 "source": [
 "# analyze the csv file\n",
-"for num, key in tqdm(enumerate(text_dict.keys()), total=len(text_dict)): # loop through all text entries\n",
-"    ammico.TextDetector(text_dict[key], analyse_text=True, skip_extraction=True).analyse_image() # analyse text with TextDetector and update dict\n",
-"    if num % dump_every == 0 | num == len(text_dict) - 1: # save results every dump_every to dump_file\n",
+"for num, key in tqdm(\n",
+"    enumerate(text_dict.keys()), total=len(text_dict)\n",
+"):  # loop through all text entries\n",
+"    ammico.TextDetector(\n",
+"        text_dict[key], analyse_text=True, skip_extraction=True\n",
+"    ).analyse_image()  # analyse text with TextDetector and update dict\n",
+"    if (\n",
+"        num % dump_every == 0 | num == len(text_dict) - 1\n",
+"    ):  # save results every dump_every to dump_file\n",
 "        image_df = ammico.get_dataframe(text_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -565,14 +603,23 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
-"    image_dict[key] = ammico.TextDetector(image_dict[key], # analyse image with TextDetector and update dict\n",
-"                                          analyse_text=True, model_names=[\"sshleifer/distilbart-cnn-12-6\", \n",
-"                                          \"distilbert-base-uncased-finetuned-sst-2-english\", \n",
-"                                          \"dbmdz/bert-large-cased-finetuned-conll03-english\"], \n",
-"                                          revision_numbers=[\"a4f8f3e\", \"af0f99b\", \"f2482bf\"]).analyse_image()\n",
-"    \n",
-"    if num % dump_every == 0 | num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
+"for num, key in tqdm(\n",
+"    enumerate(image_dict.keys()), total=len(image_dict)\n",
+"):  # loop through all images\n",
+"    image_dict[key] = ammico.TextDetector(\n",
+"        image_dict[key],  # analyse image with TextDetector and update dict\n",
+"        analyse_text=True,\n",
+"        model_names=[\n",
+"            \"sshleifer/distilbart-cnn-12-6\",\n",
+"            \"distilbert-base-uncased-finetuned-sst-2-english\",\n",
+"            \"dbmdz/bert-large-cased-finetuned-conll03-english\",\n",
+"        ],\n",
+"        revision_numbers=[\"a4f8f3e\", \"af0f99b\", \"f2482bf\"],\n",
+"    ).analyse_image()\n",
+"\n",
+"    if (\n",
+"        num % dump_every == 0 | num == len(image_dict) - 1\n",
+"    ):  # save results every dump_every to dump_file\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -654,7 +701,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"image_summary_detector = ammico.SummaryDetector(image_dict, analysis_type=\"summary\", model_type=\"base\")"
+"image_summary_detector = ammico.SummaryDetector(\n",
+"    image_dict, analysis_type=\"summary\", model_type=\"base\"\n",
+")"
 ]
 },
 {
@@ -663,10 +712,12 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
-"    image_dict[key] = image_summary_detector.analyse_image(subdict = image_dict[key], analysis_type=\"summary\")\n",
-"    \n",
-"    if num % dump_every == 0 | num == len(image_dict) - 1: \n",
+"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
+"    image_dict[key] = image_summary_detector.analyse_image(\n",
+"        subdict=image_dict[key], analysis_type=\"summary\"\n",
+"    )\n",
+"\n",
+"    if num % dump_every == 0 | num == len(image_dict) - 1:\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -704,14 +755,17 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"image_summary_vqa_detector = ammico.SummaryDetector(image_dict, analysis_type=\"questions\", \n",
-"                                                    model_type=\"vqa\")\n",
+"image_summary_vqa_detector = ammico.SummaryDetector(\n",
+"    image_dict, analysis_type=\"questions\", model_type=\"vqa\"\n",
+")\n",
 "\n",
-"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
-"    image_dict[key] = image_summary_vqa_detector.analyse_image(subdict=image_dict[key], \n",
-"                                                               analysis_type=\"questions\", \n",
-"                                                               list_of_questions = list_of_questions)\n",
-"    if num % dump_every == 0 | num == len(image_dict) - 1: \n",
+"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
+"    image_dict[key] = image_summary_vqa_detector.analyse_image(\n",
+"        subdict=image_dict[key],\n",
+"        analysis_type=\"questions\",\n",
+"        list_of_questions=list_of_questions,\n",
+"    )\n",
+"    if num % dump_every == 0 | num == len(image_dict) - 1:\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -729,13 +783,16 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"image_summary_vqa_detector = ammico.SummaryDetector(image_dict, analysis_type=\"summary_and_questions\", \n",
-"                                                    model_type=\"base\")\n",
-"for num, key in tqdm(enumerate(image_dict.keys()),total=len(image_dict)):\n",
-"    image_dict[key] = image_summary_vqa_detector.analyse_image(subdict=image_dict[key], \n",
-"                                                               analysis_type=\"summary_and_questions\", \n",
-"                                                               list_of_questions = list_of_questions)\n",
-"    if num % dump_every == 0 | num == len(image_dict) - 1: \n",
+"image_summary_vqa_detector = ammico.SummaryDetector(\n",
+"    image_dict, analysis_type=\"summary_and_questions\", model_type=\"base\"\n",
+")\n",
+"for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)):\n",
+"    image_dict[key] = image_summary_vqa_detector.analyse_image(\n",
+"        subdict=image_dict[key],\n",
+"        analysis_type=\"summary_and_questions\",\n",
+"        list_of_questions=list_of_questions,\n",
+"    )\n",
+"    if num % dump_every == 0 | num == len(image_dict) - 1:\n",
 "        image_df = ammico.get_dataframe(image_dict)\n",
 "        image_df.to_csv(dump_file)"
 ]
@@ -767,7 +824,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"obj = ammico.SummaryDetector(subdict=image_dict, analysis_type = \"summary_and_questions\", model_type = \"blip2_t5_caption_coco_flant5xl\")\n",
+"obj = ammico.SummaryDetector(\n",
+"    subdict=image_dict,\n",
+"    analysis_type=\"summary_and_questions\",\n",
+"    model_type=\"blip2_t5_caption_coco_flant5xl\",\n",
+")\n",
 "# list of the new models that can be used:\n",
 "# \"blip2_t5_pretrain_flant5xxl\",\n",
 "# \"blip2_t5_pretrain_flant5xl\",\n",
@@ -781,7 +842,7 @@
 "# Or you can use `caption_coco_`` model types to generate coco-style captions.\n",
 "# `flant5` and `opt` means that the model equipped with FlanT5 and OPT LLMs respectively.\n",
 "\n",
-"#also you can perform all calculation on cpu if you set device_type= \"cpu\" or gpu if you set device_type= \"cuda\""
+"# also you can perform all calculation on cpu if you set device_type= \"cpu\" or gpu if you set device_type= \"cuda\""
 ]
 },
 {
@@ -812,9 +873,13 @@
 "outputs": [],
 "source": [
 "for key in image_dict:\n",
-"    image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions)\n",
+"    image_dict[key] = obj.analyse_image(\n",
+"        subdict=image_dict[key],\n",
+"        analysis_type=\"questions\",\n",
+"        list_of_questions=list_of_questions,\n",
+"    )\n",
 "\n",
-"# analysis_type can be \n",
+"# analysis_type can be\n",
 "# \"summary\",\n",
 "# \"questions\",\n",
 "# \"summary_and_questions\"."
@@ -848,7 +913,11 @@
 "outputs": [],
 "source": [
 "for key in image_dict:\n",
-"    image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions)"
+"    image_dict[key] = obj.analyse_image(\n",
+"        subdict=image_dict[key],\n",
+"        analysis_type=\"questions\",\n",
+"        list_of_questions=list_of_questions,\n",
+"    )"
 ]
 },
 {
@@ -886,7 +955,12 @@
 "outputs": [],
 "source": [
 "for key in image_dict:\n",
-"    image_dict[key] = obj.analyse_image(subdict = image_dict[key], analysis_type=\"questions\", list_of_questions=list_of_questions, consequential_questions=True)"
+"    image_dict[key] = obj.analyse_image(\n",
+"        subdict=image_dict[key],\n",
+"        analysis_type=\"questions\",\n",
+"        list_of_questions=list_of_questions,\n",
+"        consequential_questions=True,\n",
+"    )"
 ]
 },
 {
@@ -940,9 +1014,13 @@
 "outputs": [],
 "source": [
 "for key in image_dict.keys():\n",
-"    image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
-"                                             gender_threshold=50,\n",
-"                                             accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
+"    image_dict[key] = ammico.EmotionDetector(\n",
+"        image_dict[key],\n",
+"        emotion_threshold=50,\n",
+"        race_threshold=50,\n",
+"        gender_threshold=50,\n",
+"        accept_disclosure=\"DISCLOSURE_AMMICO\",\n",
+"    ).analyse_image()"
 ]
 },
 {
@@ -1055,9 +1133,9 @@
 "    image_names,\n",
 "    features_image_stacked,\n",
 ") = my_obj.parsing_images(\n",
-"    model_type, \n",
+"    model_type,\n",
 "    path_to_save_tensors=\"/content/drive/MyDrive/misinformation-data/\",\n",
-"    )"
+")"
 ]
 },
 {
@@ -1112,14 +1190,19 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import importlib_resources # only require for image query example\n",
-"image_example_query = str(importlib_resources.files(\"ammico\") / \"data\" / \"test-crop-image.png\") # creating the path to the image for the image query example\n",
+"import importlib_resources  # only require for image query example\n",
+"\n",
+"image_example_query = str(\n",
+"    importlib_resources.files(\"ammico\") / \"data\" / \"test-crop-image.png\"\n",
+")  # creating the path to the image for the image query example\n",
+"\n",
 "search_query = [\n",
-"    {\"text_input\": \"politician press conference\"}, \n",
+"    {\"text_input\": \"politician press conference\"},\n",
 "    {\"text_input\": \"a world map\"},\n",
-"    {\"text_input\": \"a dog\"}, # This is how looks text query\n",
-"    {\"image\": image_example_query}, # This is how looks image query, here `image_example_path` is the path to query image like \"data/test-crop-image.png\"\n",
+"    {\"text_input\": \"a dog\"},  # This is how looks text query\n",
+"    {\n",
+"        \"image\": image_example_query\n",
+"    },  # This is how looks image query, here `image_example_path` is the path to query image like \"data/test-crop-image.png\"\n",
 "]"
 ]
 },
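The query cell above mixes the two query shapes the multimodal search accepts: a dict with a `"text_input"` string, or a dict with an `"image"` path. A stripped-down sketch of that structure (the literal path below is hypothetical):

```python
# Each entry is one query; downstream, my_obj.show_results(search_query[i], ...)
# consumes entries by index, as in the cells that follow.
search_query = [
    {"text_input": "politician press conference"},  # text query
    {"text_input": "a world map"},
    {"text_input": "a dog"},
    {"image": "data/test-crop-image.png"},  # image query: a path on disk (hypothetical)
]

for query in search_query:
    print(next(iter(query.items())))
```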
@@ -1199,7 +1282,7 @@
 "outputs": [],
 "source": [
 "my_obj.show_results(\n",
-"    search_query[0], # you can change the index to see the results for other queries\n",
+"    search_query[0],  # you can change the index to see the results for other queries\n",
 ")"
 ]
 },
@@ -1210,7 +1293,7 @@
 "outputs": [],
 "source": [
 "my_obj.show_results(\n",
-"    search_query[3], # you can change the index to see the results for other queries\n",
+"    search_query[3],  # you can change the index to see the results for other queries\n",
 ")"
 ]
 },
@@ -1349,7 +1432,7 @@
 "outputs": [],
 "source": [
 "analysis_explorer = ammico.AnalysisExplorer(image_dict)\n",
-"analysis_explorer.run_server(port = 8057)"
+"analysis_explorer.run_server(port=8057)"
 ]
 },
 {
@@ -1447,7 +1530,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.9"
+"version": "3.11.13"
 }
 },
 "nbformat": 4,