From d1f65d016de9612a6484fd6d4eeb87c75477fe86 Mon Sep 17 00:00:00 2001
From: Petr Andriushchenko
Date: Tue, 25 Apr 2023 11:49:12 +0200
Subject: [PATCH] edited github docs.yml, Dockerfile and README.md

---
 .github/workflows/docs.yml |  2 +-
 Dockerfile                 |  8 ++++----
 README.md                  | 10 +++++-----
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 58f94d7..80a5c93 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -15,7 +15,7 @@ jobs:
       - uses: actions/checkout@v3
         with:
           fetch-depth: 0 # otherwise, you will failed to push refs to dest repo
-      - name: install misinformation
+      - name: install ammico
         run: |
           pip install -e .
           python -m pip install -r requirements-dev.txt
diff --git a/Dockerfile b/Dockerfile
index fdb5ba2..e922a1c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,20 +6,20 @@ RUN apt update && apt install -y build-essential libgl1 libglib2.0-0 libsm6 libx
 USER $NB_USER
 
 # Copy the repository into the container
-COPY --chown=${NB_UID} . /opt/misinformation
+COPY --chown=${NB_UID} . /opt/ammico
 
 # Install the Python package
-RUN python -m pip install /opt/misinformation
+RUN python -m pip install /opt/ammico
 
 # Make JupyterLab the default for this application
 ENV JUPYTER_ENABLE_LAB=yes
 
 # Export where the data is located
-ENV XDG_DATA_HOME=/opt/misinformation/data
+ENV XDG_DATA_HOME=/opt/ammico/data
 
 # Copy notebooks into the home directory
 RUN rm -rf $HOME/work
-RUN cp /opt/misinformation/notebooks/*.ipynb $HOME
+RUN cp /opt/ammico/notebooks/*.ipynb $HOME
 
 ARG GOOGLE_CREDS
 ENV GOOGLE_APPLICATION_CREDENTIALS=credentials.json
diff --git a/README.md b/README.md
index 261d420..1a18655 100644
--- a/README.md
+++ b/README.md
@@ -45,17 +45,17 @@ This will install the package and its dependencies locally.
 
 There are sample notebooks in the `notebooks` folder for you to explore the package:
 1. Text extraction: Use the notebook `get-text-from-image.ipynb` to extract any text from the images. The text is directly translated into English. If the text should be further analysed, set the keyword `analyse_text` to `True` as demonstrated in the notebook.\
-**You can run this notebook on google colab: [Here](https://colab.research.google.com/github/ssciwr/misinformation/blob/main/notebooks/get-text-from-image.ipynb)**
+**You can run this notebook on google colab: [Here](https://colab.research.google.com/github/ssciwr/ammico/blob/main/notebooks/get-text-from-image.ipynb)**
 Place the data files and google cloud vision API key in your google drive to access the data.
 1. Emotion recognition: Use the notebook `facial_expressions.ipynb` to identify if there are faces on the image, if they are wearing masks, and if they are not wearing masks also the race, gender and dominant emotion.
-**You can run this notebook on google colab: [Here](https://colab.research.google.com/github/ssciwr/misinformation/blob/main/notebooks/facial_expressions.ipynb)**
+**You can run this notebook on google colab: [Here](https://colab.research.google.com/github/ssciwr/ammico/blob/main/notebooks/facial_expressions.ipynb)**
 Place the data files in your google drive to access the data.
 1. Content extraction: Use the notebook `image_summary.ipynb` to create captions for the images and ask questions about the image content.
-**You can run this notebook on google colab: [Here](https://colab.research.google.com/github/ssciwr/misinformation/blob/main/notebooks/image_summary.ipynb)**
+**You can run this notebook on google colab: [Here](https://colab.research.google.com/github/ssciwr/ammico/blob/main/notebooks/image_summary.ipynb)**
 1. Multimodal content: Use the notebook `multimodal_search.ipynb` to find the best fitting images to an image or text query.
-**You can run this notebook on google colab: [Here](https://colab.research.google.com/github/ssciwr/misinformation/blob/main/notebooks/multimodal_search.ipynb)**
+**You can run this notebook on google colab: [Here](https://colab.research.google.com/github/ssciwr/ammico/blob/main/notebooks/multimodal_search.ipynb)**
 1. Object analysis: Use the notebook `ojects_expression.ipynb` to identify certain objects in the image. Currently, the following objects are being identified: person, bicycle, car, motorcycle, airplane, bus, train, truck, boat, traffic light, cell phone.
-**You can run this notebook on google colab: [Here](https://colab.research.google.com/github/ssciwr/misinformation/blob/main/notebooks/objects_expression.ipynb)**
+**You can run this notebook on google colab: [Here](https://colab.research.google.com/github/ssciwr/ammico/blob/main/notebooks/objects_expression.ipynb)**
 
 There are further notebooks that are currently of exploratory nature (`colors_expression.ipynb` to identify certain colors on the image). To crop social media posts use the `cropposts.ipynb` notebook.
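A minimal sketch of how the renamed image could be built and run once this patch is applied. The tag name `ammico`, the port mapping, the Jupyter docker-stacks base image (suggested by $NB_USER and JUPYTER_ENABLE_LAB), and the idea that GOOGLE_CREDS carries the service-account JSON later written to credentials.json (the Dockerfile lines that consume it are outside this hunk) are assumptions, not part of the patch.

# Build from the repository root; the tag "ammico" is illustrative (assumption).
docker build --build-arg GOOGLE_CREDS="$(cat credentials.json)" -t ammico .
# Assuming a Jupyter docker-stacks base, the container serves JupyterLab on port 8888.
docker run -p 8888:8888 ammico

Because the Dockerfile copies the whole repository to /opt/ammico and sets XDG_DATA_HOME=/opt/ammico/data, data files placed in the repository's data directory before the build are available inside the container.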