From 6187b3fe900728e041ed76894ae5b32518ef429c Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Wed, 31 May 2023 15:47:24 +0200 Subject: [PATCH] [DOCS] Updating Tutorials (#17769) --- docs/nbdoc/consts.py | 2 +- docs/notebooks-installation.md | 235 +++++++++++++++++++-------------- docs/tutorials.md | 104 ++++++++------- 3 files changed, 194 insertions(+), 147 deletions(-) diff --git a/docs/nbdoc/consts.py b/docs/nbdoc/consts.py index 5681153335f..4b7cafbf6b2 100644 --- a/docs/nbdoc/consts.py +++ b/docs/nbdoc/consts.py @@ -8,7 +8,7 @@ repo_owner = "openvinotoolkit" repo_name = "openvino_notebooks" -artifacts_link = "http://repository.toolbox.iotg.sclab.intel.com/projects/ov-notebook/0.1.0-latest/20230517220809/dist/rst_files/" +artifacts_link = "http://repository.toolbox.iotg.sclab.intel.com/projects/ov-notebook/0.1.0-latest/20230529220816/dist/rst_files/" blacklisted_extensions = ['.xml', '.bin'] diff --git a/docs/notebooks-installation.md b/docs/notebooks-installation.md index 79bc0ceda70..eee199201c2 100644 --- a/docs/notebooks-installation.md +++ b/docs/notebooks-installation.md @@ -107,7 +107,7 @@ operating system or environment. Linux Systems may require installation of additional libraries. - The following installation steps should work on Ubuntu Desktop 18.04, 20.04, 20.10, and on Ubuntu Server. + The following installation steps should work on a clean install of Ubuntu Desktop 20.04, and should also work on Ubuntu 22.04 and 20.10, and on Ubuntu Server. .. code-block:: @@ -115,6 +115,17 @@ operating system or environment. sudo apt-get upgrade sudo apt-get install python3-venv build-essential python3-dev git-all + For an Intel Integrated Graphics Card, you can install the `Intel Graphics Compute Runtime `__ to enable inference on this device. The command for Ubuntu 20.04 is: + + .. note:: + + Only execute this command if you do not yet have OpenCL drivers installed. + + .. code-block:: + + sudo apt-get install intel-opencl-icd + + The following installation steps should work on a clean install of Red Hat, CentOS, Amazon Linux 2 or Fedora. If any issues occur, see the `Troubleshooting <#-troubleshooting>`__ section. .. code-block:: @@ -123,19 +134,37 @@ operating system or environment. sudo yum upgrade sudo yum install python36-devel mesa-libGL -.. tab:: macOS +.. tab:: macOS - 1. **Install Python** + Alternatively, you may skip steps 1-3 if you prefer to manually install `Python 3 `__ and `Git `__. - Download Python software (3.7, 3.8, 3.9, 3.10, 3.11) from `python.org`. For example, this `installer`_. + 1. **Install Xcode Command Line Tools** - .. _installer: https://www.python.org/ftp/python/3.7.9/python-3.7.9-macosx10.9.pkg + .. code-block:: - Run the installer by double clicking it. Follow the installation steps to set up the software. + xcode-select --install + + 2. **Install Homebrew** + + .. code-block:: + + /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + After you install it, follow the instructions from the Homebrew installation to set it up. + + 3. **Install Python and dependencies** + + .. code-block:: + + brew install python@3.9 + brew install protobuf + + + Run each step below in a terminal. + + .. note:: + + If OpenVINO is installed globally, do not run any of these commands in a terminal where ``setupvars.sh`` is sourced. - .. note:: - - Refer to the "Important Information" displayed during installation for information about SSL/TLS certificate validation and running the "Install Certificates.command". 
These certificates are required to run some of the notebooks. .. tab:: Azure ML @@ -165,38 +194,53 @@ operating system or environment. FROM quay.io/thoth-station/s2i-thoth-ubi8-py38:v0.29.0 LABEL name="OpenVINO(TM) Notebooks" \ - maintainer="helena.kloosterman@intel.com" \ - vendor="Intel Corporation" \ - version="0.2.0" \ - release="2021.4" \ - summary="OpenVINO(TM) Developer Tools and Jupyter Notebooks" \ - description="OpenVINO(TM) Notebooks Container" + maintainer="helena.kloosterman@intel.com" \ + vendor="Intel Corporation" \ + version="0.2.0" \ + release="2021.4" \ + summary="OpenVINO(TM) Developer Tools and Jupyter Notebooks" \ + description="OpenVINO(TM) Notebooks Container" ENV JUPYTER_ENABLE_LAB="true" \ - ENABLE_MICROPIPENV="1" \ - UPGRADE_PIP_TO_LATEST="1" \ - WEB_CONCURRENCY="1" \ - THOTH_ADVISE="0" \ - THOTH_ERROR_FALLBACK="1" \ - THOTH_DRY_RUN="1" \ - THAMOS_DEBUG="0" \ - THAMOS_VERBOSE="1" \ - THOTH_PROVENANCE_CHECK="0" + ENABLE_MICROPIPENV="1" \ + UPGRADE_PIP_TO_LATEST="1" \ + WEB_CONCURRENCY="1" \ + THOTH_ADVISE="0" \ + THOTH_ERROR_FALLBACK="1" \ + THOTH_DRY_RUN="1" \ + THAMOS_DEBUG="0" \ + THAMOS_VERBOSE="1" \ + THOTH_PROVENANCE_CHECK="0" USER root # Upgrade NodeJS > 12.0 # Install dos2unix for line end conversion on Windows RUN curl -sL https://rpm.nodesource.com/setup_14.x | bash - && \ - yum remove -y nodejs && \ - yum install -y nodejs mesa-libGL dos2unix libsndfile && \ - yum -y update-minimal --security --sec-severity=Important --sec-severity=Critical --sec-severity=Moderate + yum remove -y nodejs && \ + yum install -y nodejs-14.18.1 mesa-libGL dos2unix libsndfile && \ + yum -y update-minimal --security --sec-severity=Important --sec-severity=Critical --sec-severity=Moderate + + # GPU drivers + RUN dnf install -y 'dnf-command(config-manager)' && \ + dnf config-manager --add-repo https://repositories.intel.com/graphics/rhel/8.5/intel-graphics.repo + + RUN rpm -ivh https://vault.centos.org/centos/8/AppStream/x86_64/os/Packages/mesa-filesystem-21.1.5-1.el8.x86_64.rpm && \ + dnf install --refresh -y \ + intel-opencl-22.28.23726.1-i419.el8.x86_64 intel-media intel-mediasdk libmfxgen1 libvpl2 \ + level-zero intel-level-zero-gpu \ + intel-metrics-library intel-igc-core intel-igc-cm \ + libva libva-utils intel-gmmlib && \ + rpm -ivh http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/ocl-icd-2.2.12-1.el8.x86_64.rpm && \ + rpm -ivh https://download-ib01.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/c/clinfo-3.0.21.02.21-4.el8.x86_64.rpm # Copying in override assemble/run scripts COPY .docker/.s2i/bin /tmp/scripts # Copying in source code COPY .docker /tmp/src COPY .ci/patch_notebooks.py /tmp/scripts + COPY .ci/validate_notebooks.py /tmp/scripts + COPY .ci/ignore_treon_docker.txt /tmp/scripts # Git on Windows may convert line endings. Run dos2unix to enable # building the image when the scripts have CRLF line endings. @@ -232,11 +276,11 @@ operating system or environment. .. code-block:: python -m venv openvino_env - - 2. **Activate the Environment** + + 2. **Activate the Environment** .. code-block:: - + openvino_env\Scripts\activate @@ -245,7 +289,7 @@ operating system or environment. Using the --depth=1 option for git clone reduces download size. .. code-block:: - + git clone --depth=1 https://github.com/openvinotoolkit/openvino_notebooks.git cd openvino_notebooks @@ -253,24 +297,17 @@ operating system or environment. .. code-block:: - python -m pip install --upgrade pip - - + python -m pip install --upgrade pip wheel setuptools + + 5. 
**Install required packages** .. code-block:: - + pip install -r requirements.txt - - 6. **Install the virtualenv Kernel in Jupyter** - .. code-block:: - - python -m ipykernel install --user --name openvino_env - - -.. tab:: Linux Systems +.. tab:: Linux Systems 1. **Create a Virtual Environment** @@ -279,11 +316,11 @@ operating system or environment. .. code-block:: python3 -m venv openvino_env - + 2. **Activate the Environment** .. code-block:: - + source openvino_env/bin/activate 3. **Clone the Repository** @@ -291,7 +328,7 @@ operating system or environment. Using the --depth=1 option for git clone reduces download size. .. code-block:: - + git clone --depth=1 https://github.com/openvinotoolkit/openvino_notebooks.git cd openvino_notebooks @@ -300,21 +337,17 @@ operating system or environment. .. code-block:: python -m pip install --upgrade pip - - + pip install wheel setuptools + + 5. **Install required packages** .. code-block:: - + pip install -r requirements.txt - 6. **Install the virtualenv Kernel in Jupyter** - .. code-block:: - - python -m ipykernel install --user --name openvino_env - -.. tab:: macOS +.. tab:: macOS 1. **Create a Virtual Environment** @@ -323,11 +356,11 @@ operating system or environment. .. code-block:: python3 -m venv openvino_env - + 2. **Activate the Environment** .. code-block:: - + source openvino_env/bin/activate 3. **Clone the Repository** @@ -335,7 +368,7 @@ operating system or environment. Using the --depth=1 option for git clone reduces download size. .. code-block:: - + git clone --depth=1 https://github.com/openvinotoolkit/openvino_notebooks.git cd openvino_notebooks @@ -343,66 +376,72 @@ operating system or environment. .. code-block:: - python -m pip install --upgrade pip - - + python -m pip install --upgrade pip wheel setuptools + 5. **Install required packages** .. code-block:: - + pip install -r requirements.txt - 6. **Install the virtualenv Kernel in Jupyter** - .. code-block:: - - python -m ipykernel install --user --name openvino_env +.. tab:: Azure ML -.. tab:: Azure ML - - 1. **Create a Virtual Environment** - - If you already have installed *openvino-dev*, you may skip this step and proceed with the next one. + 1. Create a Conda environment .. code-block:: - python3 -m venv openvino_env - - 2. **Activate the Environment** + conda create --name openvino_env python=3.8 -y + + 2. Activate the environment .. code-block:: - - source openvino_env/bin/activate - 3. **Clone the Repository** + conda activate openvino_env - Using the --depth=1 option for git clone reduces download size. + 3. Clone OpenVINO notebooks .. code-block:: - - git clone --depth=1 https://github.com/openvinotoolkit/openvino_notebooks.git + + git clone https://github.com/openvinotoolkit/openvino_notebooks.git + + 4. Change directory to ``openvino_notebooks`` + + .. code-block:: + cd openvino_notebooks - 4. **Upgrade PIP** + 5. Upgrade ``pip`` and install required dependencies. .. code-block:: python -m pip install --upgrade pip - - - 5. **Install required packages** - - .. code-block:: - pip install -r requirements.txt - 6. **Install the virtualenv Kernel in Jupyter** + 6. Add ``openvino_env`` to PATH .. code-block:: - - python -m ipykernel install --user --name openvino_env -.. tab:: Docker + set PATH="/anaconda/envs/openvino_env/bin;%PATH%" + + 7. Run the notebooks. + + To run the notebooks, click on Notebooks and refresh your Files: + + .. image:: https://user-images.githubusercontent.com/15709723/117580814-a725c300-b0ae-11eb-93bf-007779c26075.png + + .. 
image:: https://user-images.githubusercontent.com/15709723/117559447-2af19800-b03a-11eb-8bd6-8813b7a8814f.png + + .. image:: https://user-images.githubusercontent.com/15709723/117580973-37640800-b0af-11eb-91ae-7194b9b4e505.png + + .. note:: + + Make sure you are using the ``openvino_env`` environment (not Python 3). + + .. image:: https://user-images.githubusercontent.com/1720147/162269003-7937b47c-484f-416c-97c7-bb869376ff68.png + + +.. tab:: Docker 1. **Clone the Repository** @@ -416,19 +455,19 @@ operating system or environment. .. code-block:: docker build -t openvino_notebooks . - + 3. **Run the Docker Image** .. code-block:: docker run -it -p 8888:8888 openvino_notebooks - .. note:: - - For using model training notebooks, allocate additional memory: - + .. note:: + + For using model training notebooks, allocate additional memory: + .. code-block:: - + docker run -it -p 8888:8888 --shm-size 8G openvino_notebooks 4. **Start the browser** @@ -464,7 +503,7 @@ If you want to launch only one notebook, such as the *Monodepth* notebook, run t .. code:: bash - jupyter 201-vision-monodepth.ipynb + jupyter lab notebooks/201-vision-monodepth/201-vision-monodepth.ipynb Launch All Notebooks -------------------------- diff --git a/docs/tutorials.md b/docs/tutorials.md index af80d222d2e..2109e0fb20c 100644 --- a/docs/tutorials.md +++ b/docs/tutorials.md @@ -115,35 +115,37 @@ Tutorials that explain how to optimize and quantize models with OpenVINO tools. +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ | Notebook | Description | +==============================================================================================================================+==================================================================================================================================+ - | `105-language-quantize-bert `__ | Optimize and quantize a pre-trained BERT model | + | `105-language-quantize-bert `__ | Optimize and quantize a pre-trained BERT model. | +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ - | `106-auto-device `__ |br| |n106| | Demonstrates how to use AUTO Device | + | `106-auto-device `__ |br| |n106| | Demonstrates how to use AUTO Device. | +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ - | `107-speech-recognition-quantization `__ | Optimize and quantize a pre-trained Data2Vec speech model | + | `107-speech-recognition-quantization `__ | Optimize and quantize a pre-trained Data2Vec speech model. | +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ - | `107-speech-recognition-quantization `__ | Optimize and quantize a pre-trained Wav2Vec2 speech model | + | `107-speech-recognition-quantization `__ | Optimize and quantize a pre-trained Wav2Vec2 speech model. 
| +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ - | `108-gpu-device `__ | Working with GPUs in OpenVINO™ | + | `108-gpu-device `__ | Working with GPUs in OpenVINO™. | +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ - | `109-performance-tricks `__ | Performance tricks in OpenVINO™ | + | `109-performance-tricks `__ | Performance tricks in OpenVINO™. | +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ - | `110-ct-segmentation-quantize `__ |br| |n110| | Quantize a kidney segmentation model and show live inference | + | `110-ct-segmentation-quantize `__ |br| |n110| | Live inference of a kidney segmentation model and benchmark CT-scan data with OpenVINO. | +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ - | `111-yolov5-quantization-migration `__ | Migrate YOLOv5 POT API based quantization pipeline on Neural Network Compression Framework (NNCF) | + | `110-ct-segmentation-quantize `__ | Quantize a kidney segmentation model and show live inference. | +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ - | `112-pytorch-post-training-quantization-nncf `__ | Use Neural Network Compression Framework (NNCF) to quantize PyTorch model in post-training mode (without model fine-tuning) | + | `111-yolov5-quantization-migration `__ | Migrate YOLOv5 POT API based quantization pipeline on Neural Network Compression Framework (NNCF). | +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ - | `113-image-classification-quantization `__ |br| |n113| | Quantize mobilenet image classification | + | `112-pytorch-post-training-quantization-nncf `__ | Use Neural Network Compression Framework (NNCF) to quantize PyTorch model in post-training mode (without model fine-tuning). | +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ - | `114-quantization-simplified-mode `__ |br| |n114| | Quantize Image Classification Models with POT in Simplified Mode | + | `113-image-classification-quantization `__ |br| |n113| | Quantize MobileNet image classification. 
| +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ - | `115-async-api `__ |br| |n115| | Use Asynchronous Execution to Improve Data Pipelining | + | `114-quantization-simplified-mode `__ |br| |n114| | Quantize image classification models with POT in simplified mode. | +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ - | `116-sparsity-optimization `__ | Improve performance of sparse Transformer models | + | `115-async-api `__ |br| |n115| | Use asynchronous execution to improve data pipelining. | +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ - | `117-model-server `__ | Improve performance of sparse Transformer models | + | `116-sparsity-optimization `__ | Improve performance of sparse Transformer models. | +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ - | `118-optimize-preprocessing `__ | Improve performance of image preprocessing step | + | `117-model-server `__ | Improve performance of sparse Transformer models. | + +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ + | `118-optimize-preprocessing `__ | Improve performance of image preprocessing step. | +------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ @@ -181,7 +183,9 @@ Demos that demonstrate inference on a particular model. +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ | `202-vision-superresolution-video `__ |br| |n202v| | Turn 360p into 1080p video using a super resolution model. | |n202v-img1| → |n202v-img2| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `203-meter-reader `__ |br| |n203| | PaddlePaddle pre-trained models to read industrial meter's value | |n203-img1| | + | `203-meter-reader `__ |br| |n203| | PaddlePaddle pre-trained models to read industrial meter's value. 
| |n203-img1| | + +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ + | `204-segmenter-semantic-segmentation `__ | Semantic segmentation with OpenVINO™ using Segmenter. | |n204-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ | `206-vision-paddlegan-anime `__ | Turn an image into anime using a GAN. | |n206-img1| → |n206-img2| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ @@ -189,51 +193,55 @@ Demos that demonstrate inference on a particular model. +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ | `208-optical-character-recognition `__ | Annotate text on images using text recognition resnet. | |n208-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `212-pyannote-speaker-diarization `__ | Run inference on speaker diarization pipeline | |n212-img1| | + | `212-pyannote-speaker-diarization `__ | Run inference on speaker diarization pipeline. | |n212-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ | `213-question-answering `__ |br| |n213| | Answer your questions basing on a context. | |n213-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `214-grammar-correction `__ | Grammatical Error Correction with OpenVINO | | + | `214-grammar-correction `__ | Grammatical error correction with OpenVINO. | | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `217-vision-deblur `__ |br| |n217| | Deblur Images with DeblurGAN-v2. 
| |n217-img1| | + | `217-vision-deblur `__ |br| |n217| | Deblur images with DeblurGAN-v2. | |n217-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `219-knowledge-graphs-conve `__ |br| |n219| | Optimize the knowledge graph embeddings model (ConvE) with OpenVINO | | + | `219-knowledge-graphs-conve `__ |br| |n219| | Optimize the knowledge graph embeddings model (ConvE) with OpenVINO. | | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `220-yolov5-accuracy-check-and-quantization `__ | Quantize the Ultralytics YOLOv5 model and check accuracy using the OpenVINO POT API | |n220-img1| | + | `221-machine-translation `__ |br| |n221| | Real-time translation from English to German. | | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `221-machine-translation `__ |br| |n221| | Real-time translation from English to German | | + | `222-vision-image-colorization `__ |br| |n222| | Use pre-trained models to colorize black & white images using OpenVINO. | |n222-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `222-vision-image-colorization `__ |br| |n222| | Use pre-trained models to colorize black & white images using OpenVINO | |n222-img1| | + | `223-text-prediction `__ | Use pre-trained models to perform text prediction on an input sequence. | |n223-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `223-text-prediction `__ | Use pretrained models to perform text prediction on an input sequence | |n223-img1| | + | `224-3D-segmentation-point-clouds `__ | Process point cloud data and run 3D Part Segmentation with OpenVINO. | |n224-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `224-3D-segmentation-point-clouds `__ | Process point cloud data and run 3D Part Segmentation with OpenVINO | |n224-img1| | + | `225-stable-diffusion-text-to-image `__ | Text-to-image generation with Stable Diffusion method. 
| |n225-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `225-stable-diffusion-text-to-image `__ | Text-to-image generation with Stable Diffusion method | |n225-img1| | + | `226-yolov7-optimization `__ | Optimize YOLOv7, using NNCF PTQ API. | |n226-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `226-yolov7-optimization `__ | Optimize YOLOv7 using NNCF PTQ API | |n226-img1| | + | `227-whisper-subtitles-generation `__ | Generate subtitles for video with OpenAI Whisper and OpenVINO. | |n227-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `227-whisper-subtitles-generation `__ | Generate subtitles for video with OpenAI Whisper and OpenVINO | |n227-img1| | + | `228-clip-zero-shot-image-classification `__ | Perform Zero-shot image classification with CLIP and OpenVINO. | |n228-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `228-clip-zero-shot-image-classification `__ | Perform Zero-shot Image Classification with CLIP and OpenVINO | |n228-img1| | + | `229-distilbert-sequence-classification `__ |br| |n229| | Sequence classification with OpenVINO. | |n229-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `229-distilbert-sequence-classification `__ |br| |n229| | Sequence Classification with OpenVINO | |n229-img1| | + | `230-yolov8-optimization `__ | Optimize YOLOv8, using NNCF PTQ API. | |n230-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `230-yolov8-optimization `__ | Optimize YOLOv8 using NNCF PTQ API | |n230-img1| | + | `231-instruct-pix2pix-image-editing `__ | Image editing with InstructPix2Pix. 
| |n231-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `231-instruct-pix2pix-image-editing `__ | Image editing with InstructPix2Pix | |n231-img1| | + | `232-clip-language-saliency-map `__ | Language-visual saliency with CLIP and OpenVINO™. | |n232-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `232-clip-language-saliency-map `__ | Language-Visual Saliency with CLIP and OpenVINO™ | |n232-img1| | + | `233-blip-visual-language-processing `__ | Visual question answering and image captioning using BLIP and OpenVINO™. | |n233-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `233-blip-visual-language-processing `__ | Visual Question Answering and Image Captioning using BLIP and OpenVINO™ | |n233-img1| | + | `234-encodec-audio-compression `__ | Audio compression with EnCodec and OpenVINO™. | |n234-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `234-encodec-audio-compression `__ | Audio compression with EnCodec and OpenVINO™ | |n234-img1| | + | `235-controlnet-stable-diffusion `__ | A text-to-image generation with ControlNet Conditioning and OpenVINO™. | |n235-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `235-controlnet-stable-diffusion `__ | A Text-to-Image Generation with ControlNet Conditioning and OpenVINO™ | |n235-img1| | + | `236-stable-diffusion-v2 `__ | Text-to-image generation and Infinite Zoom with Stable Diffusion v2 and OpenVINO™. | |n236-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `236-stable-diffusion-v2 `__ | Text-to-Image Generation and Infinite Zoom with Stable Diffusion v2 and OpenVINO™ | |n236-img1| | + | `236-stable-diffusion-v2 `__ | Text-to-image generation with Stable Diffusion v2 and OpenVINO™. 
| |n236-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ - | `237-segment-anything `__ | Prompt based object segmentation mask generation using Segment Anything and OpenVINO™ | |n237-img1| | + | `237-segment-anything `__ | Prompt based object segmentation mask generation, using Segment Anything and OpenVINO™. | |n237-img1| | + +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ + | `238-deep-floyd-if `__ | Text-to-image generation with DeepFloyd IF and OpenVINO™. | |n238-img1| | + +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ + | `239-image-bind `__ | Binding multimodal data, using ImageBind and OpenVINO™. | |n239-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ @@ -250,10 +258,6 @@ Tutorials that include code to train neural networks. +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ | Notebook | Description | Preview | +===============================================================================================================================+============================================================================================================================================+===========================================+ -| `301-tensorflow-training-openvino `__ | Train a flower classification model from TensorFlow, then convert to OpenVINO IR. | |n301-img1| | -+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ -| `301-tensorflow-training-openvino-pot `__ | Use Post-training Optimization Tool (POT) to quantize the flowers model. | | -+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ | `302-pytorch-quantization-aware-training `__ | Use Neural Network Compression Framework (NNCF) to quantize PyTorch model. 
| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ | `305-tensorflow-quantization-aware-training `__ | Use Neural Network Compression Framework (NNCF) to quantize TensorFlow model. | | @@ -278,13 +282,13 @@ Live inference demos that run on a webcam or video files. +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ | `403-action-recognition-webcam `__ |br| |n403| | Human action recognition with a webcam or video file. | |n403-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ -| `404-style-transfer-webcam `__ |br| |n404| | Style Transfer with a webcam or video file | |n404-img1| | +| `404-style-transfer-webcam `__ |br| |n404| | Style transfer with a webcam or video file. | |n404-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ -| `405-paddle-ocr-webcam `__ |br| |n405| | OCR with a webcam or video file | |n405-img1| | +| `405-paddle-ocr-webcam `__ |br| |n405| | OCR with a webcam or video file. | |n405-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ -| `406-3D-pose-estimation-webcam `__ |br| |n406| | 3D display of human pose estimation with a webcam or video file | |n406-img1| | +| `406-3D-pose-estimation-webcam `__ |br| |n406| | 3D display of human pose estimation with a webcam or video file. | |n406-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ -| `407-person-tracking-webcam `__ |br| |n407| | Person tracking with a webcam or video file | |n407-img1| | +| `407-person-tracking-webcam `__ |br| |n407| | Person tracking with a webcam or video file. | |n407-img1| | +-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+ @@ -384,6 +388,8 @@ Made with `contributors-img `__. 
:target: https://user-images.githubusercontent.com/15709723/127269258-a8e2c03e-731e-4317-b5b2-ed2ee767ff5e.gif .. |n203-img1| image:: https://user-images.githubusercontent.com/91237924/166135627-194405b0-6c25-4fd8-9ad1-83fb3a00a081.jpg :target: https://user-images.githubusercontent.com/91237924/166135627-194405b0-6c25-4fd8-9ad1-83fb3a00a081.jpg +.. |n204-img1| image:: https://user-images.githubusercontent.com/61357777/223854308-d1ac4a39-cc0c-4618-9e4f-d9d4d8b991e8.jpg + :target: https://user-images.githubusercontent.com/61357777/223854308-d1ac4a39-cc0c-4618-9e4f-d9d4d8b991e8.jpg .. |n205-img1| image:: https://user-images.githubusercontent.com/15709723/125184237-f4b6cd00-e1d0-11eb-8e3b-d92c9a728372.png :target: https://user-images.githubusercontent.com/15709723/125184237-f4b6cd00-e1d0-11eb-8e3b-d92c9a728372.png .. |n206-img1| image:: https://user-images.githubusercontent.com/15709723/127788059-1f069ae1-8705-4972-b50e-6314a6f36632.jpeg @@ -414,8 +420,6 @@ Made with `contributors-img `__. :target: https://user-images.githubusercontent.com/41332813/158430181-05d07f42-cdb8-4b7a-b7dc-e7f7d9391877.png .. |n218-img1| image:: https://user-images.githubusercontent.com/47499836/163544861-fa2ad64b-77df-4c16-b065-79183e8ed964.png :target: https://user-images.githubusercontent.com/47499836/163544861-fa2ad64b-77df-4c16-b065-79183e8ed964.png -.. |n220-img1| image:: https://user-images.githubusercontent.com/44352144/177097174-cfe78939-e946-445e-9fce-d8897417ef8e.png - :target: https://user-images.githubusercontent.com/44352144/177097174-cfe78939-e946-445e-9fce-d8897417ef8e.png .. |n222-img1| image:: https://user-images.githubusercontent.com/18904157/166343139-c6568e50-b856-4066-baef-5cdbd4e8bc18.png :target: https://user-images.githubusercontent.com/18904157/166343139-c6568e50-b856-4066-baef-5cdbd4e8bc18.png .. |n223-img1| image:: https://user-images.githubusercontent.com/91228207/185105225-0f996b0b-0a3b-4486-872d-364ac6fab68b.png @@ -448,6 +452,10 @@ Made with `contributors-img `__. :target: https://user-images.githubusercontent.com/29454499/228882108-25c1f65d-4c23-4e1d-8ba4-f6164280a3e3.gif .. |n237-img1| image:: https://user-images.githubusercontent.com/29454499/231468849-1cd11e68-21e2-44ed-8088-b792ef50c32d.png :target: https://user-images.githubusercontent.com/29454499/231468849-1cd11e68-21e2-44ed-8088-b792ef50c32d.png +.. |n238-img1| image:: https://user-images.githubusercontent.com/29454499/241643886-dfcf3c48-8d50-4730-ae28-a21595d9504f.png + :target: https://user-images.githubusercontent.com/29454499/241643886-dfcf3c48-8d50-4730-ae28-a21595d9504f.png +.. |n239-img1| image:: https://user-images.githubusercontent.com/29454499/240364108-39868933-d221-41e6-9b2e-dac1b14ef32f.png + :target: https://user-images.githubusercontent.com/29454499/240364108-39868933-d221-41e6-9b2e-dac1b14ef32f.png .. |n301-img1| image:: https://user-images.githubusercontent.com/15709723/127779607-8fa34947-1c35-4260-8d04-981c41a2a2cc.png :target: https://user-images.githubusercontent.com/15709723/127779607-8fa34947-1c35-4260-8d04-981c41a2a2cc.png .. |n401-img1| image:: https://user-images.githubusercontent.com/4547501/141471665-82b28c86-cf64-4bfe-98b3-c314658f2d96.gif
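
Once any of the installation paths above has been completed (a virtual environment plus ``requirements.txt``, or the Docker image with its GPU driver layer), the result can be sanity-checked from Python before launching the notebooks. The following sketch is not part of the patch; it assumes the ``openvino`` package installed by ``requirements.txt`` is available in the active ``openvino_env`` environment and uses the ``openvino.runtime.Core`` API to list the devices inference can target. ``GPU`` should appear only if the Intel Graphics Compute Runtime (``intel-opencl-icd`` on Ubuntu, or the driver packages added in the Dockerfile) is installed.

.. code-block:: python

   # Sanity check for the environment set up above -- not part of the patch.
   # Assumes `pip install -r requirements.txt` was run in the activated openvino_env.
   from openvino.runtime import Core, get_version

   print("OpenVINO version:", get_version())

   core = Core()
   # CPU is always present; GPU is listed only when the Intel OpenCL drivers
   # described in the Linux and Docker sections are installed and visible.
   print("Available devices:", core.available_devices)
   for device in core.available_devices:
       print(device, "->", core.get_property(device, "FULL_DEVICE_NAME"))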
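
The Azure ML steps end with a reminder to select the ``openvino_env`` kernel rather than the default ``Python 3`` one. A quick, generic way to confirm which interpreter a notebook kernel is actually using — plain standard library, not something mandated by these docs — is shown below.

.. code-block:: python

   # Run in a notebook cell to confirm the kernel points at the openvino_env interpreter.
   import sys

   print("Interpreter:", sys.executable)
   print("Running from openvino_env:", "openvino_env" in sys.executable)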
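
For context on the ``docs/nbdoc/consts.py`` hunk at the top of the patch: it only moves ``artifacts_link`` to a newer ``rst_files`` snapshot (``20230529220816``). Purely as a hypothetical illustration of how such a link can be consumed — the real ``nbdoc`` fetch logic is not shown in this patch, and the file name below is invented — a single artifact could be downloaded with the standard library.

.. code-block:: python

   # Hypothetical sketch only: fetch one generated .rst artifact from the snapshot
   # referenced by artifacts_link in docs/nbdoc/consts.py. The file name is made up
   # for illustration, and the host is Intel-internal, so it may not resolve publicly.
   from urllib.request import urlopen

   artifacts_link = (
       "http://repository.toolbox.iotg.sclab.intel.com/projects/ov-notebook/"
       "0.1.0-latest/20230529220816/dist/rst_files/"
   )
   example_file = "example-notebook-with-output.rst"  # hypothetical name

   with urlopen(artifacts_link + example_file, timeout=30) as response:
       print(response.status, len(response.read()), "bytes")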