Removed obsolete scripts (#11107)

This commit is contained in:
Ilya Lavrenov 2022-03-22 14:52:03 +03:00 committed by GitHub
parent 5dcb6c2cee
commit 2f46890444
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 0 additions and 1898 deletions

View File

@ -1,64 +0,0 @@
=====================================================
Scripts to build and run OpenVINO samples
=====================================================
These scripts simplify the process of building samples, downloading and converting models, and running samples to perform inference. They can be used for quick validation of an OpenVINO installation and proper environment initialization.
Setting Up
================
If you are behind a proxy, set the following environment variables in the console session:
On Linux* and Mac OS:
export http_proxy=http://<proxyHost>:<proxyPort>
export https_proxy=https://<proxyHost>:<proxyPort>
On Windows* OS:
set http_proxy=http://<proxyHost>:<proxyPort>
set https_proxy=https://<proxyHost>:<proxyPort>
Running Samples
=============
The "demo" folder contains two scripts:
1. Classification sample using public SqueezeNet topology (run_sample_squeezenet.sh|bat)
2. Benchmark sample using public SqueezeNet topology (run_sample_benchmark_app.sh|bat)
To run the samples, invoke run_sample_squeezenet.sh or run_sample_benchmark_app.sh (*.bat on Windows) scripts from the console without parameters, for example:
./run_sample_squeezenet.sh
The script allows you to specify the target device to infer on using the -d <CPU|GPU|MYRIAD> option.
Classification Sample Using SqueezeNet
====================================
The sample illustrates the general workflow of using the Intel(R) Deep Learning Deployment Toolkit and performs the following:
- Downloads a public SqueezeNet model using the Model Downloader (extras\open_model_zoo\tools\downloader\downloader.py)
- Installs all prerequisites required for running the Model Optimizer using the scripts from the "tools\model_optimizer\install_prerequisites" folder
- Converts SqueezeNet to an IR using the Model Optimizer (tools\model_optimizer\mo.py) via the Model Converter (extras\open_model_zoo\tools\downloader\converter.py)
- Builds the Inference Engine classification_sample (samples\cpp\classification_sample)
- Runs the sample with the car.png picture located in the demo folder
The sample application prints top-10 inference results for the picture.
For more information about the Inference Engine classification sample, refer to the documentation available in the sample folder.
Benchmark Sample Using SqueezeNet
===============================
The sample illustrates how to use the Benchmark Application to estimate deep learning inference performance on supported devices.
The sample script does the following:
- Downloads a public SqueezeNet model using the Model Downloader (extras\open_model_zoo\tools\downloader\downloader.py)
- Installs all prerequisites required for running the Model Optimizer using the scripts from the "tools\model_optimizer\install_prerequisites" folder
- Converts SqueezeNet to an IR using the Model Optimizer (tools\model_optimizer\mo.py) via the Model Converter (extras\open_model_zoo\tools\downloader\converter.py)
- Builds the Inference Engine benchmark tool (samples\benchmark_app)
- Runs the tool with the car.png picture located in the demo folder
The benchmark app prints performance counters, resulting latency, and throughput values.
For more information about the Inference Engine benchmark app, refer to the documentation available in the sample folder.

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d8c498d351a7f537bf8e21e4b95a8c89b22eec552cdcfa1d3ecbe0fd8f0f66ab
size 310725

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:373dc00a415557108a5d3c9ee5050f757a9d8bd97676c7fca4adcc54f1cee2b3
size 1432032

View File

@ -1,214 +0,0 @@
:: Copyright (C) 2018-2022 Intel Corporation
:: SPDX-License-Identifier: Apache-2.0

:: Demo script: downloads the public squeezenet1.1 model, converts it to
:: OpenVINO IR with the Model Optimizer, builds the C++ benchmark_app
:: sample, and runs it on car.png.
::
:: Fixes over the previous revision:
::  - "goto error" jumped to a label that does not exist (:error); all error
::    jumps now go to :errorHandling.
::  - the error path fell through into the :delay subroutine and exited with
::    status 0; it now exits with status 1.
::  - Python version components were compared as quoted strings, which
::    rejects two-digit minors (lexically "10" < "6"); they are now
::    compared numerically.
::  - "echo %var%" echoed an undefined variable; it now echoes %version%.
@echo off
setlocal enabledelayedexpansion
set ROOT_DIR=%~dp0
set TARGET=CPU
set BUILD_FOLDER=%USERPROFILE%\Documents\Intel\OpenVINO
:: command line arguments parsing
:input_arguments_loop
if not "%1"=="" (
if "%1"=="-b" (
set BUILD_FOLDER=%2
shift
)
if "%1"=="-d" (
set TARGET=%2
echo target = !TARGET!
shift
)
if "%1"=="-sample-options" (
set SAMPLE_OPTIONS=%2 %3 %4 %5 %6
echo sample_options = !SAMPLE_OPTIONS!
shift
)
if "%1"=="-help" (
echo Benchmark sample using public SqueezeNet topology
echo.
echo Options:
echo -help Print help message
echo -b BUILD_FOLDER Specify the sample build directory
echo -d DEVICE Specify the target device to infer on; CPU, GPU, HDDL or MYRIAD are acceptable. Sample will look for a suitable plugin for device specified
echo -sample-options OPTIONS Specify command line arguments for the sample
exit /b
)
shift
goto :input_arguments_loop
)
set "SOLUTION_DIR64=%BUILD_FOLDER%\inference_engine_cpp_samples_build"
:: Default benchmark length when no sample options were supplied.
IF "%SAMPLE_OPTIONS%"=="" (
set SAMPLE_OPTIONS=-niter 1000
)
set TARGET_PRECISION=FP16
echo target_precision = !TARGET_PRECISION!
set models_path=%BUILD_FOLDER%\openvino_models\models
set models_cache=%BUILD_FOLDER%\openvino_models\cache
set irs_path=%BUILD_FOLDER%\openvino_models\ir
set model_name=squeezenet1.1
set target_image_path=%ROOT_DIR%car.png
set omz_tool_error_message=It is required to download and convert a model. Check https://pypi.org/project/openvino-dev/ to install it. Then run the script again.
:: Initialize the OpenVINO environment; sets INTEL_OPENVINO_DIR.
if exist "%ROOT_DIR%..\..\setupvars.bat" (
call "%ROOT_DIR%..\..\setupvars.bat"
) else (
echo setupvars.bat is not found, INTEL_OPENVINO_DIR can't be set
goto errorHandling
)
echo INTEL_OPENVINO_DIR is set to %INTEL_OPENVINO_DIR%
:: Check if Python is installed
python --version 2>NUL
if errorlevel 1 (
echo Error^: Python is not installed. Please install Python 3.6 ^(64-bit^) or higher from https://www.python.org/downloads/
goto errorHandling
)
:: Check if Python version is equal or higher 3.6
for /F "tokens=* USEBACKQ" %%F IN (`python --version 2^>^&1`) DO (
set version=%%F
)
echo %version%
:: "python --version" prints e.g. "Python 3.6.8"; token 2 is the major
:: version, token 3 the minor version.
for /F "tokens=1,2,3 delims=. " %%a in ("%version%") do (
set Major=%%b
set Minor=%%c
)
if "%Major%"=="" set Major=0
if "%Minor%"=="" set Minor=0
:: Numeric comparison so that e.g. Python 3.10 is accepted.
if %Major% geq 3 (
if %Minor% geq 6 (
set python_ver=okay
)
)
if not "%python_ver%"=="okay" (
echo Unsupported Python version. Please install Python 3.6 ^(64-bit^) or higher from https://www.python.org/downloads/
goto errorHandling
)
:: The omz_* tools are installed by the openvino-dev pip package.
omz_info_dumper --print_all >NUL
if errorlevel 1 (
echo Error: omz_info_dumper was not found. %omz_tool_error_message%
goto errorHandling
)
omz_downloader --print_all >NUL
if errorlevel 1 (
echo Error: omz_downloader was not found. %omz_tool_error_message%
goto errorHandling
)
omz_converter --print_all >NUL
if errorlevel 1 (
echo Error: omz_converter was not found. %omz_tool_error_message%
goto errorHandling
)
:: Ask the model info dumper which subdirectory the downloader will use.
for /F "tokens=* usebackq" %%d in (
`omz_info_dumper --name "%model_name%" ^|
python -c "import sys, json; print(json.load(sys.stdin)[0]['subdirectory'])"`
) do (
set model_dir=%%d
)
set ir_dir=%irs_path%\%model_dir%\%target_precision%
echo.
echo Download public %model_name% model
echo omz_downloader --name "%model_name%" --output_dir "%models_path%" --cache_dir "%models_cache%"
omz_downloader --name "%model_name%" --output_dir "%models_path%" --cache_dir "%models_cache%"
echo %model_name% model downloading completed
CALL :delay 7
:: Skip conversion when the IR already exists.
if exist "%ir_dir%" (
echo.
echo Target folder %ir_dir% already exists. Skipping IR generation with Model Optimizer.
echo If you want to convert a model again, remove the entire %ir_dir% folder.
CALL :delay 7
GOTO buildSample
)
echo.
echo ###############^|^| Run Model Optimizer ^|^|###############
echo.
CALL :delay 3
::set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp
echo omz_converter --mo "%INTEL_OPENVINO_DIR%\tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%"
omz_converter --mo "%INTEL_OPENVINO_DIR%\tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%"
if ERRORLEVEL 1 GOTO errorHandling
CALL :delay 7
:buildSample
echo.
echo ###############^|^| Generate VS solution for Inference Engine samples using cmake ^|^|###############
echo.
CALL :delay 3
if "%PROCESSOR_ARCHITECTURE%" == "AMD64" (
set "PLATFORM=x64"
) else (
set "PLATFORM=Win32"
)
if exist "%SOLUTION_DIR64%\CMakeCache.txt" del "%SOLUTION_DIR64%\CMakeCache.txt"
cd /d "%INTEL_OPENVINO_DIR%\samples\cpp" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio 16 2019" -A %PLATFORM% "%INTEL_OPENVINO_DIR%\samples\cpp"
if ERRORLEVEL 1 GOTO errorHandling
CALL :delay 7
echo.
echo ###############^|^| Build Inference Engine samples using cmake ^|^|###############
echo.
CALL :delay 3
echo cmake --build . --config Release --target benchmark_app
cmake --build . --config Release --target benchmark_app
if ERRORLEVEL 1 GOTO errorHandling
CALL :delay 7
:runSample
echo.
echo ###############^|^| Run Inference Engine benchmark app ^|^|###############
echo.
CALL :delay 3
copy /Y "%ROOT_DIR%%model_name%.labels" "%ir_dir%"
cd /d "%SOLUTION_DIR64%\intel64\Release"
echo benchmark_app.exe -i "%target_image_path%" -m "%ir_dir%\%model_name%.xml" -pc -d !TARGET! !SAMPLE_OPTIONS!
benchmark_app.exe -i "%target_image_path%" -m "%ir_dir%\%model_name%.xml" -pc -d !TARGET! !SAMPLE_OPTIONS!
if ERRORLEVEL 1 GOTO errorHandling
echo.
echo ###############^|^| Inference Engine benchmark app completed successfully ^|^|###############
CALL :delay 10
cd /d "%ROOT_DIR%"
goto :eof

:: Error path: report, restore the working directory, exit non-zero.
:errorHandling
echo Error
cd /d "%ROOT_DIR%"
exit /b 1

:: Subroutine: wait for %1 seconds.
:delay
timeout %~1 2> nul
EXIT /B 0

View File

@ -1,192 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Demo script: downloads the public squeezenet1.1 model, converts it to
# OpenVINO IR with the Model Optimizer, builds the C++ benchmark_app
# sample, and runs it on car.png.
#
# Options:
#   -b BUILD_DIR          root for the sample build directory (default: $HOME)
#   -d DEVICE             inference device (default: CPU)
#   -sample-options OPTS  extra arguments forwarded to benchmark_app
echo -ne "\e[0;33mWARNING: If you get an error when running the sample in the Docker container, you may need to install additional packages. To do this, run the container as root (-u 0) and run install_openvino_dependencies.sh script. If you get a package-independent error, try setting additional parameters using -sample-options.\e[0m\n"
# Directory this script lives in; car.png, the labels file and utils.sh are
# expected to sit next to it.
ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]-$0}" )" && pwd )"
build_dir="$HOME/inference_engine_cpp_samples_build"
# Provides error() and print_and_run().
. "$ROOT_DIR/utils.sh"
# Print usage and exit with status 1.
usage() {
echo "Benchmark demo using public SqueezeNet topology"
echo
echo "Options:"
echo " -help Print help message"
echo " -b BUILD_DIR Specify the sample build directory"
echo " -d DEVICE Specify the target device to infer on; CPU, GPU, HDDL or MYRIAD are acceptable. Sample will look for a suitable plugin for device specified"
echo " -sample-options OPTIONS Specify command line arguments for the sample"
echo
exit 1
}
# Abort with a diagnostic on any failing top-level command (error() is
# defined in utils.sh).
trap 'error ${LINENO}' ERR
target="CPU"
# parse command line options
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-b | --build_dir)
build_dir="$2/inference_engine_cpp_samples_build"
shift
;;
-h | -help | --help)
usage
;;
-d)
target="$2"
echo target = "${target}"
shift
;;
-sample-options)
# Everything after -sample-options is forwarded to benchmark_app verbatim.
sampleoptions=("${@:2}")
echo sample-options = "${sampleoptions[*]}"
shift
;;
*)
# unknown option
;;
esac
shift
done
# Default benchmark length when the caller supplied no sample options.
if [ -z "${sampleoptions[*]}" ]; then
sampleoptions=( -niter 1000 )
fi
target_precision="FP16"
echo -ne "target_precision = ${target_precision}\n"
# Model, cache and IR locations are siblings of the sample build directory.
models_path="$build_dir/../openvino_models/models"
models_cache="$build_dir/../openvino_models/cache"
irs_path="$build_dir/../openvino_models/ir"
model_name="squeezenet1.1"
target_image_path="$ROOT_DIR/car.png"
run_again="Then run the script again.\n\n"
omz_tool_error_message="It is required to download and convert a model. Check https://pypi.org/project/openvino-dev/ to install it. ${run_again}"
# Source setupvars.sh to populate INTEL_OPENVINO_DIR and the tool paths.
# NOTE(review): when setupvars.sh is missing, setupvars_path stays unset, so
# the '.' below also fails and the script exits via the second message.
if [ -e "$ROOT_DIR/../../setupvars.sh" ]; then
setupvars_path="$ROOT_DIR/../../setupvars.sh"
else
echo -ne "Error: setupvars.sh is not found\n"
fi
if ! . "$setupvars_path" ; then
echo -ne "Unable to run ./setupvars.sh. Please check its presence. ${run_again}"
exit 1
fi
# Pick a Python 3 interpreter appropriate for the detected platform.
if [[ -f /etc/centos-release ]]; then
DISTRO="centos"
elif [[ -f /etc/lsb-release ]]; then
DISTRO="ubuntu"
elif [[ -f /etc/redhat-release ]]; then
DISTRO="redhat"
fi
if [[ $DISTRO == "centos" ]]; then
# check installed Python version
if command -v python3.6 >/dev/null 2>&1; then
python_binary=python3.6
fi
elif [[ $DISTRO == "redhat" ]]; then
python_binary=python3
elif [[ $DISTRO == "ubuntu" ]]; then
python_binary=python3
elif [[ "$OSTYPE" == "darwin"* ]]; then
# check installed Python version
if command -v python3.8 >/dev/null 2>&1; then
python_binary=python3.8
elif command -v python3.7 >/dev/null 2>&1; then
python_binary=python3.7
elif command -v python3.6 >/dev/null 2>&1; then
python_binary=python3.6
else
python_binary=python3
fi
fi
if ! command -v $python_binary &>/dev/null; then
echo -ne "\n\nPython 3.6 (x64) or higher is not installed. It is required to run Model Optimizer, please install it. ${run_again}"
exit 1
fi
# The omz_* tools are installed by the openvino-dev pip package.
if ! command -v omz_info_dumper &>/dev/null; then
echo -ne "\n\nomz_info_dumper was not found. ${omz_tool_error_message}"
exit 2
fi
if ! command -v omz_downloader &>/dev/null; then
echo -ne "\n\nomz_downloader was not found. ${omz_tool_error_message}"
exit 3
fi
if ! command -v omz_converter &>/dev/null; then
echo -ne "\n\nomz_converter was not found. ${omz_tool_error_message}"
exit 4
fi
# Step 1. Download the Caffe model and the prototxt of the model
echo -ne "\n###############|| Downloading the Caffe model and the prototxt ||###############\n\n"
# Ask the model info dumper which subdirectory the downloader will use.
model_dir=$(omz_info_dumper --name "$model_name" |
${python_binary} -c 'import sys, json; print(json.load(sys.stdin)[0]["subdirectory"])')
print_and_run omz_downloader --name "$model_name" --output_dir "${models_path}" --cache_dir "${models_cache}"
ir_dir="${irs_path}/${model_dir}/${target_precision}"
if [ ! -e "$ir_dir" ]; then
# Step 2. Convert a model with Model Optimizer
echo -ne "\n###############|| Convert a model with Model Optimizer ||###############\n\n"
export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp
print_and_run omz_converter --name "$model_name" -d "$models_path" -o "$irs_path" --precisions "$target_precision"
else
echo -ne "\n\nTarget folder ${ir_dir} already exists. Skipping IR generation with Model Optimizer."
echo -ne "If you want to convert a model again, remove the entire ${ir_dir} folder. ${run_again}"
fi
# Step 3. Build samples
echo -ne "\n###############|| Build Inference Engine samples ||###############\n\n"
OS_PATH=$(uname -m)
NUM_THREADS="-j2"
if [ "$OS_PATH" == "x86_64" ]; then
OS_PATH="intel64"
NUM_THREADS="-j8"
fi
samples_path="${INTEL_OPENVINO_DIR}/samples/cpp"
binaries_dir="${build_dir}/${OS_PATH}/Release"
# Drop a stale CMake cache so the generator can be re-run safely.
if [ -e "$build_dir/CMakeCache.txt" ]; then
rm -rf "$build_dir/CMakeCache.txt"
fi
mkdir -p "$build_dir"
cd "$build_dir"
cmake -DCMAKE_BUILD_TYPE=Release "$samples_path"
make $NUM_THREADS benchmark_app
# Step 4. Run samples
echo -ne "\n###############|| Run Inference Engine benchmark app ||###############\n\n"
cd "$binaries_dir"
# The labels file shipped next to this script must sit beside the IR.
cp -f "$ROOT_DIR/${model_name}.labels" "${ir_dir}/"
print_and_run ./benchmark_app -d "$target" -i "$target_image_path" -m "${ir_dir}/${model_name}.xml" -pc "${sampleoptions[@]}"
echo -ne "\n###############|| Inference Engine benchmark app completed successfully ||###############\n\n"

View File

@ -1,212 +0,0 @@
:: Copyright (C) 2018-2022 Intel Corporation
:: SPDX-License-Identifier: Apache-2.0

:: Demo script: downloads the public squeezenet1.1 model, converts it to
:: OpenVINO IR with the Model Optimizer, builds the C++
:: classification_sample_async sample, and runs it on car.png.
::
:: Fixes over the previous revision:
::  - "goto error" jumped to a label that does not exist (:error); all error
::    jumps now go to :errorHandling.
::  - the error path fell through into the :delay subroutine and exited with
::    status 0; it now exits with status 1.
::  - Python version components were compared as quoted strings, which
::    rejects two-digit minors (lexically "10" < "6"); they are now
::    compared numerically.
::  - "echo %var%" echoed an undefined variable; it now echoes %version%.
@echo off
setlocal enabledelayedexpansion
set ROOT_DIR=%~dp0
set TARGET=CPU
set BUILD_FOLDER=%USERPROFILE%\Documents\Intel\OpenVINO
:: command line arguments parsing
:input_arguments_loop
if not "%1"=="" (
if "%1"=="-b" (
set BUILD_FOLDER=%2
shift
)
if "%1"=="-d" (
set TARGET=%2
echo target = !TARGET!
shift
)
if "%1"=="-sample-options" (
set SAMPLE_OPTIONS=%2 %3 %4 %5 %6
echo sample_options = !SAMPLE_OPTIONS!
shift
)
if "%1"=="-help" (
echo Classification sample using public SqueezeNet topology
echo.
echo Options:
echo -help Print help message
echo -b BUILD_FOLDER Specify the sample build directory
echo -d DEVICE Specify the target device to infer on; CPU, GPU, HDDL or MYRIAD are acceptable. Sample will look for a suitable plugin for device specified
echo -sample-options OPTIONS Specify command line arguments for the sample
exit /b
)
shift
goto :input_arguments_loop
)
set "SOLUTION_DIR64=%BUILD_FOLDER%\inference_engine_cpp_samples_build"
set TARGET_PRECISION=FP16
echo target_precision = !TARGET_PRECISION!
set models_path=%BUILD_FOLDER%\openvino_models\models
set models_cache=%BUILD_FOLDER%\openvino_models\cache
set irs_path=%BUILD_FOLDER%\openvino_models\ir
set model_name=squeezenet1.1
set target_image_path=%ROOT_DIR%car.png
set omz_tool_error_message=It is required to download and convert a model. Check https://pypi.org/project/openvino-dev/ to install it. Then run the script again.
:: Initialize the OpenVINO environment; sets INTEL_OPENVINO_DIR.
if exist "%ROOT_DIR%..\..\setupvars.bat" (
call "%ROOT_DIR%..\..\setupvars.bat"
) else (
echo setupvars.bat is not found, INTEL_OPENVINO_DIR can't be set
goto errorHandling
)
echo INTEL_OPENVINO_DIR is set to %INTEL_OPENVINO_DIR%
:: Check if Python is installed
python --version 2>NUL
if errorlevel 1 (
echo Error^: Python is not installed. Please install Python 3.6 ^(64-bit^) or higher from https://www.python.org/downloads/
goto errorHandling
)
:: Check if Python version is equal or higher 3.6
for /F "tokens=* USEBACKQ" %%F IN (`python --version 2^>^&1`) DO (
set version=%%F
)
echo %version%
:: "python --version" prints e.g. "Python 3.6.8"; token 2 is the major
:: version, token 3 the minor version.
for /F "tokens=1,2,3 delims=. " %%a in ("%version%") do (
set Major=%%b
set Minor=%%c
)
if "%Major%"=="" set Major=0
if "%Minor%"=="" set Minor=0
:: Numeric comparison so that e.g. Python 3.10 is accepted.
if %Major% geq 3 (
if %Minor% geq 6 (
set python_ver=okay
)
)
if not "%python_ver%"=="okay" (
echo Unsupported Python version. Please install Python 3.6 ^(64-bit^) or higher from https://www.python.org/downloads/
goto errorHandling
)
:: The omz_* tools are installed by the openvino-dev pip package.
omz_info_dumper --print_all >NUL
if errorlevel 1 (
echo Error: omz_info_dumper was not found. %omz_tool_error_message%
goto errorHandling
)
omz_downloader --print_all >NUL
if errorlevel 1 (
echo Error: omz_downloader was not found. %omz_tool_error_message%
goto errorHandling
)
omz_converter --print_all >NUL
if errorlevel 1 (
echo Error: omz_converter was not found. %omz_tool_error_message%
goto errorHandling
)
:: Ask the model info dumper which subdirectory the downloader will use.
for /F "tokens=* usebackq" %%d in (
`omz_info_dumper --name "%model_name%" ^|
python -c "import sys, json; print(json.load(sys.stdin)[0]['subdirectory'])"`
) do (
set model_dir=%%d
)
set ir_dir=%irs_path%\%model_dir%\%target_precision%
echo.
echo Download public %model_name% model
echo omz_downloader --name "%model_name%" --output_dir "%models_path%" --cache_dir "%models_cache%"
omz_downloader --name "%model_name%" --output_dir "%models_path%" --cache_dir "%models_cache%"
echo %model_name% model downloading completed
CALL :delay 7
:: Skip conversion when the IR already exists.
if exist "%ir_dir%" (
echo.
echo Target folder %ir_dir% already exists. Skipping IR generation with Model Optimizer.
echo If you want to convert a model again, remove the entire %ir_dir% folder.
CALL :delay 7
GOTO buildSample
)
echo.
echo ###############^|^| Run Model Optimizer ^|^|###############
echo.
CALL :delay 3
::set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp
echo omz_converter --mo "%INTEL_OPENVINO_DIR%\tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%"
omz_converter --mo "%INTEL_OPENVINO_DIR%\tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%"
if ERRORLEVEL 1 GOTO errorHandling
CALL :delay 7
:buildSample
echo.
echo ###############^|^| Generate VS solution for Inference Engine samples using cmake ^|^|###############
echo.
CALL :delay 3
if "%PROCESSOR_ARCHITECTURE%" == "AMD64" (
set "PLATFORM=x64"
) else (
set "PLATFORM=Win32"
)
if exist "%SOLUTION_DIR64%\CMakeCache.txt" del "%SOLUTION_DIR64%\CMakeCache.txt"
cd /d "%INTEL_OPENVINO_DIR%\samples\cpp" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio 16 2019" -A %PLATFORM% "%INTEL_OPENVINO_DIR%\samples\cpp"
if ERRORLEVEL 1 GOTO errorHandling
CALL :delay 7
echo.
echo ###############^|^| Build Inference Engine samples using cmake ^|^|###############
echo.
CALL :delay 3
echo cmake --build . --config Release --target classification_sample_async
cmake --build . --config Release --target classification_sample_async
if ERRORLEVEL 1 GOTO errorHandling
CALL :delay 7
:runSample
echo.
echo ###############^|^| Run Inference Engine classification sample ^|^|###############
echo.
CALL :delay 3
copy /Y "%ROOT_DIR%%model_name%.labels" "%ir_dir%"
cd /d "%SOLUTION_DIR64%\intel64\Release"
:: Fall back to prebuilt binaries shipped with the package.
if not exist classification_sample_async.exe (
cd /d "%INTEL_OPENVINO_DIR%\samples\cpp\intel64\Release"
)
echo classification_sample_async.exe -i "%target_image_path%" -m "%ir_dir%\%model_name%.xml" -d !TARGET! !SAMPLE_OPTIONS!
classification_sample_async.exe -i "%target_image_path%" -m "%ir_dir%\%model_name%.xml" -d !TARGET! !SAMPLE_OPTIONS!
if ERRORLEVEL 1 GOTO errorHandling
echo.
echo ###############^|^| Classification sample completed successfully ^|^|###############
CALL :delay 10
cd /d "%ROOT_DIR%"
goto :eof

:: Error path: report, restore the working directory, exit non-zero.
:errorHandling
echo Error
cd /d "%ROOT_DIR%"
exit /b 1

:: Subroutine: wait for %1 seconds.
:delay
timeout %~1 2> nul
EXIT /B 0

View File

@ -1,189 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Demo script: downloads the public squeezenet1.1 model, converts it to
# OpenVINO IR with the Model Optimizer, builds the C++
# classification_sample_async sample, and runs it on car.png.
#
# Options:
#   -b BUILD_DIR          root for the sample build directory (default: $HOME)
#   -d DEVICE             inference device (default: CPU)
#   -sample-options OPTS  extra arguments forwarded to the sample
#
# Fix over the previous revision: build_dir was unconditionally reset to
# "$HOME/inference_engine_cpp_samples_build" right before the build step,
# silently discarding the -b option (the parallel benchmark script has no
# such reset). The reset has been removed.
echo -ne "\e[0;33mWARNING: If you get an error when running the sample in the Docker container, you may need to install additional packages. To do this, run the container as root (-u 0) and run install_openvino_dependencies.sh script. If you get a package-independent error, try setting additional parameters using -sample-options.\e[0m\n"
# Directory this script lives in; car.png, the labels file and utils.sh are
# expected to sit next to it.
ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]-$0}" )" && pwd )"
build_dir="$HOME/inference_engine_cpp_samples_build"
# Provides error() and print_and_run().
. "$ROOT_DIR/utils.sh"
# Print usage and exit with status 1.
usage() {
echo "Classification sample using public SqueezeNet topology"
echo
echo "Options:"
echo " -help Print help message"
echo " -b BUILD_DIR Specify the sample build directory"
echo " -d DEVICE Specify the target device to infer on; CPU, GPU, HDDL or MYRIAD are acceptable. Sample will look for a suitable plugin for device specified"
echo " -sample-options OPTIONS Specify command line arguments for the sample"
echo
exit 1
}
# Abort with a diagnostic on any failing top-level command (error() is
# defined in utils.sh).
trap 'error ${LINENO}' ERR
target="CPU"
# parse command line options
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-b | --build_dir)
build_dir="$2/inference_engine_cpp_samples_build"
shift
;;
-h | -help | --help)
usage
;;
-d)
target="$2"
echo target = "${target}"
shift
;;
-sample-options)
# Everything after -sample-options is forwarded to the sample verbatim.
sampleoptions=("${@:2}")
echo sample-options = "${sampleoptions[*]}"
shift
;;
*)
# unknown option
;;
esac
shift
done
target_precision="FP16"
echo -ne "target_precision = ${target_precision}\n"
# Model, cache and IR locations are siblings of the sample build directory.
models_path="$build_dir/../openvino_models/models"
models_cache="$build_dir/../openvino_models/cache"
irs_path="$build_dir/../openvino_models/ir"
model_name="squeezenet1.1"
target_image_path="$ROOT_DIR/car.png"
run_again="Then run the script again.\n\n"
omz_tool_error_message="It is required to download and convert a model. Check https://pypi.org/project/openvino-dev/ to install it. ${run_again}"
# Source setupvars.sh to populate INTEL_OPENVINO_DIR and the tool paths.
# NOTE(review): when setupvars.sh is missing, setupvars_path stays unset, so
# the '.' below also fails and the script exits via the second message.
if [ -e "$ROOT_DIR/../../setupvars.sh" ]; then
setupvars_path="$ROOT_DIR/../../setupvars.sh"
else
echo -ne "Error: setupvars.sh is not found\n"
fi
if ! . "$setupvars_path" ; then
echo -ne "Unable to run ./setupvars.sh. Please check its presence. ${run_again}"
exit 1
fi
# Pick a Python 3 interpreter appropriate for the detected platform.
if [[ -f /etc/centos-release ]]; then
DISTRO="centos"
elif [[ -f /etc/lsb-release ]]; then
DISTRO="ubuntu"
elif [[ -f /etc/redhat-release ]]; then
DISTRO="redhat"
fi
if [[ $DISTRO == "centos" ]]; then
# check installed Python version
if command -v python3.6 >/dev/null 2>&1; then
python_binary=python3.6
fi
elif [[ $DISTRO == "redhat" ]]; then
python_binary=python3
elif [[ $DISTRO == "ubuntu" ]]; then
python_binary=python3
elif [[ "$OSTYPE" == "darwin"* ]]; then
# check installed Python version
if command -v python3.8 >/dev/null 2>&1; then
python_binary=python3.8
elif command -v python3.7 >/dev/null 2>&1; then
python_binary=python3.7
elif command -v python3.6 >/dev/null 2>&1; then
python_binary=python3.6
else
python_binary=python3
fi
fi
if ! command -v $python_binary &>/dev/null; then
echo -ne "\n\nPython 3.6 (x64) or higher is not installed. It is required to run Model Optimizer, please install it. ${run_again}"
exit 1
fi
# The omz_* tools are installed by the openvino-dev pip package.
if ! command -v omz_info_dumper &>/dev/null; then
echo -ne "\n\nomz_info_dumper was not found. ${omz_tool_error_message}"
exit 2
fi
if ! command -v omz_downloader &>/dev/null; then
echo -ne "\n\nomz_downloader was not found. ${omz_tool_error_message}"
exit 3
fi
if ! command -v omz_converter &>/dev/null; then
echo -ne "\n\nomz_converter was not found. ${omz_tool_error_message}"
exit 4
fi
# Step 1. Download the Caffe model and the prototxt of the model
echo -ne "\n###############|| Downloading the Caffe model and the prototxt ||###############\n\n"
# Ask the model info dumper which subdirectory the downloader will use.
model_dir=$(omz_info_dumper --name "$model_name" |
${python_binary} -c 'import sys, json; print(json.load(sys.stdin)[0]["subdirectory"])')
print_and_run omz_downloader --name "$model_name" --output_dir "${models_path}" --cache_dir "${models_cache}"
ir_dir="${irs_path}/${model_dir}/${target_precision}"
if [ ! -e "$ir_dir" ]; then
# Step 2. Convert a model with Model Optimizer
echo -ne "\n###############|| Convert a model with Model Optimizer ||###############\n\n"
export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp
print_and_run omz_converter --name "$model_name" -d "$models_path" -o "$irs_path" --precisions "$target_precision"
else
echo -ne "\n\nTarget folder ${ir_dir} already exists. Skipping IR generation with Model Optimizer."
echo -ne "If you want to convert a model again, remove the entire ${ir_dir} folder. ${run_again}"
fi
# Step 3. Build samples
echo -ne "\n###############|| Build Inference Engine samples ||###############\n\n"
OS_PATH=$(uname -m)
NUM_THREADS="-j2"
if [ "$OS_PATH" == "x86_64" ]; then
OS_PATH="intel64"
NUM_THREADS="-j8"
fi
samples_path="${INTEL_OPENVINO_DIR}/samples/cpp"
# (fix) build_dir is NOT reset here, so a -b override stays in effect.
binaries_dir="${build_dir}/${OS_PATH}/Release"
# Drop a stale CMake cache so the generator can be re-run safely.
if [ -e "$build_dir/CMakeCache.txt" ]; then
rm -rf "$build_dir/CMakeCache.txt"
fi
mkdir -p "$build_dir"
cd "$build_dir"
cmake -DCMAKE_BUILD_TYPE=Release "$samples_path"
make $NUM_THREADS classification_sample_async
# Step 4. Run sample
echo -ne "\n###############|| Run Inference Engine classification sample ||###############\n\n"
cd "$binaries_dir"
# The labels file shipped next to this script must sit beside the IR.
cp -f "$ROOT_DIR/${model_name}.labels" "${ir_dir}/"
print_and_run ./classification_sample_async -d "$target" -i "$target_image_path" -m "${ir_dir}/${model_name}.xml" "${sampleoptions[@]}"
echo -ne "\n###############|| Classification sample completed successfully ||###############\n\n"

File diff suppressed because it is too large Load Diff

View File

@ -1,21 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Report a failure location and terminate the script.
#   $1 - line number where the failure occurred
#   $2 - optional description of the failure
#   $3 - optional exit status (defaults to 1)
error() {
  local status="${3:-1}"
  local where="Error on or near line $1"
  if [[ -n "$2" ]]; then
    echo "${where}: $2; exiting with status ${status}"
  else
    echo "${where}; exiting with status ${status}"
  fi
  exit "${status}"
}
# Echo the given command to stdout (shell-quoted, prefixed with "Run"),
# then execute it, returning the command's exit status.
print_and_run() {
  local -a cmd=("$@")
  printf 'Run'
  printf ' %q' "${cmd[@]}"
  printf '\n\n'
  "${cmd[@]}"
}