publish master branch snapshot, revision 49482ae3bea0cbaa07474f86f36db11943142687

This commit is contained in:
Alexey Suhov
2020-05-13 21:12:22 +03:00
parent 9d6501e9a6
commit 5b428f0655
924 changed files with 30841 additions and 8905 deletions

View File

@@ -27,7 +27,9 @@ The "demo" folder contains three scripts:
3. Benchmark demo using public SqueezeNet topology (demo_benchmark_app.sh|bat)
To run the demos, run demo_squeezenet_download_convert_run.sh or demo_security_barrier_camera.sh or demo_benchmark_app.sh (*.bat on Windows) scripts from the console without parameters, for example:
4. Speech recognition demo utilizing models trained on open LibriSpeech dataset
To run the demos, run demo_squeezenet_download_convert_run.sh or demo_security_barrier_camera.sh or demo_benchmark_app.sh or demo_speech_recognition.sh (*.bat on Windows) scripts from the console without parameters, for example:
./demo_squeezenet_download_convert_run.sh
@@ -80,4 +82,19 @@ The demo script does the following:
The benchmark app prints performance counters, resulting latency, and throughput values.
For more information about the Inference Engine benchmark app, refer to the documentation available in the sample folder.
For more information about the Inference Engine benchmark app, refer to the documentation available in the sample folder.
Speech Recognition Demo Using LibriSpeech models
================================================
The demo illustrates live speech recognition - transcribing speech from a microphone or offline (from a wave file).
The demo is also capable of live close captioning of an audio clip or movie, where the signal is intercepted from the speaker.
The demo script does the following:
- Downloads US English models trained on LibriSpeech dataset prepared for direct usage by the Inference Engine
- Installs the required components
- Runs the command line offline demo
- As a final step, runs live speech recognition application with graphical interface
The GUI application prints the speech transcribed from the input signal in a window. Up to two channels can be transcribed in parallel: the microphone and speaker streams.

View File

@@ -103,13 +103,13 @@ for /F "tokens=* usebackq" %%d in (
set ir_dir=%irs_path%\%model_dir%\%target_precision%
echo Download public %model_name% model
echo python "%downloader_dir%\downloader.py" --name %model_name% --output_dir %models_path% --cache_dir %models_cache%
python "%downloader_dir%\downloader.py" --name %model_name% --output_dir %models_path% --cache_dir %models_cache%
echo python "%downloader_dir%\downloader.py" --name "%model_name%" --output_dir "%models_path%" --cache_dir "%models_cache%"
python "%downloader_dir%\downloader.py" --name "%model_name%" --output_dir "%models_path%" --cache_dir "%models_cache%"
echo %model_name% model downloading completed
timeout 7
if exist %ir_dir% (
if exist "%ir_dir%" (
echo.
echo Target folder %ir_dir% already exists. Skipping IR generation with Model Optimizer.
echo If you want to convert a model again, remove the entire %ir_dir% folder.
@@ -220,7 +220,7 @@ echo.
echo ###############^|^| Build Inference Engine samples using MS Visual Studio (MSBuild.exe) ^|^|###############
echo.
timeout 3
echo !MSBUILD_BIN!" Samples.sln /p:Configuration=Release /t:benchmark_app /clp:ErrorsOnly /m
echo "!MSBUILD_BIN!" Samples.sln /p:Configuration=Release /t:benchmark_app /clp:ErrorsOnly /m
"!MSBUILD_BIN!" Samples.sln /p:Configuration=Release /t:benchmark_app /clp:ErrorsOnly /m
if ERRORLEVEL 1 GOTO errorHandling

View File

@@ -170,7 +170,7 @@ if [ ! -e "$ir_dir" ]; then
printf "Install Model Optimizer dependencies\n\n"
cd "${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/install_prerequisites"
. ./install_prerequisites.sh caffe
cd $cur_path
cd "$cur_path"
# Step 3. Convert a model with Model Optimizer
printf "${dashes}"

View File

@@ -87,8 +87,8 @@ if ERRORLEVEL 1 GOTO errorHandling
set models_path=%BUILD_FOLDER%\openvino_models\ir
set models_cache=%BUILD_FOLDER%\openvino_models\cache
if not exist %models_cache% (
mkdir %models_cache%
if not exist "%models_cache%" (
mkdir "%models_cache%"
)
set downloader_dir=%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\downloader

View File

@@ -98,13 +98,13 @@ for /F "tokens=* usebackq" %%d in (
set ir_dir=%irs_path%\%model_dir%\%target_precision%
echo Download public %model_name% model
echo python "%downloader_dir%\downloader.py" --name %model_name% --output_dir %models_path% --cache_dir %models_cache%
python "%downloader_dir%\downloader.py" --name %model_name% --output_dir %models_path% --cache_dir %models_cache%
echo python "%downloader_dir%\downloader.py" --name "%model_name%" --output_dir "%models_path%" --cache_dir "%models_cache%"
python "%downloader_dir%\downloader.py" --name "%model_name%" --output_dir "%models_path%" --cache_dir "%models_cache%"
echo %model_name% model downloading completed
timeout 7
if exist %ir_dir% (
if exist "%ir_dir%" (
echo.
echo Target folder %ir_dir% already exists. Skipping IR generation with Model Optimizer.
echo If you want to convert a model again, remove the entire %ir_dir% folder.
@@ -215,7 +215,7 @@ echo.
echo ###############^|^| Build Inference Engine samples using MS Visual Studio (MSBuild.exe) ^|^|###############
echo.
timeout 3
echo !MSBUILD_BIN!" Samples.sln /p:Configuration=Release /t:classification_sample_async /clp:ErrorsOnly /m
echo "!MSBUILD_BIN!" Samples.sln /p:Configuration=Release /t:classification_sample_async /clp:ErrorsOnly /m
"!MSBUILD_BIN!" Samples.sln /p:Configuration=Release /t:classification_sample_async /clp:ErrorsOnly /m
if ERRORLEVEL 1 GOTO errorHandling

View File

@@ -166,7 +166,7 @@ if [ ! -e "$ir_dir" ]; then
printf "Install Model Optimizer dependencies\n\n"
cd "${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/install_prerequisites"
. ./install_prerequisites.sh caffe
cd $cur_path
cd "$cur_path"
# Step 3. Convert a model with Model Optimizer
printf "${dashes}"