Compare commits

...

335 Commits

Author SHA1 Message Date
Alexey Suhov
023e7c2c3f update system requirements (#1321)
* update system requirements

* update release version in readme
2020-07-14 20:25:39 +03:00
Alexey Suhov
34ddb70f7d fix build target name in demos for Windows (#1248) 2020-07-07 18:26:50 +03:00
Andrew Bakalin
21e092122f [VPU] WA for static shape allocation (#1106) 2020-06-24 16:28:59 +03:00
Roman Kazantsev
92c1333653 Correct removing nodes from graph and add test for ConstToResult transform (#1083)
Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>
2020-06-24 15:39:08 +03:00
Roman Kazantsev
c26ec8b312 [IE] Preserve output data name after merging and update output data map (#1092)
Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>
2020-06-24 12:30:25 +03:00
Andrew Bakalin
32054ff180 [VPU] Support for originalLayersNames attribute in exec graph (#1073) 2020-06-23 12:19:15 +03:00
Ilya Churaev
7cff005ada Disable ref implementations (#951)
* Add NGRAPH_EVALUATE_ENABLE flag and disable all reference implementations

* Enable some evaluate methods

* Added dynamic library with reference implementations

* Fixed tests

* Enabled unsqueeze  CF

* Removed nGraph test library

* Disable all nGraph tests to check

* Enable some reference implementations

* Added debug message

* EVALUATE true

* Revert "Disable all nGraph tests to check"

This reverts commit 38bca3ed3dfed029e892fe609ea7e48c5cfadb67.

* Enable some implementations

* Removed some TYPE_CASE reference implementations

* Fixed reshape

* Revert types for Broadcast and Add

* Disabled failing gpu_engine.user_context test

* Disabled failed nGraph tests

* Add u8 for non_zero

* Revert "Added debug message"

This reverts commit 4b9f4894f5ae9963426830ac5e5eb833af8847aa.

* Revert "Enable some reference implementations"

This reverts commit d2001a636df7504e0ad5abe5c98725ef0be07379.

Revert "Enabled unsqueeze  CF"

This reverts commit 814a8e52cb2b673446d24e54ed11af1dd3d80fad.

Revert "Enable some evaluate methods"

This reverts commit 73767b8942d857bf60317f29120c98c528344a04.

* Revert "Add NGRAPH_EVALUATE_ENABLE flag and disable all reference implementations"

This reverts commit cfaa7d7e7bf34b617f53a556d24fea2189372592.
2020-06-23 12:17:40 +03:00
Ivan Tikhonov
06707cc53f Fix for Kaldi models with Memory layers and a batch more than 1 (#1025)
* fix kaldi models with memory (batch > 1)

* apply review comments

* Added test for the case using the SetBatchSize function when ReadValue op is in the network

* Check status code instead of message

* Use new ngraph api
2020-06-23 11:47:18 +03:00
Konrad Dobros
fff93d8f05 [IE CLDNN] Add work-around for 1d input to Gather (#1069) 2020-06-23 11:44:20 +03:00
Gladilov, Gleb
637ddd5dfb [IE][VPU]: Fixes klocwork issues (#1075) 2020-06-23 09:58:12 +03:00
Ivan Tikhonov
fa4c5e8e38 Fix ARM build: explicit type conversion (#1061)
* fix arm build: explicit type conversion

* Use explicit conversion in prior_box_ie.cpp
2020-06-22 23:37:54 +03:00
Maxim Vafin
c9fc6f0531 Fix OneHot transformation for Bert Squad opset 10 (#954)
* Add transformation for squeezing depth input for ONNX OneHot operation because from some TF models it has shape [1] instead of []
2020-06-22 18:58:07 +03:00
Denis Orlov
c9eb6ae62b [GNA] Initialize a local variable (#1066) 2020-06-22 18:49:22 +03:00
Alexander Chaiko
eef56ca80c [IE CLDNN] WA to 1d input for concat (#1040) 2020-06-22 15:25:17 +03:00
Gorokhov Dmitriy
36f1c00e02 [CPU] Fixed issue with unsupported reorder case for grouped convolutions (#893) 2020-06-22 14:06:53 +03:00
Konrad Dobros
5c43765011 [IE CLDNN] Fix activation implementation for fsv16 format (#1038)
For b_fs_yx_fsv16 format in reference kernel features for dispatch are
rounded to multiple of 16. This change adds correct check in kernel to
return work-items that are inside this dispatch padding.
Previously those work-items could corrupt memory expected to be filled
with 0s, and for parametrized activation due to bounds checking with
modulo operator they could have been corrupting actual layer output.

Issue: CVS-27672
2020-06-22 09:17:00 +03:00
Ilya Lavrenov
bbfc9bbc14 Deprecated IGNORE_IR_STATISTIC VPU option (#1028) 2020-06-20 10:38:47 +03:00
Pavel Rodionov
9c607528ef [GNA] Support export model with multiple inputs/outputs and Permute layer (#1024) 2020-06-19 18:06:38 +03:00
Denis Orlov
ae9e0510f0 [GNA] Additional checks (#998) 2020-06-19 13:14:32 +03:00
Edward Shogulin
76af547c17 [LPT] BERT with specific biases support & improvement (#968)
* [LPT] BERT with biases support

* [LPT] Gemm biases and quantization

* [CPU] Fixed FullyConnected + Depthwise node fusing

* [LPT] FullyConnected 3D: symmetric quantization support

* [LPT] FullyConnected 3D: symmetric quantization support fix

* [CPU] Fixed FullyConnected + Depthwise fusing initialization

Co-authored-by: dmitrygo <dmitry.gorokhov@intel.com>
2020-06-19 13:14:20 +03:00
Kamil Magierski
5e97a3123f Fix cases then const blob precision is not FP32/FP16 (#1000)
Co-authored-by: kmagiers <kmagiers@intel.com>
2020-06-19 13:13:19 +03:00
Andrey Dmitriev
532dec140b [GNA] fix permute 0_2_1 (#993) 2020-06-19 10:20:55 +03:00
Vladimir Paramuzov
c41c6294f9 [IE CLDNN] Fix strided slice (#953) 2020-06-19 08:23:25 +03:00
Gorokhov Dmitriy
3bbe88e659 [IE Common][WA] Skipped const folding for Convolution layer (#1002) 2020-06-19 01:25:20 +03:00
Maxim Andronov
2f3d5f68cd [CPU] fix one dims scale shift (#983) 2020-06-18 14:21:07 +03:00
Evgeny Talanin
843f81a1cc [IE TESTS] disable Some myriad tests on Win (#763) (#988)
* [IE TESTS] disable some Myriad tests on Win

* Skip test with todo

Co-authored-by: Irina Efode <irina.efode@intel.com>
2020-06-18 13:57:21 +03:00
Pavel Esir
c596707a09 fixed some typos in MO help (#979) 2020-06-18 11:02:28 +03:00
Konrad Dobros
cf60baf2f0 [IE CLDNN] Fix gather dimensions calculation (#960) 2020-06-18 00:31:17 +03:00
Nikita Kudriavtsev
aeb70036d7 [IE Myriad] Remove Myriad 2 from supported devices in XLink (#978) 2020-06-17 17:47:55 +03:00
Daria Mityagina
dea04dae8c [IE Myriad] - WrapInLoop fix: if data has consumer's input inside subgraph - replace them (#958) 2020-06-17 17:27:17 +03:00
Ilya Churaev
14b44803ba Fixed cpack information, removed some links (#975) 2020-06-17 17:17:10 +03:00
Andrey Dmitriev
06286f2aae [GNA] Added fix multiple output with one go to memory and test (#888)
[GNA] Added fix multiple output with one go to memory and test

[GNA] Added fix multiple output with one go to memory and test

[GNA] Added fix multiple output with one go to memory and test

Added multi output

Update gna_pass_manager.cpp

test

[GNA] Added fix multiple output with one go to memory and test

[GNA] Added fix multiple output with one go to memory and test

[GNA] Added fix multiple output with one go to memory and test

Added multi output

Update gna_pass_manager.cpp

test

tests

[GNA] Added fix multiple output with one go to memory and test

[GNA] Added fix multiple output with one go to memory and test

Added multi output

Update gna_pass_manager.cpp

test

tests

Added pass

Test

test

tests_2

return old
2020-06-17 11:23:56 +03:00
Ilya Churaev
97e5fc4bae Use creators only for default opsets (#932) 2020-06-16 12:25:06 +03:00
Alexey Tarakanov
47218284b2 Support fp16 networks for releases_2020_4 (#936) 2020-06-16 10:31:57 +03:00
Andrey Dmitriev
6079a35b81 [GNA] Added test for ScaleShift and fixed power layer with non-zero shift (#922)
* [GNA] Added test ScaleShift and fixed power layer with non zero shift

added tests

[GNA] Added test ScaleShift and fixed power layer with non zero shift

* Test Assert

* rebuild
2020-06-16 00:32:28 +03:00
Roman Kazantsev
4f4352f301 Fix preserving names of output layers after TopK NGraph transformation (#928)
* Fix preserving names of output layers after TopK NGraph transformation (#843)

* Fix preserving names of output layers after TopK NGraph transformation

It helps to infer semantic-segmentation-adas-0001 model. See CVS-31977.

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Fix a test for TopK

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Fix TopK NGraph transformation and its test

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Disable smoke_LoadNetworkAccuracy due to sporadic failure

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>
2020-06-15 20:57:45 +03:00
Anastasia Kuporosova
a67d74c41f [Python API] Fix long inference (#897) 2020-06-15 16:21:41 +03:00
Ivan Tikhonov
26c563132d Revert prior box constant folding (#906)
* Revert "Const folding and reference implementation for PriorBox(Clustered) ops (#785)"

This reverts commit 9fc818478a.

* apply codestyle for ngraph part
2020-06-15 12:38:27 +03:00
Ilya Lavrenov
dc1ca195dd Updated dates of removal for deprecated API (#911) 2020-06-15 12:24:27 +03:00
Vladimir Paramuzov
f5ad3e6f89 [IE CLDNN] Fixed clone network to preserve original CNNNetwork (#870) 2020-06-12 15:53:30 +03:00
Konrad Dobros
6c736ce001 [IE CLDNN] Fix fsv16 -> bfyx reorder removal (#873) 2020-06-12 15:43:54 +03:00
Anastasia Kuporosova
30ab6534e1 [Python API] Fixate requirements (#905) 2020-06-12 12:06:11 +03:00
Ilya Lavrenov
259a4c25ce TESTS: Added test for parallel LoadNetwork with accuracy check (#858) 2020-06-12 11:56:59 +03:00
Andrey Somsikov
347930008c Use default thread sanitizer linkage (#899)
GCC and CLang *default* sanitizer linkage differs (static vs. dynamic).
Prefer default behavior as alternate seen having issues.

Default (GN)U linker fails with unresolved symbols linking Clang built
binaries with sanitizer enabled. Force use LLVM linker lld for Clang
builds.

Sanitizer instrumentation and link flags should be retained for all
binaries. Updating samples cmake configuration to keep those flags
after unset logic at the ie_build_samples().
2020-06-12 00:36:03 +03:00
Evgeny Latkin
4fa251483a [IE][Myriad] fix HW tiling (#894) 2020-06-11 20:48:56 +03:00
Vladimir Paramuzov
30f8af70fc [IE CLDNN] fix perf for fsv16 global avg pooling (#666) 2020-06-11 20:44:37 +03:00
Andrew Bakalin
3fc6d8a188 [VPU] Update firmware (#898) 2020-06-11 20:44:20 +03:00
Denis Orlov
66c8df6a87 [GNA] Fixes in checks, asserts, etc. (#867) 2020-06-11 20:04:46 +03:00
Nikolay Shchegolev
e53eb86334 [Common] Static analysed issues. Part II. 2020-06-11 19:59:44 +03:00
Edward Shogulin
2df99d4263 [LPT] Static code analysis issues fix (#889) 2020-06-11 15:09:20 +03:00
Gleb Kazantaev
deab4d38b0 Fix NopElimination (#869) 2020-06-11 13:28:27 +03:00
Vladimir Paramuzov
412428f1dd [IE CLDNN] Always use FP32 as intermediate type for fused quantize (#829) 2020-06-11 12:22:27 +03:00
Evgeny Lazarev
167c96a8af Relaxed MO requirements for "protobuf" package (#862) 2020-06-10 18:26:16 +03:00
Gleb Kazantaev
b7363ba711 Fix divide conversion for integer input type (#853) 2020-06-10 16:25:57 +03:00
Evgeny Lazarev
5cef9f3734 Fixed StridedSlice to Crop transformation (#836) (#845)
* Fixed StridedSlice to Crop transformation to not apply when rank of data is changed

* Added unit test for StridedSlice to Crop transformation
2020-06-10 11:54:02 +03:00
Andrey Dmitriev
0bf1f53356 [GNA] Added support permute layer (#723)
* [GNA] Added GNA natively supported permute layer cases.
2020-06-09 16:43:01 +03:00
Maksim Doronin
18004bdb5e [IE VPU] Dynamic Broadcast tests (#737)
* [IE VPU] Enable StaticShapeBroadcast tests

* [IE VPU] DSR: support case when shape is output and input for stage

* [IE VPU] Enable Broadcast and Transpose tests

* [IE VPU] DSR: fix typo

* [IE VPU] Add assertion for numConsumer in DSR

* [IE VPU] Added CheckMyriadX helper method

* [IE VPU] New DSR assert for input->getInputTo

* [IE VPU] Fix myriad2 tests bug
2020-06-09 16:10:12 +03:00
Ivan Tikhonov
9fc818478a Const folding and reference implementation for PriorBox(Clustered) ops (#785)
* Constant folding for PriorBox, PriorBoxClustered; Deleted PriorBoxIE, PriorBoxClusteredIE and transformations; Added unit tests; codestyle

* Delete debug info

* delete unnecessary convert_prior_to_ie_prior.hpp file

* fix ngraph reader tests; delete PriorBoxIE functional test

* fix for ngraph reader tests

* Apply review comment

* apply ngraph codestyle

* restore PriorBoxClustered tests in disabled state
2020-06-09 14:47:49 +03:00
Denis Orlov
ef8a8dd309 add support for multiple scale factors in speech sample (#835)
Co-authored-by: Anna Alberska <anna.alberska@intel.com>
2020-06-09 14:36:28 +03:00
Andrey Sokolov
d4e880de3d [IE VPU] Update firmware; enable convolution VPU OCL tests (#802) 2020-06-09 14:34:10 +03:00
Vladimir Paramuzov
fe198dd544 [IE CLDNN] Added 6d tensor support in eltwise/scale primitives (#826) 2020-06-09 14:29:36 +03:00
Anton Zaytsev
b0eb3e67ee [ci-skip][IE MKLDNN] Add Precision U16 in MKLDNN (#783) 2020-06-09 14:20:43 +03:00
dmitrygo
434361cea9 [TESTS] fixes after rebase 2020-06-09 14:11:18 +03:00
dmitrygo
aa30580109 [CPU] mkldnn submodule up 2020-06-09 14:11:18 +03:00
dmitrygo
051a429c31 [LPT] Fixed quantizeBlob routine for 3D case 2020-06-09 14:11:18 +03:00
Edward Shogulin
8eb88d51f2 [LPT] GPU tests were fixed 2020-06-09 14:11:18 +03:00
Edward Shogulin
971811c8c8 [LPT] [TEST] LayerTransformation test threshold was updated 2020-06-09 14:11:18 +03:00
Anton Voronov
629ca3a5d8 [CPU] Gemm node: supported precisions U8 and I8 and added tests 2020-06-09 14:11:18 +03:00
Edward Shogulin
92e5e010b9 [LPT] FullyConnected & Gemm tests 2020-06-09 14:11:18 +03:00
dmitrygo
c7313bab7f [CPU] Fixed weights candidate initialization in FC node 2020-06-09 14:11:18 +03:00
Edward Shogulin
d798831c95 [LPT] Gemm and FullyConnected 3D improvement 2020-06-09 14:11:18 +03:00
Edward Shogulin
4d01adbe01 [LPT] tests extending 2020-06-09 14:11:18 +03:00
Edward Shogulin
1d51d2185a [LPT] [Test] Low precision transformations functional tests infrastructure improvement 2020-06-09 14:11:18 +03:00
Edward Shogulin
65b00c1dfb [LPT] FullyConnected transformation fix 2020-06-09 14:11:18 +03:00
Edward Shogulin
9758305b32 [nGraph] Remove Reshape for 3D FullyConnected 2020-06-09 14:11:18 +03:00
Edward Shogulin
d7c77212b8 [IE COMMON] [LPT] Concat asymmetric quantization with signed interval fix 2020-06-09 14:11:18 +03:00
Edward Shogulin
e544dd1e28 [IE COMMON] [LPT] Support 3D layout for FullyConnected transformation 2020-06-09 14:11:18 +03:00
dmitry-gorokhov
bc98d17121 [CPU] Added custom implementations (power=0.5, power=-1.0) for Power node 2020-06-09 14:11:18 +03:00
dmitry-gorokhov
bcd38100db [CPU][WA] Supported 3D layout for FullyConnected primitive
Extended jit uni depthwise primitive to support 3D inputs
2020-06-09 14:11:18 +03:00
Nikolay Shchegolev
b6f2c06b26 [Common] Static analyzed issues. (#804) 2020-06-09 13:49:50 +03:00
Vladimir Paramuzov
b4546ad1e0 [IE CLDNN] Better error message when output is not found (#824) 2020-06-09 12:26:28 +03:00
Edward Shogulin
d02b9a9b81 [LPT] [TEST] LayerTransformation test threshold was updated (#828) 2020-06-09 10:34:17 +03:00
Maxim Andronov
d8e82d56d2 [CPU] fix set up config for bin conv fused (#608) 2020-06-09 09:59:29 +03:00
Anastasia Kuporosova
e91453e006 [Python API] Fixate requirements versions (#830) 2020-06-09 08:49:49 +03:00
Anastasia Kuporosova
6a60f93af0 [Python API] Fix deprecation warnings (#812) 2020-06-09 08:48:08 +03:00
Edward Shogulin
ca643edb1b [LPT] [CPU] NormalizeL2 transformation (#662)
* [LPT] NormalizeL2 transformation

* [LPT] NormalizeL2 transformation tests improvement

* [CPU] Fixed depthwise injector aux_vec_count for broadcasting case

* [LPT] Normalize on GPU enabling

Co-authored-by: Zinoviev, Vladimir <vladimir.zinoviev@intel.com>
Co-authored-by: dmitrygo <dmitry.gorokhov@intel.com>
2020-06-08 22:42:50 +03:00
Pavel Esir
7a11e36eeb Add fixedscale(bias) components to Kaldi (#725)
* Added fixed scale(bias) components

* Successfully converted after adding fixed bias,scale components

* Added unittests
2020-06-08 21:37:44 +03:00
Mikhail Letavin
155916acde [IE CLDNN] Fix variable initialization issues (#816) 2020-06-08 21:07:50 +03:00
Nikita Kudriavtsev
ac65ea30fd [ICV] Watchdog switch + ddr initialization (#554)
* [IE Myriad] Added XLinkBootFirmware method in XLink API for booting firmware buffer

* [IE Myriad] Patch firmware in mvnc. Added test to check device reset without connecting.

* [IE Myriad] Added option MOVIDIUS_DDR_TYPE for Myriad plugin

* [IE Myriad] Added tests for new option MOVIDIUS_DDR_TYPE

* [IE Myriad] Update firmware 1201 -> 1212

* [IE Myriad] Convolution3x3 tests are disabled due to firmware issue. #-32921
2020-06-08 20:51:45 +03:00
Irina Efode
3b5de94a09 [IE TEST] Eltwise tests refactoring (#726)
* [IE TEST] Eltwise tests refactoring

* [IE TESTS] Fix comments
2020-06-08 18:44:42 +03:00
Denis Orlov
ff00817bb7 [GNA] Support changing the execution mode in runtime (#801) 2020-06-08 18:43:12 +03:00
iliya mironov
eefaf56075 Fix unit tests for select layer. (#638)
* Fix unit tests for select layer.
2020-06-08 18:39:40 +03:00
Maxim Vafin
f1811ad060 Implement support for opset3 EmbeddingBag ops (#546)
* [MO] Implement EmbeddingBag_3

* Transform dynamic sub-graph of Wide and Deep into EmbeddingSegmentsSum

- Expressed SparseWeightedSum sub-graph through EmbeddingSegmentsSum
- Removed experimental SparseWeightedSum layer
- Implemented tests for the transformation

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Fix EmbeddingBag shape infer

* Fix EmbeddingSegmentsSum transformation for Wide and Deep

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Fix EmbeddingSegmentSum replacer after ports swap

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Update package_BOM.txt

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Add unit tests for EmbeddingXXX shape infer

* Fix ATen resolver

* Remove deleted files from BOM

* Add opset version to embedding_bag

* Use base class for EmbeddingBag

* Fix per_sample_weights case

* Fix EmbeddingSegmentsSum transformation

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Fix EmbeddingBag checks

* Fix ATen front transformation and merge conflicts

* Fix BOM

* Work around limitation for I64 input of W&D model

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Cleanup where operation to fix affect of WhereDecomposition transform

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Fix BOM

* Correct EmbeddingSegmentSum transform for Wide and Deep

Add casting segment ids to i32 and remove ConstToResult sub-graph.

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Update BOM with RemoveConstToResult transform

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Add more comments for RemoveConstToResult transformation

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Remove useless logging in EmbeddingSegmentsSum transformation

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Small fixes

* Move EmbeddingBag resolving back to front phase

* Improve error messages

* Fix typo in unittests

* Reimplement sparse_reshape middle transform

Avoid deprecated API.

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Clean-up graph after sparse_reshape and ConstToResult transformation

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Fix clean-up for transformations

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Fix clean-up for transformation #2

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

Co-authored-by: Roman Kazantsev <roman.kazantsev@intel.com>
2020-06-08 18:06:40 +03:00
Konrad Dobros
d155483573 [IE CLDNN] Optimize 1x1 imad convolution kernel (#757) 2020-06-08 16:44:50 +03:00
Andrey Somsikov
626bc4f3d4 Add commit links to memcheck report (#820) 2020-06-08 15:08:58 +03:00
Tomasz Dołbniak
60d4d62536 Disable warnings-as-errors for ONNX target (#749)
* Disable warnings-as-errors for ONNX target

* Disable warnigs-as-errors for windows too

* Change WIN32 -> MSVC
2020-06-08 13:52:45 +03:00
Evgenya Stepyreva
e7f5f53f92 [ MO ] Grouped conv fusion (#797)
Fixed the group convolution fusion pass to properly get the feature dim in NCHW layout case.
2020-06-08 13:00:54 +03:00
Edward Shogulin
a224078c5c [LPT] [Test] DepthToSpace sporadic fail fix (#815) 2020-06-08 12:55:37 +03:00
Alexander Zhogov
9a968b12db Azure CI: increase timeout for Mac to 180 min 2020-06-08 12:17:50 +03:00
Vladimir Paramuzov
f0498ad011 [IE CLDNN] Enable ShuffleChannels op (#787) 2020-06-07 22:57:20 +03:00
Edward Shogulin
63ee9f8916 [LPT] [CPU] DepthToSpace transformation (#663)
* [LPT] [TEST] LayerTransformation generalization

* [LPT] DequantizationDetails extending

* [LPT] DepthToSpace transformation implementation
2020-06-07 21:12:52 +03:00
Alexander Zhogov
93b60cacfa Azure: Add Ninja (#803)
* Azure: Add Ninja

* Fix 'Install Ninja' on Linux

* Fix bin dir path on Windows

* Add -Wno-unused-variable on Mac

* Add -Wno-error=unused-command-line-argument on Mac

* Set CXXFLAGS for Mac

* Improvements

* Fix BIN_DIR on Linux
2020-06-06 15:56:24 +03:00
Vladimir Paramuzov
0022eebd71 [IE CLDNN] Enable DepthToSpace (#780)
Enabled DepthToSpace ngraph transformat
Updated implementation to support 5d and mode parameter
fsv16 direct support
Functional tests for GPU
2020-06-05 20:16:47 +03:00
Daria Mityagina
807f85f93f it is duplicate of PR: #656, but without test modification (#794) 2020-06-05 19:57:03 +03:00
Chance Luo
7f09b54af8 Disable Hw Avg Pooling for small output tensors if excludePad=true (#772) 2020-06-05 19:47:53 +03:00
Kami-996
cad3ccd8a3 add /wd4819 to disable C4819 warning, which is treated as error in win32 (#767)
Co-authored-by: jasonlee <jasonlee@qiyi.com>
2020-06-05 16:04:59 +03:00
emmanuelattia-philips
a0d1dae91d Fix: ITT_INCLUDE_DIR was not correctly detected (#748) 2020-06-05 14:46:39 +03:00
Ilya Znamenskiy
4d3ddc1684 [IE CLDNN] GEMM int8 optimization using MMAD macro (#635) 2020-06-05 14:28:21 +03:00
Anton Voronov
70c2058b61 [CPU] supported ShuffleChannels and added tests (#636) 2020-06-05 14:10:55 +03:00
Ilya Churaev
3571d44896 Save the name of output data if we remove previous layer (#760)
* Save the name of output data if we remove previous layer

* Added test
2020-06-05 13:36:35 +03:00
Pavel Rodionov
20d812d959 [GNA] Set default GNA library to GNA2 (#771) 2020-06-05 13:00:58 +03:00
dmitrygo
b485e829d6 [CPU] DepthToSpace review leftovers 2020-06-05 12:47:24 +03:00
Maxim Vafin
f51c533ea8 Add ReduceL2 decomposition (#733)
* Add ReduceL2 decomposition

* Add ReduceL2 transformation tests

* Add const propagation unit test for ReduceL2
2020-06-05 12:34:57 +03:00
Denis Orlov
67e3e06bee Fix hetero mode in speech sample - set config when loading network (#786) 2020-06-05 11:54:03 +03:00
Ilya Churaev
7a5d447e9f [SAMPLES] Use defined constant instead of string (#788) 2020-06-05 11:22:24 +03:00
Gladilov, Gleb
f80bd537bf [IE][VPU][nGraph]: Fixes DTS transformations to properly keep outputs names (#734)
* NonZero, Broadcast

* Concat

* Gather

* [IE][VPU][nGraph]: Fixes DTS transformations to correctly keep outputs names

* [IE][VPU][nGraph]: Fixes dynamic to static shape nonzero tests

Co-authored-by: Roman Vyunov <roman.vyunov@intel.com>
2020-06-05 11:16:52 +03:00
Edward Shogulin
f9ac555857 [LPT] Output layers update fix (#754) 2020-06-05 10:54:38 +03:00
Sergey Shlyapnikov
6e491a89ad [IE CLDNN] Improve Gather performance and add fusing support (#736) 2020-06-05 10:20:58 +03:00
Egor Churaev
2100521a14 [IE CLDNN] Implement NormalizeL2 int8 kernels (#720) 2020-06-05 10:16:27 +03:00
Ilya Churaev
a705f0c358 Avoid loading of reader if it doesn't exist (#758)
* Avoid loading of reader if it doesn't exist

* Updated error messages
2020-06-04 21:21:13 +03:00
Maxim Vafin
c7d130efbe Fix Proposal for the case of 2 outputs (#773) 2020-06-04 20:56:46 +03:00
Evgeny Lazarev
c10ff28f12 Added default value for 'aligned' in the ExperimentalDetectronROIFeatureExtractor for backward compatibility (#777)
Fixed backward compatibility issue that old IRs with ExperimentalDetectronROIFeatureExtractor operation cannot be loaded with the new IE
2020-06-04 20:47:52 +03:00
Lukasz Debski
698dfc4bf6 [IE CLDNN] Permute fused ops support (#642) 2020-06-04 17:01:21 +03:00
Alexey Varyzgin
85aa23ec8a [CPU][BF16] Default Optimisation Capability of BF16 was enabled on CPX (#647) 2020-06-04 16:06:15 +03:00
Maxim Vafin
1001caf04e Add support for ONNX Pad-11 (#744) 2020-06-04 14:48:31 +03:00
Denis Orlov
0e60aed97a [GNA] Support 100 inputs, instead of 10 (#741) 2020-06-04 14:33:09 +03:00
Gorokhov Dmitriy
3183c116d9 DepthToSpace, SpaceToDepth layers optimizations (#706)
* [CPU] Updated DepthToSpace and SpaceToDepth layers to be conformant with the specification

The patch also includes n[d]hwc layout support as well as some optimizations

* [CPU][TESTS] Removed old DepthToSpace test since it doesn't corresponds to layer's specification

* [nGraph] Utilize CommonOptimizations pass with custom transformations callback
2020-06-04 14:25:19 +03:00
Evgenya Stepyreva
01e60d057d [ MO ] InterpolateConcat empty sources fix (#764) 2020-06-04 14:18:33 +03:00
Vladimir Paramuzov
d7fad0109a [IE CLDNN] Disabled sporadic detection output tests (#740) 2020-06-04 11:14:05 +03:00
Vladimir Paramuzov
28ffbf0857 [IE CLDNN] Remove unused fused deps for FQ (#712)
Remove unused fused FQ kernel arguments to avoid extra setArg() calls which significantly reduces host overhead
2020-06-04 10:30:46 +03:00
Egor Churaev
546377dc8e [IE CLDNN] Implement EmbeddingBag operations (#623)
Implemented three operations: EmbeddingBagPackedSum,
EmbeddingBagOffsetsSum and EmbeddingSegmentsSum. These operations do
the same work but have a different format of inputs.
2020-06-04 10:25:28 +03:00
Anton Voronov
e53b1b7fbc [MKLDNN_PLUGIN] Convolution node: skip initializing of primitive descriptors for planar layout if there is already jit primitive (#672) 2020-06-04 08:06:14 +03:00
Ilya Lavrenov
158d32139f Revert "Enabled thread tests (#717)" (#756)
This reverts commit 99a2423ec0.
2020-06-03 22:32:55 +03:00
wistal
2bb7010193 MO should support LRN k param with caffe model, rather than fixed to 1 (#716)
Co-authored-by: yipengqu <yipeng.qu@intel.com>
2020-06-03 20:33:55 +03:00
Alexey Suhov
1ffada0b23 [Docs] Fixes in readme files: (#750)
- change repo name to openvino
- update driver version
- fix path to samples data
- remove section about Movidius driver installation
- change latest release to 2020.3
- merge fixes in install_dependencies.sh from 2020 branch
2020-06-03 20:14:35 +03:00
Mikołaj Życzyński
023344a317 [IE CLDNN] Added fusing support to all pooling kernels (#689)
adds fusing support to all available pooling kernels
tests all possible input type/output type configurations
fixes minor bug in max pooling in pooling_gpu_test.cpp
fixed minor bug with yxbf format in pooling_gpu_ref and pooling_gpu_int8_ref kernels
fixes bug with b_fs_yx_fsv32 format in pooling_gpu kernel
resolves bug with max pooling accuracy mismatch in case of non-zero pad end layer parameter
resolves average pooling accuracy mismatch in case of non-zero pad end layer parameter
2020-06-03 19:44:27 +03:00
Lukasz Debski
e2d1ae7055 [IE CLDNN] Fixed stack overflow in calculate_prior_boxes pass (#747)
The problem behind this error was in program_impl::init_graph() where in calculate_prior_boxes we are trying to calculate output layout of an entire network recursively which causes stack overflow. Calculating output layouts beforehand in processing order fixes this issue.
2020-06-03 19:42:50 +03:00
Ilya-Krylov
cfb5f27899 Add 'aligned' param to ExperimentalDetectronROIFeatureExtractor for CPU plugin and MO 2020-06-03 17:52:40 +03:00
Tomasz Dołbniak
53927034da Python API for Assign, ReadValue and ExtractImagePatches (#719) 2020-06-03 15:01:43 +02:00
LiweiSong
63a77bb4a1 mkldnn_memory_solver.hpp: include stdint.h to avoid build error (#729)
fix the following compile error:

inference-engine/src/mkldnn_plugin/mkldnn_memory_solver.hpp:60:9: error: 'int64_t' does not name a type
|    60 |         int64_t size;
|       |         ^~~~~~~

include stdint.h to fix this.

Signed-off-by: Liwei Song <liwei.song@windriver.com>
2020-06-03 15:19:29 +03:00
Edward Shogulin
7edebd8d87 [LPT] [TEST] Sporadic test fail fix (workaround) (#742) 2020-06-03 15:05:45 +03:00
Evgenya Stepyreva
da230131d0 [ nGraph ] FP16 for evaluate (#722) 2020-06-03 14:14:59 +03:00
Vitaliy Urusovskij
72d9a9fae7 Use pre-defined DB collection names in memcheck_upload.py CLI (#651)
Use argparses `choices` for `--db_collection` option.

Also removed unnecessary redefinition of `db_collection` in memcheck_upload.py
2020-06-03 13:54:38 +03:00
Sergey Shlyapnikov
20ef9a9423 [IE CLDNN] Improve kernel selection for b_fs_yx_fsv16 layout and optimize Convolution kernels (#730) 2020-06-03 13:42:15 +03:00
Anton Zaytsev
b457553593 [IE TESTS] Move InferRequestTests (#618)
* [IE TESTS] move Infer_request tests

* fix v0

* [ci-skip][IE TESTS] test update basic class v0

* [ci-skip][IE TESTS] test update basic class v1

* [ci-skip][IE TESTS] test update basic class

* [ci-skip][IE TESTS] test update basic class v3

* [ci-skip][IE TESTS] test update basic class final versions

* [ci-skip][IE TESTS] fix

* [ci-skip][IE TESTS] fix codestyle and comment

Co-authored-by: Irina Efode <irina.efode@intel.com>
2020-06-03 12:16:00 +03:00
Evgeny Talanin
ed85690136 Skip some functional tests on VPU (#568) 2020-06-03 12:15:06 +03:00
Adam Osewski
3a80f0476b [ONNX] GRU and RNN operators. (#607)
* Create generic RecurrentSequenceDirection enum.

* Helper class RecurrentSequenceOp.

* Add ONNX GRU & RNN operators.

* Use OutputVector.

* Update doc.

* Add UTs for GRU and skip them on IE_CPU

* Add UT for bidirectional mode and fix it.

* Normalize activation function name case.

* Add unit-tests for RNN operator.

* UT for GRU with linear_before_reset set to true.

* Fix ONNX GRU for linear_before_reset case.

* Remove unnecessary symbol export macro.

* Fix CentOS error.

* Update UTs.

- Update few tests accuracy tolerance
- Update rnn_fwd_activations with new reference values and model.

* Review comment: add check for static shape

* Add UT for RNN with constant inputs W, R.

* Skip UT with const W,R on IE_CPU
2020-06-03 12:01:56 +03:00
Gladilov, Gleb
4e0c7a217f [IE][VPU]: Faster-RCNN fixes on myriad plugin side (#711)
* [IE][VPU]: Enables pass for propagating dynamism to network outputs

If network had dynamic output and then myriad Front-End inserted
convert stage at the end (to convert FP16 -> FP32 - output precision)
then dynamism would not be propagated - we have convert stage that
has dynamic input, but static output. As a result, we have run-time
error in Convert kernel: input and output shapes do not match.

At the moment, pass supports only Convert stage as output stage
over which we should propagate dynamism to outputs.

Signed-off-by: Gladilov, Gleb <gleb.gladilov@intel.com>

* [IE][VPU]: Fixes parse DSR in case of output data

Replacing stage output must be done after replacing
data to shape parent, because the last one may access
original parent producer, but after replacing stage output
it'd not have one.

Signed-off-by: Gladilov, Gleb <gleb.gladilov@intel.com>

* [IE][VPU]: Fixes MacOS build

* [IE][VPU]: Fixes shape data naming convention

Plugin part assumes that if there is dynamic data object, that's
represented as 2 different data objects (data and shape), then
shape data object has name = data object name + @shape suffix.

Pass that creates new dynamic data object should respect that
assumption.

* [IE][VPU]: Fixes dis-alignment in names of data objects representing dynamic data object

MyriadInferRequest::GetResult assumes that in case of dynamic data object
"data" data object and "shape" data object will have aligned names:
"shape" name = "data" name + "@shape" suffix.

In order to meet that expectation propagating dynamism pass must use output
data object name as prefix. Additionally, propagating pass must be applied
before converting shape notation pass in order to make output shape in IE
notation, not MDK, as MyriadInferRequest::GetResult is expecting.

Signed-off-by: Gladilov, Gleb <gleb.gladilov@intel.com>
2020-06-03 11:43:19 +03:00
Mikhail Treskin
447dd3570d Remove deprecated layer test class (#610)
* Update activation layer test

Signed-off-by: Mikhail Treskin <mikhail.treskin@intel.com>

* Get rid of LayerTestsCommonDeprecated class

Signed-off-by: Mikhail Treskin <mikhail.treskin@intel.com>

* Fix activation tests instantiations for gpu and myriad plugins

* Remove leaking inferWithInterp function
2020-06-03 11:04:15 +03:00
Mikołaj Życzyński
3ea1657e4f [IE CLDNN] Activation with fused quantize bug fix (#613)
fixed bug connected with quantization fusing to activation
added scale and activation fusing support
added corresponding tests
2020-06-03 09:30:49 +03:00
Ilya Lavrenov
cdd31da1c7 Updated deprecated messages (#715) 2020-06-03 06:04:50 +03:00
Edward Shogulin
9f6fde9af2 [LPT] Output layers fix (#677) 2020-06-02 23:44:24 +03:00
Ilya Churaev
99a2423ec0 Enabled thread tests (#717) 2020-06-02 23:42:05 +03:00
Nikolay Shchegolev
4f6c976add [CPU] EmbeddingBagOffsetsSum, EmbeddingBagPackedSum, EmbeddingSegmentsSum operations. (#576)
* [CPU] EmbeddingBagOffsetsSum, EmbeddingBagPackedSum, EmbeddingSegmentsSum operations.

* Performance fix

* Perf v2

* Code style
2020-06-02 21:56:17 +03:00
Anna Alberska
4c44ce9795 add PassManagerSettings & create more legible description for concat quantization exception and a test for it (#563) 2020-06-02 21:03:27 +03:00
Andrey Babushkin
6f69ba04c8 [Jenkinsfile] Add failFast parameter (#721)
It allows us to rebuild a Jenkins build and wait until all stages are finished, even if some of them fail
2020-06-02 20:22:25 +03:00
iimironov
a79cd75596 Imironov/cvs 31297 add yolov4 support (#594)
* Add transformation of softplus operation into log(1.0 + exp(x)).
2020-06-02 19:20:29 +03:00
Evgeny Latkin
b2816dc1ec [IE][Myriad] Gather: add test case (#644) 2020-06-02 17:41:19 +03:00
Gleb Kazantaev
638c7b891c Updated DeconvolutionIE to support dynamic shapes (#671)
* Updated DeconvolutionIE to support dynamic shapes

* Updated DeconvolutionIE to support output_shape input

* Updated ConvertConvolutions pass
2020-06-02 17:26:28 +03:00
Vladimir Paramuzov
cbe45b7d0a [IE CLDNN] Fixed names mapping chain in runtime graph to respect original names (#599) 2020-06-02 17:25:41 +03:00
Vitaliy Urusovskij
1d179fdb39 Add parallel downloads to stress tests (#678) 2020-06-02 17:24:22 +03:00
Gleb Kazantaev
be3b4a3362 specificCreator for Transpose operation (#713)
* Updated Transpose node convertor; replaced get_vector with cast_vector

* Replaced NodeCreator with specificCreator
2020-06-02 17:15:36 +03:00
Andrey Somsikov
5776b66fb2 Enable Control Flow Guard for Windows binaries (#714)
Control Flow Guard is security option.
2020-06-02 16:46:23 +03:00
azhogov
8377c714aa Revert "Add ittnotify from IntelSEAPI"
This reverts commit 0583b37a14.
2020-06-02 12:52:14 +03:00
azhogov
f15096e101 Revert "Use ittnotify from thirdparty"
This reverts commit 3863656f44.
2020-06-02 12:50:06 +03:00
Anton Chetverikov
265e3c7cba Remove TopKnormalizer from MO IR Reader transformation_list (#590)
* Remove TopKnormalizer from transformation_list and added call of normalize_outputs to fix read/save of some models
2020-06-02 12:43:41 +03:00
Maksim Doronin
daaeaa5881 [IE VPU] Enable s32->u8 conversion (#699) 2020-06-02 12:20:06 +03:00
Evgeny Lazarev
278868b7a1 Align MO requirements files (#710) 2020-06-02 11:32:39 +03:00
Vladimir Paramuzov
dbdaaa93dd [IE CLDNN] Quantized deeplabv3 optimizations (#646)
Enabled dilation for imad dw fsv16 kernel
Added argmax and mutable_data to fsv16 white list
Enabled byxf input for quantize scale_shift kernel
2020-06-02 09:17:39 +03:00
Somsikov, Andrey
3863656f44 Use ittnotify from thirdparty
VTune ittnotify lacks support for aarch64. Switching to use ittnotify
2020-06-01 20:53:39 +03:00
Somsikov, Andrey
0583b37a14 Add ittnotify from IntelSEAPI
Adding ittnotify component of https://github.com/intel/IntelSEAPI

commit 88a56e0ecd162667c7afd2ee9969221d62a32509 (HEAD -> master, origin/master, origin/HEAD)
Merge: 6d743e1 809062a
Author: Alex <alexander.a.raud@intel.com>
Date:   Wed Jul 10 15:06:46 2019 -0700
2020-06-01 20:53:39 +03:00
Andrew Bakalin
d48e0ef5a6 [VPU][NGraph] Reuse NonZero evaluate in StaticShapeNonZero (#658)
* [VPU][NGraph] Reuse NonZero evaluate in StaticShapeNonZero

* [VPU][Tests] Adopt old tests to work with reverted indices

* [VPU] Update firmware
2020-06-01 18:57:06 +03:00
Katya
41ed6f0891 [IE Python API] fix TensorDesc test file name (#701) 2020-06-01 15:58:05 +03:00
Maksim Doronin
69e3af4c99 [IE VPU] OutShapeOfReshape per-layer tests (#631)
* [IE VPU] OutShapeOfReshape per-layer tests

* [IE VPU] Update firmware

* [IE VPU] OutShapeOfReshape: get rid of code duplication
2020-06-01 14:51:04 +03:00
Piotr Rozen
935b48b978 Added speech recognition demo package for centOS (#682) 2020-06-01 14:41:45 +03:00
Vladislav Vinogradov
88264b895a [IE] Fix build error (#703)
Missing changes in transformation library due to IE API dependency removal.
2020-06-01 13:09:23 +03:00
Mikhail Letavin
65f62945dd [IE CLDNN] Free up first copy of weights/biases that were transferred to USM device memory (#561) 2020-06-01 12:01:28 +03:00
Roman Kazantsev
004f414b89 Fix SparseWeightedSum transform for Wide and Deep (#698)
WhereDecomposition transform is applied to Where operation in for-garbage sub-graph remained after SparseWeightedSum transform.

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>
2020-06-01 11:48:06 +03:00
Jedrzej Hajduczenia
4001d0d99f [IE CLDNN] Prefer bf(wz)yx format for reshape (#691)
Performance improvement for icnet-camvid model
2020-06-01 10:38:33 +03:00
Ilya Churaev
d970d0494e Removed dependency on Inference Engine from transformation library (#680)
* Removed dependency on Inference Engine from transformation library

* Change transformations export macro

* Fixed comments
2020-06-01 10:31:31 +03:00
Ivan Tikhonov
cd01ccd449 Reshape-Permute-Reshape pattern to DepthToSpace layer transformation (#601)
* implemented depth_to_space transformation

* renaming

* added functional tests, fixed mistakes in implementation of the transformation

* disable ConvertSpaceToDepth/ConvertDepthToSpace transformation for CPU plugin, enable DepthToSpaceFusion for CPU plugin only, add specific creators

* fix wrong include

* fix for functional tests: set transformation callback

* revert callback calls for CPU plugin

* move functions to .cpp file

* Apply review comments

* Apply additional review comments

* fix cast to bool type
2020-06-01 09:24:16 +03:00
Ewa Tusień
b4893945c7 [ONNX] Add Range op to ONNX importer (#548)
* Added Range op to ONNX importer.

* Disable tests for IE.
2020-06-01 05:59:39 +03:00
emmanuelattia-philips
7ec63cafe3 Ie capi callback with explicit calling convention (#697)
* Added explicit calling convention to CAPI callback

* Fixed typo spacing

* Renamed INFERENCE_ENGINE_CALLBACK to INFERENCE_ENGINE_C_API_CALLBAC to make the macro really specific to the C API
2020-05-31 23:19:37 +03:00
Gladilov, Gleb
3bf7a69df1 [IE][VPU]: Faster-RCNN fixes on myriad plugin side (#665)
* [IE][VPU]: Fixes deallocation data for cases of CMX allocator run

The final loop tries to deallocate data objects that keep shape values for
other data objects that're outputs of a model. But the case when allocator
takes only CMX data into consideration was not handled and since allocation
could not happen, it lead to fail on deallocation of a data object that has
not been allocated.

Signed-off-by: Gladilov, Gleb <gleb.gladilov@intel.com>

* [IE][VPU]: Fixes allocator with work on data to shape edges

Since there is new relationship between data objects: some
data objects may contain shape of other data object - allocator
must properly respect that. The thing is if 2 data objects are
connected in such a way, they represent unite entity (dynamic
data object) and should have the same lifetime.

Signed-off-by: Gladilov, Gleb <gleb.gladilov@intel.com>
2020-05-31 13:17:36 +03:00
emmanuelattia-philips
bad5bb30a3 Added ie_core_read_network_from_memory to the C API ie_bridge. (#674)
* * Added ie_core_read_network_from_memory to the C ie_bridge.

* Added size argument for xml_content, fixed const correctness of the weight_blob, fixed unit test

* * Removed debug message

* Changed variables names from model_xxx to weights_xxx to be more consistent with the argument name of the tested function.

* Added a description for xml_content_size in ie_core_read_network_from_memory.

* * xml_content is now passed as uint8_t
* reading function factorized in the unit-test
2020-05-31 02:25:39 +03:00
Ilya Lavrenov
3d42871523 Added dependency on ONNX reader (#693) 2020-05-30 15:15:20 +03:00
Denis Orlov
9af51a165f [GNA] Workaround support for callbacks (#591) 2020-05-30 00:43:42 +03:00
Edward Shogulin
e2729b87f3 [LPT] Convolution regression tests (#543)
* [LPT] Base test infrastructure extending & Convolution test

* [LPT] LPT test infrastructure refactoring
2020-05-29 22:56:58 +03:00
Anastasia Kuporosova
3ef1a26174 [IE TOOLS] Use input_info in python benchmark app (#660) 2020-05-29 21:28:17 +03:00
Anastasia Kuporosova
cbad43f3a5 [Python API] Fix PreProcessInfo tests (#690) 2020-05-29 21:20:16 +03:00
Vladimir Gavrilov
3a24eb6a62 MO fails generating IR from XLNET model due to a bug in the transformation ConvertGroupedStridedSlice (#625)
* Small fix in the transformation ConvertGroupedStridedSlice. Now VariadicSplit is generated only in the case when node has at least 2 output nodes.

* Added unittests for the case when there is only one StridedSlice.
2020-05-29 21:01:09 +03:00
Ilya Churaev
963f55a189 Fixed CODEOWNERS paths (#684) 2020-05-29 20:57:32 +03:00
Vladimir Paramuzov
f7052a107d [IE CLDNN] Optimized FQ kernel in fsv16 layout (#573)
- Optimized FQ kernel in fsv16 layout. Enabled scaleshift transform for FP16 precision
- Disabled activation_opt kernel with fused ops in some cases
2020-05-29 20:10:30 +03:00
Evgenya Stepyreva
6cfa77223e [ nG ] Added F16 folding support (#686) 2020-05-29 19:09:01 +03:00
Ilya Churaev
11bd4f8a42 Do not use ONNX reader if ONNX importer was disabled (#683) 2020-05-29 17:46:40 +03:00
Anna Khakimova
be3b711972 Pre-processing(GAPI): AVX2/AVX512 implementation of 3C/4C Resize via universal intrinsics. (#612) 2020-05-29 15:44:12 +03:00
Ilya Lavrenov
011128cb54 Python: Fixed installation rules to install additional .so files generated from .pyx (#676) 2020-05-29 14:45:59 +03:00
Katarzyna Mitrus
5f8f9ec108 [nGraph] Reorder nGraph LSTMSequence inputs and outputs dimensions (#560)
* Reorder nGraph LSTMSequence input/outpt dimensions

* Update nGraph pythonAPI for LSTMSequence

* Reorder axes in ONNX importer LSTM

* Tests update

* Fix clang warning

* Use opset3 namespace

* Style apply

* Tests update

* Use opset1  namespace

* Remove usage of  GetOutputElement in ONNX importer LSTM

* Remove opset0 header

* Use Node::output()
2020-05-29 14:29:18 +03:00
Ivan Tikhonov
a4f13ae9fe fix constant folding of Concat op (#675) 2020-05-29 14:09:20 +03:00
Artyom Anokhov
09192b804e [OpenVINO scripts] Fixed *.sh files index from 644 to 755 (#664)
* Fixed *.sh files index from 644 to 755

* Added convert.py executable permission
2020-05-29 13:50:17 +03:00
Gladilov, Gleb
67d733d5a8 Enables VPU maintainers notification in case of PR to VPU related folders and files (#667) 2020-05-29 09:32:10 +03:00
Evgenya Stepyreva
e290b14ab1 [ MO Interpolate ] Fixing broken model reshape-ability (#619) 2020-05-29 09:15:47 +03:00
Evgenya Stepyreva
5cc8114322 [ MO: CVS-32286 ] IdentityN fix (#668) 2020-05-29 09:11:22 +03:00
Ilya Churaev
e51e1682ca Enabled Unit tests and remove IReaderPtr (#653)
* Enabled Unit tests and remove IReaderPtr

* Fixed unicode tests for Windows

* Fixed typo
2020-05-28 22:40:20 +03:00
Andrey Somsikov
5f6999ed7e Remove Safety dependency (#627)
Safety tool should be isolated from the environment it is validating:
https://github.com/pyupio/safety/security/advisories/GHSA-7q25-qrjw-6fg2

Suggesting docker solution by default.
2020-05-28 18:31:10 +03:00
Gleb Kazantaev
bb41994f56 Removed StridedSlice to StridedSliceIE transformation (#661) 2020-05-28 18:27:54 +03:00
Vladimir Gavrilov
33aca7d2c4 SplitConcatPairToInterpolate inserts Interpolate when input is 2D (#596)
* SplitConcatPairToInterpolate transformation was moved to middle stage and is applied only for 4D and 5D inputs.
2020-05-28 18:08:24 +03:00
Andrew Bakalin
77162bf8ee [VPU][Tests] Fix sanitizer issue in unit tests (#630) 2020-05-28 18:01:56 +03:00
Irina Efode
23f41213bb [IE TESTS] MOVE plugin tests (#659) 2020-05-28 17:22:19 +03:00
Gleb Kazantaev
b731ce13d8 Fixed NMSIE shape infer function (#648) 2020-05-28 16:45:48 +03:00
Evgeny Lazarev
0efe474342 Fixes for Mask-RCNN conversion (#654)
* Fixed ONNX Mask-RCNN conversion

* Fixed validate_and_infer_types for NMS ops: added check for number of connected inputs

* Updated NMS ops to properly handle optional input with index 2

* Fixed typo in the implementation
2020-05-28 14:31:42 +03:00
Evgenya Stepyreva
ec5c9db932 [ MO ] Memory usage (#657) 2020-05-28 14:00:42 +03:00
Anton Zaytsev
00b53d6c33 [IE TESTS] Move Config behavior tests (#615)
* [ci-skip][IE TESTS] move config test

* [ci-skip][IE TESTS] fix config
2020-05-28 13:55:37 +03:00
Anton Zaytsev
25d36568f8 [IE TESTS] Move ExecGraphInfoTests (#617)
* [ci-skip][IE TESTS] move ExecGraph test

* [ci-skip][IE TESTS] fix

* [ci-skip][IE TESTS] fix codestyle

Co-authored-by: Zaytsev, Anton <antonzay@intel.com>
2020-05-28 13:48:16 +03:00
Irina Efode
246790f264 [IE TESTS] Move unit tests to the new infra (#641) 2020-05-28 12:33:56 +03:00
Roman Kazantsev
958e425775 Implement Bucketize in MO and MKLDNN for opset3 (#583)
This operation is used for Wide and Deep Model

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>
2020-05-28 11:11:07 +03:00
Anastasia Kuporosova
e025f1464b [Python API] Add InputInfo and PreProcessInfo (#637) 2020-05-28 10:55:11 +03:00
Michał Karzyński
125dd89c01 Remove Runtime classes from nGraph Python API (#569) 2020-05-28 09:50:57 +02:00
Gleb Kazantaev
f276b5fbb4 Updated StridedSlice to StridedSliceIE conversion to support dynamic shapes (#621)
* Updated ConvertStridedSliceToStridedSliceIE transformation to support dynamic shapes

* Fixed stridedslice to crop transform not to fail with dynamic shapes
2020-05-28 01:14:12 +03:00
Edward Shogulin
57da5ddab8 [LPT] Concat complex graph support (#527)
* [LPT] [Tests] LowPrecisionTransformations base test extending

* [LPT] Concat complex (neighbor) graph support

* [LPT] Multichannel concat: scale per channel support

* [LPT] test improvements

* [TEST] tests infrastructure improvements
2020-05-27 21:53:50 +03:00
Mikołaj Życzyński
e734377590 [IE CLDNN] Grouped convolution bug fix (#572)
Fixes bug in grouped convolution connected with wrong weights layout in SetDefault() method
2020-05-27 21:19:49 +03:00
Ilya Churaev
c75fd4db92 Removed CI and docker scripts (#622) 2020-05-27 20:58:03 +03:00
Alexander Zhogov
79b780413f Azure CI: Add timeouts, *smoke* filter (#574) 2020-05-27 19:47:23 +03:00
Alexander Zhogov
93333780c7 CODEOWNERS: Add link to help, and small fix 2020-05-27 18:50:47 +03:00
Ilya Churaev
3c718809d3 Added ONNX reader for the OpenVINO (#532)
* Added ONNX reader for the OpenVINO

* Fixed comments

* Fixed comments

* Fixed message

* Fixed memory consumption

* Revert IReaderPtr

* Fixed Myriad tests

* Fixed comment

* Renamed inference_engine_ir_readers to inference_engine_ir_reader
2020-05-27 18:37:19 +03:00
Gleb Kazantaev
d5434a036e CODEOWNERS: Added nGraph/Transformations/nGraphTests/IECore (#633) 2020-05-27 18:30:14 +03:00
Irina Efode
7fcb12603e [IE TESTS][IE CMAKE] Fix download 'testdata' repo via HTTPS (#597) 2020-05-27 16:20:09 +03:00
Gorokhov Dmitriy
c5124763db [CPU] Added quantization post op to the list of supported by FP32 DW Conv (#592) 2020-05-27 16:00:13 +03:00
Nikolay Shchegolev
27e8580b7d [CPU] ExtractImagePatches operation (#575) 2020-05-27 15:46:49 +03:00
Andrew Bakalin
fd1cc08cd8 [VPU][GT] Add convert shape notation pass (#559)
* [VPU] Update firmware

* [VPU][GT] Adjust allocator to deal with undeallocated shapes

* [VPU][Tests] Adjust NonZero tests and references

* [VPU][Tests] Add unit tests for pass

* [VPU][GT] Adjust previous unit tests

* [VPU][GT] Introduce convertShapeNotation pass

* [VPU][GT] Review fixes

* [VPU] Change dims order in dynamic output
2020-05-27 15:35:28 +03:00
Pavel Esir
e337350cc1 Fix skipping incorrect names in scale/mean values (#535)
* Fix skipping incorrect names in scale/mean values

* removed inappropriate comment in cli_parser.py
2020-05-27 14:53:50 +03:00
Evgeny Latkin
d24132912e ICV: fix Scatter layers: fix validators (#541)
* ICV: fix Scatter layers: fix validators

* ICV: fix Scatter layers: enable 0D for `axis`

* Revert "ICV: fix Scatter layers: enable 0D for `axis`"

This reverts commit 82da24b989678061a585a5c7ffd7d5dab10f5edc.

* ICV: fix Scatter layers: test, fix CNNNetworkImpl
2020-05-27 13:14:46 +03:00
Vladislav Vinogradov
946ed119c8 [IE CMAKE] Fix OpenBLAS dependency handling for Yocto ARM64 platform (#562)
Use `THIRDPARTY_SERVER_PATH` variable to override remote artifacts path.
2020-05-27 13:06:20 +03:00
Michał Karzyński
dcdeb34c8f CODEOWNERS: Add openvino-ngraph-maintainers (#628) 2020-05-27 12:50:18 +03:00
Konstantin Satunin
bfe416704d Change region of VMSS agents (#595) 2020-05-27 12:13:52 +03:00
Irina Efode
44cd77f54b [IE TESTS] Move old IE Unit tests to the new infra (#605) 2020-05-27 11:53:00 +03:00
Egor Churaev
31fe146539 [IE CLDNN] Implement CumSum operation (#533)
CumSum performs cumulative summation of the input elements along the given axis.

Details:
By default, it will do the sum inclusively meaning the first element is
copied as is. Through an "exclusive" attribute, this behavior can change
to exclude the first element. It can also perform summation in the
opposite direction of the axis. For that, set "reverse" attribute to
true.

JIRA: 29994
2020-05-27 11:47:16 +03:00
Nikita Kudriavtsev
2012d084f2 [IE Myriad] "printf" methods were replaced with mvLog (#552) 2020-05-27 11:45:34 +03:00
Andrew Bakalin
d337b4ed97 [VPU][GT] Refine edges methods (#550)
* [VPU][GT] Extract order manipulation into separate methods

* [VPU][GT] Rename data -> dependency

* [VPU][GT] Extend unit tests

* [VPU][GT] Introduce replacement and removal methods for StageDependency

* [VPU][GT] Update DataToShape connection methods
2020-05-27 11:14:02 +03:00
Evgenya Stepyreva
5c2eb05990 [ MO ONNX ] Resize-11 clear error message (#620)
* Small refactoring of extractors

* [ MO ] Throwing an exception while extracting Resize-11 which is not supported
2020-05-27 08:09:15 +03:00
Konrad Dobros
d3ea03bbfc [IE CLDNN] Enable int8 activation for fsv16 format (#516)
This change enables int8/uint8 standalone activation to use optimized
block format (b_fs_yx_fsv16). This should eliminate cases where such
activation had reorders before and after.

Support for this is already provided by activation_kernel_ref implementation.

Related JIRA: CVS-28494
2020-05-27 05:37:38 +03:00
Gleb Kazantaev
6788153ba9 Updated convert_nms_to_nms_ie transformation to support dynamic shapes (#614) 2020-05-27 00:38:25 +03:00
Gleb Kazantaev
851f64946a Updated ConvertGatherToGatherIE transformation to support dynamic shapes (#611) 2020-05-27 00:38:04 +03:00
Evgeny Lazarev
c1625743df Change Elu a regular op since decomposition works extremely slowly (#582)
* Moved Elu operation from Fused to regular ones because the decomposition works extremely slowly.

* Added reference implementation for the Elu op
2020-05-26 21:59:08 +03:00
Evgenya Stepyreva
73f3b7c8fc [ MO ONNX ] TopK-1/10/11 proper extracting (#600) 2020-05-26 21:53:24 +03:00
Vitaliy Urusovskij
4a44f84dab [Stress] Updated test_configs with new path to OMZ mtcnn models (#602) 2020-05-26 20:11:46 +03:00
Ilya Lavrenov
bb039adef8 Fixed compilation with clang-10 + xcode (#521) 2020-05-26 17:17:36 +03:00
Shashwat Dalakoti
4943a954c7 Updated requirements.txt (#593)
Alignment with the requirements_tf.txt file
2020-05-26 16:19:43 +03:00
JunX
7595512d1f fix issue log print wrong origin image shape (#581) 2020-05-26 14:27:14 +03:00
Irina Efode
c3aa866a33 [IE CMAKE] FIX PATHS (#553)
* [IE CMAKE] FIX PATHS

* Fix problems
2020-05-26 11:57:02 +03:00
Ilya Churaev
42a8364cb6 Disable nGraph tests if ENABLE_TESTS=OFF (#579) 2020-05-26 11:51:47 +03:00
Gleb Kazantaev
d3764a7563 Updated Mul->Add conversion to support dynamic shapes (#512)
* Updated Mul Add conversion to support dynamic shapes

* Keep changes

* Fix for cases when eltwise performs broadcasting via Constant

* Added comments;Fixed eltwise shape infer; Updated tests
2020-05-26 10:24:52 +03:00
Roman Donchenko
e835a4cf58 MO: Flush after dumping the arguments to stdout (#570)
When stdout is not a terminal, Python will buffer it by default. This
means that a consumer of MO's output will not see the argument information
until the buffer is flushed, which will normally only happen once MO
finishes (which might take a while).

Flushing stdout explicitly allows the consumer to see this info as soon
as it's printed.
2020-05-26 07:44:25 +03:00
Gleb Kazantaev
d3923f2ce0 Update TopKIE operation and transform to support dynamic shapes (#526)
* Update TopKIE operation and transform to support dynamic shapes

* Fix TopKIE shape infer

* Updated TopKIE infer function

* Removed index_element_type; changed swtich with as_string<> method

* Fixed ieFuncTests

* Fixed convert_topk transformation

* Updated convert_topk transformations

* ngraph::copy_runtime_info(topk, new_ops);
2020-05-26 01:19:38 +03:00
Irina Efode
c6e03d73d8 [IE TESTS] Move old IE unit tests to the new infra (#544)
* [IE TESTS] Move ie_blob_proxy tests

* [IE TESTS] Move network serializer tests

* [IE TESTS] Move CNNNetwork tests to the IE func

* [IE TEST] Fix deprecation warnings

* Fix comments
2020-05-25 23:28:59 +03:00
Vladimir Paramuzov
0b23215b72 CODEOWNERS: added cpu/gpu developers teams (#540) 2020-05-25 21:54:54 +03:00
Konstantin Satunin
2f9fd74151 Use compute optimized VMs for CI (#567) 2020-05-25 21:31:57 +03:00
Maxim Vafin
8c8629a4af Support ONNX Clamp-11 (#538) 2020-05-25 19:59:07 +03:00
Ilya Churaev
04bb8ab51d Added case less check for enum names (#534)
* Added case less check for enum names

* Added <algorithm> header
2020-05-25 16:23:55 +03:00
Nikita Kudriavtsev
74e8b54ce3 [IE Myriad] Correct destruction order in functional tests with DISABLE_PLUGIN_CACHE env. variable (#542) 2020-05-25 15:45:59 +03:00
Evgenya Stepyreva
b6a05c232e [ MO TF ] IdentityN support (#529) 2020-05-25 10:52:58 +03:00
Alexander Zhogov
507c06c8bc Azure CI: Enable cpuFuncTests on Windows 2020-05-23 01:29:36 +03:00
Alexander Zhogov
244f4e9fe7 CODEOWNERS: Fix 2020-05-23 01:27:53 +03:00
Alexander Zhogov
43fdf32729 Fix MO CI job name (#520) 2020-05-23 00:24:05 +03:00
Alexander Zhogov
20c1755efc Update public CI (#514)
* Update public CI

* Add MO test check

* Disable cpuFuncTests on Windows
2020-05-22 23:34:26 +03:00
Alexey Suhov
0064c299c3 add plugin template (#515) 2020-05-22 22:34:00 +03:00
Irina Efode
2e3928071f Update CODEOWNERS using openvino-ie-tests-maintainers group (#519) 2020-05-22 22:17:06 +03:00
Irina Efode
f1aa573b79 Update CODEOWNERS (#518) 2020-05-22 21:37:03 +03:00
Irina Efode
acc311e6f9 [IE TESTS] Fix win func test issue (#508) 2020-05-22 21:19:28 +03:00
Evgeny Talanin
d006030ad3 Update codeowners 1 (#517)
* Fine-grained groups to CODEOWNERS at day 1

* Fix

* Fix ie-maintainers

Co-authored-by: Alexander Zhogov <alexander.zhogov@intel.com>
2020-05-22 21:18:54 +03:00
Ilya Lavrenov
fc899e6ceb Remove test artifact (#511) 2020-05-22 20:02:00 +03:00
Alexander Zhogov
a3d482035e Azure: Update job names, add cpuFuncTests (#509) 2020-05-22 17:47:05 +03:00
Alexey Suhov
ca9a78874a Remove Dimension::size_t and callers
(cherry-pick master commit 72fa20942a3f135ea2e324f47dd401506a913876)
2020-05-22 11:17:20 +03:00
azhogov
b8611139ca Update job name 2020-05-22 10:23:59 +03:00
Alexander Zhogov
6e2cfbca0c CODEOWNERS: Add Jenkinsfile 2020-05-22 10:09:47 +03:00
Alexander Zhogov
b36d0df477 CODEOWNERS: add tools 2020-05-22 10:06:33 +03:00
Alexey Suhov
ccb7438803 publish master branch snapshot, revision ea98a886d925eb152931aab13856e68037665562 2020-05-22 03:42:00 +03:00
Alexey Suhov
deb008a26f publish master branch snapshot, revision 8d31237e2c3f673cbb0f0ba110fc10f5cce1d2bb 2020-05-22 02:23:12 +03:00
Alexey Suhov
eab7ef4895 add submodules for mkl-dnn, gflags and gtest 2020-05-21 23:00:55 +03:00
Konstantin Satunin
778063e5cb fixed latest release 2020-05-21 17:27:44 +03:00
Alexey Suhov
d222c99ca7 add speech demo 2020-05-21 17:14:03 +03:00
Alexey Suhov
29d24c613a move dependencies to https://download.01.org/opencv/master 2020-05-21 15:00:31 +03:00
Alexey Suhov
f1b7a7292b update TBB and VPU binary dependencies 2020-05-20 22:02:34 +03:00
azhogov
ec2ca3e54f Azure: disable IE_Lin 2020-05-20 13:15:07 +03:00
Alexander Zhogov
6d56e824d2 Azure Pipelines: Set -j12 for Lin_self 2020-05-20 12:05:04 +03:00
Konstantin Satunin
7566e8202f Test Ubuntu 1804 VMSS 2020-05-20 11:46:09 +03:00
Alexey Suhov
f30dcc218c publish master branch snapshot, revision 9df5eb1f84e13a35720a918f88324561222ab114 2020-05-20 01:13:06 +03:00
Alexey Suhov
3ad0e4e434 remove ngraph submodule 2020-05-20 00:20:33 +03:00
Alexander Zhogov
4893f27fb9 Update How to Contribute 2020-05-19 19:08:14 +03:00
Alexander Zhogov
12f0fc72db Create CONTRIBUTING.md 2020-05-19 19:04:27 +03:00
Alexander Zhogov
6dd7ce89af Update CODEOWNERS 2020-05-19 14:42:35 +03:00
Andrey Babushkin
ed9cd78421 [ie/scripts/dependencies.bat] Fix unpack for opencv 2020-05-18 20:56:05 +03:00
Alexander Zhogov
eb57da7605 Azure Pipelines: Try -j12 for Win vmss 2020-05-18 20:38:27 +03:00
azhogov
5bc6a1e723 Azure: Try -j8 for Win vmss 2020-05-18 20:33:56 +03:00
Konstantin Satunin
76b3d2d47b Check VMSS 2020-05-18 20:10:29 +03:00
Alexey Suhov
dd0a195f2d fix BOM file for model optimizer 2020-05-18 19:20:16 +03:00
Alexey Suhov
3248c3002a Merge branch 'master' of https://github.com/openvinotoolkit/openvino 2020-05-18 18:29:42 +03:00
Alexander Zhogov
c78a575d23 Azure Pipelines: Check WIN_VMSS_VENV 2020-05-18 18:28:54 +03:00
Alexey Suhov
d22e5e8260 add execute permissions to run_code_checks.sh 2020-05-18 18:28:53 +03:00
Alexey Suhov
ba0a339888 publish master branch snapshot, revision 59af1853ca21ea08acf17b177da0b239753deb46 2020-05-18 17:21:58 +03:00
Alexander Zhogov
0a5a63bc0c Azure Pipelines: change pool for Win_self to WIN_VMSS 2020-05-18 16:02:40 +03:00
azhogov
32488a5c26 Fix VS2017 compilation issue: Intermediate channels count type changed to size_t
(cherry-pick master 0f6155ac3616fb2a7b51cfaddfdad1cc189f968d)
2020-05-18 13:17:10 +03:00
Alexander Zhogov
8081638bb5 Update README.md links 2020-05-16 10:49:12 +03:00
Alexander Zhogov
c50d41826d Azure Pipelines: disable nGraph GPU UT, LTO, Mac crashed UT 2020-05-15 19:16:28 +03:00
Alexander Zhogov
7b5887afba Azure Pipelines: disable nGraph GPU UT 2020-05-15 17:16:04 +03:00
Alexander Zhogov
54bb6b057f Create CODEOWNERS 2020-05-15 11:41:46 +03:00
Alexey Suhov
645641e87d add execute permissions to get_testdata.py 2020-05-14 23:17:54 +03:00
Alexey Suhov
3d63b13ba5 Revert LTO on Windows 2020-05-14 16:30:29 +03:00
Alexey Suhov
5b428f0655 publish master branch snapshot, revision 49482ae3bea0cbaa07474f86f36db11943142687 2020-05-13 21:12:22 +03:00
Alexander Zhogov
9d6501e9a6 Azure Pipelines: exclude failed Mac test fix 2020-05-11 14:59:09 +03:00
Alexander Zhogov
5b07298559 Azure Pipelines: exclude failed Mac test fix 2020-05-11 12:27:57 +03:00
Alexander Zhogov
3a0a7e79ff Azure Pipelines: exclude failed Mac test 2020-05-10 20:30:18 +03:00
Alexander Zhogov
11b84926d4 Azure Pipelines: exclude failed Mac test 2020-05-10 10:58:47 +03:00
Alexander Zhogov
112c58cc40 Azure Pipelines: set -j3 2020-05-08 18:40:14 +03:00
Andrey Babushkin
2430d96a3e Create .coveragerc 2020-05-06 23:38:42 +03:00
Alexey Suhov
64df940035 add scripts which download tests dependencies 2020-05-06 21:52:42 +03:00
Alexander Zhogov
67077e4aa7 Azure Pipelines: Update Mac options 2020-04-30 12:03:14 +03:00
Alexander Zhogov
5b009e9a38 Azure Pipelines: Fix test env on Windows 2020-04-30 01:30:33 +03:00
Andrey Babushkin
1bb752f1b8 Run pylint workflow on pull request events (#476)
Also remove pylint cmdline arguments to ignore import errors
2020-04-29 17:05:24 +03:00
Alexander Zhogov
5176df56dd Azure Pipelines: Fix test env 2020-04-29 15:34:04 +03:00
Alexander Zhogov
107d67e44d Azure Pipelines: exclude backend_api.config_unsupported from nGraph UT 2020-04-29 11:44:15 +03:00
Alexander Zhogov
833ff8b591 Add testdata to Azure Pipelines 2020-04-29 10:23:05 +03:00
Alexey Suhov
9314daeb3c fix NGRAPH_ONNX_IMPORT_ENABLE in cmake 2020-04-28 22:20:54 +03:00
Andrey Babushkin
aa2cb40f17 Add GitHub Actions workflow to run pylint against model optimizer (#474) 2020-04-28 18:41:12 +03:00
Alexander Zhogov
079e16c4d1 Update Azure Pipelines 2020-04-28 11:49:03 +03:00
Alexey Suhov
357cc7eb4c publish master branch snapshot, revision 0110d9c98fd7209589d06344f0d836f61d81f4b3 2020-04-27 21:21:29 +03:00
Alexander Zhogov
822692f526 Update Azure Pipelines 2020-04-17 12:37:19 +03:00
Alexander Zhogov
4ea5ac39fc Update Azure Pipelines 2020-04-16 20:52:03 +03:00
Alexey Suhov
67ac796715 Merge branch 'master' of https://github.com/opencv/dldt 2020-04-16 14:40:24 +03:00
Alexey Suhov
6300b1490d fixed BOM file for model optimizer 2020-04-16 14:38:57 +03:00
Alexander Zhogov
165c00fe6d Update Azure Pipelines 2020-04-16 13:54:47 +03:00
Alexander Zhogov
68bdd184ef Fix typo 2020-04-16 11:07:04 +03:00
Alexander Zhogov
56b67d7d1c Set up CI with Azure Pipelines 2020-04-16 11:02:27 +03:00
Alexey Suhov
ae03bda480 moved pylint configuration files 2020-04-15 21:46:27 +03:00
Alexey Suhov
127cbac5bc publish master branch snapshot, revision cdcab9d7ab48ffb0ee5629fabbfa06cb45debd9b 2020-04-15 19:01:57 +03:00
6526 changed files with 802664 additions and 533405 deletions

View File

@@ -1,19 +0,0 @@
BasedOnStyle: Google
IndentWidth: 4
UseTab: Never
---
Language: Cpp
Standard: Cpp11
AccessModifierOffset: -4
AllowAllArgumentsOnNextLine: false
AllowShortFunctionsOnASingleLine: Empty
AllowShortLambdasOnASingleLine: Empty
AlwaysBreakBeforeMultilineStrings: false
ColumnLimit: 120
DerivePointerAlignment: false
FixNamespaceComments: true
IndentCaseLabels: false
SpaceBeforeCpp11BracedList: true
SpaceBeforeCtorInitializerColon: false
---

55
.github/workflows/mo.yml vendored Normal file
View File

@@ -0,0 +1,55 @@
name: MO
on:
push:
paths:
- 'model-optimizer/**'
pull_request:
paths:
- 'model-optimizer/**'
jobs:
Pylint-UT:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v1
with:
python-version: 3.6
- name: Cache pip
uses: actions/cache@v1
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('model-optimizer/requirements*.txt') }}
restore-keys: |
${{ runner.os }}-pip-
${{ runner.os }}-
# tensorflow 1.15 causes modules import
# errors, most likely due to https://github.com/PyCQA/pylint/issues/2603
# for tensorflow.core.framework and tensorflow.contrib
- name: Install dependencies
run: |
python -m pip install --upgrade pip setuptools
# For Pylint
pip install tensorflow==1.14.0 tensorboard==1.14.0 tensorflow-estimator==1.14.0
# For UT
pip install unittest-xml-reporting==3.0.2
# MO requirements
pip install -r requirements.txt
pip install -r requirements_dev.txt
working-directory: model-optimizer
- name: Pylint
run: pylint -d C,R,W mo/ mo.py extensions/
working-directory: model-optimizer
- name: UT
run: |
export PYTHONPATH=$PYTHONPATH:`pwd`
export MO_ROOT=`pwd`
env
mkdir ../mo-ut-logs
python3 -m xmlrunner discover -p *_test.py --output=../mo-ut-logs
working-directory: model-optimizer

383
.gitignore vendored
View File

@@ -1,342 +1,71 @@
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
# build/artifact dirs
_*
# but ensure we don't skip __init__.py
!__init__.py
# User-specific files
*.suo
*.user
*.userosscache
*.sln.docstates
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
[Xx]64/
[Xx]86/
[Bb]uild/
bld/
[Bb]in/
[Oo]bj/
# PY.TEST
*.pyc
tests/integration/report.html
tests/integration/report.xml
tests/integration/assets/
tests/integration/__pycache__/
# Visual Studio 2015 cache/options directory
.vs/
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
# NUNIT
*.VisualState.xml
TestResult.xml
# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c
# DNX
project.lock.json
artifacts/
*_i.c
*_p.c
*_i.h
*.ilk
*.meta
*.obj
*.pch
*.pdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Chutzpah Test files
_Chutzpah*
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap
# TFS 2012 Local Workspace
$tf/
# Guidance Automation Toolkit
*.gpState
# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user
# JustCode is a .NET coding add-in
.JustCode
# TeamCity is a build add-in
_TeamCity*
# DotCover is a Code Coverage Tool
*.dotCover
# NCrunch
_NCrunch_*
.*crunch*.local.xml
nCrunchTemp_*
# MightyMoose
*.mm.*
AutoTest.Net/
# Web workbench (sass)
.sass-cache/
# Installshield output folder
[Ee]xpress/
# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html
# Click-Once directory
publish/
# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# TODO: Un-comment the next line if you do not want to checkin
# your web deploy settings because they may include unencrypted
# passwords
#*.pubxml
*.publishproj
# NuGet Packages
*.nupkg
# The packages folder can be ignored because of Package Restore
**/packages/*
# except build/, which is used as an MSBuild target.
!**/packages/build/
# Uncomment if necessary however generally it will be regenerated when needed
#!**/packages/repositories.config
# NuGet v3's project.json files produces more ignoreable files
*.nuget.props
*.nuget.targets
# Microsoft Azure Build Output
csx/
*.build.csdef
# Microsoft Azure Emulator
ecf/
rcf/
# Microsoft Azure ApplicationInsights config file
ApplicationInsights.config
# Windows Store app package directory
AppPackages/
BundleArtifacts/
# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!*.[Cc]ache/
# Others
ClientBin/
[Ss]tyle[Cc]op.*
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.pfx
*.publishsettings
node_modules/
orleans.codegen.cs
# RIA/Silverlight projects
Generated_Code/
# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
# SQL Server files
*.mdf
*.ldf
# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
# Microsoft Fakes
FakesAssemblies/
# GhostDoc plugin setting file
*.GhostDoc.xml
# Target VS files:
vsx64
# Node.js Tools for Visual Studio
.ntvs_analysis.dat
# Visual Studio 6 build log
*.plg
# Visual Studio 6 workspace options file
*.opt
# Visual Studio LightSwitch build output
**/*.HTMLClient/GeneratedArtifacts
**/*.DesktopClient/GeneratedArtifacts
**/*.DesktopClient/ModelManifest.xml
**/*.Server/GeneratedArtifacts
**/*.Server/ModelManifest.xml
_Pvt_Extensions
# LightSwitch generated files
GeneratedArtifacts/
ModelManifest.xml
# Paket dependency manager
.paket/paket.exe
# FAKE - F# Make
.fake/
*.filters
/External
/Output
/InferenceEngineMain/models
/Test
/HTTPClient/*.a
/InferenceEngineMain/newModels
# developer tools
*.idea
.vscode
cmake-build-*
.DS_Store
# For IDEA
.idea/
VS/
Xcode/
temp/
report/
.kdev4/
*.kdev4
*.kate-swp
/lin-build
/win-build
/CMakeFiles
*.stamp
*.depend
*.vcxproj
*.sln
/CMakeCache.txt
.vimprj/
build_IA32/
.dir-locals.el
GTAGS
GPATH
GRTAGS
GSYMS
**/tags
compile_commands.json
service/dot-net-service/Output
**/sublime_build
/.project
.vscode/
/vsx32
/service/dot-net-service/.klocwork/DotNetService
cmake-build-*/
/lin64
.gdb_history
bin/
build/
.local_vimrc
.ycm_extra_conf.py
tags
.gdb_history
.vimspector.json
doc/
!ngraph/doc
docs/build_documentation/work_dir/
inference-engine/plugins/
inference-engine/temp
inference-engine/report
.repo/
docs/template_plugin/html/
CMakeLists.txt.user
docs/IE_PLUGIN_DG/html/
# from Model Optimizer repo
.idea
.project
.cproject
.pydevproject
.settings
/bin/
/gen/
*.project
*.cproject
*.pydevproject
*.settings
*/gen/
__pycache__
*.swp
/config.xml
# Python-specific
.env3
*.env3
*.pyc
# Tests-specific
.coverage
htmlcov
pylint_report.txt
pylint_report_comments.txt
# Documentation-generated
docs/build
docs/source/_static
docs/source/_templates
docs/source/generated/
*.coverage
*htmlcov
*pylint_report.txt
*pylint_report_comments.txt
# Artifacts
/*.bin
/*.xml
/*.json
/*.so
/*.txt
/*.mapping
/*.dat
/*.svg
/model-optimizer/*.bin
/model-optimizer/*.xml
/model-optimizer/*.json
/model-optimizer/*.so
/model-optimizer/*.txt
/model-optimizer/*.pb
/model-optimizer/*.pbtxt
/model-optimizer/!CMakeLists.txt
/model-optimizer/*.mapping
/model-optimizer/*.dat
/model-optimizer/*.svg
# ngraph
ngraph/src/CPackConfig.cmake
ngraph/src/CPackSourceConfig.cmake
ngraph/src/VERSION
ngraph/src/gtest/
ngraph/src/json/
ngraph/src/ngraphConfig.cmake
ngraph/src/ngraphConfigVersion.cmake
ngraph/src/protobuf/
ngraph/src/src/
ngraph/src/test/

14
.gitmodules vendored
View File

@@ -2,7 +2,15 @@
path = inference-engine/thirdparty/ade
url = https://github.com/opencv/ade.git
ignore = dirty
[submodule "ngraph"]
path = ngraph
url = https://github.com/NervanaSystems/ngraph.git
[submodule "inference-engine/thirdparty/mkl-dnn"]
path = inference-engine/thirdparty/mkl-dnn
url = https://github.com/openvinotoolkit/oneDNN.git
ignore = dirty
[submodule "inference-engine/tests/ie_test_utils/common_test_utils/gtest"]
path = inference-engine/tests/ie_test_utils/common_test_utils/gtest
url = https://github.com/openvinotoolkit/googletest.git
ignore = dirty
[submodule "inference-engine/samples/thirdparty/gflags"]
path = inference-engine/samples/thirdparty/gflags
url = https://github.com/gflags/gflags.git
ignore = dirty

View File

@@ -9,24 +9,28 @@ cmake_policy(SET CMP0054 NEW)
# See https://blog.kitware.com/cmake-3-13-0-available-for-download/
if (APPLE)
# due to https://cmake.org/cmake/help/v3.12/policy/CMP0068.html
cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
if(CMAKE_GENERATOR STREQUAL "Xcode")
# due to https://gitlab.kitware.com/cmake/cmake/issues/14254
cmake_minimum_required(VERSION 3.12.0 FATAL_ERROR)
else()
# due to https://cmake.org/cmake/help/v3.12/policy/CMP0068.html
cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
endif()
else()
cmake_minimum_required(VERSION 3.7.2 FATAL_ERROR)
endif()
project(OpenVINO)
set(OpenVINO_MAIN_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(IE_MAIN_SOURCE_DIR ${OpenVINO_MAIN_SOURCE_DIR}/inference-engine)
set(CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
list(APPEND CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake")
include(CTest)
include(features)
# include developer package
include(developer_package NO_POLICY_SCOPE)
include(developer_package)
# These options are shared with 3rdparty plugins
# by means of developer package
@@ -37,7 +41,7 @@ include(dependencies)
message (STATUS "PROJECT ............................... " ${PROJECT_NAME})
message (STATUS "CMAKE_BINARY_DIR ...................... " ${CMAKE_BINARY_DIR})
message (STATUS "OpenVINO_MAIN_SOURCE_DIR .............. " ${OpenVINO_MAIN_SOURCE_DIR})
message (STATUS "IE_MAIN_SOURCE_DIR .............. " ${IE_MAIN_SOURCE_DIR})
message (STATUS "IE_MAIN_SOURCE_DIR .................... " ${IE_MAIN_SOURCE_DIR})
message (STATUS "CMAKE_GENERATOR ....................... " ${CMAKE_GENERATOR})
message (STATUS "CMAKE_C_COMPILER_ID ................... " ${CMAKE_C_COMPILER_ID})
message (STATUS "CMAKE_BUILD_TYPE ...................... " ${CMAKE_BUILD_TYPE})
@@ -61,33 +65,25 @@ function(build_ngraph)
else ()
ngraph_set(NGRAPH_ADDRESS_SANITIZER FALSE)
endif ()
ngraph_set(NGRAPH_TOOLS_ENABLE FALSE)
ngraph_set(NGRAPH_CPU_ENABLE FALSE)
ngraph_set(NGRAPH_INTERPRETER_ENABLE TRUE)
ngraph_set(NGRAPH_NOP_ENABLE FALSE)
ngraph_set(NGRAPH_GPUH_ENABLE FALSE)
ngraph_set(NGRAPH_GENERIC_CPU_ENABLE FALSE)
ngraph_set(NGRAPH_ENABLE_CPU_CONV_AUTO FALSE)
ngraph_set(NGRAPH_PYTHON_BUILD_ENABLE FALSE)
ngraph_set(NGRAPH_PLAIDML_ENABLE FALSE)
ngraph_set(NGRAPH_FAST_MATH_ENABLE FALSE)
ngraph_set(NGRAPH_JSON_ENABLE FALSE)
ngraph_set(NGRAPH_DYNAMIC_COMPONENTS_ENABLE FALSE)
ngraph_set(NGRAPH_NATIVE_ARCH_ENABLE FALSE)
if (NOT ANDROID)
ngraph_set(NGRAPH_UNIT_TEST_ENABLE TRUE)
ngraph_set(NGRAPH_UNIT_TEST_OPENVINO_ENABLE TRUE)
# ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE TRUE)
set(NGRAPH_ONNX_IMPORT_ENABLE TRUE CACHE BOOL "" FORCE)
if(ENABLE_TESTS)
ngraph_set(NGRAPH_UNIT_TEST_ENABLE TRUE)
ngraph_set(NGRAPH_IE_ENABLE TRUE)
else()
ngraph_set(NGRAPH_UNIT_TEST_ENABLE FALSE)
ngraph_set(NGRAPH_IE_ENABLE FALSE)
endif()
ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE TRUE)
else()
ngraph_set(NGRAPH_UNIT_TEST_ENABLE FALSE)
ngraph_set(NGRAPH_TEST_UTIL_ENABLE FALSE)
ngraph_set(NGRAPH_UNIT_TEST_OPENVINO_ENABLE FALSE)
ngraph_set(NGRAPH_IE_ENABLE FALSE)
ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE FALSE)
endif()
ngraph_set(NGRAPH_INTERPRETER_ENABLE TRUE)
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
if(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$")
ie_add_compiler_flags(-Wno-error=uninitialized -Wno-error=literal-conversion)
elseif(UNIX)
ie_add_compiler_flags(-Wno-error=maybe-uninitialized -Wno-error=return-type -fPIC)
@@ -100,7 +96,7 @@ function(build_ngraph)
if (UNIX)
ie_add_compiler_flags(-Wno-error=return-type -Wno-undef)
elseif(WIN32)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4308 /wd4146 /wd4703 /wd4244")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4308 /wd4146 /wd4703 /wd4244 /wd4819")
endif()
if(ENABLE_LTO)
@@ -112,10 +108,56 @@ function(build_ngraph)
set(SDL_cmake_included ON)
# set(NGRAPH_COMPONENT_PREFIX "deployment_tools/ngraph/")
add_subdirectory(ngraph)
set(NGRAPH_LIBRARIES ngraph PARENT_SCOPE)
endfunction()
build_ngraph()
add_subdirectory(inference-engine)
add_subdirectory(docs)
# cpack
# install setupvars
ie_cpack_add_component(setupvars REQUIRED)
if(UNIX)
install(PROGRAMS scripts/setupvars/setupvars.sh
DESTINATION bin
COMPONENT setupvars)
elseif(WIN32)
install(PROGRAMS scripts/setupvars/setupvars.bat
DESTINATION bin
COMPONENT setupvars)
endif()
# install install_dependencies
if(UNIX)
ie_cpack_add_component(install_dependencies REQUIRED)
install(DIRECTORY scripts/install_dependencies/
DESTINATION install_dependencies
COMPONENT install_dependencies)
endif()
# install files for demo
ie_cpack_add_component(demo_scripts REQUIRED DEPENDS core)
if(UNIX)
install(DIRECTORY scripts/demo/
DESTINATION deployment_tools/demo
COMPONENT demo_scripts
USE_SOURCE_PERMISSIONS
PATTERN *.bat EXCLUDE)
elseif(WIN32)
install(DIRECTORY scripts/demo/
DESTINATION deployment_tools/demo
COMPONENT demo_scripts
USE_SOURCE_PERMISSIONS
PATTERN *.sh EXCLUDE)
endif()
ie_cpack(${IE_CPACK_COMPONENTS_ALL})

66
CODEOWNERS Normal file
View File

@@ -0,0 +1,66 @@
# See help here: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners
* @openvinotoolkit/openvino-maintainers
CODEOWNERS @openvinotoolkit/openvino-admins @openvinotoolkit/openvino-maintainers
# CI:
Jenkinsfile @openvinotoolkit/openvino-admins
azure-pipelines.yml @openvinotoolkit/openvino-admins
/.github/ @openvinotoolkit/openvino-admins
# QA Tests:
/tests/ @openvinotoolkit/openvino-tests-maintainers
# IE Core:
/inference-engine/ @openvinotoolkit/openvino-ie-maintainers
/inference-engine/src/transformations/ @GlebKazantaev @ichuraev
/inference-engine/src/legacy_api/ @openvinotoolkit/openvino-ngraph-maintainers
/inference-engine/src/readers/ @openvinotoolkit/openvino-ngraph-maintainers
# IE CPU:
/inference-engine/src/mkldnn_plugin/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
/inference-engine/src/low_precision_transformations/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
/inference-engine/thirdparty/mkl-dnn/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
# IE GPU:
/inference-engine/src/cldnn_engine/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/inference-engine/include/gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/inference-engine/include/cldnn/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/inference-engine/thirdparty/clDNN/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
# IE VPU:
/inference-engine/src/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers
/inference-engine/include/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers
/inference-engine/thirdparty/movidius/ @openvinotoolkit/openvino-ie-vpu-maintainers
/inference-engine/tests_deprecated/unit/engines/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests_deprecated/functional/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests_deprecated/behavior/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests/functional/plugin/myriad/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests/unit/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests/unit/engines/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tools/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers
/inference-engine/scripts/run_tests_myriad_multistick.sh @openvinotoolkit/openvino-ie-vpu-maintainers
# IE GNA:
/inference-engine/src/gna_plugin/ @openvinotoolkit/openvino-ie-gna-maintainers
/inference-engine/include/gna/ @openvinotoolkit/openvino-ie-gna-maintainers
# IE MULTI:
/inference-engine/src/multi_device/ @openvinotoolkit/openvino-ie-multi-maintainers
/inference-engine/include/multi-device/ @openvinotoolkit/openvino-ie-multi-maintainers
# IE Tests:
/inference-engine/tests/ @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests_deprecated/ @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests/functional/inference_engine/ngraph_reader/ @openvinotoolkit/openvino-ie-tests-maintainers @openvinotoolkit/openvino-ngraph-maintainers
/inference-engine/tests/functional/inference_engine/transformations/ @openvinotoolkit/openvino-ie-tests-maintainers @openvinotoolkit/openvino-ngraph-maintainers
# MO:
/model-optimizer/ @openvinotoolkit/openvino-mo-maintainers
# nGraph:
/ngraph/ @openvinotoolkit/openvino-ngraph-maintainers
# Tools
/tools/ @openvinotoolkit/openvino-tools-maintainers

18
CONTRIBUTING.md Normal file
View File

@@ -0,0 +1,18 @@
# How to Contribute
We welcome community contributions to the OpenVINO™ repository.
If you have an idea how to improve the product, please share it
with us doing the following steps:
* Make sure you can build the product and run all tests and samples with your patch
* In case of a larger feature, provide relevant unit tests and one or more sample
* Submit a pull request at https://github.com/openvinotoolkit/openvino/pulls
## OpenVINO™ Coding Style Guide
We basically use the Google style (https://google.github.io/styleguide/cppguide.html) with some exceptions:
* 4 spaces instead of 2 spaces for indentations
* Line length is limited to 160 characters
* Exceptions are allowed
* `using namespace` directives are allowed in .cpp files and prohibited in headers
* Underscore symbol before member in classes/structures
* thisStyleForFunctions()
* theSameStyleForVariables

10
Jenkinsfile vendored Executable file
View File

@@ -0,0 +1,10 @@
#!groovy
properties([
parameters([
booleanParam(defaultValue: true,
description: 'Cancel the rest of parallel stages if one of them fails and return status immediately',
name: 'failFast')
])
])
dldtPipelineEntrypoint(this)

View File

@@ -1,5 +1,5 @@
# [OpenVINO™ Toolkit](https://01.org/openvinotoolkit) - Deep Learning Deployment Toolkit repository
[![Stable release](https://img.shields.io/badge/version-2020.1-green.svg)](https://github.com/opencv/dldt/releases/tag/2020.1)
[![Stable release](https://img.shields.io/badge/version-2020.4-green.svg)](https://github.com/openvinotoolkit/openvino/releases/tag/2020.4.0)
[![Apache License Version 2.0](https://img.shields.io/badge/license-Apache_2.0-green.svg)](LICENSE)
This toolkit allows developers to deploy pre-trained deep learning models
@@ -30,23 +30,13 @@ and release your contribution under these terms.
* [Model Optimizer Developer Guide](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html)
## How to Contribute
We welcome community contributions to the Deep Learning Deployment Toolkit
repository. If you have an idea how to improve the product, please share it
with us doing the following steps:
* Make sure you can build the product and run all tests and samples with your patch
* In case of a larger feature, provide relevant unit tests and one or more sample
* Submit a pull request at https://github.com/opencv/dldt/pulls
We will review your contribution and, if any additional fixes or modifications
are necessary, may give some feedback to guide you. Your pull request will be
merged into GitHub* repositories if accepted.
See [CONTRIBUTING](./CONTRIBUTING.md) for details. Thank you!
## Support
Please report questions, issues and suggestions using:
* The `openvino` [tag on StackOverflow]\*
* [GitHub* Issues](https://github.com/opencv/dldt/issues)
* [GitHub* Issues](https://github.com/openvinotoolkit/openvino/issues)
* [Forum](https://software.intel.com/en-us/forums/computer-vision)
---

333
azure-pipelines.yml Normal file
View File

@@ -0,0 +1,333 @@
jobs:
- job: Lin
# About 150% of total time
timeoutInMinutes: 75
pool:
#vmImage: 'ubuntu-18.04'
name: LIN_VMSS_VENV_F8S_WU2
variables:
BUILD_TYPE: Release
BIN_DIR: ../bin/intel64/$(BUILD_TYPE)
steps:
- script: |
whoami
uname -a
which python3
gcc --version
lsb_release
env
cat /proc/cpuinfo
cat /proc/meminfo
vmstat -s
df
displayName: 'System properties'
- script: |
sudo apt --assume-yes install libusb-1.0-0-dev
python3 -m pip install -r ./inference-engine/ie_bridges/python/requirements.txt
# For running Python API tests
python3 -m pip install -r ./inference-engine/ie_bridges/python/src/requirements-dev.txt
displayName: 'Install dependencies'
- script: |
wget https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-linux.zip
unzip ninja-linux.zip
sudo cp -v ninja /usr/local/bin/
displayName: 'Install Ninja'
- script: git submodule update --init --recursive --jobs 8
displayName: 'Clone submodules'
- script: |
mkdir dldt-build
cd dldt-build
displayName: 'Create build directory'
- task: CMake@1
inputs:
workingDirectory: dldt-build
# CMake must get Python 3.x version by default
cmakeArgs: .. -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=/usr/bin/python3.6 -DENABLE_TESTS=ON
- script: ninja
workingDirectory: dldt-build
displayName: 'Build Lin'
- script: ls -alR ../bin/
workingDirectory: dldt-build
displayName: 'List files'
- script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU*
workingDirectory: dldt-build
displayName: 'nGraph UT'
continueOnError: false
- script: $(BIN_DIR)/InferenceEngineUnitTests
workingDirectory: dldt-build
displayName: 'IE UT old'
continueOnError: false
- script: $(BIN_DIR)/ieUnitTests
workingDirectory: dldt-build
displayName: 'IE UT'
continueOnError: false
- script: $(BIN_DIR)/cpuUnitTests
workingDirectory: dldt-build
displayName: 'CPU UT'
continueOnError: false
- script: $(BIN_DIR)/gnaUnitTests
workingDirectory: dldt-build
displayName: 'GNA UT'
continueOnError: false
- script: $(BIN_DIR)/vpuUnitTests
workingDirectory: dldt-build
displayName: 'VPU UT'
continueOnError: false
- script: $(BIN_DIR)/ieFuncTests
workingDirectory: dldt-build
displayName: 'IE FuncTests'
continueOnError: false
- script: $(BIN_DIR)/cpuFuncTests
workingDirectory: dldt-build
displayName: 'CPU FuncTests'
continueOnError: false
- script: $(BIN_DIR)/MklDnnBehaviorTests
workingDirectory: dldt-build
displayName: 'MklDnnBehaviorTests'
continueOnError: false
- script: git clone https://github.com/openvinotoolkit/testdata.git
displayName: 'Clone testdata'
- script: |
export DATA_PATH=`pwd`/../testdata
export MODELS_PATH=`pwd`/../testdata
$(BIN_DIR)/MklDnnFunctionalTests --gtest_filter=*smoke*:-smoke_MobileNet/ModelTransformationsTest.LPT/mobilenet_v2_tf_depthwise_batch1_inPluginDisabled_inTestDisabled_asymmetric*
workingDirectory: dldt-build
displayName: 'MklDnnFunctionalTests'
continueOnError: false
- script: |
export DATA_PATH=`pwd`/../testdata
export MODELS_PATH=`pwd`/../testdata
$(BIN_DIR)/InferenceEngineCAPITests
workingDirectory: dldt-build
displayName: 'IE CAPITests'
continueOnError: false
- script: |
export DATA_PATH=`pwd`/../testdata
export MODELS_PATH=`pwd`/../testdata
export LD_LIBRARY_PATH=`pwd`/$(BIN_DIR)/lib
export PYTHONPATH=`pwd`/$(BIN_DIR)/lib/python_api/python3.6
env
cd ../inference-engine/ie_bridges/python/tests
pytest
workingDirectory: dldt-build
displayName: 'Python API Tests'
continueOnError: false
enabled: false
- job: Mac
# About 200% of total time (performance of Mac hosts is unstable)
timeoutInMinutes: 180
pool:
vmImage: 'macOS-10.15'
variables:
BUILD_TYPE: Release
BIN_DIR: ../bin/intel64/$(BUILD_TYPE)
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.7'
- script: |
whoami
uname -a
which python3
gcc --version
xcrun --sdk macosx --show-sdk-version
env
sysctl -a
displayName: 'System properties'
- script: |
brew install cython
brew install automake
displayName: 'Install dependencies'
- script: brew install ninja
displayName: 'Install Ninja'
- script: git submodule update --init --recursive --jobs 8
displayName: 'Clone submodules'
- script: |
mkdir dldt-build
cd dldt-build
displayName: 'Create build directory'
- script: |
export PATH="/usr/local/opt/cython/bin:$PATH"
export CC=gcc
export CXX=g++
# Disable errors with Ninja
export CXXFLAGS="-Wno-error=unused-command-line-argument"
export CFLAGS="-Wno-error=unused-command-line-argument"
cmake .. -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DENABLE_TESTS=ON
workingDirectory: dldt-build
displayName: 'CMake'
- script: ninja
workingDirectory: dldt-build
displayName: 'Build Mac'
- script: ls -alR ../bin/
workingDirectory: dldt-build
displayName: 'List files'
- script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU*:IE_CPU.onnx_model_sigmoid
workingDirectory: dldt-build
displayName: 'nGraph UT'
continueOnError: false
- script: $(BIN_DIR)/InferenceEngineUnitTests
workingDirectory: dldt-build
displayName: 'IE UT old'
continueOnError: false
- script: $(BIN_DIR)/ieUnitTests
workingDirectory: dldt-build
displayName: 'IE UT'
continueOnError: false
- script: $(BIN_DIR)/cpuUnitTests
workingDirectory: dldt-build
displayName: 'CPU UT'
continueOnError: false
- script: $(BIN_DIR)/vpuUnitTests
workingDirectory: dldt-build
displayName: 'VPU UT'
continueOnError: false
- script: $(BIN_DIR)/ieFuncTests
workingDirectory: dldt-build
displayName: 'IE FuncTests'
continueOnError: false
- script: $(BIN_DIR)/cpuFuncTests
workingDirectory: dldt-build
displayName: 'CPU FuncTests'
continueOnError: false
- script: $(BIN_DIR)/MklDnnBehaviorTests
workingDirectory: dldt-build
displayName: 'MklDnnBehaviorTests'
continueOnError: false
- script: git clone https://github.com/openvinotoolkit/testdata.git
displayName: 'Clone testdata'
- script: |
export DATA_PATH=`pwd`/../testdata
export MODELS_PATH=`pwd`/../testdata
$(BIN_DIR)/MklDnnFunctionalTests --gtest_filter=*smoke*:-smoke_MobileNet/ModelTransformationsTest.LPT/mobilenet_v2_tf_depthwise_batch1_inPluginDisabled_inTestDisabled_asymmetric*
workingDirectory: dldt-build
displayName: 'MklDnnFunctionalTests'
continueOnError: false
- script: |
export DATA_PATH=`pwd`/../testdata
export MODELS_PATH=`pwd`/../testdata
$(BIN_DIR)/InferenceEngineCAPITests
workingDirectory: dldt-build
displayName: 'IE CAPITests'
continueOnError: false
- job: Win
# About 150% of total time
timeoutInMinutes: 120
pool:
#vmImage: 'vs2017-win2016'
name: WIN_VMSS_VENV_F8S_WU2
variables:
BUILD_TYPE: Release
BUILD_DIR: D:\dldt-build
BIN_DIR: ..\bin\intel64
MSVS_VARS_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe
steps:
- script: |
where python3
wmic computersystem get TotalPhysicalMemory
wmic cpu list
wmic logicaldisk get description,name
wmic VOLUME list
set
displayName: 'System properties'
- script: |
certutil -urlcache -split -f https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-win.zip ninja-win.zip
powershell -command "Expand-Archive -Force ninja-win.zip"
displayName: Install Ninja
- script: git submodule update --init --recursive --jobs 8
displayName: 'Clone submodules'
- script: |
rd /Q /S $(BUILD_DIR)
mkdir $(BUILD_DIR)\bin
rd /Q /S dldt-build
mkdir dldt-build
displayName: 'Create build directory'
- script: |
set PATH=$(Build.Repository.LocalPath)\ninja-win;%PATH%
call "$(MSVS_VARS_PATH)" && cmake -GNinja -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(Build.Repository.LocalPath)
workingDirectory: $(BUILD_DIR)
displayName: 'CMake'
- script: |
set PATH=$(Build.Repository.LocalPath)\ninja-win;%PATH%
call "$(MSVS_VARS_PATH)" && ninja
workingDirectory: $(BUILD_DIR)
displayName: 'Build Win'
- script: dir ..\bin\ /s /b
workingDirectory: dldt-build
displayName: 'List files'
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU*
workingDirectory: dldt-build
displayName: 'nGraph UT'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\InferenceEngineUnitTests
workingDirectory: dldt-build
displayName: 'IE UT old'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\ieUnitTests
workingDirectory: dldt-build
displayName: 'IE UT'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\cpuUnitTests
workingDirectory: dldt-build
displayName: 'CPU UT'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\gnaUnitTests
workingDirectory: dldt-build
displayName: 'GNA UT'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\vpuUnitTests
workingDirectory: dldt-build
displayName: 'VPU UT'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\ieFuncTests
workingDirectory: dldt-build
displayName: 'IE FuncTests'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\cpuFuncTests
workingDirectory: dldt-build
displayName: 'CPU FuncTests'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\MklDnnBehaviorTests
workingDirectory: dldt-build
displayName: 'MklDnnBehaviorTests'
continueOnError: false
- script: git clone https://github.com/openvinotoolkit/testdata.git
workingDirectory: $(BUILD_DIR)
displayName: 'Clone testdata'
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;$(Build.Repository.LocalPath)\inference-engine\temp\opencv_4.3.0\opencv\bin;%PATH%
set DATA_PATH=$(BUILD_DIR)\testdata
set MODELS_PATH=$(BUILD_DIR)\testdata
$(BIN_DIR)\MklDnnFunctionalTests --gtest_filter=*smoke*:-smoke_MobileNet/ModelTransformationsTest.LPT/mobilenet_v2_tf_depthwise_batch1_inPluginDisabled_inTestDisabled_asymmetric*
workingDirectory: dldt-build
displayName: 'MklDnnFunctionalTests'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;$(Build.Repository.LocalPath)\inference-engine\temp\opencv_4.3.0\opencv\bin;%PATH%
set DATA_PATH=$(BUILD_DIR)\testdata
set MODELS_PATH=$(BUILD_DIR)\testdata
$(BIN_DIR)\InferenceEngineCAPITests
workingDirectory: dldt-build
displayName: 'IE CAPITests'
continueOnError: false

View File

@@ -28,7 +28,6 @@
- [Add Inference Engine to Your Project](#add-inference-engine-to-your-project)
- [(Optional) Additional Installation Steps for the Intel® Movidius™ Neural Compute Stick and Neural Compute Stick 2](#optional-additional-installation-steps-for-the-intel-movidius-neural-compute-stick-and-neural-compute-stick-2)
- [For Linux, Raspbian Stretch* OS](#for-linux-raspbian-stretch-os)
- [For Windows](#for-windows-1)
- [Next Steps](#next-steps)
- [Additional Resources](#additional-resources)
@@ -53,19 +52,20 @@ as a part of [Intel® Distribution of OpenVINO™].
## Build on Linux\* Systems
The software was validated on:
- Ubuntu\* 18.04 (64-bit) with default GCC\* 7.5.0
- Ubuntu\* 16.04 (64-bit) with default GCC\* 5.4.0
- CentOS\* 7.4 (64-bit) with default GCC\* 4.8.5
### Software Requirements
- [CMake]\* 3.11 or higher
- GCC\* 4.8 or higher to build the Inference Engine
- Python 2.7 or higher for Inference Engine Python API wrapper
- Python 3.5 or higher for Inference Engine Python API wrapper
- (Optional) [Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.41.14441].
### Build Steps
1. Clone submodules:
```sh
cd dldt
cd openvino
git submodule update --init --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the
@@ -172,10 +172,10 @@ Native compilation of the Inference Engine is the most straightforward solution.
sudo apt-get install -y git cmake libusb-1.0-0-dev
```
2. Go to the cloned `dldt` repository:
2. Go to the cloned `openvino` repository:
```bash
cd dldt
cd openvino
```
3. Initialize submodules:
@@ -203,7 +203,7 @@ Native compilation of the Inference Engine is the most straightforward solution.
This compilation was tested on the following configuration:
* Host: Ubuntu\* 16.04 (64-bit, Intel® Core™ i7-6700K CPU @ 4.00GHz × 8)
* Host: Ubuntu\* 18.04 (64-bit, Intel® Core™ i7-6700K CPU @ 4.00GHz × 8)
* Target: Raspbian\* Stretch (32-bit, ARMv7, Raspberry Pi\* 3)
1. Install Docker\*:
@@ -262,15 +262,15 @@ with the following content:
5. Run Docker\* container with mounted source code folder from host:
```bash
docker run -it -v /absolute/path/to/dldt:/dldt ie_cross_armhf /bin/bash
docker run -it -v /absolute/path/to/openvino:/openvino ie_cross_armhf /bin/bash
```
6. While in the container:
1. Go to the cloned `dldt` repository:
1. Go to the cloned `openvino` repository:
```bash
cd dldt
cd openvino
```
2. Create a build folder:
@@ -291,8 +291,8 @@ with the following content:
```
7. Press **Ctrl+D** to exit from Docker. You can find the resulting binaries
in the `dldt/bin/armv7l/` directory and the OpenCV*
installation in the `dldt/inference-engine/temp`.
in the `openvino/bin/armv7l/` directory and the OpenCV*
installation in the `openvino/inference-engine/temp`.
>**NOTE**: Native applications that link to cross-compiled Inference Engine
library require an extra compilation flag `-march=armv7-a`.
@@ -338,7 +338,7 @@ The software was validated on:
- [CMake]\*3.11 or higher
- Microsoft\* Visual Studio 2017, 2019 or [Intel® C++ Compiler] 18.0
- (Optional) Intel® Graphics Driver for Windows* (26.20) [driver package].
- Python 3.4 or higher for Inference Engine Python API wrapper
- Python 3.5 or higher for Inference Engine Python API wrapper
### Build Steps
@@ -381,8 +381,8 @@ cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
6. Before running the samples, add paths to the TBB and OpenCV binaries used for
the build to the `%PATH%` environment variable. By default, TBB binaries are
downloaded by the CMake-based script to the `<dldt_repo>/inference-engine/temp/tbb/bin`
folder, OpenCV binaries to the `<dldt_repo>/inference-engine/temp/opencv_4.3.0/opencv/bin`
downloaded by the CMake-based script to the `<openvino_repo>/inference-engine/temp/tbb/bin`
folder, OpenCV binaries to the `<openvino_repo>/inference-engine/temp/opencv_4.3.0/opencv/bin`
folder.
### Additional Build Options
@@ -437,7 +437,7 @@ cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
call "C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\bin\ipsxe-comp-vars.bat" intel64 vs2017
set CXX=icl
set CC=icl
:: clean TBBROOT value set by ipsxe-comp-vars.bat, required TBB package will be downloaded by dldt cmake script
:: clean TBBROOT value set by ipsxe-comp-vars.bat, required TBB package will be downloaded by openvino cmake script
set TBBROOT=
cmake -G Ninja -Wno-dev -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --config Release
@@ -455,13 +455,13 @@ The software was validated on:
- [CMake]\* 3.11 or higher
- Clang\* compiler from Xcode\* 10.1 or higher
- Python\* 3.4 or higher for the Inference Engine Python API wrapper
- Python\* 3.5 or higher for the Inference Engine Python API wrapper
### Build Steps
1. Clone submodules:
```sh
cd dldt
cd openvino
git submodule update --init --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the
@@ -545,7 +545,7 @@ This section describes how to build Inference Engine for Android x86 (64-bit) op
2. Clone submodules
```sh
cd dldt
cd openvino
git submodule update --init --recursive
```
@@ -575,8 +575,7 @@ This section describes how to build Inference Engine for Android x86 (64-bit) op
## Use Custom OpenCV Builds for Inference Engine
> **NOTE**: The recommended and tested version of OpenCV is 4.3. The minimum
supported version is 3.4.0.
> **NOTE**: The recommended and tested version of OpenCV is 4.4.0.
Required versions of OpenCV packages are downloaded automatically during the
building Inference Engine library. If the build script can not find and download
@@ -610,7 +609,7 @@ before running the Inference Engine build:
For CMake projects, set the `InferenceEngine_DIR` environment variable:
```sh
export InferenceEngine_DIR=/path/to/dldt/build/
export InferenceEngine_DIR=/path/to/openvino/build/
```
Then you can find Inference Engine by `find_package`:
@@ -660,26 +659,12 @@ sudo ldconfig
rm 97-myriad-usbboot.rules
```
### For Windows
For Intel® Movidius™ Neural Compute Stick and Intel® Neural Compute Stick 2,
install the Movidius™ VSC driver:
1. Go to the `<DLDT_ROOT_DIR>/inference-engine/thirdparty/movidius/MovidiusDriver`
directory, where the `DLDT_ROOT_DIR` is the directory to which the DLDT
repository was cloned.
2. Right click on the `Movidius_VSC_Device.inf` file and choose **Install** from
the pop-up menu.
You have installed the driver for your Intel® Movidius™ Neural Compute Stick
or Intel® Neural Compute Stick 2.
## Next Steps
Congratulations, you have built the Inference Engine. To get started with the
OpenVINO™, proceed to the Get Started guides:
* [Get Started with Deep Learning Deployment Toolkit on Linux*](../get-started-linux.md)
* [Get Started with Deep Learning Deployment Toolkit on Linux*](get-started-linux.md)
## Notice

View File

@@ -20,14 +20,18 @@ if (NOT ENABLE_MKL_DNN)
endif()
if(ENABLE_AVX512F)
if ((CMAKE_CXX_COMPILER_ID MATCHES MSVC) AND (MSVC_VERSION VERSION_LESS 1920))
if ((CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") AND (MSVC_VERSION VERSION_LESS 1920))
# 1920 version of MSVC 2019. In MSVC 2017 AVX512F not work
set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE)
endif()
if (CMAKE_CXX_COMPILER_ID MATCHES Clang)
if ((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6))
set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE)
endif()
if ((CMAKE_CXX_COMPILER_ID STREQUAL GNU) AND (NOT (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9)))
if ((CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 10))
# TBD: clarify which AppleClang version supports avx512
set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE)
endif()
if ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9))
set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE)
endif()
endif()

View File

@@ -4,14 +4,17 @@
if(NOT TARGET ie_coverage_clean)
add_custom_target(ie_coverage_clean)
set_target_properties(ie_coverage_clean PROPERTIES FOLDER coverage)
endif()
if(NOT TARGET ie_coverage_init)
add_custom_target(ie_coverage_init)
set_target_properties(ie_coverage_init PROPERTIES FOLDER coverage)
endif()
if(NOT TARGET ie_coverage)
add_custom_target(ie_coverage)
set_target_properties(ie_coverage PROPERTIES FOLDER coverage)
endif()
set(IE_COVERAGE_REPORTS "${CMAKE_BINARY_DIR}/coverage")
@@ -26,10 +29,10 @@ function(ie_coverage_clean)
cmake_parse_arguments(IE_COVERAGE "" "REPOSITORY;DIRECTORY" "" ${ARGN})
add_custom_target(ie_coverage_zerocounters_${IE_COVERAGE_REPOSITORY}
COMMAND lcov --zerocounters --quiet
--directory "${IE_COVERAGE_DIRECTORY}"
COMMENT "Add zero counters for coverage for ${IE_COVERAGE_REPOSITORY}"
VERBATIM)
COMMAND lcov --zerocounters --quiet
--directory "${IE_COVERAGE_DIRECTORY}"
COMMENT "Add zero counters for coverage for ${IE_COVERAGE_REPOSITORY}"
VERBATIM)
add_custom_target(ie_coverage_clean_${IE_COVERAGE_REPOSITORY}
COMMAND ${CMAKE_COMMAND}
@@ -42,6 +45,10 @@ function(ie_coverage_clean)
DEPENDS "${IE_COVERAGE_SCRIPT_DIR}/coverage_clean.cmake"
VERBATIM)
set_target_properties(ie_coverage_zerocounters_${IE_COVERAGE_REPOSITORY}
ie_coverage_clean_${IE_COVERAGE_REPOSITORY}
PROPERTIES FOLDER coverage)
add_dependencies(ie_coverage_clean ie_coverage_zerocounters_${IE_COVERAGE_REPOSITORY}
ie_coverage_clean_${IE_COVERAGE_REPOSITORY})
endfunction()
@@ -87,6 +94,8 @@ function(ie_coverage_capture)
add_custom_target(ie_coverage_${IE_COVERAGE_INFO_FILE}_info
DEPENDS ${output_file})
set_target_properties(ie_coverage_${IE_COVERAGE_INFO_FILE}_info
PROPERTIES FOLDER coverage)
endfunction()
#
@@ -111,6 +120,8 @@ function(ie_coverage_extract)
VERBATIM)
add_custom_target(ie_coverage_${IE_COVERAGE_OUTPUT}_info
DEPENDS ${output_file})
set_target_properties(ie_coverage_${IE_COVERAGE_OUTPUT}_info
PROPERTIES FOLDER coverage)
add_dependencies(ie_coverage_${IE_COVERAGE_OUTPUT}_info ie_coverage_${IE_COVERAGE_INPUT}_info)
endfunction()
@@ -137,6 +148,8 @@ function(ie_coverage_remove)
VERBATIM)
add_custom_target(ie_coverage_${IE_COVERAGE_OUTPUT}_info
DEPENDS ${output_file})
set_target_properties(ie_coverage_${IE_COVERAGE_OUTPUT}_info
PROPERTIES FOLDER coverage)
add_dependencies(ie_coverage_${IE_COVERAGE_OUTPUT}_info ie_coverage_${IE_COVERAGE_INPUT}_info)
endfunction()
@@ -164,6 +177,8 @@ function(ie_coverage_merge)
VERBATIM)
add_custom_target(ie_coverage_${IE_COVERAGE_OUTPUT}_info
DEPENDS ${output_file})
set_target_properties(ie_coverage_${IE_COVERAGE_OUTPUT}_info
PROPERTIES FOLDER coverage)
add_dependencies(ie_coverage_${IE_COVERAGE_OUTPUT}_info ${dependencies})
endfunction()
@@ -188,6 +203,8 @@ function(ie_coverage_genhtml)
VERBATIM)
add_custom_target(ie_coverage_${IE_COVERAGE_INFO_FILE}_genhtml
DEPENDS "${output_directory}/index.html")
set_target_properties(ie_coverage_${IE_COVERAGE_INFO_FILE}_genhtml
PROPERTIES FOLDER coverage)
add_dependencies(ie_coverage_${IE_COVERAGE_INFO_FILE}_genhtml ie_coverage_${IE_COVERAGE_INFO_FILE}_info)
add_dependencies(ie_coverage ie_coverage_${IE_COVERAGE_INFO_FILE}_genhtml)

View File

@@ -0,0 +1,105 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# =================================================================
#
# Generates cpp file with dispatcher for cross compiled function
# Parameters:
# XARCH_API_HEADER -- path to header with function declaration
# XARCH_FUNC_NAME -- name of function to dispatch
# XARCH_NAMESPACES -- full namespace used to keep ODR
# XARCH_DISP_FILE -- dispatcher file name to generate
# XARCH_SET -- set of ARCH supported by dispatcher. space delimited
#
# =================================================================
set(_CPU_CHECK_ANY "true")
set(_CPU_CHECK_SSE42 "with_cpu_x86_sse42()")
set(_CPU_CHECK_AVX "with_cpu_x86_avx()")
set(_CPU_CHECK_AVX2 "with_cpu_x86_avx2()")
set(_CPU_CHECK_AVX512F "with_cpu_x86_avx512f()")
function(_generate_dispatcher)
_find_signature_in_file(${XARCH_API_HEADER} ${XARCH_FUNC_NAME} SIGNATURE)
_generate_call_line_from_signature("${SIGNATURE}" CALL_LINE)
string(REPLACE " " ";" XARCH_SET "${XARCH_SET}")
string(REPLACE "::" ";" XARCH_NAMESPACES "${XARCH_NAMESPACES}")
list(GET XARCH_NAMESPACES -1 XARCH_CURRENT_NAMESPACE)
set(PARENT_NAMESPACES ${XARCH_NAMESPACES})
list(REMOVE_AT PARENT_NAMESPACES -1)
set(DISP_CONTENT
"
//
// Auto generated file by CMake macros cross_compiled_file()
// !! do not modify it !!!
//
#include \"${XARCH_API_HEADER}\"
#include \"ie_system_conf.h\"
")
foreach(_namespace ${PARENT_NAMESPACES})
string(APPEND DISP_CONTENT
"namespace ${_namespace} {\n")
endforeach()
foreach(_arch ${XARCH_SET})
string(APPEND DISP_CONTENT
"namespace ${_arch} {\n ${SIGNATURE}\; \n}\n")
endforeach()
string(APPEND DISP_CONTENT
"namespace ${XARCH_CURRENT_NAMESPACE} {\n\n${SIGNATURE} {\n")
foreach(_arch ${XARCH_SET})
string(APPEND DISP_CONTENT
" if (${_CPU_CHECK_${_arch}}) {\n return ${_arch}::${CALL_LINE}\;\n }\n")
endforeach()
string(APPEND DISP_CONTENT "}\n\n}\n")
foreach(_namespace ${PARENT_NAMESPACES})
string(APPEND DISP_CONTENT "} // namespace ${_namespace}\n")
endforeach()
file(WRITE ${XARCH_DISP_FILE} ${DISP_CONTENT})
endfunction()
function(_find_signature_in_file FILE FUNCTION RESULT_NAME)
file(READ "${FILE}" CONTENT)
set(valid_chars "<>:_*& a-zA-Z0-9\n") ## valid chars for type/var specification (including new line /n)
string(REGEX MATCH "[${valid_chars}]*${FUNCTION}[ ]*[(][=,${valid_chars}]*[)]" SIGNATURE ${CONTENT})
string(STRIP "${SIGNATURE}" SIGNATURE)
set (${RESULT_NAME} "${SIGNATURE}" PARENT_SCOPE)
endfunction()
function(_generate_call_line_from_signature SIGNATURE RESULT_NAME)
## extract func name
set(_name ${SIGNATURE})
string(REGEX REPLACE "[ ]*[(].*[)]" "" _name "${_name}") # remove arguments
string(REGEX MATCH "[a-zA-Z0-9_]*[ ]*$" _name "${_name}") # extract func name
set(nt_chars "[:_*& a-zA-Z0-9\n]*") ## any sequence of chars to describe object type (no template)
## extract arg names
set(_args ${SIGNATURE})
string(REGEX MATCH "[(].*[)]" _args "${_args}") # extract args with types, all inside brackets
string(REGEX REPLACE "<${nt_chars},${nt_chars}>" "" _args "${_args}") # remove template brackets with ','
string(REPLACE "(" "" _args ${_args})
string(REPLACE ")" "" _args ${_args})
string(REPLACE "," ";" _args ${_args}) # now it's list
foreach(_arg_elem ${_args})
string(REGEX MATCH "[a-zA-Z0-9_]*[ ]*$" _arg_elem "${_arg_elem}")
list(APPEND _arg_names ${_arg_elem})
endforeach()
string(REPLACE ";" ", " _arg_names "${_arg_names}") # back to comma separated string
set (${RESULT_NAME} "${_name}(${_arg_names})" PARENT_SCOPE)
endfunction()
_generate_dispatcher()

View File

@@ -0,0 +1,16 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# =================================================================
#
# This file is used to add dependency on option value. If the args
# was changes the configure file will be updated. And the dependent
# add_custom_command will rerun.
#
# Otherwise the changing of CMake options will not have affect on
# generated file.
#
# =================================================================
@_GEN_ARGS_LIST@

View File

@@ -0,0 +1,227 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
## list of available instruction sets
set(_ARCH_LIST ANY SSE42 AVX AVX2 AVX512F)
set(_ACCEPTED_ARCHS_ANY "^(ANY)$")
set(_ACCEPTED_ARCHS_SSE42 "^(ANY|SSE42)$")
set(_ACCEPTED_ARCHS_AVX "^(ANY|SSE42|AVX)$")
set(_ACCEPTED_ARCHS_AVX2 "^(ANY|SSE42|AVX|AVX2)$")
set(_ACCEPTED_ARCHS_AVX512F "^(ANY|SSE42|AVX|AVX2|AVX512F)$")
## Arch specific definitions
set(_DEFINE_ANY "")
set(_DEFINE_SSE42 "-DHAVE_SSE42" ${_DEFINE_ANY})
set(_DEFINE_AVX "-DHAVE_AVX" ${_DEFINE_SSE42})
set(_DEFINE_AVX2 "-DHAVE_AVX2" ${_DEFINE_AVX})
set(_DEFINE_AVX512F "-DHAVE_AVX512F" ${_DEFINE_AVX2})
## Arch specific compile options
ie_avx512_optimization_flags(_FLAGS_AVX512F)
ie_avx2_optimization_flags (_FLAGS_AVX2)
ie_sse42_optimization_flags (_FLAGS_SSE42)
set(_FLAGS_AVX "") ## TBD is not defined for IE project yet
set(_FLAGS_ANY "") ##
## way to duplicate file via cmake tool set
if (UNIX)
## Clone sources via sym link because it allow to modify original file in IDE along with debug
set(TO_DUPLICATE create_symlink)
else()
## Windows and others - just copy
set(TO_DUPLICATE copy)
endif()
set(DISPATCHER_GEN_SCRIPT ${CMAKE_CURRENT_LIST_DIR}/cross_compiled_disp_gen.cmake)
set(DISPATCHER_GEN_OPTIONS_HOLDER ${CMAKE_CURRENT_LIST_DIR}/cross_compiled_disp_gen_options.in)
#######################################
#
# Allow to enable multiple cross compilation of source file inside one module
# with keeping requirements on minimal instruction set. The CPU check performed
# in runtime via common utils declared in "ie_system_conf.h".
#
# Usage example:
# cross_compiled_file(<target>
# ARCH
# ANY <source_file>
# SSE SSE42 <source_file>
# AVX AVX2 <source_file>
# AVX512F <source_file>
# API <header_file>
# NAMESPACE <namespace> # like "IE::Ext::CPU::XARCH"
# NAME <function_name> # like "my_fun"
# )
#
function(cross_compiled_file TARGET)
set(oneValueArgs API ## Header with declaration of cross compiled function
NAMESPACE ## The namespace where cross compiled function was declared
NAME) ## String with function signature to make cross compiled
set(multiValueArgs ARCH) ## List of architecture described in _ARCH_LIST
cmake_parse_arguments(X "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
## verification
if(X_UNPARSED_ARGUMENTS)
message(FATAL_ERROR "Unknown argument: " ${X_UNPARSED_ARGUMENTS})
endif()
if((NOT TARGET) OR (NOT X_NAME) OR (NOT X_NAMESPACE) OR (NOT X_API) OR (NOT X_ARCH))
message(FATAL_ERROR "Missed arguments")
endif()
_currently_requested_top_arch(TOP_ARCH)
set(_CURRENT_ARCH_FILTER "${_ACCEPTED_ARCHS_${TOP_ARCH}}")
## format: ARCH1 ARCH2 <src1> ARCH3 <src2> ...
foreach(_it ${X_ARCH})
if (_it IN_LIST _ARCH_LIST)
## that is arch ID
set(_arch ${_it})
if(_arch MATCHES ${_CURRENT_ARCH_FILTER})
list(APPEND _CUR_ARCH_SET ${_arch})
list(APPEND _FULL_ARCH_SET ${_arch})
endif()
else()
## that is source file name
set(_src_name ${_it})
_remove_source_from_target(${TARGET} ${_src_name})
_clone_source_to_target(${TARGET} ${_src_name} "${_CUR_ARCH_SET}")
set(_CUR_ARCH_SET "")
endif()
endforeach()
_add_dispatcher_to_target(${TARGET} ${X_API} ${X_NAME} "${X_NAMESPACE}" "${_FULL_ARCH_SET}")
endfunction()
##########################################
#
# Add source multiple time per each element in ARCH_SET.
# Also provide corresponding arch specific flags and defines.
#
function(_clone_source_to_target TARGET SOURCE ARCH_SET)
foreach(_arch ${ARCH_SET})
set(_arch_dir cross-compiled/${_arch})
get_filename_component(ARCH_NAME ${SOURCE} NAME)
get_filename_component(ARCH_INCLUDE_DIR ${SOURCE} DIRECTORY)
set(ARCH_SOURCE "${_arch_dir}/${ARCH_NAME}")
add_custom_command(
OUTPUT ${ARCH_SOURCE}
COMMAND ${CMAKE_COMMAND} -E make_directory
${CMAKE_CURRENT_BINARY_DIR}/${_arch_dir}
COMMAND ${CMAKE_COMMAND} -E ${TO_DUPLICATE}
${CMAKE_CURRENT_SOURCE_DIR}/${SOURCE}
${CMAKE_CURRENT_BINARY_DIR}/${ARCH_SOURCE}
DEPENDS ${SOURCE}
)
set(_ARCH_SPECIFIC_FLAGS
${_DEFINE_${_arch}}
${_FLAGS_${_arch}}
"-DXARCH=${_arch}" ## to replace XARCH with direct ARCH name
"-I${CMAKE_CURRENT_SOURCE_DIR}/${ARCH_INCLUDE_DIR}" ## To make valid #include "some.hpp"
)
_add_source_compile_flags(${ARCH_SOURCE} ${_ARCH_SPECIFIC_FLAGS})
list(APPEND _ARCH_SOURCES ${ARCH_SOURCE})
endforeach()
_add_source_to_target(${TARGET} ${_ARCH_SOURCES})
endfunction()
##########################################
#
# Generate dispatcher for provided function
# for archs in ARCH_SET.
#
function(_add_dispatcher_to_target TARGET HEADER FUNC_NAME NAMESPACE ARCH_SET)
get_filename_component(DISPATCHER_NAME ${HEADER} NAME_WE)
get_filename_component(DISPATCHER_INCLUDE_DIR ${HEADER} DIRECTORY)
set(DISPATCHER_SOURCE "cross-compiled/${DISPATCHER_NAME}_disp.cpp")
set(DISPATCHER_OPT_HOLDER "cross-compiled/${DISPATCHER_NAME}_holder.txt")
set(_GEN_ARGS_LIST
-DXARCH_FUNC_NAME="${X_NAME}"
-DXARCH_NAMESPACES="${NAMESPACE}"
-DXARCH_API_HEADER="${CMAKE_CURRENT_SOURCE_DIR}/${HEADER}"
-DXARCH_DISP_FILE="${CMAKE_CURRENT_BINARY_DIR}/${DISPATCHER_SOURCE}"
-DXARCH_SET="${ARCH_SET}"
)
configure_file(${DISPATCHER_GEN_OPTIONS_HOLDER} ${DISPATCHER_OPT_HOLDER})
add_custom_command(
OUTPUT ${DISPATCHER_SOURCE}
COMMAND ${CMAKE_COMMAND} ${_GEN_ARGS_LIST}
-P ${DISPATCHER_GEN_SCRIPT}
DEPENDS ${HEADER}
${DISPATCHER_GEN_SCRIPT}
${CMAKE_CURRENT_BINARY_DIR}/${DISPATCHER_OPT_HOLDER} ## Just to make run dependency on args value
)
_add_source_compile_flags(${DISPATCHER_SOURCE} "-I${DISPATCHER_INCLUDE_DIR}")
_add_source_to_target(${TARGET} ${DISPATCHER_SOURCE})
endfunction()
#######################################
#
# Return currently requested ARCH id
#
function(_currently_requested_top_arch VAR)
if(ENABLE_AVX512F)
set(RES AVX512F)
elseif(ENABLE_AVX2)
set(RES AVX2)
elseif(ENABLE_SSE42)
set(RES SSE42)
else()
set(RES ANY)
endif()
set (${VAR} "${RES}" PARENT_SCOPE)
endfunction()
#####################################
#
# Utils to handle with cmake target
#
function(_remove_source_from_target TARGET SOURCE_FILE)
get_target_property(ORIGINAL_SOURCES ${TARGET} SOURCES)
## To match by file name only. The path is any.
list(FILTER ORIGINAL_SOURCES EXCLUDE REGEX ".*${SOURCE_FILE}$")
set_target_properties(${TARGET}
PROPERTIES
SOURCES "${ORIGINAL_SOURCES}")
endfunction()
function(_add_source_to_target TARGET)
get_target_property(ORIGINAL_SOURCES ${TARGET} SOURCES)
list(APPEND ORIGINAL_SOURCES ${ARGN})
set_target_properties(${TARGET}
PROPERTIES
SOURCES "${ORIGINAL_SOURCES}")
endfunction()
function(_add_source_compile_flags SOURCE)
get_source_file_property(ORIGINAL_FLAGS ${SOURCE} COMPILE_FLAGS)
## Empty list of COMPILE_FLAGS represented as NOTFOUND
if(NOT ORIGINAL_FLAGS)
set(ORIGINAL_FLAGS "")
endif()
string(REPLACE ";" " " NEW_FLAGS "${ARGN}")
string(APPEND ORIGINAL_FLAGS " " ${NEW_FLAGS})
set_source_files_properties(${SOURCE}
PROPERTIES
COMPILE_FLAGS "${ORIGINAL_FLAGS}")
endfunction()

View File

@@ -2,7 +2,10 @@
# SPDX-License-Identifier: Apache-2.0
#
set(CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake/download" ${CMAKE_MODULE_PATH})
list(APPEND CMAKE_MODULE_PATH
"${OpenVINO_MAIN_SOURCE_DIR}/cmake/download"
"${OpenVINO_MAIN_SOURCE_DIR}/cmake/cross_compile"
)
include(CPackComponent)
unset(IE_CPACK_COMPONENTS_ALL CACHE)
@@ -36,9 +39,13 @@ function(ie_cpack_set_library_dir)
endif()
if(WIN32)
set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${CMAKE_BUILD_TYPE}/${ARCH} PARENT_SCOPE)
set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/bin/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
else()
set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
endif()
endfunction()
@@ -57,6 +64,7 @@ endmacro()
macro(ie_cpack)
set(CPACK_GENERATOR "TGZ")
string(REPLACE "/" "_" CPACK_PACKAGE_VERSION "${CI_BUILD_NUMBER}")
if(WIN32)
set(CPACK_PACKAGE_NAME inference-engine_${CMAKE_BUILD_TYPE})
else()
@@ -137,7 +145,10 @@ if("${CMAKE_BUILD_TYPE}" STREQUAL "")
set(CMAKE_BUILD_TYPE "Release")
endif()
set(OUTPUT_ROOT ${OpenVINO_MAIN_SOURCE_DIR})
# allow to override default OUTPUT_ROOT root
if(NOT DEFINED OUTPUT_ROOT)
set(OUTPUT_ROOT ${OpenVINO_MAIN_SOURCE_DIR})
endif()
# Enable postfixes for Debug/Release builds
set(IE_DEBUG_POSTFIX_WIN "d")
@@ -161,8 +172,8 @@ endif()
set(CMAKE_DEBUG_POSTFIX ${IE_DEBUG_POSTFIX})
set(CMAKE_RELEASE_POSTFIX ${IE_RELEASE_POSTFIX})
if (WIN32)
# Support CMake multiconfiguration for Visual Studio build
if (WIN32 OR CMAKE_GENERATOR STREQUAL "Xcode")
# Support CMake multiconfiguration for Visual Studio or Xcode build
set(IE_BUILD_POSTFIX $<$<CONFIG:Debug>:${IE_DEBUG_POSTFIX}>$<$<CONFIG:Release>:${IE_RELEASE_POSTFIX}>)
else ()
if (${CMAKE_BUILD_TYPE} STREQUAL "Debug" )
@@ -176,10 +187,6 @@ message(STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")
add_definitions(-DIE_BUILD_POSTFIX=\"${IE_BUILD_POSTFIX}\")
if(NOT UNIX)
if (WIN32)
# set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT")
# set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")
endif()
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
@@ -194,15 +201,22 @@ else()
endif()
if(APPLE)
set(CMAKE_MACOSX_RPATH 1)
endif(APPLE)
# WA for Xcode generator + object libraries issue:
# https://gitlab.kitware.com/cmake/cmake/issues/20260
# http://cmake.3232098.n2.nabble.com/XCODE-DEPEND-HELPER-make-Deletes-Targets-Before-and-While-They-re-Built-td7598277.html
set(CMAKE_XCODE_GENERATE_TOP_LEVEL_PROJECT_ONLY ON)
set(CMAKE_MACOSX_RPATH ON)
endif()
# Use solution folders
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
set(CMAKE_POLICY_DEFAULT_CMP0054 NEW)
include(sdl)
include(os_flags NO_POLICY_SCOPE)
include(os_flags)
include(sanitizer)
include(cross_compiled_func)
function(set_ci_build_number)
set(OpenVINO_MAIN_SOURCE_DIR "${CMAKE_SOURCE_DIR}")

View File

@@ -138,6 +138,14 @@ function (RESOLVE_DEPENDENCY NAME_OF_CMAKE_VAR)
endfunction(RESOLVE_DEPENDENCY)
function (resolve_model_dependency network archive network_model_path)
RESOLVE_DEPENDENCY(${network_model_path}
ARCHIVE "models_archives/${archive}"
TARGET_PATH "${MODELS_PATH}/${network}")
string (REPLACE ${MODELS_PATH} "" relative_path ${${network_model_path}})
set(${network_model_path} ".${relative_path}" PARENT_SCOPE)
endfunction()
function(reset_deps_cache)
#
# Reset the dependencies cache if it was set by dependency solver

View File

@@ -7,7 +7,7 @@ include ("download_and_check")
function (GetNameAndUrlToDownload name url archive_name_unified archive_name_win archive_name_lin archive_name_mac archive_name_android)
if (archive_name_unified)
set (${url} "${archive_name_unified}" PARENT_SCOPE)
set (${url} "thirdparty/unified/${archive_name_unified}" PARENT_SCOPE)
set (${name} ${archive_name_unified} PARENT_SCOPE)
else()
if(archive_name_lin)
@@ -27,7 +27,7 @@ function (GetNameAndUrlToDownload name url archive_name_unified archive_name_win
endif()
set (${name} ${archive_name} PARENT_SCOPE)
set (${url} "${archive_name}" PARENT_SCOPE)
set (${url} "thirdparty/${PLATFORM_FOLDER}/${archive_name}" PARENT_SCOPE)
endif()
endfunction(GetNameAndUrlToDownload)
@@ -151,10 +151,12 @@ function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked
set (status "ON")
set (on_master FALSE)
if(DEFINED ENV{IE_PATH_TO_DEPS})
if(DEFINED IE_PATH_TO_DEPS)
set(URL "${IE_PATH_TO_DEPS}/${RELATIVE_URL}")
elseif(DEFINED ENV{IE_PATH_TO_DEPS})
set(URL "$ENV{IE_PATH_TO_DEPS}/${RELATIVE_URL}")
else()
set(URL "https://download.01.org/opencv/2020/openvinotoolkit/2020.2/inference_engine/${RELATIVE_URL}")
set(URL "https://download.01.org/opencv/master/openvinotoolkit/${RELATIVE_URL}")
endif()
#no message on recursive calls

View File

@@ -12,6 +12,9 @@ if(X86_64)
else()
set(ENABLE_MKL_DNN_DEFAULT OFF)
endif()
ie_option (ENABLE_TESTS "unit, behavior and functional tests" OFF)
ie_option (ENABLE_MKL_DNN "MKL-DNN plugin for inference engine" ${ENABLE_MKL_DNN_DEFAULT})
ie_dependent_option (ENABLE_CLDNN "clDnn based plugin for inference engine" ON "WIN32 OR X86_64;NOT APPLE;NOT MINGW" OFF)

View File

@@ -4,7 +4,7 @@
function(enable_fuzzing)
# Enable (libFuzzer)[https://llvm.org/docs/LibFuzzer.html] if supported.
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND NOT WIN32)
if(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$" AND NOT WIN32)
# Communicate libfuzzer is enabled
set(WITH_LIBFUZZER ON PARENT_SCOPE)
add_compile_definitions(WITH_LIBFUZZER)

View File

@@ -2,19 +2,21 @@
# SPDX-License-Identifier: Apache-2.0
#
include(ProcessorCount)
#
# Disables deprecated warnings generation
# Defines ie_c_cxx_deprecated varaible which contains C / C++ compiler flags
#
macro(disable_deprecated_warnings)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(ie_c_cxx_deprecated "/Qdiag-disable:1478,1786")
elseif(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
set(ie_c_cxx_deprecated "/wd4996")
endif()
else()
if(CMAKE_CXX_COMPILER_ID STREQUAL Intel)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(ie_c_cxx_deprecated "-diag-disable=1478,1786")
else()
set(ie_c_cxx_deprecated "-Wno-deprecated-declarations")
@@ -35,13 +37,13 @@ endmacro()
#
macro(ie_deprecated_no_errors)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(ie_c_cxx_deprecated "/Qdiag-warning:1478,1786")
elseif(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
set(ie_c_cxx_deprecated "/wd4996")
endif()
else()
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(ie_c_cxx_deprecated_no_errors "-diag-warning=1478,1786")
else()
set(ie_c_cxx_deprecated_no_errors "-Wno-error=deprecated-declarations")
@@ -61,15 +63,15 @@ endmacro()
#
function(ie_sse42_optimization_flags flags)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
# No such option for MSVC 2019
elseif(CMAKE_CXX_COMPILER_ID STREQUAL Intel)
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(${flags} "/arch:SSE4.2 /QxSSE4.2" PARENT_SCOPE)
else()
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
endif()
else()
if(CMAKE_CXX_COMPILER_ID STREQUAL Intel)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(${flags} "-msse4.2 -xSSE4.2" PARENT_SCOPE)
else()
set(${flags} "-msse4.2" PARENT_SCOPE)
@@ -82,15 +84,15 @@ endfunction()
#
function(ie_avx2_optimization_flags flags)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID STREQUAL Intel)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(${flags} "/QxCORE-AVX2" PARENT_SCOPE)
elseif(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
set(${flags} "/arch:AVX2" PARENT_SCOPE)
else()
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
endif()
else()
if(CMAKE_CXX_COMPILER_ID STREQUAL Intel)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(${flags} "-march=core-avx2 -xCORE-AVX2 -mtune=core-avx2" PARENT_SCOPE)
else()
set(${flags} "-mavx2 -mfma" PARENT_SCOPE)
@@ -104,18 +106,21 @@ endfunction()
#
function(ie_avx512_optimization_flags flags)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID STREQUAL Intel)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(${flags} "/QxCOMMON-AVX512" PARENT_SCOPE)
elseif(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
set(${flags} "/arch:AVX512" PARENT_SCOPE)
else()
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
endif()
else()
if(CMAKE_CXX_COMPILER_ID STREQUAL Intel)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(${flags} "-xCOMMON-AVX512" PARENT_SCOPE)
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL GNU)
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(${flags} "-mavx512f -mfma" PARENT_SCOPE)
endif()
if(CMAKE_CXX_COMPILER_ID MATCHES "^(Clang|AppleClang)$")
set(${flags} "-mavx512f -mfma" PARENT_SCOPE)
endif()
endif()
@@ -125,7 +130,22 @@ endfunction()
# Enables Link Time Optimization compilation
#
macro(ie_enable_lto)
if(UNIX)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel" AND OFF)
ProcessorCount(N)
if(UNIX)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -ipo")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -ipo")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} -ipo-jobs${N}")
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} -ipo-jobs${N}")
set(CMAKE_MODULE_LINKER_FLAGS_RELEASE "${CMAKE_MODULE_LINKER_FLAGS_RELEASE} -ipo-jobs${N}")
else()
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Qipo")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /Qipo")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /Qipo-jobs:${N}")
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /Qipo-jobs:${N}")
set(CMAKE_MODULE_LINKER_FLAGS_RELEASE "${CMAKE_MODULE_LINKER_FLAGS_RELEASE} /Qipo-jobs:${N}")
endif()
elseif(UNIX)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -flto")
# LTO causes issues with gcc 4.8.5 during cmake pthread check
if(NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 4.9)
@@ -137,14 +157,12 @@ macro(ie_enable_lto)
set(CMAKE_AR "gcc-ar")
set(CMAKE_RANLIB "gcc-ranlib")
endif()
elseif(WIN32)
if(CMAKE_BUILD_TYPE STREQUAL Release)
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GL")
# set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /GL")
# set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LTCG:STATUS")
# set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /LTCG:STATUS")
# set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /LTCG:STATUS")
endif()
elseif(MSVC AND OFF)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /GL")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG:STATUS")
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG:STATUS")
set(CMAKE_MODULE_LINKER_FLAGS_RELEASE "${CMAKE_MODULE_LINKER_FLAGS_RELEASE} /LTCG:STATUS")
endif()
endmacro()
@@ -167,7 +185,7 @@ set(THREADS_PREFER_PTHREAD_FLAG ON)
# to allows to override CMAKE_CXX_STANDARD from command line
if(NOT DEFINED CMAKE_CXX_STANDARD)
if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
set(CMAKE_CXX_STANDARD 14)
else()
set(CMAKE_CXX_STANDARD 11)
@@ -176,7 +194,7 @@ if(NOT DEFINED CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
endif()
if(COVERAGE)
if(ENABLE_COVERAGE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --coverage")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --coverage")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage")
@@ -198,10 +216,10 @@ if(WIN32)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LARGEADDRESSAWARE")
if (TREAT_WARNING_AS_ERROR)
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
ie_add_compiler_flags(/WX)
ie_add_compiler_flags(/Qdiag-warning:47,1740,1786)
elseif (CMAKE_CXX_COMPILER_ID MATCHES MSVC)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
# ie_add_compiler_flags(/WX) # Too many warnings
endif()
endif()
@@ -212,43 +230,30 @@ if(WIN32)
# Disable noisy warnings
if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
# C4251 needs to have dll-interface to be used by clients of class
ie_add_compiler_flags(/wd4251)
# C4275 non dll-interface class used as base for dll-interface class
ie_add_compiler_flags(/wd4275)
endif()
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
# 161 unrecognized pragma
# 177 variable was declared but never referenced
# 556 not matched type of assigned function pointer
# 1744: field of class type without a DLL interface used in a class with a DLL interface
# 2586 decorated name length exceeded, name was truncated
# 2651: attribute does not apply to any entity
# 3180 unrecognized OpenMP pragma
# 11075: To get full report use -Qopt-report:4 -Qopt-report-phase ipo
# 15335 was not vectorized: vectorization possible but seems inefficient. Use vector always directive or /Qvec-threshold0 to override
ie_add_compiler_flags(/Qdiag-disable:161,177,556,2586,2651,3180,11075,15335)
ie_add_compiler_flags(/Qdiag-disable:161,177,556,1744,2586,2651,3180,11075,15335)
endif()
# Debug information flags
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /Z7")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Z7")
if(ENABLE_DEBUG_SYMBOLS)
ie_add_compiler_flags(/Z7)
set(DEBUG_SYMBOLS_LINKER_FLAGS "/DEBUG")
if (CMAKE_BUILD_TYPE STREQUAL "Release")
# Keep default /OPT values. See /DEBUG reference for details.
set(DEBUG_SYMBOLS_LINKER_FLAGS "${DEBUG_SYMBOLS_LINKER_FLAGS} /OPT:REF /OPT:ICF")
endif()
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${DEBUG_SYMBOLS_LINKER_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${DEBUG_SYMBOLS_LINKER_FLAGS}")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${DEBUG_SYMBOLS_LINKER_FLAGS}")
endif()
else()
# TODO: enable for C sources as well
# ie_add_compiler_flags(-Werror)
@@ -275,6 +280,8 @@ else()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
ie_add_compiler_flags(-diag-disable=remark)
# noisy warnings from Intel Compiler 19.1.1.217 20200306
ie_add_compiler_flags(-diag-disable=2196)
endif()
# Linker flags

View File

@@ -14,6 +14,8 @@ if (ENABLE_SANITIZER)
set(SANITIZER_LINKER_FLAGS "-fsanitize=address")
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=gold")
elseif(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$" AND NOT WIN32)
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=lld")
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
@@ -24,10 +26,15 @@ if (ENABLE_SANITIZER)
endif()
if (ENABLE_THREAD_SANITIZER)
set(SANITIZER_COMPILER_FLAGS "-g -fsanitize=thread")
set(SANITIZER_COMPILER_FLAGS "-g -fsanitize=thread -fno-omit-frame-pointer")
set(SANITIZER_LINKER_FLAGS "-fsanitize=thread")
if(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$" AND NOT WIN32)
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 8.0)
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=lld")
else()
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -static-libsan")
endif()
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${SANITIZER_LINKER_FLAGS}")

View File

@@ -25,7 +25,7 @@ if (CMAKE_BUILD_TYPE STREQUAL "Release")
if (NOT ENABLE_SANITIZER)
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -s")
endif()
elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
elseif(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$")
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-all")
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
if (NOT ENABLE_SANITIZER)
@@ -36,8 +36,8 @@ if (CMAKE_BUILD_TYPE STREQUAL "Release")
set(CMAKE_MODULE_LINKER_FLAGS_RELEASE "${CMAKE_MODULE_LINKER_FLAGS_RELEASE} -z noexecstack -z relro -z now")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} -z noexecstack -z relro -z now")
endif()
elseif(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} /sdl")
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} /sdl /guard:cf")
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${IE_C_CXX_FLAGS}")

View File

@@ -7,7 +7,7 @@ if(CMAKE_CL_64)
set(MSVC64 ON)
endif()
if(WIN32 AND CMAKE_CXX_COMPILER_ID MATCHES "GNU")
if(WIN32 AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpmachine
OUTPUT_VARIABLE OPENVINO_GCC_TARGET_MACHINE
OUTPUT_STRIP_TRAILING_WHITESPACE)

60
docs/CMakeLists.txt Normal file
View File

@@ -0,0 +1,60 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
add_subdirectory(examples)
# Detect nGraph
find_package(ngraph QUIET)
if(NOT ngraph_FOUND)
set(ngraph_DIR ${CMAKE_BINARY_DIR}/ngraph)
endif()
# Detect InferenceEngine
find_package(InferenceEngine QUIET)
if(NOT InferenceEngine_FOUND)
set(InferenceEngine_DIR ${CMAKE_BINARY_DIR})
endif()
add_subdirectory(template_extension)
set(all_docs_targets
ie_docs_examples
template_extension
templatePlugin TemplateBehaviorTests TemplateFunctionalTests)
foreach(target_name IN LISTS all_docs_targets)
if (TARGET ${target_name})
set_target_properties(${target_name} PROPERTIES FOLDER docs)
endif()
endforeach()
# OpenVINO docs
set(OPENVINO_DOCS_PATH "" CACHE PATH "Path to openvino-documentation local repository")
set(args "")
if(OPENVINO_DOCS_PATH)
set(args "${args} ovinodoc_path:${OPENVINO_DOCS_PATH}")
endif()
file(GLOB_RECURSE docs_files "${OpenVINO_MAIN_SOURCE_DIR}/docs")
file(GLOB_RECURSE include_files "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/include")
file(GLOB_RECURSE ovino_files "${OPENVINO_DOCS_PATH}")
add_custom_target(ie_docs
COMMAND ./build_docs.sh ${args}
WORKING_DIRECTORY "${OpenVINO_MAIN_SOURCE_DIR}/docs/build_documentation"
COMMENT "Generating OpenVINO documentation"
SOURCES ${docs_files} ${include_files} ${ovino_files}
VERBATIM)
set_target_properties(ie_docs PROPERTIES FOLDER docs)
find_program(browser NAMES xdg-open)
if(browser)
add_custom_target(ie_docs_open
COMMAND ${browser} "${OpenVINO_MAIN_SOURCE_DIR}/doc/html/index.html"
DEPENDS ie_docs
COMMENT "Open OpenVINO documentation"
VERBATIM)
set_target_properties(ie_docs_open PROPERTIES FOLDER docs)
endif()

View File

@@ -0,0 +1,13 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME ie_docs_examples)
file(GLOB SOURCES *.cpp)
add_library(ie_docs_examples STATIC ${SOURCES})
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api)
#add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})

View File

@@ -0,0 +1,68 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <threading/ie_itask_executor.hpp>
#include <cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp>
#include <memory>
using namespace InferenceEngine;
class AcceleratorSyncRequest : public InferRequestInternal {
public:
using Ptr = std::shared_ptr<AcceleratorSyncRequest>;
void Preprocess();
void WriteToDevice();
void RunOnDevice();
void ReadFromDevice();
void PostProcess();
};
// ! [async_infer_request:define_pipeline]
// Inherits from AsyncInferRequestThreadSafeDefault
class AcceleratorAsyncInferRequest : public AsyncInferRequestThreadSafeDefault {
// Store the pointer to the synchronous request and five executors
AcceleratorAsyncInferRequest(const AcceleratorSyncRequest::Ptr& syncRequest,
const ITaskExecutor::Ptr& preprocessExecutor,
const ITaskExecutor::Ptr& writeToDeviceExecutor,
const ITaskExecutor::Ptr& runOnDeviceExecutor,
const ITaskExecutor::Ptr& readFromDeviceExecutor,
const ITaskExecutor::Ptr& postProcessExecutor) :
AsyncInferRequestThreadSafeDefault(syncRequest, nullptr, nullptr),
_accSyncRequest{syncRequest},
_preprocessExecutor{preprocessExecutor},
_writeToDeviceExecutor{writeToDeviceExecutor},
_runOnDeviceExecutor{runOnDeviceExecutor},
_readFromDeviceExecutor{readFromDeviceExecutor},
_postProcessExecutor{postProcessExecutor}
{
// Five pipeline stages of synchronous infer request are run by different executors
_pipeline = {
{ _preprocessExecutor , [this] {
_accSyncRequest->Preprocess();
}},
{ _writeToDeviceExecutor , [this] {
_accSyncRequest->WriteToDevice();
}},
{ _runOnDeviceExecutor , [this] {
_accSyncRequest->RunOnDevice();
}},
{ _readFromDeviceExecutor , [this] {
_accSyncRequest->ReadFromDevice();
}},
{ _postProcessExecutor , [this] {
_accSyncRequest->PostProcess();
}},
};
}
// As all stages use _accSyncRequest member we should wait for all stages tasks before the destructor destroy this member.
~AcceleratorAsyncInferRequest() {
StopAndWait();
}
AcceleratorSyncRequest::Ptr _accSyncRequest;
ITaskExecutor::Ptr _preprocessExecutor, _writeToDeviceExecutor, _runOnDeviceExecutor, _readFromDeviceExecutor, _postProcessExecutor;
};
// ! [async_infer_request:define_pipeline]

View File

@@ -0,0 +1,53 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <threading/ie_cpu_streams_executor.hpp>
#include <memory>
#include <future>
#include <iostream>
void example1() {
// ! [itask_executor:define_pipeline]
// std::promise is move only object so to satisfy copy callable constraint we use std::shared_ptr
auto promise = std::make_shared<std::promise<void>>();
// When the promise is created we can get std::future to wait the result
auto future = promise->get_future();
// Rather simple task
InferenceEngine::Task task = [] {std::cout << "Some Output" << std::endl; };
// Create an executor
InferenceEngine::ITaskExecutor::Ptr taskExecutor = std::make_shared<InferenceEngine::CPUStreamsExecutor>();
if (taskExecutor == nullptr) {
// ProcessError(e);
return;
}
// We capture the task and the promise. When the task is executed in the task executor context
// we munually call std::promise::set_value() method
taskExecutor->run([task, promise] {
std::exception_ptr currentException;
try {
task();
} catch(...) {
// If there is some exceptions store the pointer to current exception
currentException = std::current_exception();
}
if (nullptr == currentException) {
promise->set_value(); // <-- If there is no problems just call std::promise::set_value()
} else {
promise->set_exception(currentException); // <-- If there is an exception forward it to std::future object
}
});
// To wait the task completion we call std::future::wait method
future.wait(); // The current thread will be blocked here and wait when std::promise::set_value()
// or std::promise::set_exception() method will be called.
// If the future store the exception it will be rethrown in std::future::get method
try {
future.get();
} catch(std::exception& /*e*/) {
// ProcessError(e);
}
// ! [itask_executor:define_pipeline]
}

View File

@@ -0,0 +1,18 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# [cmake:extension]
set(TARGET_NAME "template_extension")
find_package(ngraph REQUIRED)
find_package(InferenceEngine REQUIRED)
file(GLOB_RECURSE SRC *.cpp)
add_library(${TARGET_NAME} SHARED ${SRC})
target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_EXTENSION_API)
target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES}
${NGRAPH_LIBRARIES})
# [cmake:extension]

View File

@@ -0,0 +1,124 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "cpu_kernel.hpp"
#include "op.hpp"
#include <details/ie_exception.hpp>
#include <ie_layouts.h>
using namespace TemplateExtension;
//! [cpu_implementation:ctor]
OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node> &node) {
try {
auto castedNode = std::dynamic_pointer_cast<Operation>(node);
if (!castedNode)
THROW_IE_EXCEPTION << "Cannot create implementation for unknown operation!";
if (castedNode->inputs().size() != 1 || castedNode->outputs().size() != 1)
THROW_IE_EXCEPTION << "Cannot create implementation for operation with incorrect number of inputs or outputs!";
if (castedNode->get_input_partial_shape(0).is_dynamic() || castedNode->get_output_partial_shape(0).is_dynamic())
THROW_IE_EXCEPTION << "Cannot create implementation for op with dynamic shapes!";
if (castedNode->get_input_shape(0).size() != 4 || castedNode->get_output_shape(0).size() != 4)
THROW_IE_EXCEPTION << "Operation supports only 4d tensors for input and output.";
if (castedNode->get_input_element_type(0) != ngraph::element::f32 || castedNode->get_output_element_type(0) != ngraph::element::f32)
THROW_IE_EXCEPTION << "Operation supports only FP32 tensors.";
add = castedNode->getAddAttr();
} catch (InferenceEngine::details::InferenceEngineException& ex) {
error = ex.what();
}
}
//! [cpu_implementation:ctor]
//! [cpu_implementation:getSupportedConfigurations]
InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
InferenceEngine::ResponseDesc *resp) noexcept {
auto createConfig = [](const InferenceEngine::SizeVector inShape, const InferenceEngine::SizeVector& outShape, bool planar) {
InferenceEngine::LayerConfig config;
config.dynBatchSupport = false;
InferenceEngine::DataConfig inData;
InferenceEngine::DataConfig outData;
InferenceEngine::SizeVector order = {0, 1, 2, 3};
// Allow any offset before data
size_t offset((std::numeric_limits<size_t>::max)());
if (planar) {
inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inShape, order, offset});
config.inConfs.push_back(inData);
outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outShape, order, offset});
config.outConfs.push_back(outData);
} else {
// Add blocked (nChw8c) format
auto div_up = [](const int a, const int b) -> int {
if (!b)
return 0;
return (a + b - 1) / b;
};
order.push_back(1);
InferenceEngine::SizeVector inBlkDims = inShape;
inBlkDims[1] = div_up(inBlkDims[1], 8);
inBlkDims.push_back(8);
InferenceEngine::SizeVector outBlkDims = outShape;
outBlkDims[1] = div_up(outBlkDims[1], 8);
outBlkDims.push_back(8);
inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inBlkDims, order, offset});
config.inConfs.push_back(inData);
outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outBlkDims, order, offset});
config.outConfs.push_back(outData);
}
return config;
};
if (!error.empty()) {
if (resp) {
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
resp->msg[sizeof(resp->msg)-1] = 0;
}
return InferenceEngine::GENERAL_ERROR;
}
// Add planar format
conf.emplace_back(createConfig(inShape, outShape, true));
// Add blocked format nChw8c
conf.emplace_back(createConfig(inShape, outShape, false));
return InferenceEngine::OK;
}
//! [cpu_implementation:getSupportedConfigurations]
//! [cpu_implementation:init]
InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig &config, InferenceEngine::ResponseDesc *resp) noexcept {
try {
if (config.inConfs.size() != 1 || config.outConfs.size() != 1) {
THROW_IE_EXCEPTION << "Operation cannot be initialized with incorrect number of inputs/outputs!";
}
if (config.inConfs[0].desc.getDims().size() != 4 || config.outConfs[0].desc.getDims().size() != 4) {
THROW_IE_EXCEPTION << "Operation can be initialized only with 4d input/output tensors!";
}
if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 ||
config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
THROW_IE_EXCEPTION << "Operation supports only FP32 precisions!";
}
} catch (InferenceEngine::details::InferenceEngineException& ex) {
if (resp) {
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
resp->msg[sizeof(resp->msg)-1] = 0;
}
return InferenceEngine::GENERAL_ERROR;
}
return InferenceEngine::OK;
}
//! [cpu_implementation:init]
//! [cpu_implementation:execute]
InferenceEngine::StatusCode OpImplementation::execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
std::vector<InferenceEngine::Blob::Ptr> &outputs,
InferenceEngine::ResponseDesc *resp) noexcept {
const float* src_data = inputs[0]->cbuffer().as<const float *>() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
float *dst_data = outputs[0]->buffer().as<float *>() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
for (size_t i = 0; i < inputs[0]->size(); i++) {
dst_data[i] = src_data[i] + add;
}
return InferenceEngine::OK;
}
//! [cpu_implementation:execute]

View File

@@ -0,0 +1,31 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_iextension.h>
#include <ngraph/ngraph.hpp>
namespace TemplateExtension {
//! [cpu_implementation:header]
class OpImplementation : public InferenceEngine::ILayerExecImpl {
public:
explicit OpImplementation(const std::shared_ptr<ngraph::Node>& node);
InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
std::vector<InferenceEngine::Blob::Ptr> &outputs,
InferenceEngine::ResponseDesc *resp) noexcept override;
private:
int64_t add;
ngraph::Shape inShape;
ngraph::Shape outShape;
std::string error;
};
//! [cpu_implementation:header]
} // namespace TemplateExtension

View File

@@ -0,0 +1,73 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "extension.hpp"
#include "cpu_kernel.hpp"
#include "op.hpp"
#include <ngraph/factory.hpp>
#include <ngraph/opsets/opset.hpp>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
using namespace TemplateExtension;
//! [extension:GetVersion]
void Extension::GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept {
static InferenceEngine::Version ExtensionDescription = {
{1, 0}, // extension API version
"1.0",
"template_ext" // extension description message
};
versionInfo = &ExtensionDescription;
}
//! [extension:GetVersion]
//! [extension:getOpSets]
std::map<std::string, ngraph::OpSet> Extension::getOpSets() {
std::map<std::string, ngraph::OpSet> opsets;
ngraph::OpSet opset;
opset.insert<Operation>();
opsets["custom_opset"] = opset;
return opsets;
}
//! [extension:getOpSets]
//! [extension:getImplTypes]
std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::Node> &node) {
if (std::dynamic_pointer_cast<Operation>(node)) {
return {"CPU"};
}
return {};
}
//! [extension:getImplTypes]
//! [extension:getImplementation]
InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node> &node, const std::string &implType) {
if (std::dynamic_pointer_cast<Operation>(node) && implType == "CPU") {
return std::make_shared<OpImplementation>(node);
}
return nullptr;
}
//! [extension:getImplementation]
//! [extension:CreateExtension]
// Exported function
INFERENCE_EXTENSION_API(InferenceEngine::StatusCode) InferenceEngine::CreateExtension(InferenceEngine::IExtension *&ext,
InferenceEngine::ResponseDesc *resp) noexcept {
try {
ext = new Extension();
return OK;
} catch (std::exception &ex) {
if (resp) {
std::string err = ((std::string) "Couldn't create extension: ") + ex.what();
err.copy(resp->msg, 255);
}
return InferenceEngine::GENERAL_ERROR;
}
}
//! [extension:CreateExtension]

View File

@@ -0,0 +1,31 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_iextension.h>
#include <ie_api.h>
#include <ngraph/ngraph.hpp>
#include <memory>
#include <vector>
#include <string>
#include <map>
//! [extension:header]
namespace TemplateExtension {
class Extension : public InferenceEngine::IExtension {
public:
Extension() = default;
void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override;
void Unload() noexcept override {}
void Release() noexcept override { delete this; }
std::map<std::string, ngraph::OpSet> getOpSets() override;
std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override;
InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) override;
};
} // namespace TemplateExtension
//! [extension:header]

View File

@@ -0,0 +1,38 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "op.hpp"
using namespace TemplateExtension;
constexpr ngraph::NodeTypeInfo Operation::type_info;
//! [op:ctor]
Operation::Operation(const ngraph::Output<ngraph::Node> &arg, int64_t add) : Op({arg}), add(add) {
constructor_validate_and_infer_types();
}
//! [op:ctor]
//! [op:validate]
void Operation::validate_and_infer_types() {
// Operation doesn't change shapes end element type
set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
}
//! [op:validate]
//! [op:copy]
std::shared_ptr<ngraph::Node> Operation::copy_with_new_args(const ngraph::NodeVector &new_args) const {
if (new_args.size() != 1) {
throw ngraph::ngraph_error("Incorrect number of new arguments");
}
return std::make_shared<Operation>(new_args.at(0), add);
}
//! [op:copy]
//! [op:visit_attributes]
bool Operation::visit_attributes(ngraph::AttributeVisitor &visitor) {
visitor.on_attribute("add", add);
return true;
}
//! [op:visit_attributes]

View File

@@ -0,0 +1,29 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ngraph/ngraph.hpp>
//! [op:header]
namespace TemplateExtension {
class Operation : public ngraph::op::Op {
public:
static constexpr ngraph::NodeTypeInfo type_info{"Template", 0};
const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }
Operation() = default;
Operation(const ngraph::Output<ngraph::Node>& arg, int64_t add);
void validate_and_infer_types() override;
std::shared_ptr<ngraph::Node> copy_with_new_args(const ngraph::NodeVector& new_args) const override;
bool visit_attributes(ngraph::AttributeVisitor& visitor) override;
int64_t getAddAttr() { return add; }
private:
int64_t add;
};
//! [op:header]
} // namespace TemplateExtension

View File

@@ -0,0 +1,39 @@
# Copyright (C) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# [cmake:main]
if (APPLE)
# due to https://cmake.org/cmake/help/v3.12/policy/CMP0068.html
cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
else()
cmake_minimum_required(VERSION 3.7.2 FATAL_ERROR)
endif()
project(InferenceEngineTemplatePlugin)
set(IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR ${InferenceEngineTemplatePlugin_SOURCE_DIR})
find_package(InferenceEngineDeveloperPackage REQUIRED)
add_subdirectory(src)
if(ENABLE_TESTS)
include(CTest)
enable_testing()
if(ENABLE_FUNCTIONAL_TESTS)
add_subdirectory(tests_deprecated/functional)
add_subdirectory(tests/functional)
endif()
if(ENABLE_BEH_TESTS)
add_subdirectory(tests_deprecated/behavior)
endif()
endif()
# [cmake:main]
# install
# ATTENTION: uncomment to install component
# ie_cpack(template)

View File

@@ -0,0 +1,18 @@
# template-plugin
Template Plugin for Inference Engine which demonstrates basics of how Inference Engine plugin can be built and implemented on top of Inference Engine Developer Package and Plugin API.
## How to build
```bash
$ cd $DLDT_HOME
$ mkdir $DLDT_HOME/build
$ cd $DLDT_HOME/build
$ cmake -DENABLE_TESTS=ON -DENABLE_BEH_TESTS=ON -DENABLE_FUNCTIONAL_TESTS=ON ..
$ make -j8
$ cd $TEMPLATE_PLUGIN_HOME
$ mkdir $TEMPLATE_PLUGIN_HOME/build
$ cd $TEMPLATE_PLUGIN_HOME/build
$ cmake -DInferenceEngineDeveloperPackage_DIR=$DLDT_HOME/build ..
$ make -j8
```

View File

@@ -0,0 +1,59 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief A header that defines advanced related properties for DLIA plugins.
* These properties should be used in SetConfig() and LoadNetwork() methods of plugins
*
* @file dlia_config.hpp
*/
#pragma once
#include <string>
#include "ie_plugin_config.hpp"
namespace InferenceEngine {
namespace TemplateMetrics {
/**
* @def TEMPLATE_METRIC_VALUE(name)
* @brief Shortcut for defining Template metric values
*/
#define TEMPLATE_METRIC_VALUE(name) InferenceEngine::TemplateMetrics::name
#define DECLARE_TEMPLATE_METRIC_VALUE(name) static constexpr auto name = #name
// ! [public_header:metrics]
/**
* @brief Defines whether current Template device instance supports hardware blocks for fast convolution computations.
*/
DECLARE_TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION);
// ! [public_header:metrics]
} // namespace TemplateMetrics
namespace TemplateConfigParams {
/**
* @def TEMPLATE_CONFIG_KEY(name)
* @brief Shortcut for defining Template device configuration keys
*/
#define TEMPLATE_CONFIG_KEY(name) InferenceEngine::TemplateConfigParams::_CONFIG_KEY(TEMPLATE_##name)
#define DECLARE_TEMPLATE_CONFIG_KEY(name) DECLARE_CONFIG_KEY(TEMPLATE_##name)
#define DECLARE_TEMPLATE_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(TEMPLATE_##name)
/**
* @brief The key to define the type of transformations for TEMPLATE inputs and outputs.
* TEMPLATE use custom data layout for input and output blobs. IE TEMPLATE Plugin provides custom
* optimized version of transformation functions that do not use OpenMP and much more faster
* than native TEMPLATE functions. Values: "NO" - optimized plugin transformations
* are used, "YES" - native TEMPLATE transformations are used.
*/
DECLARE_TEMPLATE_CONFIG_KEY(ANY_CONFIG_KEY);
} // namespace TemplateConfigParams
} // namespace InferenceEngine

View File

@@ -0,0 +1,43 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# [cmake:plugin]
set(TARGET_NAME "templatePlugin")
if(ENABLE_LTO)
ie_enable_lto()
endif()
file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
file(GLOB_RECURSE HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
# adds a shared library with plugin
ie_add_plugin(NAME ${TARGET_NAME}
DEVICE_NAME "TEMPLATE"
SOURCES ${SOURCES} ${HEADERS}
SKIP_INSTALL # ATTENTION: uncomment to install component
VERSION_DEFINES_FOR template_plugin.cpp)
target_include_directories(${TARGET_NAME} PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}"
"${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include")
target_link_libraries(${TARGET_NAME} PRIVATE IE::inference_engine IE::inference_engine_transformations ${NGRAPH_LIBRARIES} ${INTEL_ITT_LIBS})
# ATTENTION: uncomment to register a plugin in the plugins.xml file
# ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
# POSSIBLE_PLUGINS ${TARGET_NAME})
# [cmake:plugin]
# ATTENTION: uncomment to install component
# install
# set(component_name template)
# ie_cpack_add_component(${component_name} REQUIRED)
# install(TARGETS ${TARGET_NAME}
# RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH}
# ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH}
# LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH}
# COMPONENT ${component_name})

View File

@@ -0,0 +1,44 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <ie_profiling.hpp>
#include "template_async_infer_request.hpp"
#include "template_executable_network.hpp"
using namespace TemplatePlugin;
// ! [async_infer_request:ctor]
TemplateAsyncInferRequest::TemplateAsyncInferRequest(
const TemplateInferRequest::Ptr& inferRequest,
const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) :
AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor),
_inferRequest(inferRequest), _waitExecutor(waitExecutor) {
_pipeline = {
{cpuTaskExecutor, [this] {
IE_PROFILING_AUTO_SCOPE(PreprocessingAndStartPipeline)
_inferRequest->inferPreprocess();
_inferRequest->startPipeline();
}},
{_waitExecutor, [this] {
IE_PROFILING_AUTO_SCOPE(WaitPipeline)
_inferRequest->waitPipeline();
}},
{cpuTaskExecutor, [this] {
IE_PROFILING_AUTO_SCOPE(Postprocessing)
_inferRequest->inferPostprocess();
}}
};
}
// ! [async_infer_request:ctor]
// ! [async_infer_request:dtor]
TemplateAsyncInferRequest::~TemplateAsyncInferRequest() {
InferenceEngine::AsyncInferRequestThreadSafeDefault::StopAndWait();
}
// ! [async_infer_request:dtor]

View File

@@ -0,0 +1,30 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp>
#include "template_infer_request.hpp"
namespace TemplatePlugin {
// ! [async_infer_request:header]
class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault {
public:
TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest,
const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
~TemplateAsyncInferRequest() override;
private:
TemplateInferRequest::Ptr _inferRequest;
InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
};
// ! [async_infer_request:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,45 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <string>
#include <vector>
#include <algorithm>
#include <ie_util_internal.hpp>
#include <ie_plugin_config.hpp>
#include <file_utils.h>
#include <cpp_interfaces/exception2status.hpp>
#include "template_config.hpp"
using namespace TemplatePlugin;
Configuration::Configuration() { }
Configuration::Configuration(const ConfigMap& config, const Configuration & defaultCfg, bool throwOnUnsupported) {
*this = defaultCfg;
for (auto&& c : config) {
const auto& key = c.first;
const auto& value = c.second;
if (CONFIG_KEY(DEVICE_ID) == key) {
deviceId = std::stoi(value);
} else if (CONFIG_KEY(PERF_COUNT) == key) {
perfCount = (CONFIG_VALUE(YES) == value);
} else if (throwOnUnsupported) {
THROW_IE_EXCEPTION << NOT_FOUND_str << ": " << key;
}
}
}
InferenceEngine::Parameter Configuration::Get(const std::string& name) const {
if (name == CONFIG_KEY(DEVICE_ID)) {
return {std::to_string(deviceId)};
} else if (name == CONFIG_KEY(PERF_COUNT)) {
return {perfCount};
} else {
THROW_IE_EXCEPTION << NOT_FOUND_str << ": " << name;
}
}

View File

@@ -0,0 +1,40 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include <string>
#include <map>
#include <unordered_map>
#include <ie_parameter.hpp>
namespace TemplatePlugin {
template<typename T>
using IOMap = std::unordered_map<std::string, T>;
// ! [configuration:header]
using ConfigMap = std::map<std::string, std::string>;
struct Configuration {
Configuration();
Configuration(const Configuration&) = default;
Configuration(Configuration&&) = default;
Configuration& operator=(const Configuration&) = default;
Configuration& operator=(Configuration&&) = default;
explicit Configuration(const ConfigMap& config, const Configuration & defaultCfg = {}, const bool throwOnUnsupported = true);
InferenceEngine::Parameter Get(const std::string& name) const;
// Plugin configuration parameters
int deviceId = 0;
bool perfCount = true;
};
// ! [configuration:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,167 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <atomic>
#include <set>
#include <utility>
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include <ie_metric_helpers.hpp>
#include <ie_util_internal.hpp>
#include <ie_plugin_config.hpp>
#include <network_serializer.h>
#include <threading/ie_executor_manager.hpp>
#include <details/ie_cnn_network_tools.h>
#include <ngraph/specialize_function.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <transformations/convert_divide.hpp>
#include "template_plugin.hpp"
#include "template_executable_network.hpp"
using namespace TemplatePlugin;
// ! [executable_network:ctor_cnnnetwork]
// Compiles an IR v10 (ngraph-based) network for the Template device.
// InferenceEngine exceptions propagate unchanged; any other exception type is
// wrapped into an InferenceEngine exception with a descriptive message.
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(InferenceEngine::ICNNNetwork& network,
                                                     const Configuration& cfg):
    _name(network.getName()),
    _cfg(cfg),
    _waitExecutor(InferenceEngine::ExecutorManager::getInstance()->getExecutor("Template")) {
    // TODO: if your plugin supports device ID (more that single instance of device can be on host machine)
    // you should select proper device based on KEY_DEVICE_ID or automatic behavior
    // In this case, _waitExecutor should also be created per device.
    try {
        if (std::shared_ptr<const ngraph::Function> ngraphFunction = network.getFunction()) {
            CompileGraph(ngraphFunction);
        } else {
            THROW_IE_EXCEPTION << "TEMPLATE plugin can compile only IR v10 networks";
        }
    }
    catch (const InferenceEngineException &) {
        // Fixed: `throw e;` re-threw a copy, which slices derived exception
        // types; a bare `throw;` rethrows the original exception object.
        throw;
    }
    catch (const std::exception & e) {
        THROW_IE_EXCEPTION << "Standard exception from compilation library: " << e.what();
    }
    catch (...) {
        THROW_IE_EXCEPTION << "Generic exception is thrown";
    }
}
// ! [executable_network:ctor_cnnnetwork]
// ! [executable_network:ctor_import_stream]
// Restores an executable network from a previously exported model stream.
// The template implementation does not support import, so `model` is
// intentionally unused here.
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
const Configuration& cfg) :
_cfg(cfg) {
// TODO: since Import network is not a mandatory functionality, this ctor can just be removed
}
// ! [executable_network:ctor_import_stream]
// ! [executable_network:compile_graph]
// Clones the ngraph::Function, runs common + device-specific transformation
// passes on the copy, then (in a real plugin) lowers the result to a
// hardware-specific graph representation.
void TemplatePlugin::ExecutableNetwork::CompileGraph(const std::shared_ptr<const ngraph::Function> & ngraphFunction) {
// TODO: perform actual graph compilation taking `_cfg` into account
// 1.Copy ngraph::Function first to apply some transformations later in
// ExecutableNetwork::CompileGraph, which modify original ngraph::Function
const bool shareConsts = false, constFolding = false;
// Collect the current parameter types/shapes so specialize_function produces
// an exact copy (no actual specialization is requested: replacements are null).
std::vector<::ngraph::element::Type> new_types;
std::vector<::ngraph::PartialShape> new_shapes;
for (const auto &parameter : ngraphFunction->get_parameters()) {
new_shapes.emplace_back(parameter->get_partial_shape());
new_types.emplace_back(parameter->get_element_type());
}
// NOTE(review): confirm that the order of the two trailing bool arguments
// matches ngraph::specialize_function's (constant_folding, share_constants)
// parameters for the ngraph version in use — it is not visible from here.
auto copyFunction = ngraph::specialize_function(std::const_pointer_cast<ngraph::Function>(ngraphFunction),
new_types, new_shapes, std::vector<void *>(new_types.size(), nullptr), constFolding, shareConsts);
// 2. Perform common and device-specific transformations
ngraph::pass::Manager passManager;
// Example: register standard ngraph transformation from ngraph::ngraph
passManager.register_pass<ngraph::pass::ConstantFolding>();
// Example: register inference engine optimization transformation for IE::inference_engine_transformations
passManager.register_pass<ngraph::pass::ConvertDivide>();
// Register any other transformations
// ..
// After `run_passes`, we have the transformed function, where operations match device operations,
// and we can create device hardware-dependent graph
passManager.run_passes(copyFunction);
// 3. Iterate over operations and create hardware-specific ngraph
for (const auto& op : copyFunction->get_ordered_ops()) {
// TODO: map ngraph `op` to device operation
}
// 4. Perform any other steps like allocation and filling device buffers, and so on
}
// ! [executable_network:compile_graph]
// ! [executable_network:create_infer_request_impl]
// Factory for the synchronous infer-request implementation bound to this
// executable network.
InferenceEngine::InferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
                                                                                                     InferenceEngine::OutputsDataMap networkOutputs) {
    auto self = std::static_pointer_cast<ExecutableNetwork>(shared_from_this());
    return std::make_shared<TemplateInferRequest>(networkInputs, networkOutputs, self);
}
// ! [executable_network:create_infer_request_impl]
// ! [executable_network:create_infer_request]
// Wraps a synchronous request into the async thread-safe implementation and
// publishes it through the external IInferRequest interface.
void TemplatePlugin::ExecutableNetwork::CreateInferRequest(IInferRequest::Ptr& asyncRequest) {
    auto syncRequest = std::static_pointer_cast<TemplateInferRequest>(CreateInferRequestImpl(_networkInputs, _networkOutputs));
    auto asyncImpl = std::make_shared<TemplateAsyncInferRequest>(syncRequest, _taskExecutor, _waitExecutor, _callbackExecutor);
    // The custom deleter releases the wrapper through its own reference counting.
    asyncRequest.reset(new InferenceEngine::InferRequestBase<TemplateAsyncInferRequest>(asyncImpl),
                       [](InferenceEngine::IInferRequest* request) { request->Release(); });
    asyncImpl->SetPointerToPublicInterface(asyncRequest);
}
// ! [executable_network:create_infer_request]
// ! [executable_network:get_config]
// Exposes the per-network configuration values; only the keys supported by
// the plugin configuration are accepted.
void TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name, Parameter &result, ResponseDesc *resp) const {
    // TODO: return more supported values for config keys
    const bool isSupportedKey = (name == CONFIG_KEY(DEVICE_ID)) || (name == CONFIG_KEY(PERF_COUNT));
    if (!isSupportedKey) {
        THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork config key: " << name;
    }
    result = _cfg.Get(name);
}
// ! [executable_network:get_config]
// ! [executable_network:get_metric]
// Reports executable-network level metrics: the metric/config key lists, the
// network name, and the optimal number of infer requests.
void TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *) const {
// TODO: return more supported values for metrics
if (METRIC_KEY(SUPPORTED_METRICS) == name) {
result = IE_SET_METRIC(SUPPORTED_METRICS, std::vector<std::string>{
METRIC_KEY(NETWORK_NAME),
METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS),
METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
} else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
result = IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, std::vector<std::string>{
CONFIG_KEY(DEVICE_ID),
CONFIG_KEY(PERF_COUNT)});
} else if (METRIC_KEY(NETWORK_NAME) == name) {
result = IE_SET_METRIC(NETWORK_NAME, _name);
} else if (METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS) == name) {
// TODO: fill with actual number
unsigned int value = 1;
result = IE_SET_METRIC(OPTIMAL_NUMBER_OF_INFER_REQUESTS, value);
} else {
THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork metric: " << name;
}
}
// ! [executable_network:get_metric]
// ! [executable_network:export_impl]
// Serializes the compiled network so it can later be restored through the
// import (std::istream) constructor. Not implemented in the template.
void TemplatePlugin::ExecutableNetwork::ExportImpl(std::ostream& dlaModel) {
// TODO: Code which exports graph to the `dlaModel` output stream
}
// ! [executable_network:export_impl]

View File

@@ -0,0 +1,68 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <utility>
#include <tuple>
#include <memory>
#include <string>
#include <vector>
#include <map>
#include <unordered_map>
#include <list>
#include <ie_common.h>
#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
#include <cnn_network_impl.hpp>
#include <threading/ie_itask_executor.hpp>
#include <ngraph/function.hpp>
#include "template_config.hpp"
#include "template_infer_request.hpp"
#include "template_async_infer_request.hpp"
namespace TemplatePlugin {
class Engine;
/**
* @class ExecutableNetwork
* @brief Interface of executable network
*/
// ! [executable_network:header]
class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault {
public:
// Compiles an ngraph-based (IR v10) network for the Template device.
ExecutableNetwork(InferenceEngine::ICNNNetwork& network,
const Configuration& cfg);
// Restores a network from a stream produced by ExportImpl (not implemented).
ExecutableNetwork(std::istream & model,
const Configuration& cfg);
~ExecutableNetwork() override = default;
// Methods from a base class ExecutableNetworkThreadSafeDefault
void ExportImpl(std::ostream& model) override;
InferenceEngine::InferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
void CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) override;
void GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
void GetConfig(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
// Counter used by TemplateInferRequest to derive per-request profiling names;
// incremented on request creation and decremented in the request destructor.
std::atomic<std::size_t> _requestId = {0};
std::string _name;
Configuration _cfg;
private:
// Runs transformation passes on a copy of the function and lowers it to the
// device representation (template: a no-op walk over the ops).
void CompileGraph(const std::shared_ptr<const ngraph::Function> & ngraphFunction);
std::shared_ptr<Engine> _plugin;
// Executor on which async requests wait for device completion.
InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
};
// ! [executable_network:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,224 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <algorithm>
#include <memory>
#include <string>
#include <map>
#include <ie_blob.h>
#include <ie_plugin.hpp>
#include <description_buffer.hpp>
#include <debug.h>
#include <ie_layouts.h>
#include <threading/ie_executor_manager.hpp>
#include <blob_transform.hpp>
#include <ie_parallel.hpp>
#include <ie_memcpy.h>
#include <precision_utils.h>
#include <template/template_config.hpp>
#include "template_infer_request.hpp"
#include "template_executable_network.hpp"
#include "template_plugin.hpp"
using namespace TemplatePlugin;
using Time = std::chrono::high_resolution_clock;
using ns = std::chrono::nanoseconds;
using fsec = std::chrono::duration<float>;
// ! [infer_request:ctor]
// Builds a synchronous infer request: reserves a unique request id, prepares
// per-stage profiling tasks, and allocates host (and, if needed, device)
// buffers for all network inputs and outputs.
TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
                                           const InferenceEngine::OutputsDataMap& networkOutputs,
                                           const std::shared_ptr<TemplatePlugin::ExecutableNetwork>& executableNetwork) :
    InferRequestInternal(networkInputs, networkOutputs),
    _executableNetwork(executableNetwork) {
    // TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks
    // Fixed: reserve the id atomically. The original read `_requestId` and then
    // incremented it in a separate statement, so two requests created
    // concurrently could observe the same id.
    auto requestID = std::to_string(_executableNetwork->_requestId.fetch_add(1));
    std::string name = _executableNetwork->_name + "_Req" + requestID;
    _profilingTask = { {
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_Preprocess") },
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_Postprocess") },
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_StartPipline") },
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_WaitPipline") },
    } };
    allocateDeviceBuffers();
    allocateInputBlobs();
    allocateOutputBlobs();
}
// ! [infer_request:ctor]
// ! [infer_request:dtor]
// NOTE(review): decrementing here means request ids are reused after a request
// is destroyed, so `_requestId` effectively acts as a live-request counter
// rather than a unique-id generator — confirm this is the intended semantics.
TemplateInferRequest::~TemplateInferRequest() {
_executableNetwork->_requestId--;
}
// ! [infer_request:dtor]
// Placeholder: real plugins would allocate device-side memory here.
void TemplateInferRequest::allocateDeviceBuffers() {
// TODO: allocate device buffers if Template device is a remote one
}
void TemplateInferRequest::allocateInputBlobs() {
for (auto &networkInput : _networkInputs) {
SizeVector dims = networkInput.second->getTensorDesc().getDims();
Precision precision = networkInput.second->getTensorDesc().getPrecision();
Layout input_layout = networkInput.second->getInputData()->getLayout();
Blob::Ptr inputBlob;
Blob::Ptr inputBlobNCHW;
switch (precision) {
case Precision::FP32 :
inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<float>({ precision, dims, input_layout });
if (input_layout == Layout::NHWC) {
inputBlobNCHW = InferenceEngine::make_shared_blob<float>({ precision, dims, Layout::NCHW });
}
break;
case Precision::FP16 :
case Precision::I16 :
inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, input_layout });
if (input_layout == Layout::NHWC) {
inputBlobNCHW = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, Layout::NCHW });
}
break;
case Precision::U8 :
inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<uint8_t>({ precision, dims, input_layout });
if (input_layout == Layout::NHWC) {
inputBlobNCHW = InferenceEngine::make_shared_blob<uint8_t>({ precision, dims, Layout::NCHW });
}
break;
default:
THROW_IE_EXCEPTION << "Unsupported network precision: " << precision
<< precision << "! Supported precisions are: FP32, FP16, I16, U8";
}
// allocate the input blob
inputBlob->allocate();
_inputs[networkInput.first] = inputBlob;
if (inputBlobNCHW != inputBlob) {
inputBlobNCHW->allocate();
}
_inputsNCHW[networkInput.first] = inputBlobNCHW;
}
}
// Allocates host-side output blobs; NHWC outputs additionally get an NCHW
// shadow blob the device writes into before the layout conversion back.
void TemplateInferRequest::allocateOutputBlobs() {
    // Fixed: validate the IO maps up front — the original performed this check
    // only after allocating all output blobs, doing wasted work before throwing.
    if (_networkOutputs.empty() || _networkInputs.empty()) {
        THROW_IE_EXCEPTION << "Internal error: no information about network's output/input";
    }
    for (auto &networkOutput : _networkOutputs) {
        SizeVector dims = networkOutput.second->getTensorDesc().getDims();
        Precision precision = networkOutput.second->getPrecision();
        Blob::Ptr outputBlob;
        Blob::Ptr outputBlobNCHW;
        switch (precision) {
        case Precision::FP32 :
            outputBlobNCHW = outputBlob = InferenceEngine::make_shared_blob<float>({ precision, dims, networkOutput.second->getLayout() });
            if (networkOutput.second->getLayout() == Layout::NHWC) {
                outputBlobNCHW = InferenceEngine::make_shared_blob<float>({ precision, dims, Layout::NCHW });
            }
            break;
        case Precision::FP16 :
            // FP16 payload is kept in 16-bit integer storage; no conversion here.
            outputBlobNCHW = outputBlob = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, networkOutput.second->getLayout() });
            if (networkOutput.second->getLayout() == Layout::NHWC) {
                outputBlobNCHW = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, Layout::NCHW });
            }
            break;
        default:
            THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str << "Unsupported output precision: "
                << precision << "! Supported precisions are: FP32, FP16";
        }
        // allocate the output blob
        outputBlob->allocate();
        _outputs[networkOutput.first] = outputBlob;
        // The NCHW shadow blob only needs its own memory when layouts differ.
        if (outputBlobNCHW != outputBlob) {
            outputBlobNCHW->allocate();
        }
        _outputsNCHW[networkOutput.first] = outputBlobNCHW;
    }
}
// ! [infer_request:infer_impl]
// Synchronous inference: runs the four pipeline stages back to back. Async
// requests schedule the same stages on separate executors instead.
void TemplateInferRequest::InferImpl() {
// TODO: fill with actual list of pipeline stages, which are executed syncronously for sync infer requests
inferPreprocess();
startPipeline();
waitPipeline();
inferPostprocess();
}
// ! [infer_request:infer_impl]
// ! [infer_request:infer_preprocess]
void TemplateInferRequest::inferPreprocess() {
auto prev = Time::now();
// execute input pre-processing.
InferRequestInternal::execDataPreprocessing(_inputs);
for (auto &input : InferRequestInternal::_inputs) {
auto& src = input.second;
auto& dst = _inputsNCHW[input.first];
if (src != dst) {
if (src->getTensorDesc().getPrecision() == dst->getTensorDesc().getPrecision()
&& src->getTensorDesc().getDims() == dst->getTensorDesc().getDims()
&& src->getTensorDesc().getLayout() == dst->getTensorDesc().getLayout()) {
_inputsNCHW[input.first] = input.second;
} else { // Convert Layout to NCHW
InferenceEngine::blob_copy(src, dst);
}
}
}
// TODO: Preprocessing on inputs if needed: work _inputsNCHW
_inputPreprocessTime = static_cast<double>(std::chrono::duration_cast<ns>(Time::now() - prev).count());
}
// ! [infer_request:infer_preprocess]
// Kicks off device execution (template: no-op). A real plugin would also
// record the transfer/execute timings consumed by GetPerformanceCounts.
void TemplateInferRequest::startPipeline() {
IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[StartPipeline])
// TODO: Start pipeline and fill _inputTransferTime, _executeTime, _outputTransferTime
}
// Blocks until device execution completes (template: no-op).
void TemplateInferRequest::waitPipeline() {
    IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[WaitPipeline])
    // TODO: Wait pipeline using driver API or other synronizations methods
    // Fixed: the original timed this stage into `_inputPreprocessTime`,
    // clobbering the value measured in inferPreprocess(). There is no dedicated
    // wait-time counter, so the stage is left unmeasured for now.
}
void TemplateInferRequest::inferPostprocess() {
IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[Postprocess])
auto prev = Time::now();
// TODO: perform post-processing and convert to NHWC layout
_outputPostProcessTime = static_cast<double>(std::chrono::duration_cast<ns>(Time::now() - prev).count());
}
// ! [infer_request:get_performance_counts]
// Publishes the per-stage timings (nanoseconds internally, microseconds in the
// report) gathered during the last inference.
void TemplateInferRequest::GetPerformanceCounts(std::map<std::string, InferenceEngineProfileInfo> &perfMap) const {
    // All entries share the same bookkeeping; only the timings differ.
    auto record = [&perfMap](const char* stage, double cpuNs, double realNs) {
        InferenceEngineProfileInfo entry;
        entry.execution_index = 0;
        entry.status = InferenceEngineProfileInfo::EXECUTED;
        entry.cpu_uSec = cpuNs / 1000;
        entry.realTime_uSec = realNs / 1000;
        perfMap[stage] = entry;
    };
    record("1. input preprocessing", _inputPreprocessTime, _inputPreprocessTime);
    record("2. input transfer to a device", 0, _inputTransferTime);
    record("3. execution time", 0, _executeTime);
    record("4. output transfer from a device", 0, _outputTransferTime);
    record("5. output postprocessing", _outputPostProcessTime, _outputPostProcessTime);
}
// ! [infer_request:get_performance_counts]

View File

@@ -0,0 +1,74 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <string>
#include <vector>
#include <memory>
#include <unordered_map>
#include <ie_common.h>
#include <ie_profiling.hpp>
#include <cpp_interfaces/impl/ie_infer_request_internal.hpp>
#include <cpp_interfaces/impl/ie_executable_network_internal.hpp>
#include <threading/ie_itask_executor.hpp>
#include "template_config.hpp"
namespace TemplatePlugin {
class ExecutableNetwork;
// ! [infer_request:header]
class TemplateInferRequest : public InferenceEngine::InferRequestInternal {
public:
typedef std::shared_ptr<TemplateInferRequest> Ptr;
// Allocates IO blobs and registers profiling tasks for this request.
TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
const InferenceEngine::OutputsDataMap& networkOutputs,
const std::shared_ptr<ExecutableNetwork>& executableNetwork);
~TemplateInferRequest() override;
// Synchronous inference: runs the four pipeline stages below in order.
void InferImpl() override;
void GetPerformanceCounts(std::map<std::string, InferenceEngine::InferenceEngineProfileInfo>& perfMap) const override;
// pipeline methods-stages which are used in async infer request implementation and assigned to particular executor
void inferPreprocess();
void startPipeline();
void waitPipeline();
void inferPostprocess();
// Owning back-reference; also keeps the network alive while requests exist.
std::shared_ptr<ExecutableNetwork> _executableNetwork;
private:
void allocateDeviceBuffers();
void allocateInputBlobs();
void allocateOutputBlobs();
// Indices into _profilingTask, one per pipeline stage.
enum {
Preprocess,
Postprocess,
StartPipeline,
WaitPipeline,
numOfStages
};
std::array<InferenceEngine::ProfilingTask, numOfStages> _profilingTask;
// NCHW-laid-out shadows of the user IO blobs (alias them when layouts match).
InferenceEngine::BlobMap _inputsNCHW;
InferenceEngine::BlobMap _outputsNCHW;
// for performance counts
double _inputPreprocessTime = 0.0;
double _inputTransferTime = 0.0;
double _executeTime = 0.0;
double _outputTransferTime = 0.0;
double _outputPostProcessTime = 0.0;
};
// ! [infer_request:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,193 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <memory>
#include <vector>
#include <sstream>
#include <regex>
#include <string>
#include <map>
#include <ie_metric_helpers.hpp>
#include <details/ie_cnn_network_tools.h>
#include <ie_plugin_config.hpp>
#include <ie_util_internal.hpp>
#include <inference_engine.hpp>
#include <file_utils.h>
#include <cpp_interfaces/base/ie_plugin_base.hpp>
#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
#include <threading/ie_executor_manager.hpp>
#include <graph_tools.hpp>
#include <ie_input_info.hpp>
#include <ie_layouts.h>
#include <hetero/hetero_plugin_config.hpp>
#include <template/template_config.hpp>
#include "template_plugin.hpp"
#include "template_executable_network.hpp"
#include "template_infer_request.hpp"
using namespace TemplatePlugin;
// ! [plugin:ctor]
// Registers the device name the core uses to address this plugin.
Plugin::Plugin() {
// TODO: fill with actual device name
_pluginName = "TEMPLATE";
}
// ! [plugin:ctor]
// ! [plugin:load_exe_network_impl]
// Validates the network's IO precisions against what the Template device
// supports, clones the network (so transformations do not mutate the caller's
// copy), trims constant subgraphs, and builds an ExecutableNetwork.
InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork & network,
                                                                           const ConfigMap &config) {
    auto cfg = Configuration{ config, _cfg };
    InferenceEngine::InputsDataMap networkInputs;
    InferenceEngine::OutputsDataMap networkOutputs;
    network.getInputsInfo(networkInputs);
    network.getOutputsInfo(networkOutputs);
    // TODO: check with precisions supported by Template device
    // Fixed: iterate by const reference — the original copied each
    // {name, DataPtr} pair (a std::string plus a shared_ptr refcount bump).
    for (const auto& networkOutput : networkOutputs) {
        auto output_precision = networkOutput.second->getPrecision();
        if (output_precision != Precision::FP32 &&
            output_precision != Precision::FP16) {
            THROW_IE_EXCEPTION << "Template device supports only FP16 and FP32 output precision.";
        }
    }
    for (const auto& networkInput : networkInputs) {
        auto input_precision = networkInput.second->getTensorDesc().getPrecision();
        if (input_precision != InferenceEngine::Precision::FP32 &&
            input_precision != InferenceEngine::Precision::FP16 &&
            input_precision != InferenceEngine::Precision::I16 &&
            input_precision != InferenceEngine::Precision::U8) {
            THROW_IE_EXCEPTION << "Input image format " << input_precision << " is not supported yet.\n"
                << "Supported formats are: FP32, FP16, I16 and U8.";
        }
    }
    auto clonedNetwork = cloneNet(network);
    ConstTransformer transformator(clonedNetwork.get());
    transformator.fullTrim();
    return std::make_shared<ExecutableNetwork>(*clonedNetwork, cfg);
}
// ! [plugin:load_exe_network_impl]
// ! [plugin:import_network_impl]
// Restores an executable network from an exported model stream and wraps it
// into the public ExecutableNetwork API object.
InferenceEngine::ExecutableNetwork Plugin::ImportNetworkImpl(std::istream& model, const std::map<std::string, std::string>& config) {
    // TODO: Import network from stream is not mandatory functionality;
    // Can just throw an exception and remove the code below
    Configuration exportedCfg;
    // some code below which reads exportedCfg from `model` stream
    // ..
    auto cfg = Configuration(config, exportedCfg);
    auto implementation = std::make_shared<ExecutableNetwork>(model, cfg);
    IExecutableNetwork::Ptr executableNetwork;
    // The deleter releases the wrapper through its own reference counting.
    executableNetwork.reset(new ExecutableNetworkBase<ExecutableNetworkInternal>(implementation),
                            [](InferenceEngine::details::IRelease *releasable) { releasable->Release(); });
    return InferenceEngine::ExecutableNetwork{ executableNetwork };
}
// ! [plugin:import_network_impl]
// ! [plugin:query_network]
void Plugin::QueryNetwork(const ICNNNetwork &network, const ConfigMap& config, QueryNetworkResult &res) const {
Configuration cfg{config, _cfg, false};
res.rc = StatusCode::OK;
if (std::shared_ptr<const ngraph::Function> ngraphFunction = network.getFunction()) {
auto ops = ngraphFunction->get_ordered_ops();
for (auto&& op : ops) {
// TODO: investigate if an op is actually supported by Template device
bool supported = true;
if (supported) {
res.supportedLayersMap.insert({ op->get_friendly_name(), GetName() });
}
}
} else {
THROW_IE_EXCEPTION << "TEMPLATE plugin can query only IR v10 networks";
}
}
// ! [plugin:query_network]
// ! [plugin:add_extension]
// Extension points are not supported by the template; the argument is ignored.
void Plugin::AddExtension(InferenceEngine::IExtensionPtr /*extension*/) {
// TODO: add extensions if plugin supports extensions
}
// ! [plugin:add_extension]
// ! [plugin:set_config]
// Merges new key/value pairs on top of the current plugin configuration;
// unsupported keys cause Configuration's constructor to throw.
void Plugin::SetConfig(const ConfigMap &config) {
_cfg = Configuration{config, _cfg};
}
// ! [plugin:set_config]
// ! [plugin:get_config]
// Delegates single-key lookup to the parsed configuration (throws for
// unsupported keys); `options` is unused by this plugin.
InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & /*options*/) const {
return _cfg.Get(name);
}
// ! [plugin:get_config]
// ! [plugin:get_metric]
// Reports plugin-level metrics: supported metric/config key lists, available
// devices, device name, optimization capabilities, and async-request ranges.
InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const {
    if (METRIC_KEY(SUPPORTED_METRICS) == name) {
        std::vector<std::string> supportedMetrics = {
            METRIC_KEY(AVAILABLE_DEVICES),
            METRIC_KEY(SUPPORTED_METRICS),
            METRIC_KEY(SUPPORTED_CONFIG_KEYS),
            METRIC_KEY(FULL_DEVICE_NAME),
            METRIC_KEY(OPTIMIZATION_CAPABILITIES),
            METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) };
        IE_SET_METRIC_RETURN(SUPPORTED_METRICS, supportedMetrics);
    } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
        std::vector<std::string> configKeys = {  // fixed typo: was `confiKeys`
            CONFIG_KEY(DEVICE_ID),
            CONFIG_KEY(PERF_COUNT) };
        IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
    } else if (METRIC_KEY(AVAILABLE_DEVICES) == name) {
        // TODO: fill list of available devices
        std::vector<std::string> availableDevices = { "" };
        IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, availableDevices);
    } else if (METRIC_KEY(FULL_DEVICE_NAME) == name) {
        // Fixed: the local was called `name`, shadowing the function parameter.
        std::string deviceFullName = "Template Device Full Name";
        IE_SET_METRIC_RETURN(FULL_DEVICE_NAME, deviceFullName);
    } else if (METRIC_KEY(OPTIMIZATION_CAPABILITIES) == name) {
        // TODO: fill actual list of supported capabilities: e.g. Template device supports only FP32
        std::vector<std::string> capabilities = { METRIC_VALUE(FP32), TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION) };
        IE_SET_METRIC_RETURN(OPTIMIZATION_CAPABILITIES, capabilities);
    } else if (METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) == name) {
        // TODO: fill with actual values
        using uint = unsigned int;
        IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint{1}, uint{1}, uint{1}));
    } else {
        THROW_IE_EXCEPTION << "Unsupported device metric: " << name;
    }
}
// ! [plugin:get_metric]
IE_SUPPRESS_DEPRECATED_START
// ! [plugin:create_plugin_engine]
// Exported C entry point through which the IE core instantiates the plugin.
// Exceptions are converted into a GENERAL_ERROR status with a description.
INFERENCE_PLUGIN_API(StatusCode) CreatePluginEngine(IInferencePlugin *&plugin, ResponseDesc *resp) noexcept {
try {
plugin = make_ie_compatible_plugin({2, 1, CI_BUILD_NUMBER, "templatePlugin"},
std::make_shared<Plugin>());
return OK;
}
catch (std::exception &ex) {
return DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
}
}
// ! [plugin:create_plugin_engine]
IE_SUPPRESS_DEPRECATED_END

View File

@@ -0,0 +1,47 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <inference_engine.hpp>
#include <description_buffer.hpp>
#include <cpp_interfaces/impl/ie_plugin_internal.hpp>
#include <memory>
#include <string>
#include <map>
#include <unordered_map>
#include <vector>
#include "template_executable_network.hpp"
#include "template_config.hpp"
//! [plugin:header]
namespace TemplatePlugin {
// Template device plugin: entry point for loading, importing, and querying
// networks, and for plugin-level configuration and metrics.
class Plugin : public InferenceEngine::InferencePluginInternal {
public:
using Ptr = std::shared_ptr<Plugin>;
Plugin();
~Plugin() override = default;
void SetConfig(const std::map<std::string, std::string> &config) override;
void QueryNetwork(const InferenceEngine::ICNNNetwork &network,
const std::map<std::string, std::string>& config,
InferenceEngine::QueryNetworkResult &res) const override;
InferenceEngine::ExecutableNetworkInternal::Ptr
LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork &network,
const std::map<std::string, std::string> &config) override;
void AddExtension(InferenceEngine::IExtensionPtr extension) override;
InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
InferenceEngine::Parameter GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
InferenceEngine::ExecutableNetwork ImportNetworkImpl(std::istream& model, const std::map<std::string, std::string>& config) override;
private:
// Current plugin-level configuration; used as defaults for per-network cfg.
Configuration _cfg;
};
} // namespace TemplatePlugin
//! [plugin:header]

View File

@@ -0,0 +1,18 @@
# Copyright (C) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME TemplateFuncTests)
# Functional test target for the Template plugin, built through the shared
# addIeTargetTest helper; links the shared functional-test library and runs
# cpplint over the sources.
addIeTargetTest(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
DEPENDENCIES
templatePlugin
LINK_LIBRARIES
IE::funcSharedTests
ADD_CPPLINT
LABELS
TEMPLATE
)

View File

@@ -0,0 +1,49 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/config.hpp"
// Behavior-test instantiations for the TEMPLATE plugin: configuration
// handling (correct and incorrect configs, via both config map and API).
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
    InferenceEngine::Precision::FP32,
    InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
    {}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigTests,
    ::testing::Combine(
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values("TEMPLATE"),
        ::testing::ValuesIn(configs)),
    IncorrectConfigTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigAPITests,
    ::testing::Combine(
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values("TEMPLATE"),
        ::testing::ValuesIn(configs)),
    IncorrectConfigAPITests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests,
    ::testing::Combine(
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values("TEMPLATE"),
        ::testing::ValuesIn(configs)),
    CorrectConfigAPITests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CorrectConfigTests,
    ::testing::Combine(
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values("TEMPLATE"),
        ::testing::ValuesIn(configs)),
    // Fixed: this suite is CorrectConfigTests, but the name generator passed
    // was CorrectConfigAPITests::getTestCaseName (copy-paste mismatch).
    CorrectConfigTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,25 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/exec_graph_info.hpp"
// Behavior-test instantiation for the TEMPLATE plugin: execution-graph
// introspection, over FP32/FP16 networks with an empty config.
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, ExecGraphTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
ExecGraphTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,24 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/infer_request.hpp"
// Behavior-test instantiation for the TEMPLATE plugin: basic infer-request
// lifecycle, over FP32/FP16 networks with an empty config.
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
InferRequestTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,25 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/infer_request_callback.hpp"
// Behavior-test instantiation for the TEMPLATE plugin: async completion
// callbacks, over FP32/FP16 networks with an empty config.
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
CallbackTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,25 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/infer_request_config.hpp"
// Behavior-test instantiation for the TEMPLATE plugin: per-request
// configuration handling, over FP32/FP16 networks with an empty config.
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
InferConfigTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,28 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_input.hpp"
// Behavior-test instantiation for the TEMPLATE plugin: input blob handling
// of infer requests, over FP32/FP16 networks with an empty config.
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
InferRequestInputTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,28 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_output.hpp"
// Behavior-test instantiation for the TEMPLATE plugin: output blob handling
// of infer requests, over FP32/FP16 networks with an empty config.
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
InferRequestOutputTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,26 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/set_preprocess.hpp"
// Behavior-test instantiation for the TEMPLATE plugin: input preprocessing
// configuration, over FP32/FP16 networks with an empty config.
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
PreprocessTest::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,13 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <string>
#include "functional_test_utils/skip_tests_config.hpp"
std::vector<std::string> disabledTestPatterns() {
return {
};
}

View File

@@ -0,0 +1,33 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
disable_deprecated_warnings()
set(TARGET_NAME TemplateBehaviorTests)
file(GLOB_RECURSE TEST_INCLUDE
${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
file(GLOB_RECURSE TEST_SRC
${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
)
list(APPEND DEPENDENCIES
templatePlugin)
source_group("src" FILES ${TEST_SRC})
source_group("include" FILES ${TEST_INCLUDE})
add_executable(${TARGET_NAME}
${TEST_SRC}
${TEST_INCLUDE})
target_link_libraries(${TARGET_NAME} PRIVATE IE::IEBehaviorSharedTests)
add_test(NAME ${TARGET_NAME}
COMMAND ${TARGET_NAME})
add_dependencies(${TARGET_NAME} ${DEPENDENCIES})
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})

View File

@@ -0,0 +1,19 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "holders_tests.hpp"
#include <vector>
INSTANTIATE_TEST_CASE_P(ReleaseOrderTests, CPP_HoldersTests, testing::Combine(testing::ValuesIn(std::vector<std::vector<int>> {
// 0 - plugin
// 1 - executable_network
// 2 - infer_request
{0, 1, 2},
{0, 2, 1},
{1, 0, 2},
{1, 2, 0},
{2, 0, 1},
{2, 1, 0},
}), testing::Values("TEMPLATE")));

View File

@@ -0,0 +1,93 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_layers.hpp"
conv_test_params deconv_test_cases[] = {
conv_test_params("TEMPLATE", conv_case),
};
conv_test_params conv_test_cases[] = {
conv_test_params("TEMPLATE", conv_dw_case),
};
INSTANTIATE_TEST_CASE_P(BehaviorTest, DeconvolutionLayerTest,
::testing::ValuesIn(deconv_test_cases),
getTestName<conv_test_params>);
INSTANTIATE_TEST_CASE_P(BehaviorTest, ConvolutionLayerTest,
::testing::ValuesIn(conv_test_cases),
getTestName<conv_test_params>);
pool_test_params roi_pool_test_cases[] = {
pool_test_params("TEMPLATE", "FP32", pool_case),
};
INSTANTIATE_TEST_CASE_P(BehaviorTest, ROIPoolingLayerTest,
::testing::ValuesIn(roi_pool_test_cases),
getTestName<pool_test_params>);
activ_test_params activ_test_cases[] = {
activ_test_params("TEMPLATE", "FP16", activation_case),
};
activ_test_params clamp_test_cases[] = {
activ_test_params("TEMPLATE", "FP16", clamp_case),
};
INSTANTIATE_TEST_CASE_P(BehaviorTest, ActivationLayerTest,
::testing::ValuesIn(activ_test_cases),
getTestName<activ_test_params>);
INSTANTIATE_TEST_CASE_P(BehaviorTest, ReLULayerTest,
::testing::Values(activ_test_params("TEMPLATE", "FP32", activation_case)),
getTestName<activ_test_params>);
INSTANTIATE_TEST_CASE_P(BehaviorTest, ClampLayerTest,
::testing::ValuesIn(clamp_test_cases),
getTestName<activ_test_params>);
norm_test_params norm_test_cases[] = {
norm_test_params("TEMPLATE", "FP32", norm_case),
};
INSTANTIATE_TEST_CASE_P(BehaviorTest, NormalizeLayerTest,
::testing::ValuesIn(norm_test_cases),
getTestName<norm_test_params>);
scale_test_params scale_test_cases[] = {
scale_test_params("TEMPLATE", "FP32", scale_case),
};
INSTANTIATE_TEST_CASE_P(BehaviorTest, ScalingLayerTest,
::testing::ValuesIn(scale_test_cases),
getTestName<scale_test_params>);
INSTANTIATE_TEST_CASE_P(BehaviorTest, ShapingLayerTest,
::testing::Values(shaping_test_params("TEMPLATE", "FP32", shape_case)),
getTestName<shaping_test_params>);
INSTANTIATE_TEST_CASE_P(BehaviorTest, ElementWiseLayerTest,
::testing::Values(element_test_params("TEMPLATE", "FP32", shape_case)),
getTestName<element_test_params>);
object_test_params object_test_cases[] = {
object_test_params("TEMPLATE", "FP32", object_case),
};
INSTANTIATE_TEST_CASE_P(BehaviorTest, ObjectDetectionLayerTest,
::testing::ValuesIn(object_test_cases),
getTestName<object_test_params>);
memory_test_params memory_test_cases[] = {
memory_test_params("TEMPLATE", "FP32", memory_case),
};
// FIXME
// #if (defined INSTANTIATE_TESTS)
// INSTANTIATE_TEST_CASE_P(BehaviorTest, MemoryLayerTest,
// ::testing::ValuesIn(memory_test_cases),
// getTestName<memory_test_params>);
// #endif

View File

@@ -0,0 +1,36 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_layout.hpp"
layout_test_params power_test_cases[] = {
layout_test_params("TEMPLATE", "FP16", Layout::NCHW, power_params({ { 1, 3, 16, 16 } }, 1, 2, 2)),
};
layout_test_params conv_test_cases_1[] = {
layout_test_params("TEMPLATE", "FP16", Layout::NCHW, power_params({ { 1, 3, 16, 16 } }, 1, 2, 2)),
};
layout_test_params power_neg_test_cases[] = {
// Graph Error Description: Error: Tensor size should not be 0.
layout_test_params("TEMPLATE", "FP16", Layout::NC, power_params({ { 1, 3 } }, 1, 2, 2)),
layout_test_params("TEMPLATE", "FP16", Layout::CHW, power_params({ { 3, 32, 16 } }, 1, 2, 2)),
};
layout_test_params conv_neg_test_cases[] = {
// LoadNetwork hangs if Network has 1 dims format: CVS-8508
layout_test_params("TEMPLATE", "FP16", Layout::C, power_params({ { 3 } }, 1, 2, 2)),
layout_test_params("TEMPLATE", "FP16", Layout::NC, power_params({ { 1, 3 } }, 1, 2, 2)),
layout_test_params("TEMPLATE", "FP16", Layout::CHW, power_params({ { 3, 32, 16 } }, 1, 2, 2)),
};
INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanLoadPower,
::testing::ValuesIn(power_test_cases), getTestName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanLoadConv,
::testing::ValuesIn(conv_test_cases_1), getTestName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanNotLoadPower,
::testing::ValuesIn(power_neg_test_cases), getTestName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanNotLoadConv,
::testing::ValuesIn(conv_neg_test_cases), getTestName);

View File

@@ -0,0 +1,14 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_unsupported.hpp"
#include "template_test_data.hpp"
// INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestAllUnsupported, ValuesIn(allUnSupportedValues),
// getTestCaseName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestTypeUnsupported, ValuesIn(typeUnSupportedValues),
getTestCaseName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestBatchUnsupported, ValuesIn(batchUnSupportedValues),
getTestCaseName);

View File

@@ -0,0 +1,8 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_version.hpp"
#include "template_test_data.hpp"
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestVersion, ValuesIn(add_element_into_array(supportedValues, BEH_HETERO)), getTestCaseName);

View File

@@ -0,0 +1,12 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin.h"
#include "behavior_test_plugins.hpp"
#include "template_test_data.hpp"
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInput, ValuesIn(allInputSupportedValues),
getTestCaseName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestOutput, ValuesIn(allOutputSupportedValues),
getOutputTestCaseName);

View File

@@ -0,0 +1,71 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior_test_plugin.h"
// correct params
#define BEH_HETERO BehTestParams("HETERO", \
FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.model_xml_str, \
FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.weights_blob, \
Precision::FP32)
#define BEH_TEMPLATE BehTestParams("TEMPLATE", \
FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.model_xml_str, \
FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.weights_blob, \
Precision::FP32)
// all parameters are unsupported - reversed
#define BEH_US_ALL_TEMPLATE BehTestParams("TEMPLATE", \
FuncTestUtils::TestModel::convReluNormPoolFcModelQ78.model_xml_str, \
FuncTestUtils::TestModel::convReluNormPoolFcModelQ78.weights_blob, \
Precision::Q78)
const BehTestParams supportedValues[] = {
BEH_TEMPLATE,
};
const BehTestParams requestsSupportedValues[] = {
BEH_TEMPLATE,
};
const BehTestParams allInputSupportedValues[] = {
BEH_TEMPLATE,
BEH_TEMPLATE.withIn(Precision::FP16),
BEH_TEMPLATE.withIn(Precision::U8),
BEH_TEMPLATE.withIn(Precision::I16),
};
const BehTestParams allOutputSupportedValues[] = {
BEH_TEMPLATE,
BEH_TEMPLATE.withOut(Precision::FP16),
};
const BehTestParams typeUnSupportedValues[] = {
BEH_TEMPLATE.withIn(Precision::Q78),
BEH_TEMPLATE.withIn(Precision::U16),
BEH_TEMPLATE.withIn(Precision::I8),
BEH_TEMPLATE.withIn(Precision::I32),
};
const BehTestParams batchUnSupportedValues[] = {
BEH_TEMPLATE.withBatchSize(0),
};
const BehTestParams allUnSupportedValues[] = {
BEH_US_ALL_TEMPLATE,
};
const std::vector<BehTestParams> withCorrectConfValuesNetworkOnly = {
BEH_TEMPLATE.withConfig({ { KEY_DEVICE_ID, "0" } }),
};
const BehTestParams withIncorrectConfValues[] = {
BEH_TEMPLATE.withConfig({ { KEY_CPU_BIND_THREAD, "ON" } }),
};
const std::vector<BehTestParams> withCorrectConfValues = {
BEH_TEMPLATE.withConfig({}),
};

View File

@@ -0,0 +1,13 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <string>
#include "functional_test_utils/skip_tests_config.hpp"
std::vector<std::string> disabledTestPatterns() {
return {
};
}

View File

@@ -0,0 +1,21 @@
# Copyright (C) 2019 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
disable_deprecated_warnings()
# [cmake:functional_tests]
set(TARGET_NAME TemplateFunctionalTests)
file(GLOB_RECURSE TEST_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
add_executable(${TARGET_NAME} ${TEST_SOURCES})
# link a library with common Inference Engine tests
target_link_libraries(${TARGET_NAME} PRIVATE IE::IESharedTests)
# make sure plugin is built before tests are run
add_dependencies(${TARGET_NAME} templatePlugin)
# [cmake:functional_tests]
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})

View File

@@ -0,0 +1,205 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <string>
#include <vector>
#include "ie_class.hpp"
//
// IE Class Common tests with <pluginName, deviceName params>
//
INSTANTIATE_TEST_CASE_P(
nightly_IEClassBasicTestP, IEClassBasicTestP,
::testing::Values(std::make_pair("templatePlugin", "TEMPLATE")));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassNetworkTestP, IEClassNetworkTestP,
::testing::Values("TEMPLATE"));
//
// IE Class GetMetric
//
INSTANTIATE_TEST_CASE_P(
nightly_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS,
::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES,
::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME,
::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES,
::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS,
::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported,
::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported,
::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassGetAvailableDevices, IEClassGetAvailableDevices,
::testing::Values("TEMPLATE"));
//
// IE Class SetConfig
//
using IEClassSetConfigTestHETERO = IEClassNetworkTest;
TEST_F(IEClassSetConfigTestHETERO, nightly_SetConfigNoThrow) {
{
Core ie;
Parameter p;
ASSERT_NO_THROW(ie.SetConfig({ { HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(YES) } }, "HETERO"));
ASSERT_NO_THROW(p = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)));
bool dump = p.as<bool>();
ASSERT_TRUE(dump);
}
{
Core ie;
Parameter p;
ASSERT_NO_THROW(ie.SetConfig({ { HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(NO) } }, "HETERO"));
ASSERT_NO_THROW(p = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)));
bool dump = p.as<bool>();
ASSERT_FALSE(dump);
}
{
Core ie;
Parameter p;
ASSERT_NO_THROW(ie.GetMetric("HETERO", METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
ASSERT_NO_THROW(ie.SetConfig({ { HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(YES) } }, "HETERO"));
ASSERT_NO_THROW(p = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)));
bool dump = p.as<bool>();
ASSERT_TRUE(dump);
}
}
//
// IE Class GetConfig
//
INSTANTIATE_TEST_CASE_P(
nightly_IEClassGetConfigTest, IEClassGetConfigTest,
::testing::Values("TEMPLATE"));
using IEClassGetConfigTestTEMPLATE = IEClassNetworkTest;
TEST_F(IEClassGetConfigTestTEMPLATE, nightly_GetConfigNoThrow) {
Core ie;
Parameter p;
std::string deviceName = "TEMPLATE";
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
std::vector<std::string> configValues = p;
for (auto && confKey : configValues) {
if (CONFIG_KEY(DEVICE_ID) == confKey) {
std::string defaultDeviceID = ie.GetConfig(deviceName, CONFIG_KEY(DEVICE_ID));
std::cout << CONFIG_KEY(DEVICE_ID) << " : " << defaultDeviceID << std::endl;
} else if (CONFIG_KEY(PERF_COUNT) == confKey) {
bool defaultPerfCount = ie.GetConfig(deviceName, CONFIG_KEY(PERF_COUNT));
std::cout << CONFIG_KEY(PERF_COUNT) << " : " << defaultPerfCount << std::endl;
} else if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) == confKey) {
bool defaultExclusive = ie.GetConfig(deviceName, CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS));
std::cout << CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) << " : " << defaultExclusive << std::endl;
}
}
}
//
// Executable Network GetMetric
//
INSTANTIATE_TEST_CASE_P(
nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values("TEMPLATE", "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
::testing::Values("TEMPLATE", "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
::testing::Values("TEMPLATE", "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
::testing::Values("TEMPLATE", "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, IEClassExecutableNetworkGetMetricTest,
::testing::Values("TEMPLATE", "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
//
// Executable Network GetConfig / SetConfig
//
INSTANTIATE_TEST_CASE_P(
nightly_IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest,
::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest,
::testing::Values("TEMPLATE"));
// IE Class Query network
INSTANTIATE_TEST_CASE_P(
nightly_IEClassQueryNetworkTest, IEClassQueryNetworkTest,
::testing::Values("TEMPLATE"));
// IE Class Load network
INSTANTIATE_TEST_CASE_P(
nightly_IEClassLoadNetworkTest, IEClassLoadNetworkTest,
::testing::Values("TEMPLATE"));
//
// Hetero Executable Network GetMetric
//
#ifdef ENABLE_MKL_DNN
INSTANTIATE_TEST_CASE_P(
nightly_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME,
::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
nightly_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK,
::testing::Values("TEMPLATE"));
#endif // ENABLE_MKL_DNN

View File

@@ -0,0 +1,17 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_model_repo.hpp"
std::string get_model_repo() {
return ":";
}
const char* TestDataHelpers::getModelPathNonFatal() noexcept {
return TestDataHelpers::getModelPathNonFatalDefault();
}
std::string TestDataHelpers::get_data_path() {
return TestDataHelpers::get_data_path_default();
}

View File

@@ -1,7 +1,7 @@
# Get Started with OpenVINO™ Deep Learning Deployment Toolkit (DLDT) on Linux*
# Get Started with OpenVINO™ Toolkit on Linux*
This guide provides you with the information that will help you to start using
the DLDT on Linux\*. With this guide, you will learn how to:
the OpenVINO™ Toolkit on Linux\*. With this guide, you will learn how to:
1. [Configure the Model Optimizer](#configure-the-model-optimizer)
2. [Prepare a model for sample inference](#prepare-a-model-for-sample-inference)
@@ -10,13 +10,13 @@ the DLDT on Linux\*. With this guide, you will learn how to:
3. [Run the Image Classification Sample Application with the model](#run-the-image-classification-sample-application)
## Prerequisites
1. This guide assumes that you have already cloned the `dldt` repo and
1. This guide assumes that you have already cloned the `openvino` repo and
successfully built the Inference Engine and Samples using the
[build instructions](inference-engine/README.md).
2. The original structure of the repository directories remains unchanged.
> **NOTE**: Below, the directory to which the `dldt` repository is cloned is
referred to as `<DLDT_DIR>`.
> **NOTE**: Below, the directory to which the `openvino` repository is cloned is
referred to as `<OPENVINO_DIR>`.
## Configure the Model Optimizer
@@ -53,7 +53,7 @@ If you see error messages, check for any missing dependencies.
1. Go to the Model Optimizer prerequisites directory:
```sh
cd <DLDT_DIR>/model_optimizer/install_prerequisites
cd <OPENVINO_DIR>/model_optimizer/install_prerequisites
```
2. Run the script to configure the Model Optimizer for Caffe,
TensorFlow, MXNet, Kaldi\*, and ONNX:
@@ -68,7 +68,7 @@ Configure individual frameworks separately **ONLY** if you did not select
1. Go to the Model Optimizer prerequisites directory:
```sh
cd <DLDT_DIR>/model_optimizer/install_prerequisites
cd <OPENVINO_DIR>/model_optimizer/install_prerequisites
```
2. Run the script for your model framework. You can run more than one script:
@@ -162,20 +162,20 @@ as `<models_dir>` below) with the Model Downloader:
**For CPU (FP32):**
```sh
python3 <DLDT_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP32 --output_dir <ir_dir>
python3 <OPENVINO_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP32 --output_dir <ir_dir>
```
**For GPU and MYRIAD (FP16):**
```sh
python3 <DLDT_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP16 --output_dir <ir_dir>
python3 <OPENVINO_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP16 --output_dir <ir_dir>
```
After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `<ir_dir>` directory.
3. Copy the `squeezenet1.1.labels` file from the `<DLDT_DIR>/inference-engine/samples/sample_data/`
3. Copy the `squeezenet1.1.labels` file from the `<OPENVINO_DIR>/scripts/demo/`
folder to the model IR directory. This file contains the classes that ImageNet
uses so that the inference results show text instead of classification numbers:
```sh
cp <DLDT_DIR>/inference-engine/samples/sample_data/squeezenet1.1.labels <ir_dir>
cp <OPENVINO_DIR>/scripts/demo/squeezenet1.1.labels <ir_dir>
```
Now you are ready to run the Image Classification Sample Application.
@@ -184,28 +184,28 @@ Now you are ready to run the Image Classification Sample Application.
The Inference Engine sample applications are automatically compiled when you
built the Inference Engine using the [build instructions](inference-engine/README.md).
The binary files are located in the `<DLDT_DIR>/inference-engine/bin/intel64/Release`
The binary files are located in the `<OPENVINO_DIR>/inference-engine/bin/intel64/Release`
directory.
To run the Image Classification sample application with an input image on the prepared IR:
1. Go to the samples build directory:
```sh
cd <DLDT_DIR>/inference-engine/bin/intel64/Release
cd <OPENVINO_DIR>/inference-engine/bin/intel64/Release
2. Run the sample executable with specifying the `car.png` file from the
`<DLDT_DIR>/inference-engine/samples/sample_data/` directory as an input
`<OPENVINO_DIR>/scripts/demo/` directory as an input
image, the IR of your model and a plugin for a hardware device to perform
inference on:
**For CPU:**
```sh
./classification_sample -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d CPU
./classification_sample -i <OPENVINO_DIR>/scripts/demo/car.png -m <ir_dir>/squeezenet1.1.xml -d CPU
```
**For GPU:**
```sh
./classification_sample -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d GPU
./classification_sample -i <OPENVINO_DIR>/scripts/demo/car.png -m <ir_dir>/squeezenet1.1.xml -d GPU
```
**For MYRIAD:**
@@ -214,14 +214,14 @@ To run the Image Classification sample application with an input image on the pr
Stick or Intel® Neural Compute Stick 2) with the MYRIAD plugin requires
performing [additional hardware configuration steps](inference-engine/README.md#optional-additional-installation-steps-for-the-intel-movidius-neural-compute-stick-and-neural-compute-stick-2).
```sh
./classification_sample -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d MYRIAD
./classification_sample -i <OPENVINO_DIR>/scripts/demo/car.png -m <ir_dir>/squeezenet1.1.xml -d MYRIAD
```
When the Sample Application completes, you will have the label and confidence for the top-10 categories printed on the screen. Below is a sample output with inference results on CPU:
```sh
Top 10 results:
Image /home/user/dldt/inference-engine/samples/sample_data/car.png
Image /home/user/openvino/scripts/demo/car.png
classid probability label
------- ----------- -----

View File

@@ -25,12 +25,6 @@ if (ENABLE_FUZZING)
enable_fuzzing()
endif()
find_package(ngraph QUIET)
if(NOT ngraph_FOUND)
set(ngraph_DIR ${CMAKE_BINARY_DIR}/ngraph)
endif()
find_package(ngraph REQUIRED)
find_package(Threads REQUIRED)
unset(IEDeveloperPackageTargets CACHE)
@@ -60,7 +54,7 @@ function(ie_developer_export)
APPEND FILE "${CMAKE_BINARY_DIR}/targets_developer.cmake")
# Custom target to build only Inference Engine Developer Package targets
add_custom_target(ie_dev_targets ALL DEPENDS ${IEDeveloperPackageTargets})
add_custom_target(ie_dev_targets ALL DEPENDS ${IEDeveloperPackageTargets} gflags)
endfunction()
add_subdirectory(thirdparty)
@@ -74,9 +68,24 @@ endif()
add_subdirectory(tools)
function(ie_build_samples)
# samples should be build with the same flags as from OpenVINO package,
# so unset all flags
foreach(var CMAKE_CXX_FLAGS CMAKE_C_FLAGS CMAKE_CXX_STANDARD
CMAKE_EXE_LINKER_FLAGS CMAKE_POLICY_DEFAULT_CMP0063
CMAKE_CXX_VISIBILITY_PRESET CMAKE_C_VISIBILITY_PRESET
CMAKE_VISIBILITY_INLINES_HIDDEN CMAKE_POSITION_INDEPENDENT_CODE
THREADS_PREFER_PTHREAD_FLAG X86_64 X86 ARM AARCH64 LINUX
MINGW64 CMAKE_BUILD_TYPE CMAKE_MACOSX_RPATH)
unset(${var})
endforeach()
include(sanitizer)
add_subdirectory(samples)
endfunction()
# gflags and format_reader targets are kept inside of samples directory and
# they must be built even if samples build is disabled (required for tests and tools).
add_subdirectory(samples)
ie_build_samples()
file(GLOB_RECURSE SAMPLES_SOURCES samples/*.cpp samples/*.hpp samples/*.h)
add_cpplint_target(sample_cpplint
@@ -109,7 +118,7 @@ if(UNIX)
PATTERN *.bat EXCLUDE
PATTERN speech_libs_and_demos EXCLUDE)
elseif(WIN32)
install(DIRECTORY samples
install(DIRECTORY samples/
DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp
COMPONENT cpp_samples
USE_SOURCE_PERMISSIONS
@@ -150,6 +159,17 @@ if(ENABLE_PYTHON)
COMPONENT python_samples)
endif()
# install speech demo files
if(SPEECH_LIBS_AND_DEMOS)
ie_cpack_add_component(speech_demo_files REQUIRED)
install(DIRECTORY ${TEMP}/deployment_tools
${TEMP}/data_processing
DESTINATION .
COMPONENT speech_demo_files)
endif()
#
# Developer package
#
@@ -173,7 +193,7 @@ configure_file(
# Coverage
#
if(COVERAGE)
if(ENABLE_COVERAGE)
include(coverage_ie)
endif()
@@ -197,6 +217,7 @@ function(register_extra_plugins)
# automatically import plugins from the 'plugins' folder
file(GLOB local_extra_plugins "plugins/*")
list(APPEND local_extra_plugins "${OpenVINO_MAIN_SOURCE_DIR}/docs/template_plugin")
foreach(plugin_path IN LISTS IE_EXTRA_PLUGINS local_extra_plugins)
get_filename_component(plugin_dir "${plugin_path}" NAME)

View File

@@ -25,9 +25,7 @@ endif()
if(DEFINED INTEL_VTUNE_DIR)
message(STATUS "INTEL_VTUNE_DIR = ${INTEL_VTUNE_DIR}")
find_path(ITT_INCLUDE_DIR
FILES
ittnotify.h
find_path(ITT_INCLUDE_DIR ittnotify.h
PATHS "${INTEL_VTUNE_DIR}/include/")
find_library(ITT_LIB

View File

@@ -118,7 +118,6 @@ function(addIeTarget)
if (ARG_ADD_CPPLINT)
# code style
add_cpplint_target(${ARG_NAME}_cpplint FOR_TARGETS ${ARG_NAME})
add_clang_format_target(${ARG_NAME}_clang_format FOR_TARGETS ${ARG_NAME})
endif()
if (ARG_DEVELOPER_PACKAGE)
# developer package

View File

@@ -22,6 +22,8 @@ endif()
if(ENABLE_CLANG_FORMAT)
add_custom_target(clang_format_check_all)
add_custom_target(clang_format_fix_all)
set_target_properties(clang_format_check_all clang_format_fix_all
PROPERTIES FOLDER clang_format)
set(CLANG_FORMAT_ALL_OUTPUT_FILES "" CACHE INTERNAL "All clang-format output files")
endif()
@@ -35,10 +37,6 @@ function(add_clang_format_target TARGET_NAME)
set(multiValueArgs "FOR_TARGETS" "FOR_SOURCES" "EXCLUDE_PATTERNS")
cmake_parse_arguments(CLANG_FORMAT "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
if(CLANG_FORMAT_ALL)
set(all ALL)
endif()
foreach(target IN LISTS CLANG_FORMAT_FOR_TARGETS)
get_target_property(target_sources "${target}" SOURCES)
list(APPEND CLANG_FORMAT_FOR_SOURCES ${target_sources})
@@ -95,7 +93,6 @@ function(add_clang_format_target TARGET_NAME)
"All clang-format output files")
add_custom_target(${TARGET_NAME}
${all}
DEPENDS ${all_output_files}
COMMENT "[clang-format] ${TARGET_NAME}")
@@ -113,6 +110,9 @@ function(add_clang_format_target TARGET_NAME)
"[clang-format] ${TARGET_NAME}_fix"
VERBATIM)
set_target_properties(${TARGET_NAME} ${TARGET_NAME}_fix
PROPERTIES FOLDER clang_format)
# if(CLANG_FORMAT_FOR_TARGETS)
# foreach(target IN LISTS CLANG_FORMAT_FOR_TARGETS)
# add_dependencies(${target} ${TARGET_NAME})

View File

@@ -4,7 +4,7 @@
if(DEFINED IE_MAIN_SOURCE_DIR AND TARGET inference_engine)
set(InferenceEngine_LIBRARIES inference_engine_legacy inference_engine
inference_engine_c_api inference_engine_nn_builder)
inference_engine_c_api)
else()
include("${CMAKE_CURRENT_LIST_DIR}/targets.cmake")
if(NOT WIN32)
@@ -30,5 +30,5 @@ else()
get_target_property(InferenceEngine_INCLUDE_DIRS IE::inference_engine INTERFACE_INCLUDE_DIRECTORIES)
set(InferenceEngine_LIBRARIES IE::inference_engine_legacy IE::inference_engine
IE::inference_engine_c_api IE::inference_engine_nn_builder)
IE::inference_engine_c_api)
endif()

View File

@@ -13,14 +13,17 @@ ie_coverage_capture(INFO_FILE "dldt"
# Generate reports
ie_coverage_extract(INPUT "dldt" OUTPUT "inference_engine_with_builders"
ie_coverage_extract(INPUT "dldt" OUTPUT "inference_engine"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/inference_engine/*"
"${DLDT_COVERAGE_BASE_DIRECTORY}/plugin_api/*")
ie_coverage_remove(INPUT "inference_engine_with_builders" OUTPUT "inference_engine"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/inference_engine/builders/*")
ie_coverage_genhtml(INFO_FILE "inference_engine"
PREFIX "${DLDT_COVERAGE_BASE_DIRECTORY}")
ie_coverage_extract(INPUT "dldt" OUTPUT "inference_engine_ir_reader"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/readers/*")
ie_coverage_genhtml(INFO_FILE "inference_engine_ir_reader"
PREFIX "${DLDT_COVERAGE_BASE_DIRECTORY}")
ie_coverage_extract(INPUT "dldt" OUTPUT "inference_engine_legacy"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/legacy_api/*")
ie_coverage_genhtml(INFO_FILE "inference_engine_legacy"

View File

@@ -3,16 +3,17 @@
#
if(ENABLE_CPPLINT)
find_host_package(PythonInterp)
find_package(Python3 COMPONENTS Interpreter)
if(NOT PYTHONINTERP_FOUND)
message(WARNING "Python interpreter was not found (required for cpplint check)")
if(NOT Python3_Interpreter_FOUND)
message(WARNING "Python3 interpreter was not found (required for cpplint check)")
set(ENABLE_CPPLINT OFF)
endif()
endif()
if(ENABLE_CPPLINT)
add_custom_target(cpplint_all ALL)
set_target_properties(cpplint_all PROPERTIES FOLDER cpplint)
set(CPPLINT_ALL_OUTPUT_FILES "" CACHE INTERNAL "All cpplint output files")
endif()
@@ -93,6 +94,7 @@ function(add_cpplint_target TARGET_NAME)
add_custom_target(${TARGET_NAME} ALL
DEPENDS ${all_output_files}
COMMENT "[cpplint] ${TARGET_NAME}")
set_target_properties(${TARGET_NAME} PROPERTIES FOLDER cpplint)
if(CPPLINT_FOR_TARGETS)
foreach(target IN LISTS CPPLINT_FOR_TARGETS)
@@ -168,4 +170,5 @@ function(add_cpplint_report_target)
add_custom_target(cpplint_report
DEPENDS "${html_output_file}"
COMMENT "[cpplint] Generate report")
set_target_properties(cpplint_report PROPERTIES FOLDER cpplint)
endfunction()

View File

@@ -47,7 +47,7 @@ file(WRITE "${OUTPUT_FILE}" "${formatted_output}")
if(NOT SKIP_RETURN_CODE)
# Pass through the cpplint return code
if(NOT result EQUAL 0)
if(NOT result EQUAL "0")
# Display the cpplint output to console (to parse it form IDE)
message("${output}")
message(FATAL_ERROR "[cpplint] Code style check failed for : ${INPUT_FILE}")

View File

@@ -4,6 +4,8 @@
cmake_policy(SET CMP0054 NEW)
include(models)
#we have number of dependencies stored on ftp
include(dependency_solver)
@@ -13,6 +15,23 @@ endif()
include(ExternalProject)
if (ENABLE_SAME_BRANCH_FOR_MODELS)
branchName(MODELS_BRANCH)
else()
set(MODELS_BRANCH "master")
endif()
if (ENABLE_DATA)
add_models_repo(${ENABLE_DATA} "data:https://github.com/openvinotoolkit/testdata.git")
set(MODELS_PATH "${TEMP}/models/src/data")
set(DATA_PATH "${MODELS_PATH}")
endif()
message(STATUS "MODELS_PATH=" ${MODELS_PATH})
fetch_models_and_validation_set()
include(linux_name)
if(COMMAND get_linux_name)
get_linux_name(LINUX_OS_NAME)
@@ -25,25 +44,58 @@ if (ENABLE_MYRIAD)
endif()
## enable cblas_gemm from OpenBLAS package
if (GEMM STREQUAL "OPENBLAS")
if (ENABLE_MKL_DNN AND GEMM STREQUAL "OPENBLAS")
if(AARCH64)
if(DEFINED ENV{THIRDPARTY_SERVER_PATH})
set(IE_PATH_TO_DEPS "$ENV{THIRDPARTY_SERVER_PATH}")
elseif(DEFINED THIRDPARTY_SERVER_PATH)
set(IE_PATH_TO_DEPS "${THIRDPARTY_SERVER_PATH}")
else()
message(WARNING "OpenBLAS is not found!")
endif()
if(DEFINED IE_PATH_TO_DEPS)
reset_deps_cache(OpenBLAS_DIR)
RESOLVE_DEPENDENCY(OpenBLAS
ARCHIVE_LIN "keembay/openblas_0.3.7_yocto_kmb.tar.xz"
TARGET_PATH "${TEMP}/openblas_0.3.7_yocto_kmb"
ENVIRONMENT "OpenBLAS_DIR")
update_deps_cache(OpenBLAS_DIR "${OpenBLAS}/lib/cmake/openblas" "Path to OpenBLAS package folder")
find_package(OpenBLAS QUIET)
if(OpenBLAS_FOUND)
set(BLAS_FOUND TRUE)
set(BLAS_INCLUDE_DIRS ${OpenBLAS_INCLUDE_DIRS})
set(BLAS_LIBRARIES ${OpenBLAS_LIBRARIES})
endif()
unset(IE_PATH_TO_DEPS)
endif()
endif()
if(NOT BLAS_LIBRARIES OR NOT BLAS_INCLUDE_DIRS)
find_package(BLAS REQUIRED)
if(BLAS_FOUND)
find_path(BLAS_INCLUDE_DIRS cblas.h)
else()
message(ERROR "OpenBLAS not found: install OpenBLAS or set -DBLAS_INCLUDE_DIRS=<path to dir with cblas.h> and -DBLAS_LIBRARIES=<path to libopenblas.so or openblas.lib>")
endif()
endif()
debug_message(STATUS "openblas=" ${BLAS_LIBRARIES})
endif ()
#MKL-ml package
## MKL-ML package
if (GEMM STREQUAL "MKL")
if(NOT MKLROOT)
message(FATAL_ERROR "MKLROOT not found: install MKL and set -DMKLROOT=<path_to_MKL>")
endif()
set(MKL ${MKLROOT})
debug_message(STATUS "mkl_ml=" ${MKLROOT})
if(NOT MKLROOT)
message(FATAL_ERROR "MKLROOT not found: install MKL and set -DMKLROOT=<path_to_MKL>")
endif()
set(MKL ${MKLROOT})
debug_message(STATUS "mkl_ml=" ${MKLROOT})
endif ()
## Intel OMP package
@@ -83,24 +135,29 @@ if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
if (WIN32 AND X86_64)
#TODO: add target_path to be platform specific as well, to avoid following if
RESOLVE_DEPENDENCY(TBB
ARCHIVE_WIN "tbb2020_20200214_win.zip"
ARCHIVE_WIN "tbb2020_20200415_win.zip"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
elseif(ANDROID) # Should be before LINUX due LINUX is detected as well
RESOLVE_DEPENDENCY(TBB
ARCHIVE_ANDROID "tbb2020_20191023_android.tgz"
ARCHIVE_ANDROID "tbb2020_20200404_android.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
elseif(LINUX AND X86_64)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_LIN "tbb2020_20200327_lin_strip.tgz"
ARCHIVE_LIN "tbb2020_20200415_lin_strip.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT")
elseif(LINUX AND AARCH64)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_LIN "keembay/tbb2020_38404_kmb.tgz"
TARGET_PATH "${TEMP}/tbb_yocto"
ENVIRONMENT "TBBROOT")
elseif(APPLE AND X86_64)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_MAC "tbb2020_20191023_mac.tgz"
ARCHIVE_MAC "tbb2020_20200404_mac.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
@@ -131,35 +188,64 @@ if (ENABLE_OPENCV)
set(OPENCV_VERSION "4.3.0")
set(OPENCV_BUILD "060")
if (WIN32 AND X86_64)
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_WIN "opencv_${OPENCV_VERSION}-${OPENCV_BUILD}.txz"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}/opencv"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
elseif(APPLE AND X86_64)
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_MAC "opencv_${OPENCV_VERSION}-${OPENCV_BUILD}_osx.txz"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}_osx/opencv"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
elseif(LINUX)
if (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "armv7l")
set(OPENCV_SUFFIX "debian9arm")
elseif (${LINUX_OS_NAME} STREQUAL "CentOS 7" OR CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.9")
set(OPENCV_SUFFIX "centos7")
elseif (${LINUX_OS_NAME} STREQUAL "Ubuntu 16.04")
set(OPENCV_SUFFIX "ubuntu16")
elseif (${LINUX_OS_NAME} STREQUAL "Ubuntu 18.04")
set(OPENCV_SUFFIX "ubuntu18")
set(OPENCV_BUILD_YOCTO "073")
if (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64")
if(DEFINED ENV{THIRDPARTY_SERVER_PATH})
set(IE_PATH_TO_DEPS "$ENV{THIRDPARTY_SERVER_PATH}")
elseif(DEFINED THIRDPARTY_SERVER_PATH)
set(IE_PATH_TO_DEPS "${THIRDPARTY_SERVER_PATH}")
else()
message(FATAL_ERROR "OpenCV is not available on current platform")
message(WARNING "OpenCV is not found!")
endif()
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_LIN "opencv_${OPENCV_VERSION}-${OPENCV_BUILD}_${OPENCV_SUFFIX}.txz"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}_${OPENCV_SUFFIX}/opencv"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
if(DEFINED IE_PATH_TO_DEPS)
set(OPENCV_SUFFIX "yocto_kmb")
set(OPENCV_BUILD "${OPENCV_BUILD_YOCTO}")
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_LIN "opencv/opencv_${OPENCV_VERSION}-${OPENCV_BUILD}_${OPENCV_SUFFIX}.txz"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}_${OPENCV_SUFFIX}/opencv"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
unset(IE_PATH_TO_DEPS)
endif()
else()
if (WIN32 AND X86_64)
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_WIN "opencv/opencv_${OPENCV_VERSION}-${OPENCV_BUILD}.txz"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}/opencv"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
elseif(APPLE AND X86_64)
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_MAC "opencv/opencv_${OPENCV_VERSION}-${OPENCV_BUILD}_osx.txz"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}_osx/opencv"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
elseif(LINUX)
if (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64")
set(OPENCV_SUFFIX "yocto_kmb")
set(OPENCV_BUILD "${OPENCV_BUILD_YOCTO}")
elseif (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "armv7l")
set(OPENCV_SUFFIX "debian9arm")
elseif (${LINUX_OS_NAME} STREQUAL "CentOS 7" OR CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.9")
set(OPENCV_SUFFIX "centos7")
elseif (${LINUX_OS_NAME} STREQUAL "Ubuntu 16.04")
set(OPENCV_SUFFIX "ubuntu16")
elseif (${LINUX_OS_NAME} STREQUAL "Ubuntu 18.04")
set(OPENCV_SUFFIX "ubuntu18")
else()
message(FATAL_ERROR "OpenCV is not available on current platform")
endif()
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_LIN "opencv/opencv_${OPENCV_VERSION}-${OPENCV_BUILD}_${OPENCV_SUFFIX}.txz"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}_${OPENCV_SUFFIX}/opencv"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
endif()
endif()
if(ANDROID)
@@ -193,17 +279,17 @@ if (ENABLE_GNA)
libGNA_LIBRARIES_BASE_PATH)
if (GNA_LIBRARY_VERSION STREQUAL "GNA1")
RESOLVE_DEPENDENCY(GNA
ARCHIVE_UNIFIED "gna_20181120.zip"
ARCHIVE_UNIFIED "GNA/gna_20181120.zip"
TARGET_PATH "${TEMP}/gna")
else()
if(GNA_LIBRARY_VERSION STREQUAL "GNA1_1401")
set(GNA_VERSION "01.00.00.1401")
endif()
if(GNA_LIBRARY_VERSION STREQUAL "GNA2")
set(GNA_VERSION "02.00.00.0654")
set(GNA_VERSION "02.00.00.0925")
endif()
RESOLVE_DEPENDENCY(GNA
ARCHIVE_UNIFIED "GNA_${GNA_VERSION}.zip"
ARCHIVE_UNIFIED "GNA/GNA_${GNA_VERSION}.zip"
TARGET_PATH "${TEMP}/gna_${GNA_VERSION}"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+.[0-9]+).*")
endif()
@@ -211,6 +297,44 @@ if (ENABLE_GNA)
debug_message(STATUS "gna=" ${GNA})
endif()
if (ENABLE_SPEECH_DEMO)
reset_deps_cache(SPEECH_LIBS_AND_DEMOS)
if(DEFINED ENV{THIRDPARTY_SERVER_PATH})
set(IE_PATH_TO_DEPS "$ENV{THIRDPARTY_SERVER_PATH}")
elseif(DEFINED THIRDPARTY_SERVER_PATH)
set(IE_PATH_TO_DEPS "${THIRDPARTY_SERVER_PATH}")
else()
message(WARNING "Unable to locate Speech Demo")
endif()
if(DEFINED IE_PATH_TO_DEPS)
if (WIN32 AND X86_64)
RESOLVE_DEPENDENCY(SPEECH_LIBS_AND_DEMOS
ARCHIVE_WIN "speech_demo_1.0.0.746_windows.zip"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+.[0-9]+).*"
TARGET_PATH "${TEMP}/speech_demo_1.0.0.746")
debug_message(STATUS "speech_libs_and_demos=" ${SPEECH_LIBS_AND_DEMOS})
elseif (LINUX AND X86_64)
if (${LINUX_OS_NAME} STREQUAL "CentOS 7" OR CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.9")
RESOLVE_DEPENDENCY(SPEECH_LIBS_AND_DEMOS
ARCHIVE_LIN "speech_demo_1.0.0.746_centos.tgz"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+.[0-9]+).*"
TARGET_PATH "${TEMP}/speech_demo_1.0.0.746")
debug_message(STATUS "speech_libs_and_demos=" ${SPEECH_LIBS_AND_DEMOS})
else()
RESOLVE_DEPENDENCY(SPEECH_LIBS_AND_DEMOS
ARCHIVE_LIN "speech_demo_1.0.0.746_linux.tgz"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+.[0-9]+).*"
TARGET_PATH "${TEMP}/speech_demo_1.0.0.746")
debug_message(STATUS "speech_libs_and_demos=" ${SPEECH_LIBS_AND_DEMOS})
endif()
else()
message(FATAL_ERROR "Speech Demo is not available on current platform")
endif()
unset(IE_PATH_TO_DEPS)
endif()
update_deps_cache(SPEECH_LIBS_AND_DEMOS "${SPEECH_LIBS_AND_DEMOS}" "Path to SPEECH_LIBS_AND_DEMOS root folder")
endif()
configure_file(
"${IE_MAIN_SOURCE_DIR}/cmake/share/InferenceEngineConfig.cmake.in"
"${CMAKE_BINARY_DIR}/share/InferenceEngineConfig.cmake"

View File

@@ -11,7 +11,11 @@ file(TO_CMAKE_PATH "${CMAKE_CURRENT_LIST_DIR}" cache_path)
set(ie_options "@IE_OPTIONS@;CMAKE_BUILD_TYPE;CMAKE_SKIP_RPATH")
load_cache("${cache_path}" READ_WITH_PREFIX "" ${ie_options})
foreach(option IN LISTS ie_options)
if(NOT DEFINED "${option}")
load_cache("${cache_path}" READ_WITH_PREFIX "" ${option})
endif()
endforeach()
message(STATUS "The following CMake options are exported from Inference Engine Developer package")
message("")
@@ -21,6 +25,8 @@ endforeach()
message("")
set(gflags_DIR "@gflags_BINARY_DIR@")
# GNA lib dir
set(GNA "@GNA@")
# Targets
@@ -29,7 +35,7 @@ include("${CMAKE_CURRENT_LIST_DIR}/targets_developer.cmake")
set_property(TARGET IE::inference_engine PROPERTY IMPORTED_GLOBAL TRUE)
get_target_property(InferenceEngine_INCLUDE_DIRS IE::inference_engine INTERFACE_INCLUDE_DIRECTORIES)
set(InferenceEngine_LIBRARIES IE::inference_engine_legacy IE::inference_engine IE::inference_engine_nn_builder)
set(InferenceEngine_LIBRARIES IE::inference_engine_legacy IE::inference_engine)
#
# Common cmake includes
@@ -39,7 +45,7 @@ list(APPEND CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake")
list(APPEND CMAKE_MODULE_PATH "${IE_MAIN_SOURCE_DIR}/cmake")
# generic stuff from developer package
include(developer_package NO_POLICY_SCOPE)
include(developer_package)
include(developer_package_ie)
# Don't threat deprecated API warnings as errors in 3rd party apps

View File

@@ -50,7 +50,7 @@ if (ENABLE_GNA)
if (UNIX AND NOT APPLE AND CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.4)
set (DEFAULT_GNA_LIB GNA1)
else()
set (DEFAULT_GNA_LIB GNA1_1401)
set (DEFAULT_GNA_LIB GNA2)
endif()
set(GNA_LIBRARY_VERSION "${DEFAULT_GNA_LIB}" CACHE STRING "GNAVersion")
set_property(CACHE GNA_LIBRARY_VERSION PROPERTY STRINGS "GNA1" "GNA1_1401" "GNA2")
@@ -62,30 +62,30 @@ if (ENABLE_GNA)
endif()
endif()
ie_option (ENABLE_IR_READER "Compile with IR readers / parsers" ON)
ie_option (ENABLE_VPU "vpu targeted plugins for inference engine" ON)
ie_dependent_option (ENABLE_MYRIAD "myriad targeted plugin for inference engine" ON "ENABLE_VPU" OFF)
ie_dependent_option (ENABLE_MYRIAD_NO_BOOT "myriad plugin will skip device boot" OFF "ENABLE_MYRIAD" OFF)
ie_option (ENABLE_TESTS "unit, behavior and functional tests" OFF)
ie_dependent_option (ENABLE_GAPI_TESTS "tests for GAPI kernels" OFF "ENABLE_TESTS" OFF)
ie_dependent_option (ENABLE_GAPI_TESTS "tests for GAPI kernels" ON "ENABLE_TESTS" OFF)
ie_dependent_option (GAPI_TEST_PERF "if GAPI unit tests should examine performance" OFF "ENABLE_GAPI_TESTS" OFF)
ie_dependent_option (ENABLE_MYRIAD_MVNC_TESTS "functional and behavior tests for mvnc api" OFF "ENABLE_TESTS;ENABLE_MYRIAD" OFF)
ie_dependent_option (ENABLE_SAMPLES "console samples are part of inference engine package" ON "NOT MINGW" OFF)
ie_dependent_option (ENABLE_DATA "fetch models from testdata repo" ON "ENABLE_FUNCTIONAL_TESTS;NOT ANDROID" OFF)
ie_dependent_option (ENABLE_SAME_BRANCH_FOR_MODELS "uses same branch for models and for inference engine, if not enabled models are taken from master" OFF "ENABLE_TESTS" OFF)
ie_dependent_option (ENABLE_BEH_TESTS "tests oriented to check inference engine API corecteness" ON "ENABLE_TESTS" OFF)
ie_dependent_option (ENABLE_FUNCTIONAL_TESTS "functional tests" ON "ENABLE_TESTS;ENABLE_IR_READER" OFF)
ie_dependent_option (ENABLE_FUNCTIONAL_TESTS "functional tests" ON "ENABLE_TESTS" OFF)
ie_dependent_option (ENABLE_SAMPLES "console samples are part of inference engine package" ON "NOT MINGW" OFF)
ie_dependent_option (ENABLE_SPEECH_DEMO "enable speech demo integration" ON "NOT APPLE;NOT ANDROID;X86 OR X86_64" OFF)
ie_option (ENABLE_FUZZING "instrument build for fuzzing" OFF)
ie_option (VERBOSE_BUILD "shows extra information about build" OFF)
@@ -96,18 +96,15 @@ ie_option (ENABLE_ALTERNATIVE_TEMP "in case of dependency conflict, to avoid mod
ie_option (ENABLE_OPENCV "enables OpenCV" ON)
ie_option (ENABLE_DEBUG_SYMBOLS "generates symbols for debugging" OFF)
ie_option (ENABLE_PYTHON "enables ie python bridge build" OFF)
ie_option (ENABLE_CPP_CCT "enables C++ version of Cross Check Tool" OFF)
ie_option (ENABLE_C "enables ie c bridge build" ON)
ie_dependent_option(ENABLE_CPPLINT "Enable cpplint checks during the build" OFF "OFF;UNIX;NOT APPLE;NOT ANDROID" OFF)
ie_dependent_option(ENABLE_CPPLINT "Enable cpplint checks during the build" ON "UNIX;NOT ANDROID" OFF)
ie_dependent_option(ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF "ENABLE_CPPLINT" OFF)
ie_option(ENABLE_CLANG_FORMAT "Enable clang-format checks during the build" OFF)
ie_option(ENABLE_CLANG_FORMAT "Enable clang-format checks during the build" ON)
set(IE_EXTRA_PLUGINS "" CACHE STRING "Extra paths for plugins to include into DLDT build tree")

View File

@@ -0,0 +1,81 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if(ENABLE_DOCKER)
cmake_minimum_required(VERSION 3.3 FATAL_ERROR)
else()
cmake_minimum_required(VERSION 3.8 FATAL_ERROR)
endif()
cmake_policy(SET CMP0054 NEW)
find_package(Git REQUIRED)
set(MODELS_LST "")
set(MODELS_LST_TO_FETCH "")
# Registers a models repository in the global MODELS_LST and, optionally,
# in MODELS_LST_TO_FETCH (the list consumed by fetch_models_and_validation_set).
#
# Arguments:
#   add_to_fetcher - boolean; when true the repo is also queued for fetching.
#   model_name     - entry of the form "<folder>:<git_url>" (see callers, e.g.
#                    "data:https://github.com/openvinotoolkit/testdata.git").
#   ARGV2 (opt)    - branch name; defaults to ${MODELS_BRANCH} when omitted.
#
# Side effects: appends to MODELS_LST / MODELS_LST_TO_FETCH and propagates
# both lists to the caller's scope via PARENT_SCOPE.
function (add_models_repo add_to_fetcher model_name)
# ARGV holds all actual arguments; length 3 means an explicit branch was passed.
list(LENGTH ARGV add_models_args)
if (add_models_args EQUAL 3)
list(GET ARGV 2 branch_name)
else()
# Fall back to the globally selected models branch (master unless
# ENABLE_SAME_BRANCH_FOR_MODELS chose the current IE branch).
set(branch_name ${MODELS_BRANCH})
endif()
if (add_to_fetcher)
# Entries queued for fetching carry the branch as a trailing ":<branch>"
# suffix; fetch_models_and_validation_set() splits it back out.
set(model_name "${model_name}:${branch_name}")
list(APPEND MODELS_LST_TO_FETCH ${model_name})
endif()
list(APPEND MODELS_LST ${model_name})
# Functions get their own scope: explicitly export the updated lists.
set(MODELS_LST_TO_FETCH ${MODELS_LST_TO_FETCH} PARENT_SCOPE)
set(MODELS_LST ${MODELS_LST} PARENT_SCOPE)
endfunction()
# Clones a Git-LFS-backed repository as an ExternalProject download-only
# target and initializes Git LFS inside the checkout.
#
# Arguments:
#   name   - target name (also the checkout directory under ${prefix}/src).
#   prefix - ExternalProject PREFIX (working area for the clone).
#   url    - git repository URL.
#   tag    - branch / tag / commit to check out.
#
# Fails the configure step with FATAL_ERROR when `git lfs install` fails
# (typically because git-lfs is not installed on the host).
function(add_lfs_repo name prefix url tag)
    # Idempotent: a repo may be registered by several callers.
    if(TARGET ${name})
        return()
    endif()

    # Download-only project: no configure/build/install steps.
    ExternalProject_Add(${name}
        PREFIX ${prefix}
        GIT_REPOSITORY ${url}
        GIT_TAG ${tag}
        GIT_CONFIG "http.sslverify=false"
        GIT_PROGRESS 1
        CONFIGURE_COMMAND ""
        BUILD_COMMAND ""
        INSTALL_COMMAND ""
        LOG_DOWNLOAD ON)

    # Enable LFS for this checkout only (--local) so model blobs are pulled.
    execute_process(
        COMMAND ${GIT_EXECUTABLE} lfs install --local --force
        WORKING_DIRECTORY ${prefix}/src/${name}
        OUTPUT_VARIABLE lfs_output
        RESULT_VARIABLE lfs_var)
    if(lfs_var)
        # NOTE: a quoted argument is required here. The previous bracket
        # argument form ([=[ ... ]=]) suppresses ${} evaluation in CMake,
        # so ${lfs_output} was printed literally instead of the actual
        # git-lfs error text.
        message(FATAL_ERROR
            "Failed to setup Git LFS: ${lfs_output}\n"
            "Git lfs must be installed in order to fetch models\n"
            "Please install it from https://git-lfs.github.com/")
    endif()
endfunction()
# Creates an LFS clone target for every repository queued in
# MODELS_LST_TO_FETCH by add_models_repo().
#
# Each queued entry has the form "<folder>:<scheme>://<host/path>:<branch>",
# e.g. "data:https://github.com/openvinotoolkit/testdata.git:master".
function (fetch_models_and_validation_set)
foreach(loop_var ${MODELS_LST_TO_FETCH})
# Split on ':'. Because the URL itself contains "://", this deliberately
# breaks the entry into FOUR pieces:
#   0 = folder, 1 = url scheme ("https"), 2 = "//host/path", 3 = branch.
string(REPLACE ":" ";" MODEL_CONFIG_LST ${loop_var})
list(GET MODEL_CONFIG_LST 0 folder_name)
list(GET MODEL_CONFIG_LST 1 git_url)
list(GET MODEL_CONFIG_LST 2 repo_name)
list(GET MODEL_CONFIG_LST 3 branch_name)
add_lfs_repo(
"${folder_name}"
"${TEMP}/models"
# Re-join scheme and path ("https" + ":" + "//...") into the full URL.
"${git_url}:${repo_name}"
"${branch_name}")
endforeach(loop_var)
endfunction()

View File

@@ -83,6 +83,15 @@ function(ie_add_plugin)
add_dependencies(${IE_PLUGIN_NAME} inference_engine_preproc)
endif()
# fake dependencies to build in the following order:
# IE -> IE readers -> IE inference plugins -> IE-based apps
if(TARGET inference_engine_ir_reader)
add_dependencies(${IE_PLUGIN_NAME} inference_engine_ir_reader)
endif()
if(TARGET inference_engine_onnx_reader)
add_dependencies(${IE_PLUGIN_NAME} inference_engine_onnx_reader)
endif()
# install rules
if(NOT IE_PLUGIN_SKIP_INSTALL)
@@ -90,8 +99,8 @@ function(ie_add_plugin)
ie_cpack_add_component(${install_component} REQUIRED DEPENDS core)
install(TARGETS ${IE_PLUGIN_NAME}
RUNTIME DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ${install_component}
ARCHIVE DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ${install_component}
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ${install_component}
ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ${install_component}
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ${install_component})
endif()
endfunction()

View File

@@ -18,7 +18,6 @@
# IE::inference_engine - The Inference Engine library
# IE::inference_engine_legacy - The Inference Engine library with legacy API for IR v7 and older.
# IE::inference_engine_c_api - The Inference Engine C API library
# IE::inference_engine_nn_builder - The Inference Engine NN Builder library
#
macro(ext_message TRACE_LEVEL)
@@ -40,7 +39,7 @@ if(TARGET IE::inference_engine)
set(InferenceEngine_FOUND TRUE)
get_target_property(InferenceEngine_INCLUDE_DIRS IE::inference_engine INTERFACE_INCLUDE_DIRECTORIES)
set(InferenceEngine_LIBRARIES IE::inference_engine_legacy IE::inference_engine
IE::inference_engine_c_api IE::inference_engine_nn_builder)
IE::inference_engine_c_api)
else()
if (WIN32)
set(_ARCH intel64)
@@ -88,29 +87,26 @@ else()
find_library(IE_RELEASE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
find_library(IE_LEGACY_RELEASE_LIBRARY inference_engine_legacy@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
find_library(IE_C_API_RELEASE_LIBRARY inference_engine_c_api@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
find_library(IE_NN_BUILDER_RELEASE_LIBRARY inference_engine_nn_builder@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
elseif(APPLE)
find_library(IE_RELEASE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_LEGACY_RELEASE_LIBRARY inference_engine_legacy@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_C_API_RELEASE_LIBRARY inference_engine_c_api@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_NN_BUILDER_RELEASE_LIBRARY inference_engine_nn_builder@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
else()
find_library(IE_RELEASE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_LEGACY_RELEASE_LIBRARY inference_engine_legacy@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_C_API_RELEASE_LIBRARY inference_engine_c_api@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_NN_BUILDER_RELEASE_LIBRARY inference_engine_nn_builder@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
endif()
find_package_handle_standard_args( InferenceEngine
FOUND_VAR INFERENCEENGINE_FOUND
REQUIRED_VARS IE_RELEASE_LIBRARY IE_LEGACY_RELEASE_LIBRARY IE_C_API_RELEASE_LIBRARY IE_NN_BUILDER_RELEASE_LIBRARY IE_INCLUDE_DIR
REQUIRED_VARS IE_RELEASE_LIBRARY IE_LEGACY_RELEASE_LIBRARY IE_C_API_RELEASE_LIBRARY IE_INCLUDE_DIR
FAIL_MESSAGE "Some of mandatory Inference Engine components are not found. Please consult InferenceEgnineConfig.cmake module's help page.")
if(INFERENCEENGINE_FOUND)
# to keep this line for successful execution in CMake 2.8
set(InferenceEngine_FOUND TRUE)
foreach(ie_library_suffix "" "_legacy" "_c_api" "_nn_builder")
foreach(ie_library_suffix "" "_legacy" "_c_api")
string(TOUPPER "${ie_library_suffix}" ie_library_usuffix)
add_library(IE::inference_engine${ie_library_suffix} SHARED IMPORTED GLOBAL)
@@ -154,7 +150,7 @@ else()
set_target_properties(IE::inference_engine${ie_library_suffix} PROPERTIES
IMPORTED_LOCATION "${IE${ie_library_usuffix}_RELEASE_LIBRARY}"
INTERFACE_INCLUDE_DIRECTORIES "${IE_INCLUDE_DIR}")
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set_target_properties(IE::inference_engine${ie_library_suffix} PROPERTIES
INTERFACE_COMPILE_OPTIONS "-diag-warning=1786")
else()
@@ -167,7 +163,7 @@ else()
set(InferenceEngine_INCLUDE_DIRS ${IE_INCLUDE_DIR})
set(InferenceEngine_LIBRARIES IE::inference_engine_legacy IE::inference_engine
IE::inference_engine_c_api IE::inference_engine_nn_builder)
IE::inference_engine_c_api)
set(IE_EXTERNAL_DIR "${IE_ROOT_DIR}/external")
include("${IE_ROOT_DIR}/share/ie_parallel.cmake")

View File

@@ -19,7 +19,8 @@ set(VPU_SUPPORTED_FIRMWARES usb-ma2450 usb-ma2x8x pcie-ma248x)
# Default packages
#
set(FIRMWARE_PACKAGE_VERSION 1076)
set(FIRMWARE_PACKAGE_VERSION 1223)
set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.02.0")
#
# CMake variables to override default firmware files
@@ -37,7 +38,7 @@ foreach(firmware_name IN LISTS VPU_SUPPORTED_FIRMWARES)
reset_deps_cache(VPU_FIRMWARE_${firmware_name_upper}_FILE)
RESOLVE_DEPENDENCY(VPU_FIRMWARE_${firmware_name_upper}
ARCHIVE_UNIFIED firmware_${firmware_name}_${FIRMWARE_PACKAGE_VERSION}.zip
ARCHIVE_UNIFIED VPU/${firmware_name}/firmware_${firmware_name}_${FIRMWARE_PACKAGE_VERSION}.zip
TARGET_PATH "${TEMP}/vpu/firmware/${firmware_name}"
ENVIRONMENT "VPU_FIRMWARE_${firmware_name_upper}_FILE"
FOLDER)
@@ -82,7 +83,7 @@ foreach(firmware_name IN LISTS VPU_SUPPORTED_FIRMWARES)
VERBATIM)
install(FILES ${${var_name}}
DESTINATION ${IE_CPACK_LIBRARY_PATH}
DESTINATION ${IE_CPACK_RUNTIME_PATH}
COMPONENT myriad)
endforeach()
@@ -104,4 +105,107 @@ if(ANDROID)
set(LIBUSB_LIBRARY "${LIBUSB}/libs/${ANDROID_ABI}/libusb1.0.so")
log_rpath_from_dir(LIBUSB "${LIBUSB}/libs/${ANDROID_ABI}")
endif()
endif()
#
# OpenCL compiler
#
if(LINUX AND LINUX_OS_NAME MATCHES "Ubuntu")
if(DEFINED ENV{THIRDPARTY_SERVER_PATH})
set(IE_PATH_TO_DEPS "$ENV{THIRDPARTY_SERVER_PATH}")
elseif(DEFINED THIRDPARTY_SERVER_PATH)
set(IE_PATH_TO_DEPS "${THIRDPARTY_SERVER_PATH}")
else()
message(WARNING "VPU_OCL_COMPILER is not found. Some tests will skipped")
endif()
if(DEFINED IE_PATH_TO_DEPS)
message(STATUS "THIRDPARTY_SERVER_PATH=${IE_PATH_TO_DEPS}")
reset_deps_cache(VPU_CLC_MA2X8X_ROOT)
reset_deps_cache(VPU_CLC_MA2X8X_COMMAND)
RESOLVE_DEPENDENCY(VPU_CLC_MA2X8X
ARCHIVE_LIN "VPU_OCL_compiler/${VPU_CLC_MA2X8X_VERSION}.tar.gz"
TARGET_PATH "${TEMP}/vpu/clc/ma2x8x/${VPU_CLC_MA2X8X_VERSION}"
ENVIRONMENT "VPU_CLC_MA2X8X_COMMAND")
debug_message(STATUS "VPU_CLC_MA2X8X=" ${VPU_CLC_MA2X8X})
update_deps_cache(
VPU_CLC_MA2X8X_ROOT
"${VPU_CLC_MA2X8X}"
"[VPU] Root directory of OpenCL compiler")
update_deps_cache(
VPU_CLC_MA2X8X_COMMAND
"${VPU_CLC_MA2X8X}/bin/clc"
"[VPU] OpenCL compiler")
find_program(VPU_CLC_MA2X8X_COMMAND clc)
unset (IE_PATH_TO_DEPS)
endif()
endif()
#
# `vpu_custom_kernels` CMake target
#
add_library(vpu_custom_kernels INTERFACE)
# Sets up build rules that copy VPU custom-kernel XML descriptors and compile
# their OpenCL (.cl) sources into .bin files next to the built libraries.
#
# Inputs (from enclosing scope): IE_MAIN_SOURCE_DIR, VPU_CLC_MA2X8X (compiler
# root, resolved above), VPU_CLC_MA2X8X_COMMAND (clc binary).
# Side effects: defines target `vpu_compile_custom_kernels`, hooks it as a
# dependency of the INTERFACE target `vpu_custom_kernels`, and defines
# VPU_HAS_CUSTOM_KERNELS for consumers of that target.
function(add_vpu_compile_custom_kernels)
set(SRC_DIR "${IE_MAIN_SOURCE_DIR}/src/vpu/custom_kernels")
set(DST_DIR "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/vpu_custom_kernels")
file(MAKE_DIRECTORY "${DST_DIR}")
# NOTE: GLOB is evaluated at configure time; adding a new kernel file
# requires re-running CMake.
file(GLOB XML_FILES "${SRC_DIR}/*.xml")
file(GLOB CL_FILES "${SRC_DIR}/*.cl")
# XML descriptors are copied verbatim to the output directory.
foreach(xml_file IN LISTS XML_FILES)
get_filename_component(xml_file_name ${xml_file} NAME)
set(out_file "${DST_DIR}/${xml_file_name}")
list(APPEND all_output_files ${out_file})
add_custom_command(
OUTPUT ${out_file}
COMMAND
${CMAKE_COMMAND} -E copy ${xml_file} ${out_file}
MAIN_DEPENDENCY ${xml_file}
COMMENT "[VPU] Copy ${xml_file} to ${DST_DIR}"
VERBATIM)
endforeach()
# .cl sources are compiled to stripped binaries with the MA2X8X OpenCL
# compiler; the SHAVE_* environment variables point clc at its toolchain.
foreach(cl_file IN LISTS CL_FILES)
get_filename_component(cl_file_name ${cl_file} NAME_WE)
set(out_file "${DST_DIR}/${cl_file_name}.bin")
list(APPEND all_output_files ${out_file})
add_custom_command(
OUTPUT ${out_file}
COMMAND
${CMAKE_COMMAND} -E env
"SHAVE_LDSCRIPT_DIR=${VPU_CLC_MA2X8X}/ldscripts/"
"SHAVE_MA2X8XLIBS_DIR=${VPU_CLC_MA2X8X}/lib"
"SHAVE_MOVIASM_DIR=${VPU_CLC_MA2X8X}/bin"
"SHAVE_MYRIAD_LD_DIR=${VPU_CLC_MA2X8X}/bin"
${VPU_CLC_MA2X8X_COMMAND} --strip-binary-header ${cl_file} -o ${out_file}
MAIN_DEPENDENCY ${cl_file}
DEPENDS ${VPU_CLC_MA2X8X_COMMAND}
COMMENT "[VPU] Compile ${cl_file}"
VERBATIM)
endforeach()
# Aggregate target so consumers can depend on all generated files at once.
add_custom_target(vpu_compile_custom_kernels
DEPENDS ${all_output_files}
COMMENT "[VPU] Compile custom kernels")
add_dependencies(vpu_custom_kernels vpu_compile_custom_kernels)
target_compile_definitions(vpu_custom_kernels INTERFACE "VPU_HAS_CUSTOM_KERNELS")
endfunction()
if(VPU_CLC_MA2X8X_COMMAND)
add_vpu_compile_custom_kernels()
endif()

View File

@@ -6,6 +6,10 @@ project(InferenceEngine_C_API)
add_subdirectory(src)
if(ENABLE_TESTS)
add_subdirectory(tests)
endif()
if(ENABLE_SAMPLES)
add_subdirectory(samples)
endif()

View File

@@ -31,6 +31,7 @@
#define IE_NODISCARD
#else
#if defined(_WIN32)
#define INFERENCE_ENGINE_C_API_CALLBACK __cdecl
#ifdef inference_engine_c_api_EXPORTS
#define INFERENCE_ENGINE_C_API(...) INFERENCE_ENGINE_C_API_EXTERN __declspec(dllexport) __VA_ARGS__ __cdecl
#else
@@ -43,6 +44,10 @@
#endif
#endif
#ifndef INFERENCE_ENGINE_C_API_CALLBACK
#define INFERENCE_ENGINE_C_API_CALLBACK
#endif
typedef struct ie_core ie_core_t;
typedef struct ie_network ie_network_t;
typedef struct ie_executable ie_executable_network_t;
@@ -284,7 +289,7 @@ typedef struct ie_blob_buffer {
* @brief Completion callback definition about the function and args
*/
typedef struct ie_complete_call_back {
void (*completeCallBackFunc)(void *args);
void (INFERENCE_ENGINE_C_API_CALLBACK *completeCallBackFunc)(void *args);
void *args;
}ie_complete_call_back_t;
@@ -371,6 +376,19 @@ INFERENCE_ENGINE_C_API(void) ie_core_versions_free(ie_core_versions_t *vers);
*/
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_read_network(ie_core_t *core, const char *xml, const char *weights_file, ie_network_t **network);
/**
* @brief Reads the model from an xml string and a blob of the bin part of the IR. Use the ie_network_free() method to free memory.
* @ingroup Core
* @param core A pointer to ie_core_t instance.
* @param xml_content Xml content of the IR.
* @param xml_content_size Number of bytes in the xml content of the IR.
* @param weight_blob Blob containing the bin part of the IR.
* @param network A pointer to the newly created network.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_read_network_from_memory(ie_core_t *core, const uint8_t *xml_content, size_t xml_content_size,
const ie_blob_t *weight_blob, ie_network_t **network);
/**
* @brief Creates an executable network from a network object. Users can create as many networks as they need and use
* them simultaneously (up to the limitation of the hardware resources). Use the ie_exec_network_free() method to free memory.

View File

@@ -24,6 +24,8 @@ target_link_libraries(${TARGET_NAME} PUBLIC ${OpenCV_LIBRARIES})
target_include_directories(${TARGET_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}")
set_target_properties(${TARGET_NAME} PROPERTIES FOLDER c_samples)
if(COMMAND add_cpplint_target)
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
endif()

View File

@@ -2,14 +2,6 @@
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME "hello_classification_c")
# create sample target
add_executable(${TARGET_NAME} main.c)
target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES} opencv_c_wraper)
if(COMMAND add_cpplint_target)
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
endif()
ie_add_sample(NAME hello_classification_c
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.c"
DEPENDENCIES opencv_c_wraper)

View File

@@ -2,14 +2,6 @@
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME "hello_nv12_input_classification_c")
# create sample target
add_executable(${TARGET_NAME} main.c)
target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES})
if(COMMAND add_cpplint_target)
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
endif()
ie_add_sample(NAME hello_nv12_input_classification_c
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.c"
DEPENDENCIES opencv_c_wraper)

View File

@@ -1,5 +1,5 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier : Apache-2.0
//
#include <stdlib.h>
@@ -167,7 +167,7 @@ int main(int argc, char **argv) {
// set input resize algorithm to enable input autoresize
status |= ie_network_set_input_resize_algorithm(network, input_name, RESIZE_BILINEAR);
// set input color format to NV12 to enable automatic input color format pre-processing
status |= ie_network_set_color_format(network, input_name, NV12 );
status |= ie_network_set_color_format(network, input_name, NV12);
if (status != OK)
goto err;

View File

@@ -2,14 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
#
set (TARGET_NAME "object_detection_sample_ssd_c")
# create sample target
add_executable(${TARGET_NAME} main.c)
target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES} opencv_c_wraper)
if(COMMAND add_cpplint_target)
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
endif()
ie_add_sample(NAME object_detection_sample_ssd_c
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.c"
HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/object_detection_sample_ssd.h"
"${CMAKE_CURRENT_SOURCE_DIR}/c_w_dirent.h"
DEPENDENCIES opencv_c_wraper)

View File

@@ -1,5 +1,5 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier : Apache-2.0
//
#include <stdlib.h>
@@ -18,7 +18,7 @@
#define MAX_IMAGE 20
static const char *img_msg = NULL;
static const char *img_msg = NULL;
static const char *input_model = NULL;
static const char *device_name = "CPU";
static const char *custom_cldnn_msg = NULL;
@@ -38,7 +38,7 @@ int ParseAndCheckCommandLine(int argc, char *argv[]) {
printf("%sParsing input parameters\n", info);
while ((opt = getopt(argc, argv, string)) != -1) {
switch(opt) {
switch (opt) {
case 'h':
showUsage();
help = 1;
@@ -105,9 +105,9 @@ void readInputFilesArgument(const char *arg) {
const char *fileName = ep->d_name;
if (strcmp(fileName, ".") == 0 || strcmp(fileName, "..") == 0) continue;
char *file_path = (char *)calloc(strlen(arg) + strlen(ep->d_name) + 2, sizeof(char));
strcpy(file_path, arg);
strcat(file_path, "/");
strcat(file_path, ep->d_name);
memcpy(file_path, arg, strlen(arg));
memcpy(file_path + strlen(arg), "/", strlen("/"));
memcpy(file_path + strlen(arg) + strlen("/"), ep->d_name, strlen(ep->d_name) + 1);
if (file_num == 0) {
file_paths = (char **)calloc(1, sizeof(char *));
@@ -131,7 +131,7 @@ void readInputFilesArgument(const char *arg) {
dp = NULL;
} else {
char *file_path = (char *)calloc(strlen(arg) + 1, sizeof(char));
strcpy(file_path, arg);
memcpy(file_path, arg, strlen(arg) + 1);
if (file_num == 0) {
file_paths = (char **)calloc(1, sizeof(char *));
}
@@ -183,12 +183,12 @@ ie_config_t *parseConfig(const char *config_file, char comment) {
ie_config_t *cfg = NULL;
char key[256], value[256];
if (fscanf(file, "%s", key)!= EOF && fscanf(file, "%s", value) != EOF) {
char *cfg_name = (char *)calloc(strlen(key) + 1, sizeof(char));
char *cfg_value = (char *)calloc(strlen(value) + 1, sizeof(char));
strcpy(cfg_name, key);
strcpy(cfg_value, value);
memcpy(cfg_name, key, strlen(key) + 1);
memcpy(cfg_value, value, strlen(value) + 1);
ie_config_t *cfg_t = (ie_config_t *)calloc(1, sizeof(ie_config_t));
cfg_t->name = cfg_name;
cfg_t->value = cfg_value;
@@ -203,8 +203,8 @@ ie_config_t *parseConfig(const char *config_file, char comment) {
}
char *cfg_name = (char *)calloc(strlen(key) + 1, sizeof(char));
char *cfg_value = (char *)calloc(strlen(value) + 1, sizeof(char));
strcpy(cfg_name, key);
strcpy(cfg_value, value);
memcpy(cfg_name, key, strlen(key) + 1);
memcpy(cfg_value, value, strlen(value) + 1);
ie_config_t *cfg_t = (ie_config_t *)calloc(1, sizeof(ie_config_t));
cfg_t->name = cfg_name;
cfg_t->value = cfg_value;
@@ -213,7 +213,7 @@ ie_config_t *parseConfig(const char *config_file, char comment) {
cfg_temp = cfg_temp->next;
}
}
return cfg;
}
@@ -229,11 +229,11 @@ void config_free(ie_config_t *config) {
free((char *)config->name);
config->name = NULL;
}
if(config->value) {
if (config->value) {
free((char *)config->value);
config->value = NULL;
}
if(config->next) {
if (config->next) {
config = config->next;
}
@@ -345,8 +345,8 @@ int main(int argc, char **argv) {
// --------------------------- 4. Read IR Generated by ModelOptimizer (.xml and .bin files) ------------
input_weight = (char *)calloc(strlen(input_model) + 1, sizeof(char));
strncpy(input_weight, input_model, strlen(input_model)-4);
strcat(input_weight, ".bin");
memcpy(input_weight, input_model, strlen(input_model) - 4);
memcpy(input_weight + strlen(input_model) - 4, ".bin", strlen(".bin") + 1);
printf("%sLoading network files:\n", info);
printf("\t%s\n", input_model);
printf("\t%s\n", input_weight);
@@ -388,7 +388,7 @@ int main(int argc, char **argv) {
goto err;
/** Working with first input tensor that stores image **/
if(input_dim.ranks == 4) {
if (input_dim.ranks == 4) {
imageInputName = name;
input_height = input_dim.dims[2];
input_width = input_dim.dims[3];
@@ -399,9 +399,9 @@ int main(int argc, char **argv) {
goto err;
} else if (input_dim.ranks == 2) {
imInfoInputName = name;
status = ie_network_set_input_precision(network, name, FP32);
if(status !=OK || (input_dim.dims[1] != 3 && input_dim.dims[1] != 6)) {
if (status !=OK || (input_dim.dims[1] != 3 && input_dim.dims[1] != 6)) {
printf("Invalid input info. Should be 3 or 6 values length\n");
goto err;
}
@@ -590,7 +590,7 @@ int main(int argc, char **argv) {
dimensions_t imInfoDim;
status |= ie_blob_get_dims(input2, &imInfoDim);
//Fill input tensor with values
//Fill input tensor with values
ie_blob_buffer_t info_blob_buffer;
status |= ie_blob_get_buffer(input2, &info_blob_buffer);
if (status != OK) {
@@ -601,7 +601,7 @@ int main(int argc, char **argv) {
for (image_id = 0; image_id < batchSize; ++image_id) {
p[image_id * imInfoDim.dims[1] + 0] = (float)input_height;
p[image_id * imInfoDim.dims[1] + 1] = (float)input_width;
for (k = 2; k < imInfoDim.dims[1]; k++) {
p[image_id * imInfoDim.dims[1] + k] = 1.0f; // all scale factors are set to 1.0
}
@@ -616,7 +616,7 @@ int main(int argc, char **argv) {
status |= ie_infer_request_wait(infer_request, -1);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------
// --------------------------- 11. Process output -------------------------------------------------------
printf("%sProcessing output blobs\n", info);
@@ -634,7 +634,7 @@ int main(int argc, char **argv) {
int **classes = (int **)calloc(image_num, sizeof(int *));
rectangle_t **boxes = (rectangle_t **)calloc(image_num, sizeof(rectangle_t *));
int *object_num = (int *)calloc(image_num, sizeof(int));
for ( i = 0; i < image_num; ++i) {
for (i = 0; i < image_num; ++i) {
classes[i] = (int *)calloc(maxProposalCount, sizeof(int));
boxes[i] = (rectangle_t *)calloc(maxProposalCount, sizeof(rectangle_t));
object_num[i] = 0;
@@ -678,11 +678,11 @@ int main(int argc, char **argv) {
}
const char *out = "out_";
char str_num[16] = {0};
int2str(str_num, batch_id);
int2str(str_num, batch_id);
char *img_path = (char *)calloc(strlen(out) + strlen(str_num) + strlen(".bmp") + 1, sizeof(char));
strcpy(img_path, out);
strcat(img_path, str_num);
strcat(img_path, ".bmp");
memcpy(img_path, out, strlen(out));
memcpy(img_path + strlen(out), str_num, strlen(str_num));
memcpy(img_path + strlen(out) + strlen(str_num), ".bmp", strlen(".bmp") + 1);
image_save(img_path, &originalImages[batch_id]);
printf("%sImage %s created!\n", info, img_path);
free(img_path);

View File

@@ -21,6 +21,12 @@ target_include_directories(${TARGET_NAME} PUBLIC "${InferenceEngine_C_API_SOURCE
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
# Workaround to avoid warnings caused with bug in the avx512intrin.h of GCC5
if((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") AND
(CMAKE_CXX_COMPILER_VERSION VERSION_LESS_EQUAL 5.5))
set_target_properties(${TARGET_NAME} PROPERTIES LINK_FLAGS_RELEASE "-Wno-error=maybe-uninitialized -Wno-maybe-uninitialized")
endif()
# export
export(TARGETS ${TARGET_NAME} NAMESPACE IE:: APPEND FILE "${CMAKE_BINARY_DIR}/targets.cmake")
@@ -28,8 +34,8 @@ export(TARGETS ${TARGET_NAME} NAMESPACE IE:: APPEND FILE "${CMAKE_BINARY_DIR}/ta
# install
install(TARGETS ${TARGET_NAME}
RUNTIME DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core
ARCHIVE DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core
ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core)
install(DIRECTORY ${InferenceEngine_C_API_SOURCE_DIR}/include/

View File

@@ -310,6 +310,28 @@ IEStatusCode ie_core_read_network(ie_core_t *core, const char *xml, const char *
return status;
}
IEStatusCode ie_core_read_network_from_memory(ie_core_t *core, const uint8_t *xml_content, size_t xml_content_size, \
const ie_blob_t *weight_blob, ie_network_t **network) {
if (core == nullptr || xml_content == nullptr || network == nullptr || weight_blob == nullptr) {
return IEStatusCode::GENERAL_ERROR;
}
IEStatusCode status = IEStatusCode::OK;
try {
std::unique_ptr<ie_network_t> network_result(new ie_network_t);
network_result->object = core->object.ReadNetwork(std::string(reinterpret_cast<const char *>(xml_content),
reinterpret_cast<const char *>(xml_content + xml_content_size)), weight_blob->object);
*network = network_result.release();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
return status;
}
IEStatusCode ie_core_load_network(ie_core_t *core, const ie_network_t *network, const char *device_name, \
const ie_config_t *config, ie_executable_network_t **exe_network) {
IEStatusCode status = IEStatusCode::OK;

View File

@@ -0,0 +1,40 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME "InferenceEngineCAPITests")
# Find OpenCV components if exist
find_package(OpenCV COMPONENTS imgcodecs videoio imgproc QUIET)
if(NOT OpenCV_FOUND)
message(WARNING "OPENCV is disabled or not found, " ${TARGET_NAME} " is built without OPENCV support")
else()
add_definitions(-DUSE_OPENCV)
endif()
add_executable(${TARGET_NAME} ie_c_api_test.cpp test_model_repo.hpp)
target_link_libraries(${TARGET_NAME}
PRIVATE
inference_engine
inference_engine_c_api
${OpenCV_LIBRARIES}
commonTestUtils
)
target_compile_definitions(${TARGET_NAME}
PUBLIC ${ARGV}
DATA_PATH=\"${DATA_PATH}\"
MODELS_PATH=\"${MODELS_PATH}\" )
add_dependencies(${TARGET_NAME} MultiDevicePlugin)
if(ENABLE_MKL_DNN)
add_dependencies(${TARGET_NAME} MKLDNNPlugin)
endif()
if(ENABLE_CLDNN)
add_dependencies(${TARGET_NAME} clDNNPlugin)
endif()
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})

Some files were not shown because too many files have changed in this diff Show More