Compare commits
553 Commits
2023.0.1
...
2023.1.0.d
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
54e969012d | ||
|
|
c7f2676961 | ||
|
|
db4e3bf83d | ||
|
|
9630766e03 | ||
|
|
48b47e754f | ||
|
|
b7e14deddd | ||
|
|
f9e4e32118 | ||
|
|
15e69c280a | ||
|
|
1b495e3221 | ||
|
|
2983b6abb0 | ||
|
|
df0bd18ed2 | ||
|
|
d13adf7ae8 | ||
|
|
ad0ac4bfb6 | ||
|
|
71a970546e | ||
|
|
92ec5991b2 | ||
|
|
c40efac569 | ||
|
|
5482e85082 | ||
|
|
a104e6218a | ||
|
|
6130a90d4e | ||
|
|
b022137d80 | ||
|
|
45125c24cc | ||
|
|
54bb74be38 | ||
|
|
ed087d9bf1 | ||
|
|
3276b8d657 | ||
|
|
c970d576a3 | ||
|
|
25573279e7 | ||
|
|
ec82af040b | ||
|
|
52b9df4a6d | ||
|
|
96a0c539bd | ||
|
|
bcd2463813 | ||
|
|
731ef78608 | ||
|
|
684ae645d5 | ||
|
|
08dc2cf1d6 | ||
|
|
05e8bd375e | ||
|
|
c05d8862cf | ||
|
|
a8a366de08 | ||
|
|
37c538d6bd | ||
|
|
b7cc327cb8 | ||
|
|
b4e608cf47 | ||
|
|
a9c4e4ab56 | ||
|
|
3519050ef0 | ||
|
|
429af27b86 | ||
|
|
b63e8433b0 | ||
|
|
b9575d9586 | ||
|
|
6143a4fa42 | ||
|
|
e54bd6ab1b | ||
|
|
3c619d8339 | ||
|
|
5c2925ada6 | ||
|
|
9943ffc259 | ||
|
|
fa545ffb1c | ||
|
|
555c083336 | ||
|
|
5574acc6ca | ||
|
|
3c623e890d | ||
|
|
26c4ed7ba8 | ||
|
|
358893c758 | ||
|
|
5aa03f8e30 | ||
|
|
74be4e04f3 | ||
|
|
957ab9b831 | ||
|
|
6f14a43ea6 | ||
|
|
2f59e5d697 | ||
|
|
79e935fe97 | ||
|
|
cb63b39c72 | ||
|
|
a4519f0a2c | ||
|
|
0b708b5eff | ||
|
|
42c93a70d4 | ||
|
|
945157cc7b | ||
|
|
f165d8cb2b | ||
|
|
a7442c8ed3 | ||
|
|
e56405ef3b | ||
|
|
f5dc8e7796 | ||
|
|
77584d301e | ||
|
|
3d79bd1ac5 | ||
|
|
55156f9a6c | ||
|
|
10ace822ef | ||
|
|
8031cfea98 | ||
|
|
9684f9184a | ||
|
|
483a040d52 | ||
|
|
aa32ff1df3 | ||
|
|
caedb6a6b4 | ||
|
|
7fd394e87e | ||
|
|
52834659c4 | ||
|
|
3e63ab0dc3 | ||
|
|
79683c24ca | ||
|
|
967faf878a | ||
|
|
bae926de22 | ||
|
|
5993c4942a | ||
|
|
277e759dcd | ||
|
|
92ac04dcac | ||
|
|
1761427ab1 | ||
|
|
d3461074ea | ||
|
|
b69c11d8ef | ||
|
|
63a5ec5762 | ||
|
|
008d9a83ce | ||
|
|
67dc220d38 | ||
|
|
9754117a61 | ||
|
|
838d792d96 | ||
|
|
d66e322529 | ||
|
|
ca0d40969a | ||
|
|
e631f65a9b | ||
|
|
b023119b9a | ||
|
|
661f66b5c5 | ||
|
|
0541a12730 | ||
|
|
e738c4e83f | ||
|
|
d95c49d888 | ||
|
|
dcba37d897 | ||
|
|
77711be786 | ||
|
|
af9204488d | ||
|
|
48dec1000e | ||
|
|
ec2db81468 | ||
|
|
6043bcb5c0 | ||
|
|
b25c8ef860 | ||
|
|
78303aecba | ||
|
|
883a70c91e | ||
|
|
4270dca591 | ||
|
|
c8f3ed814b | ||
|
|
7f1d26ddca | ||
|
|
2b05cf4d00 | ||
|
|
2d5e087b8b | ||
|
|
0743e9bfb5 | ||
|
|
df44f92a97 | ||
|
|
90a0e5f81a | ||
|
|
dd02a0f440 | ||
|
|
1588a33217 | ||
|
|
a106eb0d75 | ||
|
|
16bde0bba6 | ||
|
|
70e0caca4f | ||
|
|
8653f1cbd9 | ||
|
|
93689cc417 | ||
|
|
50c85f01ab | ||
|
|
a0e8d9a630 | ||
|
|
74100670ac | ||
|
|
cff083f83d | ||
|
|
c413825845 | ||
|
|
36a4c0cb2b | ||
|
|
7b86b427cb | ||
|
|
8adce06348 | ||
|
|
724eb94a1d | ||
|
|
67f7808fc4 | ||
|
|
c8e331003f | ||
|
|
21c2b513d1 | ||
|
|
a14a2c0eb0 | ||
|
|
1886ca483b | ||
|
|
34d4c81b76 | ||
|
|
3ef4dac6d5 | ||
|
|
ae73973c11 | ||
|
|
a0119fe33c | ||
|
|
2ff430fa80 | ||
|
|
e564c50d35 | ||
|
|
55ddbcaf23 | ||
|
|
67c0327cd1 | ||
|
|
97532622cb | ||
|
|
9e8d64bf70 | ||
|
|
6ad0e5a1e7 | ||
|
|
385cfee24a | ||
|
|
b1b1014a34 | ||
|
|
ad5b8f8687 | ||
|
|
58d79aa3a6 | ||
|
|
eb3e6a65eb | ||
|
|
bdfa970c7a | ||
|
|
912e4b6f0d | ||
|
|
f246015dd7 | ||
|
|
0b06d15178 | ||
|
|
0a94aeb1bb | ||
|
|
7fe13f341b | ||
|
|
655c21adf1 | ||
|
|
2547301fa7 | ||
|
|
ee659c1ce8 | ||
|
|
323dbec93c | ||
|
|
d8a78137a7 | ||
|
|
6df054b073 | ||
|
|
f023f5d672 | ||
|
|
13028397b7 | ||
|
|
3a1326fb58 | ||
|
|
84bd391369 | ||
|
|
4ca8d40e43 | ||
|
|
2ec9fe915c | ||
|
|
29f06692d6 | ||
|
|
94597d8391 | ||
|
|
13e9752012 | ||
|
|
ace4fb6ecd | ||
|
|
86e46e5e35 | ||
|
|
2799fe9855 | ||
|
|
6167619e28 | ||
|
|
1dafb405fd | ||
|
|
43bf90f90c | ||
|
|
8e97010595 | ||
|
|
0d9109acf3 | ||
|
|
0944295d61 | ||
|
|
3d8a620ac3 | ||
|
|
c0fb831c6e | ||
|
|
36625404eb | ||
|
|
db8d23231a | ||
|
|
a9ddc2b553 | ||
|
|
1ce447674e | ||
|
|
14da93c155 | ||
|
|
a0c8fdbd86 | ||
|
|
ec0daa5b10 | ||
|
|
acb4b1d37b | ||
|
|
eb588f0336 | ||
|
|
f670dc5a0d | ||
|
|
c3a54b0a6e | ||
|
|
b430802d0c | ||
|
|
5afbd4cf92 | ||
|
|
43d67b0a32 | ||
|
|
031f2cc7d1 | ||
|
|
e1ed6599f0 | ||
|
|
e0f241f382 | ||
|
|
266764171f | ||
|
|
482c030408 | ||
|
|
6d42a15e07 | ||
|
|
4b1d0fbc37 | ||
|
|
edf089bf22 | ||
|
|
7697bacfca | ||
|
|
2467f655b7 | ||
|
|
95fe905783 | ||
|
|
0e4855a87c | ||
|
|
18d6ece4e4 | ||
|
|
d53339ff67 | ||
|
|
ea04f8217d | ||
|
|
6b3a252f92 | ||
|
|
5e64a7c0c1 | ||
|
|
24cddecb57 | ||
|
|
97113b317f | ||
|
|
02124aece4 | ||
|
|
0b6b16c83a | ||
|
|
ac26216869 | ||
|
|
2dd0b75529 | ||
|
|
e45068bd64 | ||
|
|
caa2273b37 | ||
|
|
6c287986b7 | ||
|
|
b779dc3246 | ||
|
|
9176f31085 | ||
|
|
6187b3fe90 | ||
|
|
b167460fe3 | ||
|
|
f2017e8c2e | ||
|
|
b655fa55a1 | ||
|
|
65caa9d745 | ||
|
|
f49935323e | ||
|
|
34d4535170 | ||
|
|
fb4efe7203 | ||
|
|
2d7db5e3d3 | ||
|
|
a91446a875 | ||
|
|
a757506f6f | ||
|
|
dc36ec11b5 | ||
|
|
70b935dfe2 | ||
|
|
4d2d58b647 | ||
|
|
dba5de6513 | ||
|
|
d84face9ee | ||
|
|
263e51a1be | ||
|
|
047d2d1d7f | ||
|
|
2f2f0b850a | ||
|
|
f6141ccc89 | ||
|
|
9c63a9bbd0 | ||
|
|
5299c3378b | ||
|
|
328d852f5a | ||
|
|
6f7e9cd786 | ||
|
|
1ca75640ed | ||
|
|
25865201ef | ||
|
|
84f6deb757 | ||
|
|
a1a753bb03 | ||
|
|
a6d3f9d093 | ||
|
|
28d2e77a92 | ||
|
|
bd0117d648 | ||
|
|
8c6c46425b | ||
|
|
cccbf7ce7e | ||
|
|
3300543eac | ||
|
|
00f94426f1 | ||
|
|
dfb6c8ae38 | ||
|
|
57e23ffc0a | ||
|
|
334114844d | ||
|
|
72993e70c0 | ||
|
|
6a562268d3 | ||
|
|
b0b540aeaf | ||
|
|
6501d963fc | ||
|
|
0d86a82041 | ||
|
|
b2aaa10ef6 | ||
|
|
84f46bd048 | ||
|
|
cd4b920bc9 | ||
|
|
ad1fbe6684 | ||
|
|
307b666d99 | ||
|
|
ed8333a94c | ||
|
|
79f46c0d1f | ||
|
|
2df980aa9f | ||
|
|
9e646bf446 | ||
|
|
23258c8bcf | ||
|
|
b1b5d65951 | ||
|
|
1dad2c003b | ||
|
|
dd0060a582 | ||
|
|
ef041565a8 | ||
|
|
eeb552cc93 | ||
|
|
640359a5b3 | ||
|
|
1051dcae83 | ||
|
|
71dcdf8a28 | ||
|
|
5e299c1949 | ||
|
|
29f1ba9f42 | ||
|
|
b93b863bac | ||
|
|
9f1757f400 | ||
|
|
a21fe95ae6 | ||
|
|
3925abfb19 | ||
|
|
5edc5e7010 | ||
|
|
59555872f9 | ||
|
|
2e8548ca36 | ||
|
|
be96f5438c | ||
|
|
13c966f293 | ||
|
|
534101da92 | ||
|
|
02b848d0b8 | ||
|
|
e09b1a9fa2 | ||
|
|
fa428a12e6 | ||
|
|
0d3b636d1c | ||
|
|
60d55729aa | ||
|
|
14bdfbc016 | ||
|
|
d1e73f2ffe | ||
|
|
38fc26325d | ||
|
|
294eb1a4d4 | ||
|
|
e2cb6dafe1 | ||
|
|
65b734083c | ||
|
|
703e5421ca | ||
|
|
04904e5147 | ||
|
|
f8598be80b | ||
|
|
a52bfa216e | ||
|
|
854537c60e | ||
|
|
a72f17dec0 | ||
|
|
1f1bb6008a | ||
|
|
de2302a711 | ||
|
|
a591514250 | ||
|
|
4c92ffa563 | ||
|
|
4ccb6794a4 | ||
|
|
48f207f961 | ||
|
|
c5f4406c34 | ||
|
|
e69bcdb7a5 | ||
|
|
b69e94c297 | ||
|
|
77bd72081f | ||
|
|
added5c44e | ||
|
|
ef9b3e3cd1 | ||
|
|
fe1ac700f0 | ||
|
|
2302e10d3e | ||
|
|
9b52a77531 | ||
|
|
4ccc6e3034 | ||
|
|
0963c23b29 | ||
|
|
5b6bab2636 | ||
|
|
0501434032 | ||
|
|
0b72998631 | ||
|
|
1b24e15e1e | ||
|
|
f2e71cbd0a | ||
|
|
da9f6c557f | ||
|
|
dcd034fb4d | ||
|
|
2513db5a04 | ||
|
|
8b215ca363 | ||
|
|
50957cdddf | ||
|
|
9fafcabb7c | ||
|
|
41de4ba638 | ||
|
|
0b48fc7159 | ||
|
|
84db7d0ee6 | ||
|
|
54bbc9e603 | ||
|
|
4eebd3a976 | ||
|
|
b95aa84b45 | ||
|
|
36dbe95d9e | ||
|
|
d52efb96ea | ||
|
|
c72a950701 | ||
|
|
2e5468646c | ||
|
|
2680e9b7aa | ||
|
|
aec146ff8c | ||
|
|
7c84a586f9 | ||
|
|
fac6668ed1 | ||
|
|
ff0bea88cc | ||
|
|
fba272a298 | ||
|
|
7077ccc1b0 | ||
|
|
568f0eb21b | ||
|
|
d3d249afc6 | ||
|
|
b01cc9297f | ||
|
|
ea35e35090 | ||
|
|
8cda1fdc51 | ||
|
|
20cddb3a6e | ||
|
|
2343609a95 | ||
|
|
2f3efede4d | ||
|
|
2120e5ffe3 | ||
|
|
1173288777 | ||
|
|
1e878b6a01 | ||
|
|
e14d1b2a07 | ||
|
|
293fccc4fe | ||
|
|
a880cba9b7 | ||
|
|
a6b043b1ca | ||
|
|
db58355fad | ||
|
|
43acbf59b6 | ||
|
|
ae1beb296e | ||
|
|
d26c587198 | ||
|
|
bbf77b359a | ||
|
|
3cbc5581ab | ||
|
|
92a0108f0d | ||
|
|
96a80ffc3d | ||
|
|
84b9262aea | ||
|
|
2cc3a45959 | ||
|
|
04171416f4 | ||
|
|
87a39fb007 | ||
|
|
75fa11c80f | ||
|
|
b9fd6d69b3 | ||
|
|
06490957ca | ||
|
|
850d35eafa | ||
|
|
6ae318d6e3 | ||
|
|
78bb0c4837 | ||
|
|
5ff0081489 | ||
|
|
8a847cc817 | ||
|
|
27047e995a | ||
|
|
48fdee3a72 | ||
|
|
524a291c2b | ||
|
|
4cda67da15 | ||
|
|
04f216ce3e | ||
|
|
ab119faa0b | ||
|
|
2fb984b60f | ||
|
|
b4852b3bdf | ||
|
|
3525cae612 | ||
|
|
fe95ed27b1 | ||
|
|
5d9424f84d | ||
|
|
1ded4ede41 | ||
|
|
aa932d341a | ||
|
|
8fed094168 | ||
|
|
ac3d339811 | ||
|
|
49b0c7248a | ||
|
|
5df0a006df | ||
|
|
3394f654e7 | ||
|
|
808647dfb3 | ||
|
|
42ef81a9e6 | ||
|
|
22fc22a4fe | ||
|
|
8251399222 | ||
|
|
4303d10e26 | ||
|
|
dab190b988 | ||
|
|
464bf5937c | ||
|
|
ad03c631b9 | ||
|
|
a4314faf29 | ||
|
|
fe1d081e44 | ||
|
|
25a1968d57 | ||
|
|
078c5cb7e9 | ||
|
|
99a7fa1287 | ||
|
|
306df51001 | ||
|
|
ae1245ae92 | ||
|
|
c351335661 | ||
|
|
03256adf79 | ||
|
|
a6a9a44a0a | ||
|
|
aaf4fdb84f | ||
|
|
d99a5ab1ba | ||
|
|
c13423e2ca | ||
|
|
30395c3e96 | ||
|
|
f61d49a146 | ||
|
|
370b39e475 | ||
|
|
80bf2b4ef9 | ||
|
|
8c2f085c52 | ||
|
|
47f1c43d68 | ||
|
|
ce82fc78df | ||
|
|
076f71a4ce | ||
|
|
5eab00a116 | ||
|
|
014eafda00 | ||
|
|
c378a8e912 | ||
|
|
66e1af18b5 | ||
|
|
e7d94ba020 | ||
|
|
00eacd2a96 | ||
|
|
3b90165581 | ||
|
|
0a26372a4f | ||
|
|
0d4e9eb328 | ||
|
|
43936bd18a | ||
|
|
eb178a753b | ||
|
|
fd9d501306 | ||
|
|
585c33dfa6 | ||
|
|
d1d4633e42 | ||
|
|
9ea3553d5d | ||
|
|
af3d1d69dd | ||
|
|
94a7342548 | ||
|
|
f3fa1a5f96 | ||
|
|
8d81223a28 | ||
|
|
f93c8dda78 | ||
|
|
78cdf22040 | ||
|
|
fcee9833af | ||
|
|
000311d72e | ||
|
|
634c58903d | ||
|
|
48604e9092 | ||
|
|
9c3186b243 | ||
|
|
8e675c71c8 | ||
|
|
fb38fcef2b | ||
|
|
b6098fed90 | ||
|
|
b6bbf3c44c | ||
|
|
03ad798615 | ||
|
|
48281376a0 | ||
|
|
61fc3ae42a | ||
|
|
4fd42cdd62 | ||
|
|
7b4455d748 | ||
|
|
963f30a2fe | ||
|
|
175e169537 | ||
|
|
909d539698 | ||
|
|
974d2f6f89 | ||
|
|
2f9fe67691 | ||
|
|
557b189d83 | ||
|
|
0456528651 | ||
|
|
88c154953e | ||
|
|
bc6402fb27 | ||
|
|
c1933fcaf0 | ||
|
|
ec90869969 | ||
|
|
1d59dab362 | ||
|
|
3a0fa86ef9 | ||
|
|
9062b81edb | ||
|
|
7464b4d396 | ||
|
|
0afe835811 | ||
|
|
7aea8d6641 | ||
|
|
9291e5ecd2 | ||
|
|
e616bcbad8 | ||
|
|
52f32ae437 | ||
|
|
bda41b7321 | ||
|
|
96c5cd16d5 | ||
|
|
fd2ccab93b | ||
|
|
f22aaaf239 | ||
|
|
e282a25d94 | ||
|
|
4d7947efc8 | ||
|
|
1c286e4636 | ||
|
|
56290fee01 | ||
|
|
89d3eaa67f | ||
|
|
35cae6251c | ||
|
|
d9b04be3ae | ||
|
|
3b8bb1fb17 | ||
|
|
82ec92dd98 | ||
|
|
93980873a0 | ||
|
|
f6600665ba | ||
|
|
89167ddcf5 | ||
|
|
82dbca998a | ||
|
|
583f3e243c | ||
|
|
b6761beb55 | ||
|
|
14e146b7c8 | ||
|
|
b253342ea6 | ||
|
|
b7311d8907 | ||
|
|
52bf9abb8c | ||
|
|
2ae23dd1ad | ||
|
|
7743e175b7 | ||
|
|
0374a65fad | ||
|
|
7ac5bab343 | ||
|
|
9776f4d11f | ||
|
|
f4d78ff9d8 | ||
|
|
c6ec6d457f | ||
|
|
62ed45b9a0 | ||
|
|
058c07008c | ||
|
|
25ed941691 | ||
|
|
4ffde4c786 | ||
|
|
8c7a0800b6 | ||
|
|
9248ecadee | ||
|
|
e2baf7f0a2 | ||
|
|
2ae7d4428b | ||
|
|
43842e2486 | ||
|
|
740ee3c185 | ||
|
|
835f51a5d2 | ||
|
|
269ed1d9cc | ||
|
|
dd0b1f0ab3 | ||
|
|
94cf2f8321 | ||
|
|
100d56261a | ||
|
|
fbc14a2efb | ||
|
|
6be92361a2 | ||
|
|
b019868653 | ||
|
|
bf59a67d94 |
@@ -31,6 +31,13 @@ pr:
|
||||
- 'tools/*'
|
||||
- 'tests/layer_tests/*'
|
||||
|
||||
resources:
|
||||
repositories:
|
||||
- repository: vcpkg
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: microsoft/vcpkg
|
||||
|
||||
variables:
|
||||
- group: github
|
||||
|
||||
@@ -46,11 +53,13 @@ jobs:
|
||||
system.debug: true
|
||||
VSTS_HTTP_RETRY: 5
|
||||
VSTS_HTTP_TIMEOUT: 200
|
||||
BUILD_TYPE: Release
|
||||
BUILD_TYPE: Debug
|
||||
OPENVINO_REPO_DIR: $(Build.Repository.LocalPath)
|
||||
VCPKG_ROOT: $(OPENVINO_REPO_DIR)/../vcpkg
|
||||
WORK_DIR: $(Pipeline.Workspace)/_w
|
||||
BUILD_DIR: $(WORK_DIR)/build
|
||||
ANDROID_TOOLS: $(WORK_DIR)/android_tools
|
||||
ANDROID_NDK_HOME: $(WORK_DIR)/android_tools/ndk-bundle
|
||||
ANDROID_SDK_VERSION: 29
|
||||
ANDROID_ABI_CONFIG: arm64-v8a
|
||||
TMP_DIR: /mnt/tmp
|
||||
@@ -108,21 +117,25 @@ jobs:
|
||||
displayName: 'Make dir'
|
||||
|
||||
- checkout: self
|
||||
clean: 'true'
|
||||
submodules: 'true'
|
||||
clean: 'true'
|
||||
path: openvino
|
||||
|
||||
- checkout: vcpkg
|
||||
clean: 'true'
|
||||
path: vcpkg
|
||||
|
||||
- script: |
|
||||
set -e
|
||||
sudo -E $(OPENVINO_REPO_DIR)/install_build_dependencies.sh
|
||||
# Move into contrib install_build_dependencies.sh
|
||||
sudo apt --assume-yes install scons crossbuild-essential-arm64 default-jdk
|
||||
# Speed up build
|
||||
sudo apt -y --no-install-recommends install unzip
|
||||
wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip
|
||||
unzip ninja-linux.zip
|
||||
sudo cp -v ninja /usr/local/bin/
|
||||
# Install Android SDK, NDK and TOOLS
|
||||
# generic dependencies
|
||||
sudo -E apt --assume-yes install ccache scons default-jdk python3-pip ninja-build
|
||||
# vcpkg requires cmake 3.19 or later
|
||||
python3 -m pip install -U pip cmake
|
||||
# vcpkg's tool dependencies
|
||||
sudo -E apt --assume-yes install curl zip unzip tar
|
||||
# vcpkg tree of dependencies require extra packages
|
||||
sudo -E apt --assume-yes install pkg-config linux-libc-dev
|
||||
# Install Android SDK, NDK and Tools
|
||||
sudo apt -y --no-install-recommends install unzip
|
||||
wget https://dl.google.com/android/repository/commandlinetools-linux-7583922_latest.zip
|
||||
unzip commandlinetools-linux-7583922_latest.zip
|
||||
@@ -130,19 +143,34 @@ jobs:
|
||||
./cmdline-tools/bin/sdkmanager --sdk_root=$(ANDROID_TOOLS) --install "ndk-bundle" "platform-tools" "platforms;android-$(ANDROID_SDK_VERSION)"
|
||||
displayName: 'Install dependencies'
|
||||
|
||||
- script: |
|
||||
$(VCPKG_ROOT)/bootstrap-vcpkg.sh --disableMetrics
|
||||
# patch vcpkg default (community) toolchain to build only Release configuration
|
||||
echo "set(VCPKG_BUILD_TYPE release)" >> $(VCPKG_ROOT)/triplets/community/arm64-android.cmake
|
||||
displayName: 'Build vcpkg'
|
||||
|
||||
- task: CMake@1
|
||||
inputs:
|
||||
cmakeArgs: >
|
||||
-G "Ninja Multi-Config"
|
||||
-G Ninja
|
||||
-DCMAKE_VERBOSE_MAKEFILE=ON
|
||||
-DCMAKE_TOOLCHAIN_FILE=$(ANDROID_TOOLS)/ndk-bundle/build/cmake/android.toolchain.cmake
|
||||
-DCMAKE_BUILD_TYPE=$(BUILD_TYPE)
|
||||
-DVCPKG_TARGET_TRIPLET=arm64-android
|
||||
-DVCPKG_HOST_TRIPLET=x64-linux-release
|
||||
-DCMAKE_TOOLCHAIN_FILE=$(VCPKG_ROOT)/scripts/buildsystems/vcpkg.cmake
|
||||
-DVCPKG_CHAINLOAD_TOOLCHAIN_FILE=$(ANDROID_NDK_HOME)/build/cmake/android.toolchain.cmake
|
||||
-DCMAKE_COMPILE_WARNING_AS_ERROR=ON
|
||||
-DANDROID_ABI=$(ANDROID_ABI_CONFIG)
|
||||
-DANDROID_STL=c++_shared
|
||||
-DANDROID_PLATFORM=$(ANDROID_SDK_VERSION)
|
||||
-DENABLE_PYTHON=OFF
|
||||
-DENABLE_SYSTEM_OPENCL=ON
|
||||
-DENABLE_SYSTEM_PROTOBUF=ON
|
||||
-DENABLE_SYSTEM_PUGIXML=ON
|
||||
-DENABLE_SYSTEM_SNAPPY=ON
|
||||
-DENABLE_SYSTEM_TBB=ON
|
||||
-DENABLE_SYSTEM_FLATBUFFERS=ON
|
||||
-DENABLE_INTEL_GPU=ON
|
||||
-DENABLE_TESTS=ON
|
||||
-DCMAKE_CXX_LINKER_LAUNCHER=ccache
|
||||
-DCMAKE_C_LINKER_LAUNCHER=ccache
|
||||
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache
|
||||
-DCMAKE_C_COMPILER_LAUNCHER=ccache
|
||||
-S $(OPENVINO_REPO_DIR)
|
||||
|
||||
@@ -32,13 +32,13 @@ resources:
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/openvino_contrib
|
||||
ref: releases/2023/0
|
||||
ref: master
|
||||
|
||||
- repository: testdata
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/testdata
|
||||
ref: releases/2023/0
|
||||
ref: master
|
||||
|
||||
variables:
|
||||
- group: github
|
||||
@@ -304,32 +304,6 @@ jobs:
|
||||
- script: ls -alR $(INSTALL_DIR)
|
||||
displayName: 'List install test files'
|
||||
|
||||
# Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time
|
||||
- script: |
|
||||
export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.1906/linux/x64:$(LD_LIBRARY_PATH)
|
||||
python3 -m pytest -s $(INSTALL_TEST_DIR)/pyngraph $(PYTHON_STATIC_ARGS) \
|
||||
--junitxml=$(INSTALL_TEST_DIR)/TEST-Pyngraph.xml \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_zoo_models.py \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_backend.py
|
||||
displayName: 'nGraph and IE Python Bindings Tests'
|
||||
|
||||
# Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time
|
||||
- script: |
|
||||
# For python imports to import pybind_mock_frontend
|
||||
export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.1906/linux/x64:$(LD_LIBRARY_PATH)
|
||||
export PYTHONPATH=$(INSTALL_TEST_DIR):$(INSTALL_DIR)/python/python3.8:$PYTHONPATH
|
||||
python3 -m pytest -sv $(INSTALL_TEST_DIR)/pyopenvino $(PYTHON_STATIC_ARGS) \
|
||||
--junitxml=$(INSTALL_TEST_DIR)/TEST-Pyngraph.xml \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_utils/test_utils.py \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_onnx/test_zoo_models.py \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_onnx/test_backend.py
|
||||
displayName: 'Python API 2.0 Tests'
|
||||
|
||||
- script: |
|
||||
export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.1906/linux/x64:$(LD_LIBRARY_PATH)
|
||||
python3 -m pytest -s $(INSTALL_TEST_DIR)/mo/unit_tests --junitxml=$(INSTALL_TEST_DIR)/TEST-ModelOptimizer.xml
|
||||
displayName: 'Model Optimizer UT'
|
||||
|
||||
- script: |
|
||||
sudo apt-get install libtbb-dev libpugixml-dev -y
|
||||
cmake --build $(BUILD_DIR) --target package --parallel
|
||||
@@ -434,8 +408,8 @@ jobs:
|
||||
displayName: 'GNA UT'
|
||||
enabled: 'false' # TODO: fix
|
||||
|
||||
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ieMultiPluginUnitTests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ieMultiPluginUnitTests.xml
|
||||
displayName: 'MULTI UT'
|
||||
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_auto_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_unit_tests.xml
|
||||
displayName: 'AUTO UT'
|
||||
|
||||
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_auto_batch_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_batch_unit_tests.xml
|
||||
displayName: 'AutoBatch UT'
|
||||
@@ -443,10 +417,6 @@ jobs:
|
||||
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_template_func_tests --gtest_filter=*smoke* --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-templateFuncTests.xml
|
||||
displayName: 'TEMPLATE FuncTests'
|
||||
|
||||
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_cpu_func_tests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_cpu_func_tests.xml
|
||||
displayName: 'CPU FuncTests'
|
||||
condition: and(succeeded(), eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'OFF'))
|
||||
|
||||
- script: |
|
||||
$(RUN_PREFIX) $(INSTALL_TEST_DIR)/InferenceEngineCAPITests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-InferenceEngineCAPITests.xml
|
||||
displayName: 'IE CAPITests'
|
||||
@@ -455,6 +425,34 @@ jobs:
|
||||
$(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_capi_test --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_capi_test.xml
|
||||
displayName: 'OV CAPITests'
|
||||
|
||||
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_auto_batch_func_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_batch_func_tests.xml
|
||||
displayName: 'AutoBatch FuncTests'
|
||||
|
||||
# Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time
|
||||
- script: |
|
||||
python3 -m pytest -s $(INSTALL_TEST_DIR)/pyngraph $(PYTHON_STATIC_ARGS) \
|
||||
--junitxml=$(INSTALL_TEST_DIR)/TEST-Pyngraph.xml \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_zoo_models.py \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_backend.py
|
||||
displayName: 'nGraph and IE Python Bindings Tests'
|
||||
|
||||
# Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time
|
||||
- script: |
|
||||
python3 -m pytest -sv $(INSTALL_TEST_DIR)/pyopenvino $(PYTHON_STATIC_ARGS) \
|
||||
--junitxml=$(INSTALL_TEST_DIR)/TEST-Pyngraph.xml \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_utils/test_utils.py \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_onnx/test_zoo_models.py \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_onnx/test_backend.py
|
||||
displayName: 'Python API 2.0 Tests'
|
||||
|
||||
- script: |
|
||||
python3 -m pytest -s $(INSTALL_TEST_DIR)/mo/unit_tests --junitxml=$(INSTALL_TEST_DIR)/TEST-ModelOptimizer.xml
|
||||
displayName: 'Model Optimizer UT'
|
||||
|
||||
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_cpu_func_tests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_cpu_func_tests.xml
|
||||
displayName: 'CPU FuncTests'
|
||||
condition: and(succeeded(), eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'OFF'))
|
||||
|
||||
- task: CMake@1
|
||||
inputs:
|
||||
cmakeArgs: >
|
||||
|
||||
@@ -46,8 +46,7 @@ jobs:
|
||||
system.debug: true
|
||||
VSTS_HTTP_RETRY: 5
|
||||
VSTS_HTTP_TIMEOUT: 200
|
||||
OPENVINO_ARCH: 'aarch64'
|
||||
NUM_PROC: 1
|
||||
NUM_PROC: 2
|
||||
BUILD_TYPE: Release
|
||||
OPENVINO_REPO_DIR: $(Build.Repository.LocalPath)
|
||||
BUILD_OPENVINO: $(WORK_DIR)/build
|
||||
@@ -57,7 +56,8 @@ jobs:
|
||||
TMP_DIR: /mnt/tmp
|
||||
OPENVINO_CCACHE_DIR: $(SHARE_DIR)/ccache/master/linux_arm64
|
||||
LD_LIBRARY_PATH: $(Agent.ToolsDirectory)/Python/$(OV_PYTHON_VERSION)/x64/lib
|
||||
OV_PYTHON_VERSION: 3.11.2 # Full version of Python its required for LD_LIBRARY_PATH. More details https://github.com/microsoft/azure-pipelines-tool-lib/blob/master/docs/overview.md#tool-cache
|
||||
OV_PYTHON_VERSION_MAJOR_MINOR: 3.11
|
||||
OV_PYTHON_VERSION: $(OV_PYTHON_VERSION_MAJOR_MINOR).2 # Full version of Python its required for LD_LIBRARY_PATH. More details https://github.com/microsoft/azure-pipelines-tool-lib/blob/master/docs/overview.md#tool-cache
|
||||
|
||||
steps:
|
||||
- task: UsePythonVersion@0
|
||||
@@ -115,40 +115,61 @@ jobs:
|
||||
python3 -m pip install --upgrade pip
|
||||
python3 -m pip install -r $(OPENVINO_REPO_DIR)/src/bindings/python/requirements.txt
|
||||
python3 -m pip install -r $(OPENVINO_REPO_DIR)/src/bindings/python/wheel/requirements-dev.txt
|
||||
python3 -m pip install -r $(OPENVINO_REPO_DIR)/src/bindings/python/src/compatibility/openvino/requirements-dev.txt
|
||||
# install dependencies needed to build CPU plugin for ARM
|
||||
sudo -E apt --assume-yes install scons crossbuild-essential-arm64
|
||||
sudo -E apt --assume-yes install scons gcc-10-aarch64-linux-gnu g++-10-aarch64-linux-gnu
|
||||
# generic dependencies
|
||||
sudo -E apt --assume-yes install cmake ccache
|
||||
# Speed up build
|
||||
sudo -E apt -y --no-install-recommends install unzip
|
||||
wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip
|
||||
unzip ninja-linux.zip
|
||||
sudo cp -v ninja /usr/local/bin/
|
||||
displayName: 'Install dependencies'
|
||||
sudo -E apt --assume-yes install cmake ccache ninja-build unzip fdupes
|
||||
displayName: 'Install build dependencies'
|
||||
|
||||
- script: |
|
||||
set -e
|
||||
echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal main restricted > arm64-sources.list
|
||||
echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal-updates main restricted >> arm64-sources.list
|
||||
echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal universe >> arm64-sources.list
|
||||
echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal-updates universe >> arm64-sources.list
|
||||
echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal multiverse >> arm64-sources.list
|
||||
echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal-updates multiverse >> arm64-sources.list
|
||||
echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal-backports main restricted universe multiverse >> arm64-sources.list
|
||||
echo deb [arch=amd64] http://security.ubuntu.com/ubuntu/ focal-security main restricted >> arm64-sources.list
|
||||
echo deb [arch=amd64] http://security.ubuntu.com/ubuntu/ focal-security universe >> arm64-sources.list
|
||||
echo deb [arch=amd64] http://security.ubuntu.com/ubuntu/ focal-security multiverse >> arm64-sources.list
|
||||
echo deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ focal main >> arm64-sources.list
|
||||
echo deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ focal universe >> arm64-sources.list
|
||||
echo deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ focal-updates main >> arm64-sources.list
|
||||
echo deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ focal-security main >> arm64-sources.list
|
||||
sudo mv arm64-sources.list /etc/apt/sources.list.d/
|
||||
sudo -E dpkg --add-architecture arm64
|
||||
sudo -E apt-get update -o Dir::Etc::sourcelist=/etc/apt/sources.list.d/arm64-sources.list
|
||||
sudo -E apt-get install -y --no-install-recommends libpython3-dev:arm64
|
||||
displayName: 'Install arm64 libraries'
|
||||
|
||||
- script: |
|
||||
git submodule update --init -- $(OPENVINO_REPO_DIR)/src/plugins
|
||||
git submodule update --init -- $(OPENVINO_REPO_DIR)/thirdparty/gtest
|
||||
git submodule update --init -- $(OPENVINO_REPO_DIR)/thirdparty/open_model_zoo
|
||||
displayName: 'Init submodules for non Conan dependencies'
|
||||
|
||||
- script: |
|
||||
python3 -m pip install conan
|
||||
# install build profile compilers
|
||||
sudo -E apt --assume-yes install gcc g++
|
||||
# generate build profile
|
||||
conan profile detect
|
||||
# generate host profile for linux_arm64
|
||||
echo "include(default)" > $(BUILD_OPENVINO)/linux_arm64
|
||||
echo "[buildenv]" >> $(BUILD_OPENVINO)/linux_arm64
|
||||
echo "CC=aarch64-linux-gnu-gcc" >> $(BUILD_OPENVINO)/linux_arm64
|
||||
echo "CXX=aarch64-linux-gnu-g++" >> $(BUILD_OPENVINO)/linux_arm64
|
||||
echo "CC=aarch64-linux-gnu-gcc-10" >> $(BUILD_OPENVINO)/linux_arm64
|
||||
echo "CXX=aarch64-linux-gnu-g++-10" >> $(BUILD_OPENVINO)/linux_arm64
|
||||
# install OpenVINO dependencies
|
||||
export CMAKE_CXX_COMPILER_LAUNCHER=ccache
|
||||
export CMAKE_C_COMPILER_LAUNCHER=ccache
|
||||
conan install $(OPENVINO_REPO_DIR)/conanfile.txt \
|
||||
-pr:h $(BUILD_OPENVINO)/linux_arm64 \
|
||||
-s:h arch=armv8 \
|
||||
-of $(BUILD_OPENVINO) \
|
||||
-b missing
|
||||
env:
|
||||
CMAKE_CXX_COMPILER_LAUNCHER: ccache
|
||||
CMAKE_C_COMPILER_LAUNCHER: ccache
|
||||
CCACHE_DIR: $(OPENVINO_CCACHE_DIR)
|
||||
CCACHE_TEMPDIR: $(TMP_DIR)/ccache
|
||||
CCACHE_BASEDIR: $(Pipeline.Workspace)
|
||||
@@ -157,14 +178,20 @@ jobs:
|
||||
|
||||
- script: |
|
||||
source $(BUILD_OPENVINO)/conanbuild.sh
|
||||
# TODO: return tests building once GPU plugin migrates to Plugin API 2.0
|
||||
cmake \
|
||||
-G Ninja \
|
||||
-DCMAKE_VERBOSE_MAKEFILE=ON \
|
||||
-DBUILD_SHARED_LIBS=ON \
|
||||
-DBUILD_SHARED_LIBS=OFF \
|
||||
-DCMAKE_COMPILE_WARNING_AS_ERROR=ON \
|
||||
-DENABLE_CPPLINT=OFF \
|
||||
-DENABLE_PYTHON=OFF \
|
||||
-DENABLE_TESTS=ON \
|
||||
-DENABLE_CPPLINT=ON \
|
||||
-DENABLE_INTEL_GPU=ON \
|
||||
-DENABLE_PYTHON=ON \
|
||||
-DENABLE_WHEEL=ON \
|
||||
-DPYBIND11_PYTHONLIBS_OVERWRITE=OFF \
|
||||
-DPYTHON_MODULE_EXTENSION=$(aarch64-linux-gnu-python3-config --extension-suffix) \
|
||||
-DPYTHON_LIBRARY=/usr/lib/aarch64-linux-gnu/libc-2.31.so \
|
||||
-DPYTHON_INCLUDE_DIR=$(Agent.ToolsDirectory)/Python/$(OV_PYTHON_VERSION)/x64/include/python$(OV_PYTHON_VERSION_MAJOR_MINOR) \
|
||||
-DENABLE_DATA=OFF \
|
||||
-DENABLE_SYSTEM_TBB=ON \
|
||||
-DENABLE_SYSTEM_PROTOBUF=ON \
|
||||
@@ -176,6 +203,7 @@ jobs:
|
||||
-DARM_COMPUTE_SCONS_JOBS=$(NUM_PROC) \
|
||||
-DCMAKE_INSTALL_PREFIX=$(INSTALL_OPENVINO) \
|
||||
-DCMAKE_BUILD_TYPE=$(BUILD_TYPE) \
|
||||
-DENABLE_PYTHON_PACKAGING=ON \
|
||||
-S $(OPENVINO_REPO_DIR) \
|
||||
-B $(BUILD_OPENVINO)
|
||||
source $(BUILD_OPENVINO)/deactivate_conanbuild.sh
|
||||
@@ -192,8 +220,10 @@ jobs:
|
||||
- script: cmake --build $(BUILD_OPENVINO) --parallel --config $(BUILD_TYPE) --target install
|
||||
displayName: 'Install OpenVINO Runtime'
|
||||
|
||||
- task: PublishBuildArtifacts@1
|
||||
inputs:
|
||||
PathtoPublish: $(Build.ArtifactStagingDirectory)
|
||||
ArtifactName: 'openvino_aarch64_linux'
|
||||
displayName: 'Publish OpenVINO Runtime for ARM'
|
||||
- script: |
|
||||
source $(BUILD_OPENVINO)/conanbuild.sh
|
||||
$(INSTALL_OPENVINO)/samples/cpp/build_samples.sh
|
||||
source $(BUILD_OPENVINO)/deactivate_conanbuild.sh
|
||||
env:
|
||||
CMAKE_TOOLCHAIN_FILE: $(BUILD_OPENVINO)/conan_toolchain.cmake
|
||||
displayName: 'Build OpenVINO C++ samples'
|
||||
|
||||
@@ -35,7 +35,6 @@ resources:
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/testdata
|
||||
ref: releases/2023/0
|
||||
|
||||
variables:
|
||||
- group: github
|
||||
|
||||
@@ -4,7 +4,7 @@ resources:
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/openvino_contrib
|
||||
ref: releases/2023/0
|
||||
ref: master
|
||||
|
||||
variables:
|
||||
- group: github
|
||||
|
||||
@@ -42,13 +42,11 @@ resources:
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/openvino_contrib
|
||||
ref: releases/2023/0
|
||||
|
||||
- repository: testdata
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/testdata
|
||||
ref: releases/2023/0
|
||||
|
||||
jobs:
|
||||
- job: CUDAPlugin_Lin
|
||||
|
||||
@@ -34,7 +34,7 @@ resources:
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/testdata
|
||||
ref: releases/2023/0
|
||||
ref: master
|
||||
|
||||
jobs:
|
||||
- job: Lin_Debian
|
||||
@@ -62,6 +62,8 @@ jobs:
|
||||
SAMPLES_INSTALL_DIR: /usr/share/openvino/samples
|
||||
PYTHON_SAMPLES_INSTALL_DIR: $(INSTALL_DIR)/share/openvino/samples/python
|
||||
PYTHON_WHEEL_INSTALL_DIR: $HOME/.local/lib/python3.8/site-packages
|
||||
BUILD_VENV: $(WORK_DIR)/build_venv
|
||||
TEST_VENV: $(WORK_DIR)/test_venv
|
||||
TMP_DIR: /mnt/tmp
|
||||
SHARE_DIR: /mount/cinfsshare/onnxtestdata
|
||||
CCACHE_DIR: $(SHARE_DIR)/ccache/master/linux
|
||||
@@ -111,25 +113,32 @@ jobs:
|
||||
# 'clang' is used as a default compiler
|
||||
sudo apt --assume-yes install clang
|
||||
sudo apt --assume-yes install --no-install-recommends libopencv-imgproc-dev libopencv-imgcodecs-dev
|
||||
# For opencv-python: python3-setuptools and pip upgrade
|
||||
python3 -m pip install --upgrade pip
|
||||
python3 -m pip install -r $(REPO_DIR)/src/bindings/python/wheel/requirements-dev.txt
|
||||
python3 -m pip install -r $(REPO_DIR)/src/bindings/python/requirements.txt
|
||||
# install build dependencies
|
||||
(cd $(WORK_DIR) && python3 -m venv build_venv)
|
||||
$(BUILD_VENV)/bin/python3 -m pip install -U pip
|
||||
$(BUILD_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/src/bindings/python/wheel/requirements-dev.txt
|
||||
$(BUILD_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/src/bindings/python/requirements.txt
|
||||
# For running Python API tests
|
||||
python3 -m pip install -r $(REPO_DIR)/src/bindings/python/src/compatibility/openvino/requirements-dev.txt
|
||||
$(BUILD_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/src/bindings/python/src/compatibility/openvino/requirements-dev.txt
|
||||
# For running Paddle frontend unit tests
|
||||
python3 -m pip install -r $(REPO_DIR)/src/frontends/paddle/tests/requirements.txt
|
||||
$(BUILD_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/src/frontends/paddle/tests/requirements.txt
|
||||
# For running ONNX frontend unit tests
|
||||
python3 -m pip install -r $(REPO_DIR)/src/frontends/onnx/tests/requirements.txt
|
||||
$(BUILD_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/src/frontends/onnx/tests/requirements.txt
|
||||
# For running TensorFlow frontend unit tests
|
||||
python3 -m pip install -r $(REPO_DIR)/src/frontends/tensorflow/tests/requirements.txt
|
||||
$(BUILD_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/src/frontends/tensorflow/tests/requirements.txt
|
||||
# For MO unit tests
|
||||
python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_mxnet.txt
|
||||
python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_caffe.txt
|
||||
python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_kaldi.txt
|
||||
python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_onnx.txt
|
||||
python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_tf2.txt
|
||||
python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_dev.txt
|
||||
(cd $(WORK_DIR) && python3 -m venv test_venv)
|
||||
$(TEST_VENV)/bin/python3 -m pip install -U pip
|
||||
$(TEST_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_mxnet.txt
|
||||
$(TEST_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_caffe.txt
|
||||
$(TEST_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_kaldi.txt
|
||||
$(TEST_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_onnx.txt
|
||||
$(TEST_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_tf2.txt
|
||||
$(TEST_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_dev.txt
|
||||
$(TEST_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/src/frontends/paddle/tests/requirements.txt
|
||||
# for Python API tests
|
||||
/usr/bin/python3 -m pip install -r $(REPO_DIR)/src/bindings/python/requirements_test.txt
|
||||
/usr/bin/python3 -m pip uninstall -y numpy # apt-get install python3-numpy will be used
|
||||
# Speed up build
|
||||
sudo apt -y --no-install-recommends install unzip
|
||||
wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip
|
||||
@@ -137,7 +146,7 @@ jobs:
|
||||
sudo cp -v ninja /usr/local/bin/
|
||||
# Speed up tests
|
||||
git clone https://github.com/google/gtest-parallel.git
|
||||
displayName: 'Install dependencies'
|
||||
displayName: 'Install build dependencies'
|
||||
|
||||
# Should be after 'Install dependencies' because Git lfs is not installed
|
||||
- checkout: testdata
|
||||
@@ -155,7 +164,7 @@ jobs:
|
||||
-DCMAKE_COMPILE_WARNING_AS_ERROR=ON
|
||||
-DENABLE_PYTHON=ON
|
||||
-DENABLE_INTEL_GNA=OFF
|
||||
-DPYTHON_EXECUTABLE=/usr/bin/python3.8
|
||||
-DPYTHON_EXECUTABLE=$(BUILD_VENV)/bin/python3
|
||||
-DENABLE_TESTS=ON
|
||||
-DENABLE_FASTER_BUILD=ON
|
||||
-DENABLE_STRICT_DEPENDENCIES=OFF
|
||||
@@ -164,6 +173,7 @@ jobs:
|
||||
-DCMAKE_C_COMPILER_LAUNCHER=ccache
|
||||
-DCMAKE_CXX_LINKER_LAUNCHER=ccache
|
||||
-DCMAKE_C_LINKER_LAUNCHER=ccache
|
||||
-DENABLE_PYTHON_PACKAGING=ON
|
||||
-DCPACK_GENERATOR=DEB
|
||||
-S $(REPO_DIR)
|
||||
-B $(BUILD_DIR)
|
||||
@@ -213,44 +223,12 @@ jobs:
|
||||
- script: cmake -DCOMPONENT=tests -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P $(BUILD_LAYER_TESTS_DIR)/cmake_install.cmake
|
||||
displayName: 'Install Layer Tests'
|
||||
|
||||
- script: python3 -m pip install openvino-dev --find-links=$(INSTALL_DIR)/tools
|
||||
displayName: 'Install python wheels'
|
||||
|
||||
- script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -DCOMPONENT=tests -P $(BUILD_DIR)/cmake_install.cmake
|
||||
displayName: 'Install tests'
|
||||
|
||||
- script: ls -alR $(INSTALL_DIR)
|
||||
displayName: 'List install test files'
|
||||
|
||||
# Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time
|
||||
- script: |
|
||||
python3 -m pytest -s $(INSTALL_TEST_DIR)/pyngraph \
|
||||
--junitxml=$(INSTALL_TEST_DIR)/TEST-Pyngraph.xml \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_zoo_models.py \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_backend.py
|
||||
env:
|
||||
LD_LIBRARY_PATH: $(INSTALL_TEST_DIR)
|
||||
displayName: 'nGraph and IE Python Bindings Tests'
|
||||
|
||||
# Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time
|
||||
- script: |
|
||||
# Required by python imports to load requires libraries
|
||||
# - tests install dir for mock_py
|
||||
# - OpenVINO wheel installation dir for others frontend required by mock_py (is not part of wheel pkg)
|
||||
export LD_LIBRARY_PATH=$(PYTHON_WHEEL_INSTALL_DIR)/openvino/libs:$(INSTALL_TEST_DIR):$LD_LIBRARY_PATH
|
||||
# For python imports to import pybind_mock_frontend
|
||||
export PYTHONPATH=$(INSTALL_TEST_DIR):$PYTHONPATH
|
||||
python3 -m pytest -s $(INSTALL_TEST_DIR)/pyopenvino \
|
||||
--junitxml=$(INSTALL_TEST_DIR)/TEST-Pyngraph.xml \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_utils/test_utils.py \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_onnx/test_zoo_models.py \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_onnx/test_backend.py -v
|
||||
displayName: 'Python API 2.0 Tests'
|
||||
|
||||
- script: |
|
||||
python3 -m pytest -s $(INSTALL_TEST_DIR)/mo/unit_tests --junitxml=$(INSTALL_TEST_DIR)/TEST-ModelOptimizer.xml
|
||||
displayName: 'Model Optimizer UT'
|
||||
|
||||
- script: |
|
||||
sudo apt-get install libtbb-dev libpugixml-dev -y
|
||||
cmake --build $(BUILD_DIR) --config $(BUILD_TYPE) --target package --parallel
|
||||
@@ -262,15 +240,15 @@ jobs:
|
||||
sudo apt-get install --no-install-recommends gnupg wget -y
|
||||
wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
|
||||
sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
|
||||
echo "deb https://apt.repos.intel.com/openvino/2022 focal main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2022.list
|
||||
sudo apt-get update -o Dir::Etc::sourcelist=/etc/apt/sources.list.d/intel-openvino-2022.list
|
||||
echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu20 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2023.list
|
||||
sudo apt-get update -o Dir::Etc::sourcelist=/etc/apt/sources.list.d/intel-openvino-2023.list
|
||||
sudo apt-get install openvino -y || exit 1
|
||||
# install our local one and make sure the conflicts are resolved
|
||||
sudo apt-get install --no-install-recommends dpkg-dev -y
|
||||
rm -r _CPack_Packages
|
||||
dpkg-scanpackages . /dev/null | gzip -9c > Packages.gz
|
||||
echo "deb [trusted=yes] file:$(BUILD_DIR) ./" | sudo tee /etc/apt/sources.list.d/openvino-local.list
|
||||
sudo apt-get update -o Dir::Etc::sourcelist=/etc/apt/sources.list.d/openvino-local.list
|
||||
sudo apt-get update
|
||||
sudo apt-get install openvino -y
|
||||
workingDirectory: $(BUILD_DIR)
|
||||
displayName: 'Install Debian packages'
|
||||
@@ -293,14 +271,12 @@ jobs:
|
||||
- script: $(SAMPLES_INSTALL_DIR)/c/build_samples.sh -i $(INSTALL_DIR)
|
||||
displayName: 'Build c samples'
|
||||
|
||||
- script: |
|
||||
$(INSTALL_TEST_DIR)/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-NGraphUT.xml
|
||||
- script: $(INSTALL_TEST_DIR)/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-NGraphUT.xml
|
||||
env:
|
||||
LD_LIBRARY_PATH: $(INSTALL_TEST_DIR)
|
||||
displayName: 'OV Core UT'
|
||||
|
||||
- script: |
|
||||
$(INSTALL_TEST_DIR)/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ONNXFrontend.xml
|
||||
- script: $(INSTALL_TEST_DIR)/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ONNXFrontend.xml
|
||||
env:
|
||||
LD_LIBRARY_PATH: $(INSTALL_TEST_DIR)
|
||||
displayName: 'ONNX Frontend Tests'
|
||||
@@ -317,14 +293,12 @@ jobs:
|
||||
LD_LIBRARY_PATH: $(INSTALL_TEST_DIR)
|
||||
displayName: 'TensorFlow Frontend Unit Tests'
|
||||
|
||||
- script: |
|
||||
$(INSTALL_TEST_DIR)/ov_tensorflow_common_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-TensorflowCommon.xml
|
||||
- script: $(INSTALL_TEST_DIR)/ov_tensorflow_common_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-TensorflowCommon.xml
|
||||
env:
|
||||
LD_LIBRARY_PATH: $(INSTALL_TEST_DIR)
|
||||
displayName: 'TensorFlow Common Unit Tests'
|
||||
|
||||
- script: |
|
||||
$(INSTALL_TEST_DIR)/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-TensorflowLite.xml
|
||||
- script: $(INSTALL_TEST_DIR)/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-TensorflowLite.xml
|
||||
env:
|
||||
LD_LIBRARY_PATH: $(INSTALL_TEST_DIR)
|
||||
displayName: 'TensorFlow Lite Frontend Unit Tests'
|
||||
@@ -332,21 +306,15 @@ jobs:
|
||||
- script: $(INSTALL_TEST_DIR)/ov_cpu_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_cpu_unit_tests.xml
|
||||
displayName: 'Intel CPU Unit Tests'
|
||||
|
||||
- script: $(INSTALL_TEST_DIR)/ieMultiPluginUnitTests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ieMultiPluginUnitTests.xml
|
||||
displayName: 'MULTI UT'
|
||||
- script: $(INSTALL_TEST_DIR)/ov_auto_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_unit_tests.xml
|
||||
displayName: 'AUTO UT'
|
||||
|
||||
- script: |
|
||||
$(INSTALL_TEST_DIR)/ov_template_func_tests --gtest_filter=*smoke* --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-templateFuncTests.xml
|
||||
- script: $(INSTALL_TEST_DIR)/ov_template_func_tests --gtest_filter=*smoke* --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-templateFuncTests.xml
|
||||
env:
|
||||
LD_LIBRARY_PATH: $(INSTALL_TEST_DIR)
|
||||
displayName: 'TEMPLATE FuncTests'
|
||||
|
||||
# run not all smoke filter to save time in post-commit
|
||||
- script: $(INSTALL_TEST_DIR)/ov_cpu_func_tests --gtest_filter=*OVCLass*:*CoreThreadingTests* --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_cpu_func_tests.xml
|
||||
displayName: 'CPU FuncTests'
|
||||
|
||||
- script: |
|
||||
$(INSTALL_TEST_DIR)/InferenceEngineCAPITests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-InferenceEngineCAPITests.xml
|
||||
- script: $(INSTALL_TEST_DIR)/InferenceEngineCAPITests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-InferenceEngineCAPITests.xml
|
||||
env:
|
||||
DATA_PATH: $(MODELS_PATH)
|
||||
MODELS_PATH: $(MODELS_PATH)
|
||||
@@ -358,6 +326,41 @@ jobs:
|
||||
MODELS_PATH: $(MODELS_PATH)
|
||||
displayName: 'OV CAPITests'
|
||||
|
||||
# Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time
|
||||
- script: |
|
||||
/usr/bin/python3 -m pytest -s $(INSTALL_TEST_DIR)/pyngraph \
|
||||
--junitxml=$(INSTALL_TEST_DIR)/TEST-Pyngraph.xml \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_zoo_models.py \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_backend.py
|
||||
env:
|
||||
LD_LIBRARY_PATH: $(INSTALL_TEST_DIR)
|
||||
displayName: 'nGraph and IE Python Bindings Tests'
|
||||
|
||||
# Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time
|
||||
- script: |
|
||||
/usr/bin/python3 -m pytest -s $(INSTALL_TEST_DIR)/pyopenvino \
|
||||
--junitxml=$(INSTALL_TEST_DIR)/TEST-Pyngraph.xml \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_utils/test_utils.py \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_onnx/test_zoo_models.py \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_onnx/test_backend.py -v
|
||||
env:
|
||||
# Required by python imports to load requires libraries
|
||||
# - tests install dir for mock_py
|
||||
LD_LIBRARY_PATH: $(INSTALL_TEST_DIR)
|
||||
# For python imports to import pybind_mock_frontend
|
||||
PYTHONPATH: $(INSTALL_TEST_DIR)
|
||||
displayName: 'Python API 2.0 Tests'
|
||||
|
||||
- script: |
|
||||
# TODO: fix 'No mock frontend API available'
|
||||
$(TEST_VENV)/bin/python3 -m pip install openvino-dev --find-links=$(INSTALL_DIR)/tools
|
||||
$(TEST_VENV)/bin/python3 -m pytest -s $(INSTALL_TEST_DIR)/mo/unit_tests --junitxml=$(INSTALL_TEST_DIR)/TEST-ModelOptimizer.xml
|
||||
displayName: 'Model Optimizer UT'
|
||||
|
||||
# run not all smoke filter to save time in post-commit
|
||||
- script: $(INSTALL_TEST_DIR)/ov_cpu_func_tests --gtest_filter=*OVCLass*:*CoreThreadingTests* --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_cpu_func_tests.xml
|
||||
displayName: 'CPU FuncTests'
|
||||
|
||||
- task: CMake@1
|
||||
inputs:
|
||||
cmakeArgs: >
|
||||
@@ -369,13 +372,10 @@ jobs:
|
||||
- script: cmake -DCOMPONENT=tests -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P $(BUILD_SAMPLES_TESTS_DIR)/cmake_install.cmake
|
||||
displayName: 'Install Samples Tests'
|
||||
|
||||
- script: python3 -m pip install -r $(INSTALL_TEST_DIR)/smoke_tests/requirements.txt
|
||||
displayName: 'Install dependencies for samples smoke tests'
|
||||
|
||||
- script: |
|
||||
export PATH=$HOME/.local/bin:$PATH
|
||||
/usr/bin/python3 -m pip install -r $(INSTALL_TEST_DIR)/smoke_tests/requirements.txt
|
||||
# GNA isn't a part of Debian package, so filter out that tests
|
||||
python3 -m pytest $(INSTALL_TEST_DIR)/smoke_tests/ -k "not GNA" --env_conf $(INSTALL_TEST_DIR)/smoke_tests/env_config.yml -s --junitxml=$(INSTALL_TEST_DIR)/TEST-SamplesSmokeTests.xml
|
||||
/usr/bin/python3 -m pytest $(INSTALL_TEST_DIR)/smoke_tests/ -k "not GNA" --env_conf $(INSTALL_TEST_DIR)/smoke_tests/env_config.yml -s --junitxml=$(INSTALL_TEST_DIR)/TEST-SamplesSmokeTests.xml
|
||||
env:
|
||||
IE_APP_PATH: $(INSTALL_DIR)/samples_bin
|
||||
LD_LIBRARY_PATH: $(INSTALL_DIR)/samples_bin
|
||||
@@ -385,16 +385,18 @@ jobs:
|
||||
displayName: 'Samples Smoke Tests'
|
||||
|
||||
- script: |
|
||||
python3 -m pip install -r $(LAYER_TESTS_DIR)/requirements.txt
|
||||
export PYTHONPATH=$(LAYER_TESTS_DIR):$PYTHONPATH
|
||||
python3 -m pytest $(LAYER_TESTS_DIR)/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=$(INSTALL_TEST_DIR)/TEST-tf_Roll.xmlTEST
|
||||
$(TEST_VENV)/bin/python3 -m pip install -r $(LAYER_TESTS_DIR)/requirements.txt
|
||||
$(TEST_VENV)/bin/python3 -m pytest $(LAYER_TESTS_DIR)/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=$(INSTALL_TEST_DIR)/TEST-tf_Roll.xmlTEST
|
||||
env:
|
||||
PYTHONPATH: $(LAYER_TESTS_DIR)
|
||||
displayName: 'TensorFlow 1 Layer Tests - Legacy FE'
|
||||
|
||||
- script: |
|
||||
python3 -m pip install -r $(LAYER_TESTS_DIR)/requirements.txt
|
||||
export PYTHONPATH=$(REPO_DIR)/tools/mo/:$(LAYER_TESTS_DIR):$PYTHONPATH
|
||||
export TEST_DEVICE=CPU
|
||||
$(RUN_PREFIX) python3 -m pytest $(LAYER_TESTS_DIR)/tensorflow_lite_tests/ --junitxml=$(INSTALL_TEST_DIR)/TEST-tfl_fe.xmlTEST
|
||||
$(TEST_VENV)/bin/python3 -m pip install -r $(LAYER_TESTS_DIR)/requirements.txt
|
||||
$(RUN_PREFIX) $(TEST_VENV)/bin/python3 -m pytest $(LAYER_TESTS_DIR)/tensorflow_lite_tests/ --junitxml=$(INSTALL_TEST_DIR)/TEST-tfl_fe.xmlTEST
|
||||
env:
|
||||
PYTHONPATH: $(REPO_DIR)/tools/mo/:$(LAYER_TESTS_DIR)
|
||||
TEST_DEVICE: CPU
|
||||
displayName: 'TensorFlow Lite Layer Tests - TFL FE'
|
||||
|
||||
- task: PublishTestResults@2
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
# type: github
|
||||
# endpoint: openvinotoolkit
|
||||
# name: openvinotoolkit/testdata
|
||||
# ref: releases/2023/0
|
||||
# ref: master
|
||||
|
||||
jobs:
|
||||
- job: Lin_lohika
|
||||
|
||||
@@ -35,13 +35,13 @@ resources:
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/openvino_contrib
|
||||
ref: releases/2023/0
|
||||
ref: master
|
||||
|
||||
- repository: testdata
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/testdata
|
||||
ref: releases/2023/0
|
||||
ref: master
|
||||
|
||||
variables:
|
||||
- group: github
|
||||
@@ -196,8 +196,8 @@ jobs:
|
||||
displayName: 'Intel CPU Unit Tests'
|
||||
enabled: 'false'
|
||||
|
||||
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ieMultiPluginUnitTests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ieMultiPluginUnitTests.xml
|
||||
displayName: 'MULTI UT'
|
||||
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_auto_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_unit_tests.xml
|
||||
displayName: 'AUTO UT'
|
||||
enabled: 'false'
|
||||
|
||||
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_cpu_func_tests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_cpu_func_tests.xml
|
||||
|
||||
@@ -32,13 +32,13 @@ resources:
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/openvino_contrib
|
||||
ref: releases/2023/0
|
||||
ref: master
|
||||
|
||||
- repository: testdata
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/testdata
|
||||
ref: releases/2023/0
|
||||
ref: master
|
||||
|
||||
jobs:
|
||||
- job: Win
|
||||
@@ -222,10 +222,14 @@ jobs:
|
||||
$(CMAKE_CMD) -DCOMPONENT=tests -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P $(BUILD_SAMPLES_TESTS_DIR)\cmake_install.cmake
|
||||
displayName: 'Install Samples Tests'
|
||||
|
||||
- script: $(INSTALL_DIR)\samples\cpp\build_samples_msvc.bat -i $(INSTALL_DIR)
|
||||
- script: |
|
||||
$(INSTALL_DIR)\samples\cpp\build_samples_msvc.bat -i $(INSTALL_DIR)
|
||||
if not exist %USERPROFILE%\Documents\Intel\OpenVINO\openvino_cpp_samples_build\ exit 1
|
||||
displayName: 'Build cpp samples'
|
||||
|
||||
- script: $(INSTALL_DIR)\samples\c\build_samples_msvc.bat -i $(INSTALL_DIR)
|
||||
- script: |
|
||||
$(INSTALL_DIR)\samples\c\build_samples_msvc.bat -i $(INSTALL_DIR)
|
||||
if not exist %USERPROFILE%\Documents\Intel\OpenVINO\openvino_c_samples_build\ exit 1
|
||||
displayName: 'Build c samples'
|
||||
|
||||
- script: python -m pip install -r $(INSTALL_TEST_DIR)\smoke_tests\requirements.txt
|
||||
@@ -302,8 +306,8 @@ jobs:
|
||||
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_gna_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_gna_unit_tests.xml
|
||||
displayName: 'GNA UT'
|
||||
|
||||
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ieMultiPluginUnitTests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ieMultiPluginUnitTests.xml
|
||||
displayName: 'MULTI UT'
|
||||
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_auto_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_auto_unit_tests.xml
|
||||
displayName: 'AUTO UT'
|
||||
|
||||
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_auto_batch_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_auto_batch_unit_tests.xml
|
||||
displayName: 'AutoBatch UT'
|
||||
@@ -311,9 +315,8 @@ jobs:
|
||||
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_template_func_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-templateFuncTests.xml
|
||||
displayName: 'TEMPLATE FuncTests'
|
||||
|
||||
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_cpu_func_tests --gtest_filter=*smoke* --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_cpu_func_tests.xml
|
||||
displayName: 'CPU FuncTests'
|
||||
condition: and(succeeded(), eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'OFF'))
|
||||
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_auto_batch_func_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_auto_batch_func_tests.xml
|
||||
displayName: 'AutoBatch FuncTests'
|
||||
|
||||
- script: |
|
||||
call $(SETUPVARS) && $(INSTALL_TEST_DIR)\InferenceEngineCAPITests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-InferenceEngineCAPITests.xml
|
||||
@@ -323,6 +326,10 @@ jobs:
|
||||
call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_capi_test --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_capi_test.xml
|
||||
displayName: 'OV CAPITests'
|
||||
|
||||
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_cpu_func_tests --gtest_filter=*smoke* --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_cpu_func_tests.xml
|
||||
displayName: 'CPU FuncTests'
|
||||
condition: and(succeeded(), eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'OFF'))
|
||||
|
||||
- task: PublishTestResults@2
|
||||
condition: always()
|
||||
inputs:
|
||||
|
||||
@@ -35,7 +35,6 @@ resources:
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/testdata
|
||||
ref: releases/2023/0
|
||||
|
||||
variables:
|
||||
- group: github
|
||||
|
||||
@@ -72,5 +72,5 @@ RUN ninja install
|
||||
WORKDIR /openvino/src/bindings/python
|
||||
ENV OpenVINO_DIR=/openvino/dist/runtime/cmake
|
||||
ENV LD_LIBRARY_PATH=/openvino/dist/runtime/lib/intel64:/openvino/dist/runtime/3rdparty/tbb/lib
|
||||
ENV PYTHONPATH=/openvino/bin/intel64/${BUILD_TYPE}/python_api/python3.11:${PYTHONPATH}
|
||||
ENV PYTHONPATH=/openvino/bin/intel64/${BUILD_TYPE}/python:${PYTHONPATH}
|
||||
CMD tox
|
||||
|
||||
1
.github/labeler.yml
vendored
1
.github/labeler.yml
vendored
@@ -41,6 +41,7 @@
|
||||
|
||||
'category: dependency_changes':
|
||||
- '**/requirement*.txt'
|
||||
- '**/constraints*.txt'
|
||||
- 'scripts/**/*'
|
||||
- '.gitmodules'
|
||||
- '**/setup.py'
|
||||
|
||||
143
.github/workflows/coverage.yml
vendored
Normal file
143
.github/workflows/coverage.yml
vendored
Normal file
@@ -0,0 +1,143 @@
|
||||
name: Code coverage
|
||||
on: workflow_dispatch
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
Coverage:
|
||||
runs-on: ${{ matrix.config.os }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
- { name: "Ubuntu gcc", os: ubuntu-latest-16-cores, cc: "gcc", cxx: "g++" }
|
||||
|
||||
steps:
|
||||
- name: Setup python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.10.10'
|
||||
architecture: 'x64'
|
||||
|
||||
|
||||
- name: Setup ccache
|
||||
uses: hendrikmuhs/ccache-action@v1.2
|
||||
with:
|
||||
max-size: 50G
|
||||
|
||||
- name: Clone OpenVINO
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt --assume-yes update
|
||||
sudo -E ${{ github.workspace }}/install_build_dependencies.sh
|
||||
sudo apt --assume-yes install lcov
|
||||
|
||||
python3 -m pip install --upgrade pip
|
||||
python3 -m pip install -r ${{ github.workspace }}/src/bindings/python/wheel/requirements-dev.txt
|
||||
python3 -m pip install -r ${{ github.workspace }}/src/bindings/python/requirements.txt
|
||||
# For running Python API tests
|
||||
python3 -m pip install -r ${{ github.workspace }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt
|
||||
# For running Paddle frontend unit tests
|
||||
python3 -m pip install -r ${{ github.workspace }}/src/frontends/paddle/tests/requirements.txt
|
||||
# For running ONNX frontend unit tests
|
||||
python3 -m pip install -r ${{ github.workspace }}/src/frontends/onnx/tests/requirements.txt
|
||||
# For running TensorFlow frontend unit tests
|
||||
python3 -m pip install -r ${{ github.workspace }}/src/frontends/tensorflow/tests/requirements.txt
|
||||
# For MO unit tests
|
||||
python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_mxnet.txt
|
||||
python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_caffe.txt
|
||||
python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_kaldi.txt
|
||||
python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_onnx.txt
|
||||
python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_tf2.txt
|
||||
python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_dev.txt
|
||||
|
||||
- name: Get number of CPU cores
|
||||
uses: SimenB/github-actions-cpu-cores@v1
|
||||
id: cpu-cores
|
||||
|
||||
- name: Build OpenVINO with CMake
|
||||
uses: ashutoshvarma/action-cmake-build@master
|
||||
with:
|
||||
build-dir: ${{ github.workspace }}/build
|
||||
cc: ${{ matrix.config.cc }}
|
||||
cxx: ${{ matrix.config.cxx }}
|
||||
configure-options: >
|
||||
-GNinja
|
||||
-DCMAKE_VERBOSE_MAKEFILE=ON
|
||||
-DENABLE_PYTHON=ON
|
||||
-DENABLE_ONEDNN_FOR_GPU=ON
|
||||
-DBUILD_SHARED_LIBS=ON
|
||||
-DENABLE_TESTS=ON
|
||||
-DENABLE_OV_ONNX_FRONTEND=ON
|
||||
-DENABLE_FASTER_BUILD=ON
|
||||
-DENABLE_STRICT_DEPENDENCIES=OFF
|
||||
-DENABLE_COVERAGE=ON
|
||||
-DCMAKE_C_COMPILER_LAUNCHER=ccache
|
||||
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache
|
||||
-DCMAKE_C_LINKER_LAUNCHER=ccache
|
||||
-DCMAKE_CXX_LINKER_LAUNCHER=ccache
|
||||
-DENABLE_SYSTEM_SNAPPY=ON
|
||||
build-type: Release
|
||||
parallel: ${{ steps.cpu-cores.outputs.count }}
|
||||
|
||||
- name: Install wheel packages
|
||||
run: cmake -DCOMPONENT=python_wheels -DCMAKE_INSTALL_PREFIX=${{ github.workspace }}/install_pkg -P '${{ github.workspace }}/build/cmake_install.cmake'
|
||||
|
||||
- name: Install python wheels
|
||||
run: python3 -m pip install openvino-dev --find-links=${{ github.workspace }}/install_pkg/tools
|
||||
|
||||
- name: List binaries
|
||||
run: ls -la ${{ github.workspace }}/bin/intel64/Release
|
||||
|
||||
- name: Install OpenVINO
|
||||
run: cmake -DCMAKE_INSTALL_PREFIX=${{ github.workspace }}/install_pkg -P '${{ github.workspace }}/build/cmake_install.cmake'
|
||||
|
||||
- name: Run OV core unit tests
|
||||
run: ${{ github.workspace }}/bin/intel64/Release/ov_core_unit_tests # --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ github.workspace }}/testdata/TEST-NGraphUT.xml
|
||||
|
||||
- name: Run IR frontend tests
|
||||
run: ${{ github.workspace }}/bin/intel64/Release/ov_ir_frontend_tests # --gtest_print_time=1 --gtest_output=xml:${{ github.workspace }}/testdata/TEST-IRFrontend.xml
|
||||
|
||||
- name: Run ONNX frontend tests
|
||||
run: ${{ github.workspace }}/bin/intel64/Release/ov_onnx_frontend_tests --gtest_filter=-*IE_GPU*
|
||||
|
||||
#- name: Run Paddle frontend unit tests
|
||||
# run: ${{ github.workspace }}/bin/intel64/Release/paddle_tests --gtest_filter=-*IE_GPU*
|
||||
|
||||
- name: Run TensorFlow frontend unit tests
|
||||
run: ${{ github.workspace }}/bin/intel64/Release/ov_tensorflow_frontend_tests --gtest_filter=-*IE_GPU*
|
||||
|
||||
- name: Build coverage with CMake
|
||||
uses: ashutoshvarma/action-cmake-build@master
|
||||
with:
|
||||
build-dir: ${{ github.workspace }}/coverage
|
||||
cc: ${{ matrix.config.cc }}
|
||||
cxx: ${{ matrix.config.cxx }}
|
||||
target: ov_coverage
|
||||
configure-options: >
|
||||
-DCMAKE_VERBOSE_MAKEFILE=ON
|
||||
-DCMAKE_C_COMPILER_LAUNCHER=ccache
|
||||
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache
|
||||
-DCMAKE_C_LINKER_LAUNCHER=ccache
|
||||
-DCMAKE_CXX_LINKER_LAUNCHER=ccache
|
||||
parallel: ${{ steps.cpu-cores.outputs.count }}
|
||||
|
||||
|
||||
- name: Print info
|
||||
run: |
|
||||
ls -laR
|
||||
pwd
|
||||
- name: Generate raport
|
||||
run: |
|
||||
lcov --capture --directory ${{ github.workspace }}/. --output-file coverage.info
|
||||
genhtml coverage.info --output-directory coverage-report
|
||||
- name: Collect coverage
|
||||
uses: codecov/codecov-action@v3
|
||||
with:
|
||||
verbose: true
|
||||
13
.github/workflows/py_checks.yml
vendored
13
.github/workflows/py_checks.yml
vendored
@@ -151,3 +151,16 @@ jobs:
|
||||
- name: Run Bandit
|
||||
run: python -m bandit -r ./ -f screen
|
||||
working-directory: src/bindings/python/src/compatibility/openvino
|
||||
|
||||
# layer_tests Flake code-style
|
||||
- name: Run flake8 on python tests in openvino/tests/layer_tests
|
||||
run: |
|
||||
modified_files=$(git diff --name-only)
|
||||
for file in $modified_files; do
|
||||
if [[ $file == "openvino/tests/layer_tests/"* ]]; then
|
||||
if [[ -f "$file" ]]; then
|
||||
python -m flake8 "$file" --config= ./setup.cfg
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
25
.github/workflows/stale_prs_and_issues.yml
vendored
Normal file
25
.github/workflows/stale_prs_and_issues.yml
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
name: 'Close stale issues and PRs'
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '0 0 * * *'
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@v8
|
||||
with:
|
||||
stale-issue-message: 'This issue will be closed in a week because of 9 months of no activity.'
|
||||
stale-pr-message: 'This PR will be closed in a week because of 2 weeks of no activity.'
|
||||
close-issue-message: 'This issue was closed because it has been stalled for 9 months with no activity.'
|
||||
close-pr-message: 'This PR was closed because it has been stalled for 2 week with no activity.'
|
||||
days-before-pr-stale: 14
|
||||
days-before-issue-stale: 274
|
||||
days-before-close: 7
|
||||
ascending: true
|
||||
exempt-pr-labels: 'no_stale'
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -13,6 +13,7 @@ cmake-build*
|
||||
*.idea
|
||||
.vscode
|
||||
.vs/
|
||||
.vsconan/
|
||||
.DS_Store
|
||||
**/tags
|
||||
compile_commands.json
|
||||
|
||||
@@ -3,10 +3,11 @@
|
||||
#
|
||||
|
||||
if(DEFINED BUILD_SHARED_LIBS AND NOT BUILD_SHARED_LIBS)
|
||||
# 'target_link_libraries' does not work correctly when called from
|
||||
# 3.17: 'target_link_libraries' does not work correctly when called from
|
||||
# different directory where 'add_library' is called: CMake generates
|
||||
# incorrect OpenVINOConfig.cmake in this case
|
||||
cmake_minimum_required(VERSION 3.17)
|
||||
# 3.18: add_library cannot create ALIAS for non-GLOBAL targets
|
||||
cmake_minimum_required(VERSION 3.18)
|
||||
else()
|
||||
if(CPACK_GENERATOR STREQUAL "DEB")
|
||||
# we have to use CPACK_DEBIAN_PACKAGE_SHLIBDEPS_PRIVATE_DIRS variable
|
||||
@@ -120,7 +121,7 @@ if (ENABLE_TESTS)
|
||||
include(cmake/test_model_zoo.cmake)
|
||||
endif()
|
||||
|
||||
add_subdirectory(thirdparty)
|
||||
include(thirdparty/dependencies.cmake)
|
||||
add_subdirectory(src)
|
||||
|
||||
if(ENABLE_SAMPLES OR ENABLE_TESTS OR ENABLE_COMPILE_TOOL)
|
||||
|
||||
@@ -1,32 +1,32 @@
|
||||
# How to contribute to the OpenVINO repository
|
||||
|
||||
We welcome community contributions to OpenVINO™. Please read the following guide to learn how to find ideas for contribution, practices for good pull requests, checking your changes with our tests and more.
|
||||
We welcome community contributions to OpenVINO™. Please read the following guide to learn how to find ideas for contribution, follow best practices for pull requests, and test your changes with our established checks.
|
||||
|
||||
|
||||
## Before you start contributing you should
|
||||
|
||||
- Make sure you agree to contribute your code under [OpenVINO™ (Apache 2.0)](https://github.com/openvinotoolkit/openvino/blob/master/LICENSE) license.
|
||||
- Figure out what you’re going to contribute. If you don’t know what you are going to work on, navigate to the [Github "Issues" tab](https://github.com/openvinotoolkit/openvino/issues). Make sure that there isn't someone working on it. In the latter case you might provide support or suggestion in the issue or in the linked pull request.
|
||||
- If you are going to fix a bug, check that it's still exists in the latest release. This can be done by building the latest master branch, and make sure that the error is still reproducible there. We do not fix bugs that only affect older non-LTS releases like 2020.2 for example (more details about [branching strategy](https://github.com/openvinotoolkit/openvino/wiki/Branches)).
|
||||
- Make sure you agree to contribute your code under [OpenVINO™ (Apache 2.0) license](https://github.com/openvinotoolkit/openvino/blob/master/LICENSE).
|
||||
- Decide what you’re going to contribute. If you are not sure what you want to work on, check out [Contributions Welcome](https://github.com/openvinotoolkit/openvino/issues/17502). See if there isn't anyone already working on the subject you choose, in which case you may still contribute, providing support and suggestions for the given issue or pull request.
|
||||
- If you are going to fix a bug, check if it still exists. You can do it by building the latest master branch and making sure that the error is still reproducible there. We do not fix bugs that only affect older non-LTS releases like 2020.2, for example (see more details about our [branching strategy](https://github.com/openvinotoolkit/openvino/wiki/Branches)).
|
||||
|
||||
|
||||
## "Fork & Pull Request model" for code contribution
|
||||
|
||||
### [](https://github.com/openvinotoolkit/openvino/blob/master/CONTRIBUTING.md#the-instruction-in-brief)The instruction in brief
|
||||
|
||||
- Register at GitHub. Create your fork of OpenVINO™ repository [https://github.com/openvinotoolkit/openvino](https://github.com/openvinotoolkit/openvino) (see [https://help.github.com/articles/fork-a-repo](https://help.github.com/articles/fork-a-repo) for details).
|
||||
- Register at GitHub. Create your fork of the OpenVINO™ repository [https://github.com/openvinotoolkit/openvino](https://github.com/openvinotoolkit/openvino) (see [https://help.github.com/articles/fork-a-repo](https://help.github.com/articles/fork-a-repo) for details).
|
||||
- Install Git.
|
||||
- Set your user name and email address in a Git configuration according to GitHub account (see [https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup](https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup) for details).
|
||||
- Choose a task for yourself. It could be a bugfix or some new code.
|
||||
- Set your user name and email address in Git configuration according to the GitHub account (see [First-Time-Git-Setup](https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup) for details).
|
||||
- Choose a task for yourself. It may be a bugfix or an entirely new piece of code.
|
||||
- Choose a base branch for your work. More details about branches and policies are here: [Branches](https://github.com/openvinotoolkit/openvino/wiki/Branches)
|
||||
- Clone your fork to your computer.
|
||||
- Create a new branch (with a meaningful name) from the base branch you chose.
|
||||
- Modify / add the code following our [Coding Style Guide](./docs/dev/coding_style.md).
|
||||
- If you want to add a new sample, please look at this [Guide for contributing to C++/C/Python IE samples](https://github.com/openvinotoolkit/openvino/wiki/SampleContribute)
|
||||
- Create a new branch (give it a meaningful name) from the base branch of your choice.
|
||||
- Modify / add the code, following our [Coding Style Guide](./docs/dev/coding_style.md).
|
||||
- If you want to add a new sample, please have a look at the [Guide for contributing to C++/C/Python IE samples](https://github.com/openvinotoolkit/openvino/wiki/SampleContribute)
|
||||
- If you want to contribute to the documentation and want to add a new guide, follow that instruction [Documentation guidelines](https://github.com/openvinotoolkit/openvino/wiki/CodingStyleGuideLinesDocumentation)
|
||||
- Run testsuite locally:
|
||||
- execute each test binary from the artifacts directory, e.g. `<source dir>/bin/intel64/Release/ieFuncTests`
|
||||
- When you are done, make sure that your branch is to date with latest state of the branch you want to contribute to (e.g. `git fetch upstream && git merge upstream/master`), push your branch to your GitHub fork; then create a pull request from your branch to the base branch (see [https://help.github.com/articles/using-pull-requests](https://help.github.com/articles/using-pull-requests) for details).
|
||||
- When you are done, make sure that your branch is up to date with latest state of the branch you want to contribute to (e.g. `git fetch upstream && git merge upstream/master`). If so, push your branch to your GitHub fork and create a pull request from your branch to the base branch (see [using-pull-requests](https://help.github.com/articles/using-pull-requests) for details).
|
||||
|
||||
## Making a good pull request
|
||||
|
||||
@@ -34,22 +34,20 @@ Following these guidelines will increase the likelihood of your pull request bei
|
||||
|
||||
- One PR – one issue.
|
||||
- Build perfectly on your local system.
|
||||
- Choose the right base branch [Branches](https://github.com/openvinotoolkit/openvino/wiki/Branches).
|
||||
- Choose the right base branch, based on our [Branch Guidelines](https://github.com/openvinotoolkit/openvino/wiki/Branches).
|
||||
- Follow the [Coding Style Guide](./docs/dev/coding_style.md) for your code.
|
||||
- Update documentation using [Documentation guidelines](https://github.com/openvinotoolkit/openvino/wiki/CodingStyleGuideLinesDocumentation) if needed.
|
||||
- Document your contribution, if you decide it may benefit OpenVINO users. You may do it yourself by editing the files in the "docs" directory or contact someone working with documentation to provide them with the right information.
|
||||
- Cover your changes with test.
|
||||
- Add license at the top of new files [C++ example](https://github.com/openvinotoolkit/openvino/blob/master/samples/cpp/classification_sample_async/main.cpp#L1-L2), [Python example](https://github.com/openvinotoolkit/openvino/blob/master/samples/python/hello_classification/hello_classification.py#L3-L4).
|
||||
- Add enough information: a meaningful title, the reason why you made the commit and a link to the issue page if exists.
|
||||
- Remove unrelated to PR changes.
|
||||
- If it is still WIP and you want to check CI test results early then use _Draft_ PR.
|
||||
- Add the license statement at the top of new files [C++ example](https://github.com/openvinotoolkit/openvino/blob/master/samples/cpp/classification_sample_async/main.cpp#L1-L2), [Python example](https://github.com/openvinotoolkit/openvino/blob/master/samples/python/hello_classification/hello_classification.py#L3-L4).
|
||||
- Add proper information to the PR: a meaningful title, the reason why you made the commit, and a link to the issue page, if it exists.
|
||||
- Remove changes unrelated to the PR.
|
||||
- If it is still WIP and you want to check CI test results early, use a _Draft_ PR.
|
||||
- Submit your PR and become an OpenVINO™ contributor!
|
||||
|
||||
|
||||
## Testing and merging pull requests
|
||||
|
||||
Your pull request will be automatically tested by OpenVINO™'s precommit (testing status are automatically reported as "green" or "red" circles in precommit steps on PR's page). If any builders have failed, you need fix the issue. To rerun the automatic builds just push changes to your branch on GitHub. No need to close pull request and open a new one!
|
||||
Your pull request will be automatically tested by OpenVINO™'s precommit (testing statuses are automatically reported as "green" or "red" circles in precommit steps on the PR page). If any builders fail, you need to fix the issues before the PR can be merged. If you push any changes to your branch on GitHub the tests will re-run automatically. No need to close pull request and open a new one!
|
||||
|
||||
|
||||
## Merging PR
|
||||
|
||||
When the reviewer accepts the pull request and the pre-commit shows a "green" status, the review status is set to "Approved", which signals to the OpenVINO™ maintainers that they can merge your pull request.
|
||||
When an assigned reviewer accepts the pull request and the pre-commit is "green", the review status is set to "Approved", which informs OpenVINO™ maintainers that they can merge your pull request.
|
||||
@@ -1,5 +1,4 @@
|
||||
<div align="center">
|
||||
|
||||
<img src="docs/img/openvino-logo-purple-black.png" width="400px">
|
||||
|
||||
[](https://badge.fury.io/py/openvino)
|
||||
@@ -9,7 +8,6 @@
|
||||
[](https://pepy.tech/project/openvino)
|
||||
[](https://anaconda.org/conda-forge/openvino/files)
|
||||
[](https://formulae.brew.sh/formula/openvino)
|
||||
|
||||
</div>
|
||||
|
||||
## Contents:
|
||||
@@ -168,7 +166,9 @@ See [How to build OpenVINO](./docs/dev/build.md) to get more information about t
|
||||
|
||||
## How to contribute
|
||||
|
||||
See [CONTRIBUTING](./CONTRIBUTING.md) for details. Thank you!
|
||||
See [Contributions Welcome](https://github.com/openvinotoolkit/openvino/issues/17502) for good first issues.
|
||||
|
||||
See [CONTRIBUTING](./CONTRIBUTING.md) for contribution details. Thank you!
|
||||
|
||||
## Get a support
|
||||
|
||||
|
||||
@@ -9,67 +9,3 @@ set(CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
|
||||
set(CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)
|
||||
set(CMAKE_STRIP arm-linux-gnueabihf-strip)
|
||||
set(PKG_CONFIG_EXECUTABLE arm-linux-gnueabihf-pkg-config CACHE PATH "Path to ARM pkg-config")
|
||||
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
|
||||
|
||||
macro(__cmake_find_root_save_and_reset)
|
||||
foreach(v
|
||||
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
|
||||
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
|
||||
)
|
||||
set(__save_${v} ${${v}})
|
||||
set(${v} NEVER)
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
macro(__cmake_find_root_restore)
|
||||
foreach(v
|
||||
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
|
||||
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
|
||||
)
|
||||
set(${v} ${__save_${v}})
|
||||
unset(__save_${v})
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
|
||||
# macro to find programs on the host OS
|
||||
macro(find_host_program)
|
||||
__cmake_find_root_save_and_reset()
|
||||
if(CMAKE_HOST_WIN32)
|
||||
SET(WIN32 1)
|
||||
SET(UNIX)
|
||||
elseif(CMAKE_HOST_APPLE)
|
||||
SET(APPLE 1)
|
||||
SET(UNIX)
|
||||
endif()
|
||||
find_program(${ARGN})
|
||||
SET(WIN32)
|
||||
SET(APPLE)
|
||||
SET(UNIX 1)
|
||||
__cmake_find_root_restore()
|
||||
endmacro()
|
||||
|
||||
# macro to find packages on the host OS
|
||||
macro(find_host_package)
|
||||
__cmake_find_root_save_and_reset()
|
||||
if(CMAKE_HOST_WIN32)
|
||||
SET(WIN32 1)
|
||||
SET(UNIX)
|
||||
elseif(CMAKE_HOST_APPLE)
|
||||
SET(APPLE 1)
|
||||
SET(UNIX)
|
||||
endif()
|
||||
find_package(${ARGN})
|
||||
SET(WIN32)
|
||||
SET(APPLE)
|
||||
SET(UNIX 1)
|
||||
__cmake_find_root_restore()
|
||||
endmacro()
|
||||
|
||||
@@ -9,67 +9,3 @@ set(CMAKE_C_COMPILER aarch64-linux-gnu-gcc)
|
||||
set(CMAKE_CXX_COMPILER aarch64-linux-gnu-g++)
|
||||
set(CMAKE_STRIP aarch64-linux-gnu-strip)
|
||||
set(PKG_CONFIG_EXECUTABLE aarch64-linux-gnu-pkg-config CACHE PATH "Path to ARM64 pkg-config")
|
||||
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
|
||||
|
||||
macro(__cmake_find_root_save_and_reset)
|
||||
foreach(v
|
||||
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
|
||||
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
|
||||
)
|
||||
set(__save_${v} ${${v}})
|
||||
set(${v} NEVER)
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
macro(__cmake_find_root_restore)
|
||||
foreach(v
|
||||
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
|
||||
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
|
||||
)
|
||||
set(${v} ${__save_${v}})
|
||||
unset(__save_${v})
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
|
||||
# macro to find programs on the host OS
|
||||
macro(find_host_program)
|
||||
__cmake_find_root_save_and_reset()
|
||||
if(CMAKE_HOST_WIN32)
|
||||
SET(WIN32 1)
|
||||
SET(UNIX)
|
||||
elseif(CMAKE_HOST_APPLE)
|
||||
SET(APPLE 1)
|
||||
SET(UNIX)
|
||||
endif()
|
||||
find_program(${ARGN})
|
||||
SET(WIN32)
|
||||
SET(APPLE)
|
||||
SET(UNIX 1)
|
||||
__cmake_find_root_restore()
|
||||
endmacro()
|
||||
|
||||
# macro to find packages on the host OS
|
||||
macro(find_host_package)
|
||||
__cmake_find_root_save_and_reset()
|
||||
if(CMAKE_HOST_WIN32)
|
||||
SET(WIN32 1)
|
||||
SET(UNIX)
|
||||
elseif(CMAKE_HOST_APPLE)
|
||||
SET(APPLE 1)
|
||||
SET(UNIX)
|
||||
endif()
|
||||
find_package(${ARGN})
|
||||
SET(WIN32)
|
||||
SET(APPLE)
|
||||
SET(UNIX 1)
|
||||
__cmake_find_root_restore()
|
||||
endmacro()
|
||||
|
||||
@@ -32,21 +32,24 @@ if(THREADING STREQUAL "OMP")
|
||||
TARGET_PATH "${TEMP}/omp"
|
||||
ENVIRONMENT "OMP"
|
||||
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*"
|
||||
SHA256 "62c68646747fb10f19b53217cb04a1e10ff93606f992e6b35eb8c31187c68fbf")
|
||||
SHA256 "62c68646747fb10f19b53217cb04a1e10ff93606f992e6b35eb8c31187c68fbf"
|
||||
USE_NEW_LOCATION TRUE)
|
||||
elseif(LINUX AND X86_64)
|
||||
RESOLVE_DEPENDENCY(OMP
|
||||
ARCHIVE_LIN "iomp.tgz"
|
||||
TARGET_PATH "${TEMP}/omp"
|
||||
ENVIRONMENT "OMP"
|
||||
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*"
|
||||
SHA256 "7832b16d82513ee880d97c27c7626f9525ebd678decf6a8fe6c38550f73227d9")
|
||||
SHA256 "7832b16d82513ee880d97c27c7626f9525ebd678decf6a8fe6c38550f73227d9"
|
||||
USE_NEW_LOCATION TRUE)
|
||||
elseif(APPLE AND X86_64)
|
||||
RESOLVE_DEPENDENCY(OMP
|
||||
ARCHIVE_MAC "iomp_20190130_mac.tgz"
|
||||
TARGET_PATH "${TEMP}/omp"
|
||||
ENVIRONMENT "OMP"
|
||||
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*"
|
||||
SHA256 "591ea4a7e08bbe0062648916f42bded71d24c27f00af30a8f31a29b5878ea0cc")
|
||||
SHA256 "591ea4a7e08bbe0062648916f42bded71d24c27f00af30a8f31a29b5878ea0cc"
|
||||
USE_NEW_LOCATION TRUE)
|
||||
else()
|
||||
message(FATAL_ERROR "Intel OMP is not available on current platform")
|
||||
endif()
|
||||
@@ -108,7 +111,8 @@ function(ov_download_tbb)
|
||||
ARCHIVE_ANDROID "tbb2020_20200404_android.tgz"
|
||||
TARGET_PATH "${TEMP}/tbb"
|
||||
ENVIRONMENT "TBBROOT"
|
||||
SHA256 "f42d084224cc2d643314bd483ad180b081774608844000f132859fca3e9bf0ce")
|
||||
SHA256 "f42d084224cc2d643314bd483ad180b081774608844000f132859fca3e9bf0ce"
|
||||
USE_NEW_LOCATION TRUE)
|
||||
elseif(LINUX AND X86_64 AND OV_GLIBC_VERSION VERSION_GREATER_EQUAL 2.17)
|
||||
# build oneTBB 2021.2.1 with gcc 4.8 (glibc 2.17)
|
||||
RESOLVE_DEPENDENCY(TBB
|
||||
@@ -122,7 +126,8 @@ function(ov_download_tbb)
|
||||
ARCHIVE_LIN "keembay/tbb2020_38404_kmb_lic.tgz"
|
||||
TARGET_PATH "${TEMP}/tbb_yocto"
|
||||
ENVIRONMENT "TBBROOT"
|
||||
SHA256 "321261ff2eda6d4568a473cb883262bce77a93dac599f7bd65d2918bdee4d75b")
|
||||
SHA256 "321261ff2eda6d4568a473cb883262bce77a93dac599f7bd65d2918bdee4d75b"
|
||||
USE_NEW_LOCATION TRUE)
|
||||
elseif(APPLE AND X86_64)
|
||||
# build oneTBB 2021.2.1 with OS version 11.4
|
||||
RESOLVE_DEPENDENCY(TBB
|
||||
@@ -327,8 +332,8 @@ if(ENABLE_INTEL_GNA)
|
||||
GNA_LIB_DIR
|
||||
libGNA_INCLUDE_DIRS
|
||||
libGNA_LIBRARIES_BASE_PATH)
|
||||
set(GNA_VERSION "03.05.00.1906")
|
||||
set(GNA_HASH "4a5be86d9c026b0e10afac2a57fc7c99d762b30e3d506abb3a3380fbcfe2726e")
|
||||
set(GNA_VERSION "03.05.00.2116")
|
||||
set(GNA_HASH "960350567702bda17276ac4c060d7524fb7ce7ced785004bd861c81ff2bfe2c5")
|
||||
|
||||
set(FILES_TO_EXTRACT_LIST gna_${GNA_VERSION}/include)
|
||||
if(WIN32)
|
||||
|
||||
@@ -8,6 +8,12 @@ if(NOT DEFINED IEDevScripts_DIR)
|
||||
message(FATAL_ERROR "IEDevScripts_DIR is not defined")
|
||||
endif()
|
||||
|
||||
macro(ov_set_if_not_defined var value)
|
||||
if(NOT DEFINED ${var})
|
||||
set(${var} ${value})
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
set(OLD_CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH})
|
||||
set(CMAKE_MODULE_PATH "${IEDevScripts_DIR}")
|
||||
|
||||
@@ -71,23 +77,8 @@ endfunction()
|
||||
# For cross-compilation
|
||||
#
|
||||
|
||||
# Search packages for the host system instead of packages for the target system
|
||||
# in case of cross compilation these macros should be defined by the toolchain file
|
||||
if(NOT COMMAND find_host_package)
|
||||
macro(find_host_package)
|
||||
find_package(${ARGN})
|
||||
endmacro()
|
||||
endif()
|
||||
if(NOT COMMAND find_host_library)
|
||||
macro(find_host_library)
|
||||
find_library(${ARGN})
|
||||
endmacro()
|
||||
endif()
|
||||
if(NOT COMMAND find_host_program)
|
||||
macro(find_host_program)
|
||||
find_program(${ARGN})
|
||||
endmacro()
|
||||
endif()
|
||||
include(cross_compile/find_commands)
|
||||
include(cross_compile/native_compile)
|
||||
|
||||
#
|
||||
# Common scripts
|
||||
@@ -166,12 +157,6 @@ else()
|
||||
endif()
|
||||
add_definitions(-DIE_BUILD_POSTFIX=\"${IE_BUILD_POSTFIX}\")
|
||||
|
||||
macro(ov_set_if_not_defined var value)
|
||||
if(NOT DEFINED ${var})
|
||||
set(${var} ${value})
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
ov_set_if_not_defined(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
|
||||
ov_set_if_not_defined(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
|
||||
ov_set_if_not_defined(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
|
||||
@@ -179,7 +164,7 @@ ov_set_if_not_defined(CMAKE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
|
||||
ov_set_if_not_defined(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
|
||||
|
||||
if(CPACK_GENERATOR MATCHES "^(DEB|RPM)$")
|
||||
# to make sure that lib/<multiarch-tuple> is created on Debian
|
||||
# to make sure that lib/<multiarch-triplet> is created on Debian
|
||||
set(CMAKE_INSTALL_PREFIX "/usr" CACHE PATH "Cmake install prefix" FORCE)
|
||||
endif()
|
||||
|
||||
@@ -196,10 +181,6 @@ if(APPLE)
|
||||
message(FATAL_ERROR "Internal error: OV_CPACK_LIBRARYDIR is not defined, while it's required to initialize RPATH")
|
||||
endif()
|
||||
|
||||
if(CPACK_GENERATOR STREQUAL "BREW")
|
||||
set(CMAKE_SKIP_INSTALL_RPATH OFF)
|
||||
endif()
|
||||
|
||||
# WA for Xcode generator + object libraries issue:
|
||||
# https://gitlab.kitware.com/cmake/cmake/issues/20260
|
||||
# http://cmake.3232098.n2.nabble.com/XCODE-DEPEND-HELPER-make-Deletes-Targets-Before-and-While-They-re-Built-td7598277.html
|
||||
@@ -247,17 +228,6 @@ endif()
|
||||
|
||||
# General flags
|
||||
|
||||
macro(ov_install_static_lib target comp)
|
||||
if(NOT BUILD_SHARED_LIBS)
|
||||
get_target_property(target_type ${target} TYPE)
|
||||
if(target_type STREQUAL "STATIC_LIBRARY")
|
||||
set_target_properties(${target} PROPERTIES EXCLUDE_FROM_ALL OFF)
|
||||
endif()
|
||||
install(TARGETS ${target} EXPORT OpenVINOTargets
|
||||
ARCHIVE DESTINATION ${OV_CPACK_ARCHIVEDIR} COMPONENT ${comp} ${ARGN})
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
||||
find_package(Threads REQUIRED)
|
||||
|
||||
@@ -312,7 +282,6 @@ function(ov_mark_target_as_cc)
|
||||
endfunction()
|
||||
|
||||
include(python_requirements)
|
||||
include(native_compile)
|
||||
|
||||
# Code style utils
|
||||
|
||||
|
||||
@@ -163,7 +163,22 @@ function(addIeTargetTest)
|
||||
|
||||
addIeTarget(TYPE EXECUTABLE NAME ${ARG_NAME} ${ARG_UNPARSED_ARGUMENTS})
|
||||
|
||||
add_test(NAME ${ARG_NAME} COMMAND ${ARG_NAME})
|
||||
if(EMSCRIPTEN)
|
||||
set(JS_BIN_NAME "${ARG_NAME}.js")
|
||||
set(JS_APP_NAME "${ARG_NAME}_js.js")
|
||||
set(JS_TEST_APP "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${JS_APP_NAME}")
|
||||
file(WRITE ${JS_TEST_APP} "// Copyright (C) 2018-2023 Intel Corporation\n")
|
||||
file(APPEND ${JS_TEST_APP} "// SPDX-License-Identifier: Apache-2.0\n")
|
||||
file(APPEND ${JS_TEST_APP} "//\n")
|
||||
file(APPEND ${JS_TEST_APP} "// JS test app\n")
|
||||
file(APPEND ${JS_TEST_APP} "const createModule = require(\"./${JS_BIN_NAME}\");\n")
|
||||
file(APPEND ${JS_TEST_APP} "createModule().then(function(Module) {});\n")
|
||||
file(APPEND ${JS_TEST_APP} " ")
|
||||
# node version>= 16.8.0, else need add "--experimental-wasm-threads --experimental-wasm-bulk-memory" option
|
||||
add_test(NAME ${ARG_NAME} COMMAND node ${JS_TEST_APP})
|
||||
else()
|
||||
add_test(NAME ${ARG_NAME} COMMAND ${ARG_NAME})
|
||||
endif()
|
||||
set_property(TEST ${ARG_NAME} PROPERTY LABELS ${ARG_LABELS})
|
||||
|
||||
install(TARGETS ${ARG_NAME}
|
||||
|
||||
@@ -54,6 +54,8 @@ macro(ov_deprecated_no_errors)
|
||||
endif()
|
||||
elseif(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX)
|
||||
set(ie_c_cxx_deprecated_no_errors "-Wno-error=deprecated-declarations")
|
||||
# Suppress #warning messages
|
||||
set(ie_c_cxx_deprecated_no_errors "${ie_c_cxx_deprecated_no_errors} -Wno-cpp")
|
||||
else()
|
||||
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
|
||||
endif()
|
||||
@@ -73,7 +75,7 @@ macro(ov_dev_package_no_errors)
|
||||
if(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX)
|
||||
set(ie_c_cxx_dev_no_errors "-Wno-all")
|
||||
if(SUGGEST_OVERRIDE_SUPPORTED)
|
||||
set(ie_cxx_dev_no_errors "${ie_c_cxx_dev_no_errors} -Wno-error=suggest-override")
|
||||
set(ie_cxx_dev_no_errors "-Wno-error=suggest-override")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
@@ -250,6 +252,20 @@ function(ov_force_include target scope header_file)
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
#
|
||||
# ov_abi_free_target(<target name>)
|
||||
#
|
||||
# Marks target to be compiliance in CXX ABI free manner
|
||||
#
|
||||
function(ov_abi_free_target target)
|
||||
# To guarantee OpenVINO can be used with gcc versions 7 through 12.2
|
||||
# - https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Dialect-Options.html
|
||||
# - https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html
|
||||
if(CMAKE_COMPILER_IS_GNUCXX AND NOT MINGW64)
|
||||
target_compile_options(${target} PRIVATE $<$<COMPILE_LANGUAGE:CXX>:-Wabi=11>)
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
#
|
||||
# ie_python_minimal_api(<target>)
|
||||
#
|
||||
@@ -410,6 +426,13 @@ else()
|
||||
# Warn if an undefined identifier is evaluated in an #if directive. Such identifiers are replaced with zero.
|
||||
ie_add_compiler_flags(-Wundef)
|
||||
|
||||
# To guarantee OpenVINO can be used with gcc versions 7 through 12
|
||||
# - https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Dialect-Options.html
|
||||
# - https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html
|
||||
if(CMAKE_COMPILER_IS_GNUCXX)
|
||||
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wabi=11")
|
||||
endif()
|
||||
|
||||
#
|
||||
# Warnings as errors
|
||||
#
|
||||
|
||||
160
cmake/developer_package/cross_compile/find_commands.cmake
Normal file
160
cmake/developer_package/cross_compile/find_commands.cmake
Normal file
@@ -0,0 +1,160 @@
|
||||
# Copyright (C) 2018-2023 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
# Search packages for the host system instead of packages for the target system
|
||||
# in case of cross compilation these macros should be defined by the toolchain file
|
||||
|
||||
if(CMAKE_CROSSCOMPILING AND NOT (OV_ARCH STREQUAL OV_HOST_ARCH AND
|
||||
CMAKE_SYSTEM_NAME STREQUAL CMAKE_HOST_SYSTEM_NAME))
|
||||
# don't look at directories which are part of PATH (with removed bin / sbin at the end)
|
||||
# like /opt/homebrew on macOS where we cannot use system env path, because brew's
|
||||
# dependencies will be found, but at the same time we need to find flatbufffers and
|
||||
# other build system dependencies
|
||||
# ov_set_if_not_defined(CMAKE_FIND_USE_SYSTEM_ENVIRONMENT_PATH OFF)
|
||||
ov_set_if_not_defined(CMAKE_FIND_USE_SYSTEM_PACKAGE_REGISTRY OFF)
|
||||
# it contains /usr and if we set this var to OFF, then CMAKE_FIND_ROOT_PATH is ignored
|
||||
# ov_set_if_not_defined(CMAKE_FIND_USE_CMAKE_SYSTEM_PATH OFF)
|
||||
if(LINUX)
|
||||
# set root paths (overridden to /usr/lib/<CMAKE_LIBRARY_ARCHITECTURE>/cmake)
|
||||
# CMAKE_LIBRARY_ARCHITECTURE is defined automatically by cmake after trying the compilers
|
||||
# ov_set_if_not_defined(CMAKE_FIND_ROOT_PATH "/usr")
|
||||
endif()
|
||||
|
||||
# controling CMAKE_FIND_ROOT_PATH usage
|
||||
ov_set_if_not_defined(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
|
||||
ov_set_if_not_defined(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
|
||||
ov_set_if_not_defined(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
|
||||
ov_set_if_not_defined(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
|
||||
endif()
|
||||
|
||||
macro(__ov_cmake_find_system_path_save_and_reset)
|
||||
foreach(v
|
||||
CMAKE_FIND_USE_SYSTEM_ENVIRONMENT_PATH
|
||||
CMAKE_FIND_USE_SYSTEM_PACKAGE_REGISTRY
|
||||
CMAKE_FIND_USE_CMAKE_SYSTEM_PATH
|
||||
)
|
||||
if(DEFINED ${v})
|
||||
set(__ov_save_${v} ${${v}})
|
||||
else()
|
||||
set(__ov_save_${v} ON)
|
||||
endif()
|
||||
set(${v} ON)
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
macro(__ov_cmake_find_system_path_restore)
|
||||
foreach(v
|
||||
CMAKE_FIND_USE_SYSTEM_ENVIRONMENT_PATH
|
||||
CMAKE_FIND_USE_SYSTEM_PACKAGE_REGISTRY
|
||||
CMAKE_FIND_USE_CMAKE_SYSTEM_PATH
|
||||
)
|
||||
set(${v} ${__ov_save_${v}})
|
||||
unset(__ov_save_${v})
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
macro(__ov_cmake_find_root_save_and_reset)
|
||||
foreach(v
|
||||
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
|
||||
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
|
||||
)
|
||||
set(__ov_save_${v} ${${v}})
|
||||
set(${v} NEVER)
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
macro(__ov_cmake_find_root_restore)
|
||||
foreach(v
|
||||
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
|
||||
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
|
||||
)
|
||||
set(${v} ${__ov_save_${v}})
|
||||
unset(__ov_save_${v})
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
macro(__ov_cmake_target_flags_save_and_reset)
|
||||
foreach(v WIN32 UNIX LINUX APPLE ANDROID BSD)
|
||||
set(__ov_target_save_${v} ${${v}})
|
||||
unset(${v})
|
||||
endforeach()
|
||||
|
||||
if(CMAKE_HOST_WIN32)
|
||||
set(WIN32 1)
|
||||
elseif(CMAKE_HOST_APPLE)
|
||||
set(APPLE 1)
|
||||
set(UNIX 1)
|
||||
elseif(CMAKE_HOST_LINUX)
|
||||
set(LINUX 1)
|
||||
set(UNIX 1)
|
||||
elseif(CMAKE_HOST_UNIX)
|
||||
set(UNIX 1)
|
||||
elseif(CMAKE_HOST_BSD)
|
||||
set(BSD 1)
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
macro(__ov_cmake_target_flags_restore)
|
||||
foreach(v WIN32 UNIX LINUX APPLE ANDROID BSD)
|
||||
set(${v} ${__ov_target_save_${v}})
|
||||
unset(__ov_target_save_${v})
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
if(CMAKE_CROSSCOMPILING)
|
||||
# macro to find programs on the host OS
|
||||
if(NOT COMMAND find_host_package)
|
||||
macro(find_host_package)
|
||||
__ov_cmake_find_root_save_and_reset()
|
||||
__ov_cmake_target_flags_save_and_reset()
|
||||
__ov_cmake_find_system_path_save_and_reset()
|
||||
find_package(${ARGN})
|
||||
__ov_cmake_find_system_path_restore()
|
||||
__ov_cmake_target_flags_restore()
|
||||
__ov_cmake_find_root_restore()
|
||||
endmacro()
|
||||
endif()
|
||||
if(NOT COMMAND find_host_program)
|
||||
macro(find_host_program)
|
||||
__ov_cmake_find_root_save_and_reset()
|
||||
__ov_cmake_target_flags_save_and_reset()
|
||||
__ov_cmake_find_system_path_save_and_reset()
|
||||
find_program(${ARGN})
|
||||
__ov_cmake_find_system_path_restore()
|
||||
__ov_cmake_target_flags_restore()
|
||||
__ov_cmake_find_root_restore()
|
||||
endmacro()
|
||||
endif()
|
||||
if(NOT COMMAND find_host_library)
|
||||
macro(find_host_library)
|
||||
__ov_cmake_find_root_save_and_reset()
|
||||
__ov_cmake_target_flags_save_and_reset()
|
||||
__ov_cmake_find_system_path_save_and_reset()
|
||||
find_library(${ARGN})
|
||||
__ov_cmake_find_system_path_restore()
|
||||
__ov_cmake_target_flags_restore()
|
||||
__ov_cmake_find_root_restore()
|
||||
endmacro()
|
||||
endif()
|
||||
else()
|
||||
if(NOT COMMAND find_host_package)
|
||||
macro(find_host_package)
|
||||
find_package(${ARGN})
|
||||
endmacro()
|
||||
endif()
|
||||
if(NOT COMMAND find_host_program)
|
||||
macro(find_host_program)
|
||||
find_program(${ARGN})
|
||||
endmacro()
|
||||
endif()
|
||||
if(NOT COMMAND find_host_library)
|
||||
macro(find_host_library)
|
||||
find_library(${ARGN})
|
||||
endmacro()
|
||||
endif()
|
||||
endif()
|
||||
@@ -272,6 +272,10 @@ macro(ov_add_frontend)
|
||||
# must be called after all target_link_libraries
|
||||
ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME})
|
||||
|
||||
# since frontends are user-facing component which can be linked against,
|
||||
# then we need to mark it to be CXX ABI free
|
||||
ov_abi_free_target(${TARGET_NAME})
|
||||
|
||||
# installation
|
||||
|
||||
if(NOT OV_FRONTEND_SKIP_INSTALL)
|
||||
|
||||
@@ -11,29 +11,32 @@ set(ncc_style_bin_dir "${CMAKE_CURRENT_BINARY_DIR}/ncc_naming_style")
|
||||
|
||||
# find python3
|
||||
|
||||
find_host_package(PythonInterp 3 QUIET)
|
||||
if(NOT PYTHONINTERP_FOUND)
|
||||
message(WARNING "Python3 interpreter was not found (required for ncc naming style check)")
|
||||
set(ENABLE_NCC_STYLE OFF)
|
||||
if(ENABLE_NCC_STYLE)
|
||||
find_host_package(PythonInterp 3 QUIET)
|
||||
if(NOT PYTHONINTERP_FOUND)
|
||||
message(WARNING "Python3 interpreter was not found (required for ncc naming style check)")
|
||||
set(ENABLE_NCC_STYLE OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(PYTHON_VERSION_MINOR EQUAL 6)
|
||||
set(clang_version 10)
|
||||
elseif(PYTHON_VERSION_MINOR EQUAL 7)
|
||||
set(clang_version 11)
|
||||
elseif(PYTHON_VERSION_MINOR EQUAL 8)
|
||||
set(clang_version 12)
|
||||
elseif(PYTHON_VERSION_MINOR EQUAL 9)
|
||||
set(clang_version 12)
|
||||
elseif(PYTHON_VERSION_MINOR EQUAL 10)
|
||||
set(clang_version 14)
|
||||
elseif(PYTHON_VERSION_MINOR EQUAL 11)
|
||||
set(clang_version 14)
|
||||
else()
|
||||
message(WARNING "Cannot suggest clang package for python ${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}")
|
||||
if(ENABLE_NCC_STYLE)
|
||||
if(PYTHON_VERSION_MINOR EQUAL 6)
|
||||
set(clang_version 10)
|
||||
elseif(PYTHON_VERSION_MINOR EQUAL 7)
|
||||
set(clang_version 11)
|
||||
elseif(PYTHON_VERSION_MINOR EQUAL 8)
|
||||
set(clang_version 12)
|
||||
elseif(PYTHON_VERSION_MINOR EQUAL 9)
|
||||
set(clang_version 12)
|
||||
elseif(PYTHON_VERSION_MINOR EQUAL 10)
|
||||
set(clang_version 14)
|
||||
elseif(PYTHON_VERSION_MINOR EQUAL 11)
|
||||
set(clang_version 14)
|
||||
else()
|
||||
message(WARNING "Cannot suggest clang package for python ${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
|
||||
if(ENABLE_NCC_STYLE)
|
||||
# try to find_package(Clang QUIET)
|
||||
# ClangConfig.cmake contains bug that if libclang-XX-dev is not
|
||||
@@ -58,16 +61,22 @@ endif()
|
||||
# Since we were able to find_package(Clang) in a separate process
|
||||
# let's try to find in current process
|
||||
if(ENABLE_NCC_STYLE)
|
||||
if(WIN32)
|
||||
set(CLANG_LIB_NAME libclang.dll)
|
||||
find_host_program(CLANG NAMES ${CLANG_LIB_NAME} PATHS ENV PATH)
|
||||
if(CLANG)
|
||||
set(libclang_location ${CLANG})
|
||||
endif()
|
||||
elseif(APPLE)
|
||||
if(CMAKE_HOST_WIN32)
|
||||
find_host_program(libclang_location NAMES libclang.dll
|
||||
PATHS $ENV{PATH}
|
||||
NO_CMAKE_FIND_ROOT_PATH)
|
||||
elseif(CMAKE_HOST_APPLE)
|
||||
set(_old_CMAKE_FIND_LIBRARY_PREFIXES ${CMAKE_FIND_LIBRARY_PREFIXES})
|
||||
set(_old_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
set(CMAKE_FIND_LIBRARY_PREFIXES "lib")
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".dylib")
|
||||
find_host_library(libclang_location NAMES clang
|
||||
PATHS /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib
|
||||
DOC "Path to clang library")
|
||||
DOC "Path to clang library"
|
||||
NO_DEFAULT_PATH
|
||||
NO_CMAKE_FIND_ROOT_PATH)
|
||||
set(CMAKE_FIND_LIBRARY_PREFIXES ${_old_CMAKE_FIND_LIBRARY_PREFIXES})
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ${_old_CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
else()
|
||||
find_host_package(Clang QUIET)
|
||||
endif()
|
||||
|
||||
@@ -21,14 +21,22 @@ macro(ov_common_libraries_cpack_set_dirs)
|
||||
endif()
|
||||
set(OV_WHEEL_RUNTIMEDIR ${OV_CPACK_RUNTIMEDIR})
|
||||
set(OV_CPACK_ARCHIVEDIR ${CMAKE_INSTALL_LIBDIR})
|
||||
set(OV_CPACK_PLUGINSDIR ${OV_CPACK_RUNTIMEDIR}/openvino-${OpenVINO_VERSION})
|
||||
set(OV_CPACK_IE_CMAKEDIR ${CMAKE_INSTALL_LIBDIR}/cmake/inferenceengine${OpenVINO_VERSION})
|
||||
set(OV_CPACK_NGRAPH_CMAKEDIR ${CMAKE_INSTALL_LIBDIR}/cmake/ngraph${OpenVINO_VERSION})
|
||||
set(OV_CPACK_OPENVINO_CMAKEDIR ${CMAKE_INSTALL_LIBDIR}/cmake/openvino${OpenVINO_VERSION})
|
||||
if(CPACK_GENERATOR MATCHES "^(CONAN|VCPKG)$")
|
||||
set(OV_CPACK_IE_CMAKEDIR ${CMAKE_INSTALL_DATADIR}/openvino)
|
||||
set(OV_CPACK_NGRAPH_CMAKEDIR ${CMAKE_INSTALL_DATADIR}/openvino)
|
||||
set(OV_CPACK_OPENVINO_CMAKEDIR ${CMAKE_INSTALL_DATADIR}/openvino)
|
||||
set(OV_CPACK_PLUGINSDIR ${OV_CPACK_RUNTIMEDIR})
|
||||
else()
|
||||
set(OV_CPACK_IE_CMAKEDIR ${CMAKE_INSTALL_LIBDIR}/cmake/inferenceengine${OpenVINO_VERSION})
|
||||
set(OV_CPACK_NGRAPH_CMAKEDIR ${CMAKE_INSTALL_LIBDIR}/cmake/ngraph${OpenVINO_VERSION})
|
||||
set(OV_CPACK_OPENVINO_CMAKEDIR ${CMAKE_INSTALL_LIBDIR}/cmake/openvino${OpenVINO_VERSION})
|
||||
set(OV_CPACK_PLUGINSDIR ${OV_CPACK_RUNTIMEDIR}/openvino-${OpenVINO_VERSION})
|
||||
endif()
|
||||
set(OV_CPACK_LICENSESDIR licenses)
|
||||
|
||||
ov_get_pyversion(pyversion)
|
||||
if(pyversion)
|
||||
# should not be used in production; only by setup.py install
|
||||
set(OV_CPACK_PYTHONDIR ${CMAKE_INSTALL_LIBDIR}/${pyversion}/site-packages)
|
||||
endif()
|
||||
|
||||
@@ -45,6 +53,10 @@ macro(ov_common_libraries_cpack_set_dirs)
|
||||
set(IE_CPACK_LIBRARY_PATH ${OV_CPACK_LIBRARYDIR})
|
||||
set(IE_CPACK_RUNTIME_PATH ${OV_CPACK_RUNTIMEDIR})
|
||||
set(IE_CPACK_ARCHIVE_PATH ${OV_CPACK_ARCHIVEDIR})
|
||||
|
||||
if(CPACK_GENERATOR STREQUAL "BREW")
|
||||
set(CMAKE_SKIP_INSTALL_RPATH OFF)
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
ov_common_libraries_cpack_set_dirs()
|
||||
@@ -67,9 +79,47 @@ macro(ov_override_component_names)
|
||||
set(OV_CPACK_COMP_CPP_SAMPLES "samples")
|
||||
set(OV_CPACK_COMP_C_SAMPLES "${OV_CPACK_COMP_CPP_SAMPLES}")
|
||||
# move requirements.txt to core-dev
|
||||
set(OV_CPACK_COMP_DEV_REQ_FILES "${OV_CPACK_COMP_CORE_DEV}")
|
||||
# set(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES "${OV_CPACK_COMP_CORE_DEV}")
|
||||
# move core_tools to core-dev
|
||||
# set(OV_CPACK_COMP_CORE_TOOLS "${OV_CPACK_COMP_CORE_DEV}")
|
||||
endmacro()
|
||||
|
||||
ov_override_component_names()
|
||||
|
||||
#
|
||||
# Override include / exclude rules for components
|
||||
# This is required to exclude some files from installation
|
||||
# (e.g. debian packages don't require setupvars scripts)
|
||||
#
|
||||
|
||||
macro(ov_define_component_include_rules)
|
||||
# core components
|
||||
unset(OV_CPACK_COMP_CORE_EXCLUDE_ALL)
|
||||
set(OV_CPACK_COMP_CORE_C_EXCLUDE_ALL ${OV_CPACK_COMP_CORE_EXCLUDE_ALL})
|
||||
unset(OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL)
|
||||
set(OV_CPACK_COMP_CORE_C_DEV_EXCLUDE_ALL ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL})
|
||||
# licensing
|
||||
set(OV_CPACK_COMP_LICENSING_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
# samples
|
||||
set(OV_CPACK_COMP_CPP_SAMPLES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
set(OV_CPACK_COMP_C_SAMPLES_EXCLUDE_ALL ${OV_CPACK_COMP_CPP_SAMPLES_EXCLUDE_ALL})
|
||||
set(OV_CPACK_COMP_PYTHON_SAMPLES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
# python
|
||||
set(OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
set(OV_CPACK_COMP_PYTHON_IE_API_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL})
|
||||
set(OV_CPACK_COMP_PYTHON_NGRAPH_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL})
|
||||
# we don't pack artifacts of setup.py install, because it's called explicitly in conda / brew
|
||||
# or not used at all like in cases with conan / vcpkg
|
||||
set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL})
|
||||
# we don't need wheels in package, it's used installed only in open source distribution
|
||||
set(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
# tools
|
||||
set(OV_CPACK_COMP_CORE_TOOLS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
set(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
set(OV_CPACK_COMP_DEPLOYMENT_MANAGER_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
# scripts
|
||||
set(OV_CPACK_COMP_INSTALL_DEPENDENCIES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
set(OV_CPACK_COMP_SETUPVARS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
endmacro()
|
||||
|
||||
ov_define_component_include_rules()
|
||||
|
||||
@@ -69,13 +69,59 @@ macro(ov_override_component_names)
|
||||
set(OV_CPACK_COMP_CPP_SAMPLES "samples")
|
||||
set(OV_CPACK_COMP_C_SAMPLES "${OV_CPACK_COMP_CPP_SAMPLES}")
|
||||
# move requirements.txt to core-dev
|
||||
set(OV_CPACK_COMP_DEV_REQ_FILES "${OV_CPACK_COMP_CORE_DEV}")
|
||||
# set(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES "${OV_CPACK_COMP_CORE_DEV}")
|
||||
# move core_tools to core-dev
|
||||
# set(OV_CPACK_COMP_CORE_TOOLS "${OV_CPACK_COMP_CORE_DEV}")
|
||||
endmacro()
|
||||
|
||||
ov_override_component_names()
|
||||
|
||||
#
|
||||
# Override include / exclude rules for components
|
||||
# This is required to exclude some files from installation
|
||||
# (e.g. debian packages don't require setupvars scripts)
|
||||
#
|
||||
|
||||
macro(ov_define_component_include_rules)
|
||||
# core components
|
||||
unset(OV_CPACK_COMP_CORE_EXCLUDE_ALL)
|
||||
set(OV_CPACK_COMP_CORE_C_EXCLUDE_ALL ${OV_CPACK_COMP_CORE_EXCLUDE_ALL})
|
||||
unset(OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL)
|
||||
set(OV_CPACK_COMP_CORE_C_DEV_EXCLUDE_ALL ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL})
|
||||
# licensing
|
||||
set(OV_CPACK_COMP_LICENSING_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
# samples
|
||||
unset(OV_CPACK_COMP_CPP_SAMPLES_EXCLUDE_ALL)
|
||||
set(OV_CPACK_COMP_C_SAMPLES_EXCLUDE_ALL ${OV_CPACK_COMP_CPP_SAMPLES_EXCLUDE_ALL})
|
||||
if(ENABLE_PYTHON_PACKAGING)
|
||||
unset(OV_CPACK_COMP_PYTHON_SAMPLES_EXCLUDE_ALL)
|
||||
else()
|
||||
set(OV_CPACK_COMP_PYTHON_SAMPLES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
endif()
|
||||
# python
|
||||
if(ENABLE_PYTHON_PACKAGING)
|
||||
# pack artifacts of setup.py install
|
||||
unset(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL)
|
||||
else()
|
||||
set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
endif()
|
||||
# we don't pack python components itself, we pack artifacts of setup.py install
|
||||
set(OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
set(OV_CPACK_COMP_PYTHON_IE_API_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL})
|
||||
set(OV_CPACK_COMP_PYTHON_NGRAPH_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL})
|
||||
# we don't need wheels in Debian packages
|
||||
set(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
# tools
|
||||
set(OV_CPACK_COMP_CORE_TOOLS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
set(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
set(OV_CPACK_COMP_DEPLOYMENT_MANAGER_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
# scripts
|
||||
set(OV_CPACK_COMP_INSTALL_DEPENDENCIES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
set(OV_CPACK_COMP_SETUPVARS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
endmacro()
|
||||
|
||||
ov_define_component_include_rules()
|
||||
|
||||
#
|
||||
# Common Debian specific settings
|
||||
#
|
||||
@@ -138,9 +184,9 @@ endmacro()
|
||||
ov_debian_specific_settings()
|
||||
|
||||
# needed to override cmake auto generated files
|
||||
set(def_postinst "${OpenVINO_BINARY_DIR}/_CPack_Packages/postinst")
|
||||
set(def_postrm "${OpenVINO_BINARY_DIR}/_CPack_Packages/postrm")
|
||||
set(def_triggers "${OpenVINO_BINARY_DIR}/_CPack_Packages/triggers")
|
||||
set(def_postinst "${CMAKE_CURRENT_BINARY_DIR}/_CPack_Packages/postinst")
|
||||
set(def_postrm "${CMAKE_CURRENT_BINARY_DIR}/_CPack_Packages/postrm")
|
||||
set(def_triggers "${CMAKE_CURRENT_BINARY_DIR}/_CPack_Packages/triggers")
|
||||
|
||||
set(triggers_content "activate-noawait ldconfig\n\n")
|
||||
set(post_content "#!/bin/sh\n\nset -e;\nset -e\n\n")
|
||||
@@ -276,7 +322,7 @@ macro(ov_debian_add_latest_component comp)
|
||||
set(upper_case "${ucomp}_LATEST")
|
||||
|
||||
set(CPACK_COMPONENT_${upper_case}_DESCRIPTION "${CPACK_COMPONENT_${ucomp}_DESCRIPTION}")
|
||||
set(CPACK_COMPONENT_${upper_case}_ARCHITECTURE "all")
|
||||
set(CPACK_DEBIAN_${upper_case}_PACKAGE_ARCHITECTURE "all")
|
||||
set(CPACK_COMPONENT_${upper_case}_DEPENDS "${comp}")
|
||||
set(${comp_name}_copyright "generic")
|
||||
|
||||
|
||||
@@ -2,40 +2,78 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
# installation directory
|
||||
set(CPACK_PACKAGE_INSTALL_DIRECTORY "Intel")
|
||||
macro(ov_nsis_specific_settings)
|
||||
# installation directory
|
||||
set(CPACK_PACKAGE_INSTALL_DIRECTORY "Intel")
|
||||
|
||||
# TODO: provide icons
|
||||
# set(CPACK_NSIS_MUI_ICON "")
|
||||
# set(CPACK_NSIS_MUI_UNIICON "${CPACK_NSIS_MUI_ICON}")
|
||||
# set(CPACK_NSIS_MUI_WELCOMEFINISHPAGE_BITMAP "")
|
||||
# set(CPACK_NSIS_MUI_UNWELCOMEFINISHPAGE_BITMAP "")
|
||||
# set(CPACK_NSIS_MUI_HEADERIMAGE "")
|
||||
# TODO: provide icons
|
||||
# set(CPACK_NSIS_MUI_ICON "")
|
||||
# set(CPACK_NSIS_MUI_UNIICON "${CPACK_NSIS_MUI_ICON}")
|
||||
# set(CPACK_NSIS_MUI_WELCOMEFINISHPAGE_BITMAP "")
|
||||
# set(CPACK_NSIS_MUI_UNWELCOMEFINISHPAGE_BITMAP "")
|
||||
# set(CPACK_NSIS_MUI_HEADERIMAGE "")
|
||||
|
||||
# we allow to install several packages at once
|
||||
set(CPACK_NSIS_ENABLE_UNINSTALL_BEFORE_INSTALL OFF)
|
||||
set(CPACK_NSIS_MODIFY_PATH OFF)
|
||||
# we allow to install several packages at once
|
||||
set(CPACK_NSIS_ENABLE_UNINSTALL_BEFORE_INSTALL OFF)
|
||||
set(CPACK_NSIS_MODIFY_PATH OFF)
|
||||
|
||||
set(CPACK_NSIS_DISPLAY_NAME "Intel(R) OpenVINO(TM) ${OpenVINO_VERSION}")
|
||||
set(CPACK_NSIS_PACKAGE_NAME "Intel(R) OpenVINO(TM) ToolKit, v. ${OpenVINO_VERSION}.${OpenVINO_PATCH_VERSION}")
|
||||
set(CPACK_NSIS_DISPLAY_NAME "Intel(R) OpenVINO(TM) ${OpenVINO_VERSION}")
|
||||
set(CPACK_NSIS_PACKAGE_NAME "Intel(R) OpenVINO(TM) ToolKit, v. ${OpenVINO_VERSION}.${OpenVINO_PATCH_VERSION}")
|
||||
|
||||
# contact
|
||||
set(CPACK_NSIS_CONTACT "CPACK_NSIS_CONTACT")
|
||||
# contact
|
||||
set(CPACK_NSIS_CONTACT "CPACK_NSIS_CONTACT")
|
||||
|
||||
# links in menu
|
||||
set(CPACK_NSIS_MENU_LINKS
|
||||
"https://docs.openvinoo.ai" "OpenVINO Documentation")
|
||||
# links in menu
|
||||
set(CPACK_NSIS_MENU_LINKS "https://docs.openvinoo.ai" "OpenVINO Documentation")
|
||||
|
||||
# welcome and finish titles
|
||||
set(CPACK_NSIS_WELCOME_TITLE "Welcome to Intel(R) Distribution of OpenVINO(TM) Toolkit installation")
|
||||
set(CPACK_NSIS_FINISH_TITLE "")
|
||||
# welcome and finish titles
|
||||
set(CPACK_NSIS_WELCOME_TITLE "Welcome to Intel(R) Distribution of OpenVINO(TM) Toolkit installation")
|
||||
set(CPACK_NSIS_FINISH_TITLE "")
|
||||
|
||||
# autoresize?
|
||||
set(CPACK_NSIS_MANIFEST_DPI_AWARE ON)
|
||||
# autoresize?
|
||||
set(CPACK_NSIS_MANIFEST_DPI_AWARE ON)
|
||||
|
||||
# branding text
|
||||
set(CPACK_NSIS_BRANDING_TEXT "Intel(R) Corp.")
|
||||
set(CPACK_NSIS_BRANDING_TEXT_TRIM_POSITION RIGHT)
|
||||
# branding text
|
||||
set(CPACK_NSIS_BRANDING_TEXT "Intel(R) Corp.")
|
||||
set(CPACK_NSIS_BRANDING_TEXT_TRIM_POSITION RIGHT)
|
||||
|
||||
# don't set this variable since we need a user to agree with a lincense
|
||||
# set(CPACK_NSIS_IGNORE_LICENSE_PAGE OFF)
|
||||
# don't set this variable since we need a user to agree with a lincense
|
||||
# set(CPACK_NSIS_IGNORE_LICENSE_PAGE OFF)
|
||||
endmacro()
|
||||
|
||||
ov_nsis_specific_settings()
|
||||
|
||||
#
|
||||
# Override include / exclude rules for components
|
||||
# This is required to exclude some files from installation
|
||||
# (e.g. NSIS packages don't require wheels to be packacged)
|
||||
#
|
||||
|
||||
macro(ov_define_component_include_rules)
|
||||
# core components
|
||||
unset(OV_CPACK_COMP_CORE_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_CORE_C_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_CORE_C_DEV_EXCLUDE_ALL)
|
||||
# licensing
|
||||
unset(OV_CPACK_COMP_LICENSING_EXCLUDE_ALL)
|
||||
# samples
|
||||
unset(OV_CPACK_COMP_CPP_SAMPLES_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_C_SAMPLES_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_PYTHON_SAMPLES_EXCLUDE_ALL)
|
||||
# python
|
||||
unset(OV_CPACK_COMP_PYTHON_IE_API_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_PYTHON_NGRAPH_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL)
|
||||
set(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
# tools
|
||||
unset(OV_CPACK_COMP_CORE_TOOLS_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_DEPLOYMENT_MANAGER_EXCLUDE_ALL)
|
||||
# scripts
|
||||
unset(OV_CPACK_COMP_INSTALL_DEPENDENCIES_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_SETUPVARS_EXCLUDE_ALL)
|
||||
endmacro()
|
||||
|
||||
ov_define_component_include_rules()
|
||||
|
||||
@@ -5,7 +5,21 @@
|
||||
include(CPackComponent)
|
||||
|
||||
#
|
||||
# ov_get_pyversion()
|
||||
# ov_install_static_lib(<target> <comp>)
|
||||
#
|
||||
macro(ov_install_static_lib target comp)
|
||||
if(NOT BUILD_SHARED_LIBS)
|
||||
get_target_property(target_type ${target} TYPE)
|
||||
if(target_type STREQUAL "STATIC_LIBRARY")
|
||||
set_target_properties(${target} PROPERTIES EXCLUDE_FROM_ALL OFF)
|
||||
endif()
|
||||
install(TARGETS ${target} EXPORT OpenVINOTargets
|
||||
ARCHIVE DESTINATION ${OV_CPACK_ARCHIVEDIR} COMPONENT ${comp} ${ARGN})
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
#
|
||||
# ov_get_pyversion(<OUT pyversion>)
|
||||
#
|
||||
function(ov_get_pyversion pyversion)
|
||||
find_package(PythonInterp 3 QUIET)
|
||||
@@ -29,16 +43,12 @@ macro(ov_cpack_set_dirs)
|
||||
set(OV_CPACK_NGRAPH_CMAKEDIR runtime/cmake)
|
||||
set(OV_CPACK_OPENVINO_CMAKEDIR runtime/cmake)
|
||||
set(OV_CPACK_DOCDIR docs)
|
||||
set(OV_CPACK_LICENSESDIR ${OV_CPACK_DOCDIR}/licenses)
|
||||
set(OV_CPACK_LICENSESDIR licenses)
|
||||
set(OV_CPACK_SAMPLESDIR samples)
|
||||
set(OV_CPACK_WHEELSDIR tools)
|
||||
set(OV_CPACK_TOOLSDIR tools)
|
||||
set(OV_CPACK_DEVREQDIR tools)
|
||||
|
||||
ov_get_pyversion(pyversion)
|
||||
if(pyversion)
|
||||
set(OV_CPACK_PYTHONDIR python/${pyversion})
|
||||
endif()
|
||||
set(OV_CPACK_PYTHONDIR python)
|
||||
|
||||
if(WIN32)
|
||||
set(OV_CPACK_LIBRARYDIR runtime/lib/${ARCH_FOLDER}/$<CONFIG>)
|
||||
@@ -138,10 +148,11 @@ macro(ov_define_component_names)
|
||||
set(OV_CPACK_COMP_PYTHON_IE_API "pyie")
|
||||
set(OV_CPACK_COMP_PYTHON_NGRAPH "pyngraph")
|
||||
set(OV_CPACK_COMP_PYTHON_OPENVINO "pyopenvino")
|
||||
set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE "pyopenvino_package")
|
||||
set(OV_CPACK_COMP_PYTHON_WHEELS "python_wheels")
|
||||
# tools
|
||||
set(OV_CPACK_COMP_CORE_TOOLS "core_tools")
|
||||
set(OV_CPACK_COMP_DEV_REQ_FILES "openvino_dev_req_files")
|
||||
set(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES "openvino_dev_req_files")
|
||||
set(OV_CPACK_COMP_DEPLOYMENT_MANAGER "deployment_manager")
|
||||
# scripts
|
||||
set(OV_CPACK_COMP_INSTALL_DEPENDENCIES "install_dependencies")
|
||||
@@ -150,20 +161,69 @@ endmacro()
|
||||
|
||||
ov_define_component_names()
|
||||
|
||||
# Include Debian specific configuration file:
|
||||
# - overrides directories set by ov_debian_cpack_set_dirs()
|
||||
# - merges some components using ov_override_component_names()
|
||||
# - sets ov_debian_specific_settings() with DEB generator variables
|
||||
# - defines the following helper functions:
|
||||
# - ov_add_lintian_suppression()
|
||||
# - ov_add_latest_component()
|
||||
# default components for case when CPACK_GENERATOR is not set (i.e. default open source user)
|
||||
macro(ov_define_component_include_rules)
|
||||
# core components
|
||||
unset(OV_CPACK_COMP_CORE_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_CORE_C_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_CORE_C_DEV_EXCLUDE_ALL)
|
||||
# licensing
|
||||
unset(OV_CPACK_COMP_LICENSING_EXCLUDE_ALL)
|
||||
# samples
|
||||
unset(OV_CPACK_COMP_CPP_SAMPLES_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_C_SAMPLES_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_PYTHON_SAMPLES_EXCLUDE_ALL)
|
||||
# python
|
||||
unset(OV_CPACK_COMP_PYTHON_IE_API_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_PYTHON_NGRAPH_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL)
|
||||
# TODO: think about python entry points
|
||||
# maybe we can create entry points without python interpreter and use it in debian / rpm as well?
|
||||
set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
# tools
|
||||
unset(OV_CPACK_COMP_CORE_TOOLS_EXCLUDE_ALL)
|
||||
set(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
unset(OV_CPACK_COMP_DEPLOYMENT_MANAGER_EXCLUDE_ALL)
|
||||
# scripts
|
||||
unset(OV_CPACK_COMP_INSTALL_DEPENDENCIES_EXCLUDE_ALL)
|
||||
unset(OV_CPACK_COMP_SETUPVARS_EXCLUDE_ALL)
|
||||
endmacro()
|
||||
|
||||
ov_define_component_include_rules()
|
||||
|
||||
#
|
||||
# Include generator specific configuration file:
|
||||
# 1. Overrides directories set by ov_<debian | rpm | common_libraries>_cpack_set_dirs()
|
||||
# This is requried, because different generator use different locations for installed files
|
||||
# 2. Merges some components using ov_override_component_names()
|
||||
# This is required, because different generators have different set of components
|
||||
# (e.g. C and C++ API are separate components)
|
||||
# 3. Exclude some components using ov_define_component_include_rules()
|
||||
# This steps exclude some files from installation by defining variables meaning EXCLUDE_ALL
|
||||
# 4. Sets ov_<debian | rpm | ...>_specific_settings() with DEB generator variables
|
||||
# This 'callback' is later called from ov_cpack (wrapper for standard cpack) to set
|
||||
# per-component settings (e.g. package names, dependencies, versions and system dependencies)
|
||||
# 5. (Optional) Defines the following helper functions, which can be used by 3rdparty modules:
|
||||
# Debian:
|
||||
# - ov_debian_add_changelog_and_copyright()
|
||||
# - ov_debian_add_lintian_suppression()
|
||||
# - ov_debian_generate_conflicts()
|
||||
# - ov_debian_add_latest_component()
|
||||
# RPM:
|
||||
# - ov_rpm_add_rpmlint_suppression()
|
||||
# - ov_rpm_generate_conflicts()
|
||||
# - ov_rpm_copyright()
|
||||
# - ov_rpm_add_latest_component()
|
||||
#
|
||||
if(CPACK_GENERATOR STREQUAL "DEB")
|
||||
include(packaging/debian/debian)
|
||||
elseif(CPACK_GENERATOR STREQUAL "RPM")
|
||||
include(packaging/rpm/rpm)
|
||||
elseif(CPACK_GENERATOR STREQUAL "NSIS")
|
||||
include(packaging/nsis)
|
||||
elseif(CPACK_GENERATOR MATCHES "^(CONDA-FORGE|BREW|CONAN)$")
|
||||
elseif(CPACK_GENERATOR MATCHES "^(CONDA-FORGE|BREW|CONAN|VCPKG)$")
|
||||
include(packaging/common-libraries)
|
||||
endif()
|
||||
|
||||
|
||||
@@ -24,11 +24,6 @@ macro(ov_rpm_cpack_set_dirs)
|
||||
set(OV_CPACK_DOCDIR ${CMAKE_INSTALL_DATADIR}/doc/openvino-${OpenVINO_VERSION})
|
||||
set(OV_CPACK_LICENSESDIR ${OV_CPACK_DOCDIR}/licenses)
|
||||
|
||||
# TODO:
|
||||
# 1. define python installation directories for RPM packages
|
||||
# 2. make sure only a single version of python API can be installed at the same time (define conflicts section)
|
||||
# set(OV_CPACK_PYTHONDIR lib/python3/dist-packages)
|
||||
|
||||
ov_get_pyversion(pyversion)
|
||||
if(pyversion)
|
||||
set(OV_CPACK_PYTHONDIR ${CMAKE_INSTALL_LIBDIR}/${pyversion}/site-packages)
|
||||
@@ -70,13 +65,59 @@ macro(ov_override_component_names)
|
||||
set(OV_CPACK_COMP_C_SAMPLES "${OV_CPACK_COMP_CPP_SAMPLES}")
|
||||
# set(OV_CPACK_COMP_PYTHON_SAMPLES "${OV_CPACK_COMP_CPP_SAMPLES}")
|
||||
# move requirements.txt to core-dev
|
||||
set(OV_CPACK_COMP_DEV_REQ_FILES "${OV_CPACK_COMP_CORE_DEV}")
|
||||
# set(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES "${OV_CPACK_COMP_CORE_DEV}")
|
||||
# move core_tools to core-dev
|
||||
# set(OV_CPACK_COMP_CORE_TOOLS "${OV_CPACK_COMP_CORE_DEV}")
|
||||
endmacro()
|
||||
|
||||
ov_override_component_names()
|
||||
|
||||
#
|
||||
# Override include / exclude rules for components
|
||||
# This is required to exclude some files from installation
|
||||
# (e.g. rpm packages don't require setupvars scripts or deployment_manager)
|
||||
#
|
||||
|
||||
macro(ov_define_component_include_rules)
|
||||
# core components
|
||||
unset(OV_CPACK_COMP_CORE_EXCLUDE_ALL)
|
||||
set(OV_CPACK_COMP_CORE_C_EXCLUDE_ALL ${OV_CPACK_COMP_CORE_EXCLUDE_ALL})
|
||||
unset(OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL)
|
||||
set(OV_CPACK_COMP_CORE_C_DEV_EXCLUDE_ALL ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL})
|
||||
# licensing
|
||||
set(OV_CPACK_COMP_LICENSING_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
# samples
|
||||
unset(OV_CPACK_COMP_CPP_SAMPLES_EXCLUDE_ALL)
|
||||
set(OV_CPACK_COMP_C_SAMPLES_EXCLUDE_ALL ${OV_CPACK_COMP_CPP_SAMPLES_EXCLUDE_ALL})
|
||||
if(ENABLE_PYTHON_PACKAGING)
|
||||
unset(OV_CPACK_COMP_PYTHON_SAMPLES_EXCLUDE_ALL)
|
||||
else()
|
||||
set(OV_CPACK_COMP_PYTHON_SAMPLES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
endif()
|
||||
# python
|
||||
if(ENABLE_PYTHON_PACKAGING)
|
||||
# pack artifacts of setup.py install
|
||||
unset(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL)
|
||||
else()
|
||||
set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
endif()
|
||||
# we don't pack python components itself, we pack artifacts of setup.py install
|
||||
set(OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
set(OV_CPACK_COMP_PYTHON_IE_API_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL})
|
||||
set(OV_CPACK_COMP_PYTHON_NGRAPH_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL})
|
||||
# we don't need wheels in RPM packages
|
||||
set(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
# tools
|
||||
set(OV_CPACK_COMP_CORE_TOOLS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
set(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
set(OV_CPACK_COMP_DEPLOYMENT_MANAGER_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
# scripts
|
||||
set(OV_CPACK_COMP_INSTALL_DEPENDENCIES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
set(OV_CPACK_COMP_SETUPVARS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
|
||||
endmacro()
|
||||
|
||||
ov_define_component_include_rules()
|
||||
|
||||
#
|
||||
# Common RPM specific settings
|
||||
#
|
||||
@@ -98,9 +139,7 @@ macro(ov_rpm_specific_settings)
|
||||
# use rpmlint to check packages in post-build step
|
||||
set(CPACK_POST_BUILD_SCRIPTS "${IEDevScripts_DIR}/packaging/rpm/post_build.cmake")
|
||||
# enable for debug cpack run
|
||||
if(NOT DEFINED CPACK_RPM_PACKAGE_DEBUG)
|
||||
set(CPACK_RPM_PACKAGE_DEBUG OFF)
|
||||
endif()
|
||||
ov_set_if_not_defined(CPACK_RPM_PACKAGE_DEBUG OFF)
|
||||
|
||||
# naming convention for rpm package files
|
||||
set(CPACK_RPM_FILE_NAME "RPM-DEFAULT")
|
||||
|
||||
@@ -108,6 +108,10 @@ function(ov_add_plugin)
|
||||
add_cpplint_target(${OV_PLUGIN_NAME}_cpplint FOR_TARGETS ${OV_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter})
|
||||
endif()
|
||||
|
||||
# plugins does not have to be CXX ABI free, because nobody links with plugins,
|
||||
# but let's add this mark to see how it goes
|
||||
ov_abi_free_target(${OV_PLUGIN_NAME})
|
||||
|
||||
add_dependencies(ov_plugins ${OV_PLUGIN_NAME})
|
||||
|
||||
# install rules
|
||||
|
||||
@@ -17,59 +17,56 @@ if(WIN32 AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
endif()
|
||||
|
||||
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
|
||||
set(host_arch_flag X86_64)
|
||||
set(OV_HOST_ARCH X86_64)
|
||||
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "i686.*|i386.*|x86.*|amd64.*|AMD64.*")
|
||||
set(host_arch_flag X86)
|
||||
set(OV_HOST_ARCH X86)
|
||||
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "^(arm64.*|aarch64.*|AARCH64.*|ARM64.*)")
|
||||
set(host_arch_flag AARCH64)
|
||||
set(OV_HOST_ARCH AARCH64)
|
||||
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)")
|
||||
set(host_arch_flag ARM)
|
||||
set(OV_HOST_ARCH ARM)
|
||||
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "^riscv64$")
|
||||
set(host_arch_flag RISCV64)
|
||||
set(OV_HOST_ARCH RISCV64)
|
||||
endif()
|
||||
|
||||
set(HOST_${host_arch_flag} ON)
|
||||
|
||||
macro(_ov_detect_arch_by_processor_type)
|
||||
if(CMAKE_OSX_ARCHITECTURES AND APPLE)
|
||||
if(CMAKE_OSX_ARCHITECTURES STREQUAL "arm64")
|
||||
set(AARCH64 ON)
|
||||
set(OV_ARCH AARCH64)
|
||||
elseif(CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64")
|
||||
set(X86_64 ON)
|
||||
set(OV_ARCH X86_64)
|
||||
elseif(CMAKE_OSX_ARCHITECTURES MATCHES ".*x86_64.*" AND CMAKE_OSX_ARCHITECTURES MATCHES ".*arm64.*")
|
||||
set(UNIVERSAL2 ON)
|
||||
set(OV_ARCH UNIVERSAL2)
|
||||
else()
|
||||
message(FATAL_ERROR "Unsupported value: CMAKE_OSX_ARCHITECTURES = ${CMAKE_OSX_ARCHITECTURES}")
|
||||
endif()
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
|
||||
set(X86_64 ON)
|
||||
set(OV_ARCH X86_64)
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "i686.*|i386.*|x86.*|amd64.*|AMD64.*|wasm")
|
||||
set(X86 ON)
|
||||
set(OV_ARCH X86)
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm64.*|aarch64.*|AARCH64.*|ARM64.*|armv8)")
|
||||
set(AARCH64 ON)
|
||||
set(OV_ARCH AARCH64)
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)")
|
||||
set(ARM ON)
|
||||
set(OV_ARCH ARM)
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^riscv64$")
|
||||
set(RISCV64 ON)
|
||||
set(OV_ARCH RISCV64)
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
macro(_ov_process_msvc_generator_platform)
|
||||
# if cmake -A <ARM|ARM64|x64|Win32> is passed
|
||||
if(CMAKE_GENERATOR_PLATFORM STREQUAL "ARM64")
|
||||
set(AARCH64 ON)
|
||||
set(OV_ARCH AARCH64)
|
||||
elseif(CMAKE_GENERATOR_PLATFORM STREQUAL "ARM")
|
||||
set(ARM ON)
|
||||
set(OV_ARCH ARM)
|
||||
elseif(CMAKE_GENERATOR_PLATFORM STREQUAL "x64")
|
||||
set(X86_64 ON)
|
||||
set(OV_ARCH X86_64)
|
||||
elseif(CMAKE_GENERATOR_PLATFORM STREQUAL "Win32")
|
||||
set(X86 ON)
|
||||
set(OV_ARCH X86)
|
||||
else()
|
||||
_ov_detect_arch_by_processor_type()
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
# TODO: why OpenCV is found by cmake
|
||||
if(MSVC64 OR MINGW64)
|
||||
_ov_process_msvc_generator_platform()
|
||||
elseif(MINGW OR (MSVC AND NOT CMAKE_CROSSCOMPILING))
|
||||
@@ -78,6 +75,9 @@ else()
|
||||
_ov_detect_arch_by_processor_type()
|
||||
endif()
|
||||
|
||||
set(HOST_${OV_HOST_ARCH} ON)
|
||||
set(${OV_ARCH} ON)
|
||||
|
||||
if(CMAKE_SYSTEM_NAME STREQUAL "Emscripten")
|
||||
set(EMSCRIPTEN ON)
|
||||
endif()
|
||||
@@ -115,7 +115,7 @@ get_property(OV_GENERATOR_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG
|
||||
|
||||
function(ov_glibc_version)
|
||||
# cmake needs to look at glibc version only when we build for Linux on Linux
|
||||
if(CMAKE_HOST_LINUX AND LINUX)
|
||||
if(LINUX)
|
||||
function(ov_get_definition definition var)
|
||||
execute_process(COMMAND echo "#include <errno.h>"
|
||||
COMMAND "${CMAKE_CXX_COMPILER}" -xc - -E -dM
|
||||
|
||||
@@ -24,9 +24,9 @@ endif()
|
||||
|
||||
ie_dependent_option (ENABLE_INTEL_GPU "GPU OpenCL-based plugin for OpenVINO Runtime" ${ENABLE_INTEL_GPU_DEFAULT} "X86_64 OR AARCH64;NOT APPLE;NOT WINDOWS_STORE;NOT WINDOWS_PHONE" OFF)
|
||||
|
||||
if (ANDROID OR (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0))
|
||||
# oneDNN doesn't support old compilers and android builds for now, so we'll
|
||||
# build GPU plugin without oneDNN
|
||||
if (ANDROID OR (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) OR NOT BUILD_SHARED_LIBS)
|
||||
# oneDNN doesn't support old compilers and android builds for now, so we'll build GPU plugin without oneDNN
|
||||
# also, in case of static build CPU's and GPU's oneDNNs will conflict, so we are disabling GPU's one in this case
|
||||
set(ENABLE_ONEDNN_FOR_GPU_DEFAULT OFF)
|
||||
else()
|
||||
set(ENABLE_ONEDNN_FOR_GPU_DEFAULT ON)
|
||||
@@ -35,8 +35,8 @@ endif()
|
||||
ie_dependent_option (ENABLE_ONEDNN_FOR_GPU "Enable oneDNN with GPU support" ${ENABLE_ONEDNN_FOR_GPU_DEFAULT} "ENABLE_INTEL_GPU" OFF)
|
||||
|
||||
ie_option (ENABLE_DEBUG_CAPS "enable OpenVINO debug capabilities at runtime" OFF)
|
||||
ie_dependent_option (ENABLE_GPU_DEBUG_CAPS "enable GPU debug capabilities at runtime" ON "ENABLE_DEBUG_CAPS;ENABLE_INTEL_CPU" OFF)
|
||||
ie_dependent_option (ENABLE_CPU_DEBUG_CAPS "enable CPU debug capabilities at runtime" ON "ENABLE_DEBUG_CAPS;ENABLE_INTEL_GPU" OFF)
|
||||
ie_dependent_option (ENABLE_GPU_DEBUG_CAPS "enable GPU debug capabilities at runtime" ON "ENABLE_DEBUG_CAPS;ENABLE_INTEL_GPU" OFF)
|
||||
ie_dependent_option (ENABLE_CPU_DEBUG_CAPS "enable CPU debug capabilities at runtime" ON "ENABLE_DEBUG_CAPS;ENABLE_INTEL_CPU" OFF)
|
||||
|
||||
ie_option (ENABLE_PROFILING_ITT "Build with ITT tracing. Optionally configure pre-built ittnotify library though INTEL_VTUNE_DIR variable." OFF)
|
||||
|
||||
@@ -49,13 +49,17 @@ Supported values:\
|
||||
ie_option (ENABLE_PROFILING_FIRST_INFERENCE "Build with ITT tracing of first inference time." ON)
|
||||
|
||||
ie_option_enum(SELECTIVE_BUILD "Enable OpenVINO conditional compilation or statistics collection. \
|
||||
In case SELECTIVE_BUILD is enabled, the SELECTIVE_BUILD_STAT variable should contain the path to the collected InelSEAPI statistics. \
|
||||
In case SELECTIVE_BUILD is enabled, the SELECTIVE_BUILD_STAT variable should contain the path to the collected IntelSEAPI statistics. \
|
||||
Usage: -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=/path/*.csv" OFF
|
||||
ALLOWED_VALUES ON OFF COLLECT)
|
||||
|
||||
ie_option (ENABLE_DOCS "Build docs using Doxygen" OFF)
|
||||
|
||||
find_package(PkgConfig QUIET)
|
||||
if(NOT ANDROID)
|
||||
# on Android build FindPkgConfig.cmake finds host system pkg-config, which is not appropriate
|
||||
find_package(PkgConfig QUIET)
|
||||
endif()
|
||||
|
||||
ie_dependent_option (ENABLE_PKGCONFIG_GEN "Enable openvino.pc pkg-config file generation" ON "LINUX OR APPLE;PkgConfig_FOUND;BUILD_SHARED_LIBS" OFF)
|
||||
|
||||
#
|
||||
@@ -80,7 +84,7 @@ else()
|
||||
set(ENABLE_TBBBIND_2_5_DEFAULT OFF)
|
||||
endif()
|
||||
|
||||
ie_dependent_option (ENABLE_TBBBIND_2_5 "Enable TBBBind_2_5 static usage in OpenVINO runtime" ${ENABLE_TBBBIND_2_5_DEFAULT} "THREADING MATCHES TBB" OFF)
|
||||
ie_dependent_option (ENABLE_TBBBIND_2_5 "Enable TBBBind_2_5 static usage in OpenVINO runtime" ${ENABLE_TBBBIND_2_5_DEFAULT} "THREADING MATCHES TBB; NOT APPLE" OFF)
|
||||
|
||||
ie_dependent_option (ENABLE_INTEL_GNA "GNA support for OpenVINO Runtime" ON
|
||||
"NOT APPLE;NOT ANDROID;X86_64;CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 5.4" OFF)
|
||||
@@ -122,7 +126,7 @@ ie_option(ENABLE_OV_IR_FRONTEND "Enable IR FrontEnd" ON)
|
||||
ie_option(ENABLE_OV_TF_FRONTEND "Enable TensorFlow FrontEnd" ON)
|
||||
ie_option(ENABLE_OV_TF_LITE_FRONTEND "Enable TensorFlow Lite FrontEnd" ON)
|
||||
ie_dependent_option(ENABLE_SNAPPY_COMPRESSION "Enables compression support for TF FE" ON
|
||||
"ENABLE_OV_TF_FRONTEND" ON)
|
||||
"ENABLE_OV_TF_FRONTEND" OFF)
|
||||
|
||||
if(CMAKE_HOST_LINUX AND LINUX)
|
||||
# Debian packages are enabled on Ubuntu systems
|
||||
@@ -148,6 +152,15 @@ else()
|
||||
set(ENABLE_SYSTEM_PUGIXML_DEFAULT OFF)
|
||||
endif()
|
||||
|
||||
if(ANDROID)
|
||||
# when protobuf from /usr/include is used, then Android toolchain ignores include paths
|
||||
# but if we build for Android using vcpkg / conan / etc where flatbuffers is not located in
|
||||
# the /usr/include folders, we can still use 'system' flatbuffers
|
||||
set(ENABLE_SYSTEM_FLATBUFFERS_DEFAULT OFF)
|
||||
else()
|
||||
set(ENABLE_SYSTEM_FLATBUFFERS_DEFAULT ON)
|
||||
endif()
|
||||
|
||||
# users wants to use his own TBB version, specific either via env vars or cmake options
|
||||
if(DEFINED ENV{TBBROOT} OR DEFINED ENV{TBB_DIR} OR DEFINED TBB_DIR OR DEFINED TBBROOT)
|
||||
set(ENABLE_SYSTEM_TBB_DEFAULT OFF)
|
||||
@@ -159,7 +172,7 @@ ie_dependent_option (ENABLE_SYSTEM_TBB "Enables use of system TBB" ${ENABLE_SYS
|
||||
# available out of box on all systems (like RHEL, UBI)
|
||||
ie_option (ENABLE_SYSTEM_PUGIXML "Enables use of system PugiXML" ${ENABLE_SYSTEM_PUGIXML_DEFAULT})
|
||||
# the option is on by default, because we use only flatc compiler and don't use any libraries
|
||||
ie_dependent_option(ENABLE_SYSTEM_FLATBUFFERS "Enables use of system flatbuffers" ON
|
||||
ie_dependent_option(ENABLE_SYSTEM_FLATBUFFERS "Enables use of system flatbuffers" ${ENABLE_SYSTEM_FLATBUFFERS_DEFAULT}
|
||||
"ENABLE_OV_TF_LITE_FRONTEND" OFF)
|
||||
ie_dependent_option (ENABLE_SYSTEM_OPENCL "Enables use of system OpenCL" ${ENABLE_SYSTEM_LIBS_DEFAULT}
|
||||
"ENABLE_INTEL_GPU" OFF)
|
||||
@@ -171,6 +184,10 @@ ie_dependent_option (ENABLE_SYSTEM_PROTOBUF "Enables use of system Protobuf" OFF
|
||||
ie_dependent_option (ENABLE_SYSTEM_SNAPPY "Enables use of system version of Snappy" OFF
|
||||
"ENABLE_SNAPPY_COMPRESSION" OFF)
|
||||
|
||||
# temporary option until we enable this by default when review python API distribution
|
||||
ie_dependent_option (ENABLE_PYTHON_PACKAGING "Enables packaging of Python API in APT / YUM" OFF
|
||||
"ENABLE_PYTHON;UNIX" OFF)
|
||||
|
||||
ie_option(ENABLE_OPENVINO_DEBUG "Enable output for OPENVINO_DEBUG statements" OFF)
|
||||
|
||||
if(NOT BUILD_SHARED_LIBS AND ENABLE_OV_TF_FRONTEND)
|
||||
|
||||
@@ -10,28 +10,14 @@ macro(ov_cpack_settings)
|
||||
set(cpack_components_all ${CPACK_COMPONENTS_ALL})
|
||||
unset(CPACK_COMPONENTS_ALL)
|
||||
foreach(item IN LISTS cpack_components_all)
|
||||
# filter out some components, which are not needed to be wrapped to conda-forge | brew | conan
|
||||
if(# python is not a part of conda | brew | conan
|
||||
NOT item MATCHES "^${OV_CPACK_COMP_PYTHON_OPENVINO}_python.*" AND
|
||||
# python wheels are not needed to be wrapped by conda | brew packages
|
||||
NOT item STREQUAL OV_CPACK_COMP_PYTHON_WHEELS AND
|
||||
# skip C / C++ / Python samples
|
||||
NOT item STREQUAL OV_CPACK_COMP_CPP_SAMPLES AND
|
||||
NOT item STREQUAL OV_CPACK_COMP_C_SAMPLES AND
|
||||
NOT item STREQUAL OV_CPACK_COMP_PYTHON_SAMPLES AND
|
||||
string(TOUPPER ${item} UPPER_COMP)
|
||||
# filter out some components, which are not needed to be wrapped to conda-forge | brew | conan | vcpkg
|
||||
if(NOT OV_CPACK_COMP_${UPPER_COMP}_EXCLUDE_ALL AND
|
||||
# even for case of system TBB we have installation rules for wheels packages
|
||||
# so, need to skip this explicitly since they are installed in `host` section
|
||||
NOT item MATCHES "^tbb(_dev)?$" AND
|
||||
# the same for pugixml
|
||||
NOT item STREQUAL "pugixml" AND
|
||||
# we have `license_file` field in conda meta.yml
|
||||
NOT item STREQUAL OV_CPACK_COMP_LICENSING AND
|
||||
# compile_tool is not needed
|
||||
NOT item STREQUAL OV_CPACK_COMP_CORE_TOOLS AND
|
||||
# not appropriate components
|
||||
NOT item STREQUAL OV_CPACK_COMP_DEPLOYMENT_MANAGER AND
|
||||
NOT item STREQUAL OV_CPACK_COMP_INSTALL_DEPENDENCIES AND
|
||||
NOT item STREQUAL OV_CPACK_COMP_SETUPVARS)
|
||||
NOT item STREQUAL "pugixml")
|
||||
list(APPEND CPACK_COMPONENTS_ALL ${item})
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
@@ -44,29 +44,22 @@ macro(ov_cpack_settings)
|
||||
set(cpack_components_all ${CPACK_COMPONENTS_ALL})
|
||||
unset(CPACK_COMPONENTS_ALL)
|
||||
foreach(item IN LISTS cpack_components_all)
|
||||
string(TOUPPER ${item} UPPER_COMP)
|
||||
# filter out some components, which are not needed to be wrapped to .deb package
|
||||
if(# skip OpenVINO Pyhon API and samples
|
||||
if(NOT OV_CPACK_COMP_${UPPER_COMP}_EXCLUDE_ALL AND
|
||||
# skip OpenVINO Python API (pattern in form of "<pyie | pyopenvino | pyngraph>_python${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR}")
|
||||
NOT item MATCHES "^${OV_CPACK_COMP_PYTHON_OPENVINO}_python.*" AND
|
||||
NOT item STREQUAL OV_CPACK_COMP_PYTHON_SAMPLES AND
|
||||
# python wheels are not needed to be wrapped by debian packages
|
||||
NOT item STREQUAL OV_CPACK_COMP_PYTHON_WHEELS AND
|
||||
# because in case of .deb package, pyopenvino_package_python${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR} is installed
|
||||
(NOT item MATCHES "^${OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE}_python.*" OR ENABLE_PYTHON_PACKAGING) AND
|
||||
# see ticket # 82605
|
||||
NOT item STREQUAL "gna" AND
|
||||
# don't install Intel OpenMP during debian
|
||||
# don't install Intel OpenMP
|
||||
NOT item STREQUAL "omp" AND
|
||||
# even for case of system TBB we have installation rules for wheels packages
|
||||
# so, need to skip this explicitly
|
||||
NOT item MATCHES "^tbb(_dev)?$" AND
|
||||
# the same for pugixml
|
||||
NOT item STREQUAL "pugixml" AND
|
||||
# we have copyright file for debian package
|
||||
NOT item STREQUAL OV_CPACK_COMP_LICENSING AND
|
||||
# compile_tool is not needed
|
||||
NOT item STREQUAL OV_CPACK_COMP_CORE_TOOLS AND
|
||||
# not appropriate components
|
||||
NOT item STREQUAL OV_CPACK_COMP_DEPLOYMENT_MANAGER AND
|
||||
NOT item STREQUAL OV_CPACK_COMP_INSTALL_DEPENDENCIES AND
|
||||
NOT item STREQUAL OV_CPACK_COMP_SETUPVARS)
|
||||
NOT item STREQUAL "pugixml")
|
||||
list(APPEND CPACK_COMPONENTS_ALL ${item})
|
||||
endif()
|
||||
endforeach()
|
||||
@@ -93,7 +86,8 @@ macro(ov_cpack_settings)
|
||||
# - 2022.1.0 is the last public release with debian packages from Intel install team
|
||||
# - 2022.1.1, 2022.2 do not have debian packages enabled, distributed only as archives
|
||||
# - 2022.3 is the first release where Debian updated packages are introduced, others 2022.3.X are LTS
|
||||
2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5 2023.0.0
|
||||
2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5
|
||||
2023.0.0
|
||||
)
|
||||
|
||||
#
|
||||
@@ -117,7 +111,7 @@ macro(ov_cpack_settings)
|
||||
|
||||
# hetero
|
||||
if(ENABLE_HETERO)
|
||||
set(CPACK_COMPONENT_HETERO_DESCRIPTION "OpenVINO Hetero plugin")
|
||||
set(CPACK_COMPONENT_HETERO_DESCRIPTION "OpenVINO Hetero software plugin")
|
||||
set(CPACK_COMPONENT_HETERO_DEPENDS "${OV_CPACK_COMP_CORE}")
|
||||
set(CPACK_DEBIAN_HETERO_PACKAGE_NAME "libopenvino-hetero-plugin-${cpack_name_ver}")
|
||||
set(CPACK_DEBIAN_HETERO_PACKAGE_CONTROL_EXTRA "${def_postinst};${def_postrm}")
|
||||
@@ -127,7 +121,7 @@ macro(ov_cpack_settings)
|
||||
|
||||
# auto batch
|
||||
if(ENABLE_AUTO_BATCH)
|
||||
set(CPACK_COMPONENT_BATCH_DESCRIPTION "OpenVINO Automatic Batching plugin")
|
||||
set(CPACK_COMPONENT_BATCH_DESCRIPTION "OpenVINO Automatic Batching software plugin")
|
||||
set(CPACK_COMPONENT_BATCH_DEPENDS "${OV_CPACK_COMP_CORE}")
|
||||
set(CPACK_DEBIAN_BATCH_PACKAGE_NAME "libopenvino-auto-batch-plugin-${cpack_name_ver}")
|
||||
set(CPACK_DEBIAN_BATCH_PACKAGE_CONTROL_EXTRA "${def_postinst};${def_postrm}")
|
||||
@@ -138,9 +132,9 @@ macro(ov_cpack_settings)
|
||||
# multi / auto plugins
|
||||
if(ENABLE_MULTI)
|
||||
if(ENABLE_AUTO)
|
||||
set(CPACK_COMPONENT_MULTI_DESCRIPTION "OpenVINO Auto / Multi plugin")
|
||||
set(CPACK_COMPONENT_MULTI_DESCRIPTION "OpenVINO Auto / Multi software plugin")
|
||||
else()
|
||||
set(CPACK_COMPONENT_MULTI_DESCRIPTION "OpenVINO Multi plugin")
|
||||
set(CPACK_COMPONENT_MULTI_DESCRIPTION "OpenVINO Multi software plugin")
|
||||
endif()
|
||||
set(CPACK_COMPONENT_MULTI_DEPENDS "${OV_CPACK_COMP_CORE}")
|
||||
set(CPACK_DEBIAN_MULTI_PACKAGE_NAME "libopenvino-auto-plugin-${cpack_name_ver}")
|
||||
@@ -148,7 +142,7 @@ macro(ov_cpack_settings)
|
||||
_ov_add_plugin(multi ON)
|
||||
set(multi_copyright "generic")
|
||||
elseif(ENABLE_AUTO)
|
||||
set(CPACK_COMPONENT_AUTO_DESCRIPTION "OpenVINO Auto plugin")
|
||||
set(CPACK_COMPONENT_AUTO_DESCRIPTION "OpenVINO Auto software plugin")
|
||||
set(CPACK_COMPONENT_AUTO_DEPENDS "${OV_CPACK_COMP_CORE}")
|
||||
set(CPACK_DEBIAN_AUTO_PACKAGE_NAME "libopenvino-auto-plugin-${cpack_name_ver}")
|
||||
set(CPACK_DEBIAN_AUTO_PACKAGE_CONTROL_EXTRA "${def_postinst};${def_postrm}")
|
||||
@@ -160,11 +154,11 @@ macro(ov_cpack_settings)
|
||||
if(ENABLE_INTEL_CPU)
|
||||
if(ARM OR AARCH64)
|
||||
set(CPACK_DEBIAN_CPU_PACKAGE_NAME "libopenvino-arm-cpu-plugin-${cpack_name_ver}")
|
||||
set(CPACK_COMPONENT_CPU_DESCRIPTION "ARM® CPU plugin")
|
||||
set(CPACK_COMPONENT_CPU_DESCRIPTION "ARM® CPU inference plugin")
|
||||
set(cpu_copyright "arm_cpu")
|
||||
elseif(X86 OR X86_64)
|
||||
set(CPACK_DEBIAN_CPU_PACKAGE_NAME "libopenvino-intel-cpu-plugin-${cpack_name_ver}")
|
||||
set(CPACK_COMPONENT_CPU_DESCRIPTION "Intel® CPU plugin")
|
||||
set(CPACK_COMPONENT_CPU_DESCRIPTION "Intel® CPU inference plugin")
|
||||
set(cpu_copyright "generic")
|
||||
else()
|
||||
message(FATAL_ERROR "Unsupported CPU architecture: ${CMAKE_SYSTEM_PROCESSOR}")
|
||||
@@ -176,19 +170,19 @@ macro(ov_cpack_settings)
|
||||
|
||||
# intel-gpu
|
||||
if(ENABLE_INTEL_GPU)
|
||||
set(CPACK_COMPONENT_GPU_DESCRIPTION "Intel® Processor Graphics plugin")
|
||||
set(CPACK_COMPONENT_GPU_DESCRIPTION "Intel® Processor Graphics inference plugin")
|
||||
set(CPACK_COMPONENT_GPU_DEPENDS "${OV_CPACK_COMP_CORE}")
|
||||
set(CPACK_DEBIAN_GPU_PACKAGE_NAME "libopenvino-intel-gpu-plugin-${cpack_name_ver}")
|
||||
set(CPACK_DEBIAN_GPU_PACKAGE_CONTROL_EXTRA "${def_postinst};${def_postrm}")
|
||||
# auto batch exhances GPU
|
||||
# set(CPACK_DEBIAN_BATCH_PACKAGE_ENHANCES "${CPACK_DEBIAN_GPU_PACKAGE_NAME} = (${cpack_full_ver})")
|
||||
# set(CPACK_DEBIAN_BATCH_PACKAGE_ENHANCES "${CPACK_DEBIAN_GPU_PACKAGE_NAME} (= ${cpack_full_ver})")
|
||||
_ov_add_plugin(gpu OFF)
|
||||
set(gpu_copyright "generic")
|
||||
endif()
|
||||
|
||||
# intel-gna
|
||||
if(ENABLE_INTEL_GNA AND "gna" IN_LIST CPACK_COMPONENTS_ALL)
|
||||
set(CPACK_COMPONENT_GNA_DESCRIPTION "Intel® Gaussian Neural Accelerator")
|
||||
set(CPACK_COMPONENT_GNA_DESCRIPTION "Intel® Gaussian Neural Accelerator inference plugin")
|
||||
set(CPACK_COMPONENT_GNA_DEPENDS "${OV_CPACK_COMP_CORE}")
|
||||
set(CPACK_DEBIAN_GNA_PACKAGE_NAME "libopenvino-intel-gna-plugin-${cpack_name_ver}")
|
||||
# since we have libgna.so we need to call ldconfig and have `def_triggers` here
|
||||
@@ -298,28 +292,32 @@ macro(ov_cpack_settings)
|
||||
#
|
||||
|
||||
set(CPACK_COMPONENT_CORE_DEV_DESCRIPTION "Intel(R) Distribution of OpenVINO(TM) Toolkit C / C++ Development files")
|
||||
set(CPACK_COMPONENT_CORE_DEV_DEPENDS "${OV_CPACK_COMP_CORE};${frontends}")
|
||||
set(CPACK_COMPONENT_CORE_DEV_DEPENDS "${OV_CPACK_COMP_CORE}")
|
||||
list(APPEND CPACK_COMPONENT_CORE_DEV_DEPENDS ${frontends})
|
||||
set(CPACK_DEBIAN_CORE_DEV_PACKAGE_NAME "libopenvino-dev-${cpack_name_ver}")
|
||||
ov_debian_generate_conflicts("${OV_CPACK_COMP_CORE_DEV}" ${conflicting_versions})
|
||||
set(${OV_CPACK_COMP_CORE_DEV}_copyright "generic")
|
||||
|
||||
#
|
||||
# Python bindings
|
||||
# Python API
|
||||
#
|
||||
|
||||
if(ENABLE_PYTHON)
|
||||
if(ENABLE_PYTHON_PACKAGING)
|
||||
ov_get_pyversion(pyversion)
|
||||
set(python_component "${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion}")
|
||||
set(python_component "${OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE}_${pyversion}")
|
||||
string(TOUPPER "${pyversion}" pyversion)
|
||||
|
||||
set(CPACK_COMPONENT_PYOPENVINO_${pyversion}_DESCRIPTION "OpenVINO Python bindings")
|
||||
set(CPACK_COMPONENT_PYOPENVINO_${pyversion}_DEPENDS "${OV_CPACK_COMP_CORE}")
|
||||
list(APPEND CPACK_COMPONENT_PYOPENVINO_${pyversion}_DEPENDS ${installed_plugins})
|
||||
list(APPEND CPACK_COMPONENT_PYOPENVINO_${pyversion}_DEPENDS ${frontends})
|
||||
set(CPACK_COMPONENT_PYOPENVINO_PACKAGE_${pyversion}_DESCRIPTION "OpenVINO Python API")
|
||||
set(CPACK_COMPONENT_PYOPENVINO_PACKAGE_${pyversion}_DEPENDS "${OV_CPACK_COMP_CORE}")
|
||||
list(APPEND CPACK_COMPONENT_PYOPENVINO_PACKAGE_${pyversion}_DEPENDS ${installed_plugins})
|
||||
list(APPEND CPACK_COMPONENT_PYOPENVINO_PACKAGE_${pyversion}_DEPENDS ${frontends})
|
||||
|
||||
set(CPACK_DEBIAN_PYOPENVINO_${pyversion}_PACKAGE_NAME "libopenvino-python-${cpack_name_ver}")
|
||||
set(CPACK_DEBIAN_PYOPENVINO_${pyversion}_PACKAGE_CONTROL_EXTRA "${def_postinst};${def_postrm}")
|
||||
set(CPACK_DEBIAN_PYOPENVINO_${pyversion}_PACKAGE_DEPENDS "python3")
|
||||
set(CPACK_DEBIAN_PYOPENVINO_PACKAGE_${pyversion}_PACKAGE_NAME "python3-openvino")
|
||||
set(python_package "${CPACK_DEBIAN_PYOPENVINO_PACKAGE_${pyversion}_PACKAGE_NAME} (= ${cpack_full_ver})")
|
||||
set(CPACK_DEBIAN_PYOPENVINO_PACKAGE_${pyversion}_PACKAGE_DEPENDS "python3, python3-numpy")
|
||||
|
||||
# we can have a single python installed, so we need to generate conflicts for all other versions
|
||||
ov_debian_generate_conflicts(${python_component} ${conflicting_versions})
|
||||
|
||||
# TODO: fix all the warnings
|
||||
ov_debian_add_lintian_suppression(${python_component}
|
||||
@@ -329,6 +327,10 @@ macro(ov_cpack_settings)
|
||||
"executable-not-elf-or-script"
|
||||
# all directories
|
||||
"non-standard-dir-perm"
|
||||
# usr/bin/benchmark_app
|
||||
"binary-without-manpage"
|
||||
# usr/bin/benchmark_app
|
||||
"non-standard-executable-perm"
|
||||
# all python files
|
||||
"non-standard-file-perm")
|
||||
set(${python_component}_copyright "generic")
|
||||
@@ -360,11 +362,12 @@ macro(ov_cpack_settings)
|
||||
set(samples_copyright "generic")
|
||||
|
||||
# python_samples
|
||||
if(ENABLE_PYTHON)
|
||||
if(ENABLE_PYTHON_PACKAGING)
|
||||
set(CPACK_COMPONENT_PYTHON_SAMPLES_DESCRIPTION "Intel(R) Distribution of OpenVINO(TM) Toolkit Python Samples")
|
||||
set(CPACK_COMPONENT_PYTHON_SAMPLES_DEPENDS "${python_component}")
|
||||
set(CPACK_DEBIAN_PYTHON_SAMPLES_PACKAGE_NAME "openvino-samples-python-${cpack_name_ver}")
|
||||
set(CPACK_DEBIAN_PYTHON_SAMPLES_PACKAGE_DEPENDS "python3")
|
||||
set(python_samples_package "${CPACK_DEBIAN_PYTHON_SAMPLES_PACKAGE_NAME} (= ${cpack_full_ver})")
|
||||
set(CPACK_DEBIAN_PYTHON_SAMPLES_PACKAGE_DEPENDS "python3, ${python_package}")
|
||||
set(CPACK_DEBIAN_PYTHON_SAMPLES_PACKAGE_ARCHITECTURE "all")
|
||||
set(python_samples_copyright "generic")
|
||||
endif()
|
||||
@@ -395,6 +398,9 @@ macro(ov_cpack_settings)
|
||||
# all openvino
|
||||
set(CPACK_COMPONENT_OPENVINO_DESCRIPTION "Intel(R) Distribution of OpenVINO(TM) Toolkit Libraries and Development files")
|
||||
set(CPACK_COMPONENT_OPENVINO_DEPENDS "libraries_dev;${OV_CPACK_COMP_CPP_SAMPLES}")
|
||||
if(ENABLE_PYTHON_PACKAGING)
|
||||
list(APPEND CPACK_DEBIAN_OPENVINO_PACKAGE_DEPENDS "${python_package}, ${python_samples_package}")
|
||||
endif()
|
||||
set(CPACK_DEBIAN_OPENVINO_PACKAGE_NAME "openvino-${cpack_name_ver}")
|
||||
set(CPACK_DEBIAN_OPENVINO_PACKAGE_ARCHITECTURE "all")
|
||||
ov_debian_generate_conflicts(openvino ${conflicting_versions})
|
||||
|
||||
@@ -6,7 +6,7 @@ if(CPACK_GENERATOR STREQUAL "DEB")
|
||||
include(cmake/packaging/debian.cmake)
|
||||
elseif(CPACK_GENERATOR STREQUAL "RPM")
|
||||
include(cmake/packaging/rpm.cmake)
|
||||
elseif(CPACK_GENERATOR MATCHES "^(CONDA-FORGE|BREW|CONAN)$")
|
||||
elseif(CPACK_GENERATOR MATCHES "^(CONDA-FORGE|BREW|CONAN|VCPKG)$")
|
||||
include(cmake/packaging/common-libraries.cmake)
|
||||
elseif(CPACK_GENERATOR STREQUAL "NSIS")
|
||||
include(cmake/packaging/nsis.cmake)
|
||||
|
||||
@@ -30,30 +30,23 @@ macro(ov_cpack_settings)
|
||||
set(cpack_components_all ${CPACK_COMPONENTS_ALL})
|
||||
unset(CPACK_COMPONENTS_ALL)
|
||||
foreach(item IN LISTS cpack_components_all)
|
||||
# filter out some components, which are not needed to be wrapped to .deb package
|
||||
if(# skip OpenVINO Pyhon API and samples
|
||||
string(TOUPPER ${item} UPPER_COMP)
|
||||
# filter out some components, which are not needed to be wrapped to .rpm package
|
||||
if(NOT OV_CPACK_COMP_${UPPER_COMP}_EXCLUDE_ALL AND
|
||||
# skip OpenVINO Python API (pattern in form of "<pyie | pyopenvino | pyngraph>_python${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR}")
|
||||
NOT item MATCHES "^${OV_CPACK_COMP_PYTHON_OPENVINO}_python.*" AND
|
||||
NOT item STREQUAL OV_CPACK_COMP_PYTHON_SAMPLES AND
|
||||
# python wheels are not needed to be wrapped by rpm packages
|
||||
NOT item STREQUAL OV_CPACK_COMP_PYTHON_WHEELS AND
|
||||
# because in case of .rpm package, pyopenvino_package_python${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR} is installed
|
||||
(NOT item MATCHES "^${OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE}_python.*" OR ENABLE_PYTHON_PACKAGING) AND
|
||||
# see ticket # 82605
|
||||
NOT item STREQUAL "gna" AND
|
||||
# don't install Intel OpenMP during rpm
|
||||
# don't install Intel OpenMP
|
||||
NOT item STREQUAL "omp" AND
|
||||
# even for case of system TBB we have installation rules for wheels packages
|
||||
# so, need to skip this explicitly
|
||||
NOT item MATCHES "^tbb(_dev)?$" AND
|
||||
# the same for pugixml
|
||||
NOT item STREQUAL "pugixml" AND
|
||||
# we have copyright file for rpm package
|
||||
NOT item STREQUAL OV_CPACK_COMP_LICENSING AND
|
||||
# compile_tool is not needed
|
||||
NOT item STREQUAL OV_CPACK_COMP_CORE_TOOLS AND
|
||||
# not appropriate components
|
||||
NOT item STREQUAL OV_CPACK_COMP_DEPLOYMENT_MANAGER AND
|
||||
NOT item STREQUAL OV_CPACK_COMP_INSTALL_DEPENDENCIES AND
|
||||
NOT item STREQUAL OV_CPACK_COMP_SETUPVARS)
|
||||
list(APPEND CPACK_COMPONENTS_ALL ${item})
|
||||
NOT item STREQUAL "pugixml")
|
||||
list(APPEND CPACK_COMPONENTS_ALL ${item})
|
||||
endif()
|
||||
endforeach()
|
||||
list(REMOVE_DUPLICATES CPACK_COMPONENTS_ALL)
|
||||
@@ -79,7 +72,8 @@ macro(ov_cpack_settings)
|
||||
# - 2022.1.0 is the last public release with rpm packages from Intel install team
|
||||
# - 2022.1.1, 2022.2 do not have rpm packages enabled, distributed only as archives
|
||||
# - 2022.3 is the first release where RPM updated packages are introduced, others 2022.3.X are LTS
|
||||
2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5 2023.0.0
|
||||
2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5
|
||||
2023.0.0
|
||||
)
|
||||
|
||||
find_host_program(rpmlint_PROGRAM NAMES rpmlint DOC "Path to rpmlint")
|
||||
@@ -121,7 +115,7 @@ macro(ov_cpack_settings)
|
||||
|
||||
# hetero
|
||||
if(ENABLE_HETERO)
|
||||
set(CPACK_COMPONENT_HETERO_DESCRIPTION "OpenVINO Hetero plugin")
|
||||
set(CPACK_COMPONENT_HETERO_DESCRIPTION "OpenVINO Hetero software plugin")
|
||||
set(CPACK_RPM_HETERO_PACKAGE_REQUIRES "${core_package}")
|
||||
set(CPACK_RPM_HETERO_PACKAGE_NAME "libopenvino-hetero-plugin-${cpack_name_ver}")
|
||||
_ov_add_package(plugin_packages hetero)
|
||||
@@ -130,7 +124,7 @@ macro(ov_cpack_settings)
|
||||
|
||||
# auto batch
|
||||
if(ENABLE_AUTO_BATCH)
|
||||
set(CPACK_COMPONENT_BATCH_DESCRIPTION "OpenVINO Automatic Batching plugin")
|
||||
set(CPACK_COMPONENT_BATCH_DESCRIPTION "OpenVINO Automatic Batching software plugin")
|
||||
set(CPACK_RPM_BATCH_PACKAGE_REQUIRES "${core_package}")
|
||||
set(CPACK_RPM_BATCH_PACKAGE_NAME "libopenvino-auto-batch-plugin-${cpack_name_ver}")
|
||||
_ov_add_package(plugin_packages batch)
|
||||
@@ -140,16 +134,16 @@ macro(ov_cpack_settings)
|
||||
# multi / auto plugins
|
||||
if(ENABLE_MULTI)
|
||||
if(ENABLE_AUTO)
|
||||
set(CPACK_COMPONENT_MULTI_DESCRIPTION "OpenVINO Auto / Multi plugin")
|
||||
set(CPACK_COMPONENT_MULTI_DESCRIPTION "OpenVINO Auto / Multi software plugin")
|
||||
else()
|
||||
set(CPACK_COMPONENT_MULTI_DESCRIPTION "OpenVINO Multi plugin")
|
||||
set(CPACK_COMPONENT_MULTI_DESCRIPTION "OpenVINO Multi software plugin")
|
||||
endif()
|
||||
set(CPACK_RPM_MULTI_PACKAGE_REQUIRES "${core_package}")
|
||||
set(CPACK_RPM_MULTI_PACKAGE_NAME "libopenvino-auto-plugin-${cpack_name_ver}")
|
||||
_ov_add_package(plugin_packages multi)
|
||||
set(multi_copyright "generic")
|
||||
elseif(ENABLE_AUTO)
|
||||
set(CPACK_COMPONENT_AUTO_DESCRIPTION "OpenVINO Auto plugin")
|
||||
set(CPACK_COMPONENT_AUTO_DESCRIPTION "OpenVINO Auto software plugin")
|
||||
set(CPACK_RPM_AUTO_PACKAGE_REQUIRES "${core_package}")
|
||||
set(CPACK_RPM_AUTO_PACKAGE_NAME "libopenvino-auto-plugin-${cpack_name_ver}")
|
||||
_ov_add_package(plugin_packages auto)
|
||||
@@ -160,11 +154,11 @@ macro(ov_cpack_settings)
|
||||
if(ENABLE_INTEL_CPU)
|
||||
if(ARM OR AARCH64)
|
||||
set(CPACK_RPM_CPU_PACKAGE_NAME "libopenvino-arm-cpu-plugin-${cpack_name_ver}")
|
||||
set(CPACK_COMPONENT_CPU_DESCRIPTION "ARM® CPU plugin")
|
||||
set(CPACK_COMPONENT_CPU_DESCRIPTION "ARM® CPU inference plugin")
|
||||
set(cpu_copyright "arm_cpu")
|
||||
elseif(X86 OR X86_64)
|
||||
set(CPACK_RPM_CPU_PACKAGE_NAME "libopenvino-intel-cpu-plugin-${cpack_name_ver}")
|
||||
set(CPACK_COMPONENT_CPU_DESCRIPTION "Intel® CPU")
|
||||
set(CPACK_COMPONENT_CPU_DESCRIPTION "Intel® CPU inference plugin")
|
||||
set(cpu_copyright "generic")
|
||||
else()
|
||||
message(FATAL_ERROR "Unsupported CPU architecture: ${CMAKE_SYSTEM_PROCESSOR}")
|
||||
@@ -175,7 +169,7 @@ macro(ov_cpack_settings)
|
||||
|
||||
# intel-gpu
|
||||
if(ENABLE_INTEL_GPU)
|
||||
set(CPACK_COMPONENT_GPU_DESCRIPTION "Intel® Processor Graphics")
|
||||
set(CPACK_COMPONENT_GPU_DESCRIPTION "Intel® Processor Graphics inference plugin")
|
||||
set(CPACK_RPM_GPU_PACKAGE_REQUIRES "${core_package}")
|
||||
set(CPACK_RPM_GPU_PACKAGE_NAME "libopenvino-intel-gpu-plugin-${cpack_name_ver}")
|
||||
_ov_add_package(plugin_packages gpu)
|
||||
@@ -184,7 +178,7 @@ macro(ov_cpack_settings)
|
||||
|
||||
# intel-gna
|
||||
if(ENABLE_INTEL_GNA AND "gna" IN_LIST CPACK_COMPONENTS_ALL)
|
||||
set(CPACK_COMPONENT_GNA_DESCRIPTION "Intel® Gaussian Neural Accelerator")
|
||||
set(CPACK_COMPONENT_GNA_DESCRIPTION "Intel® Gaussian Neural Accelerator inference plugin")
|
||||
set(CPACK_RPM_GNA_PACKAGE_REQUIRES "${core_package}")
|
||||
set(CPACK_RPM_GNA_PACKAGE_NAME "libopenvino-intel-gna-plugin-${cpack_name_ver}")
|
||||
_ov_add_package(plugin_packages gna)
|
||||
@@ -272,17 +266,26 @@ macro(ov_cpack_settings)
|
||||
# Python bindings
|
||||
#
|
||||
|
||||
if(ENABLE_PYTHON)
|
||||
if(ENABLE_PYTHON_PACKAGING)
|
||||
ov_get_pyversion(pyversion)
|
||||
set(python_component "${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion}")
|
||||
string(TOUPPER "${pyversion}" pyversion)
|
||||
set(python_component "${OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE}_${pyversion}")
|
||||
string(TOUPPER "${pyversion}" pyversion_upper)
|
||||
|
||||
set(CPACK_COMPONENT_PYOPENVINO_${pyversion}_DESCRIPTION "OpenVINO Python bindings")
|
||||
set(CPACK_RPM_PYOPENVINO_${pyversion}_PACKAGE_REQUIRES
|
||||
"${core_package}, ${frontend_packages}, ${plugin_packages}, python3")
|
||||
set(CPACK_RPM_PYOPENVINO_${pyversion}_PACKAGE_NAME "libopenvino-python-${cpack_name_ver}")
|
||||
set(python_package "${CPACK_RPM_PYOPENVINO_${pyversion}_PACKAGE_NAME} = ${cpack_full_ver}")
|
||||
set(CPACK_COMPONENT_PYOPENVINO_PACKAGE_${pyversion_upper}_DESCRIPTION "OpenVINO Python API")
|
||||
set(CPACK_RPM_PYOPENVINO_PACKAGE_${pyversion_upper}_PACKAGE_REQUIRES
|
||||
"${core_package}, ${frontend_packages}, ${plugin_packages}, python3, python3-numpy")
|
||||
set(CPACK_RPM_PYOPENVINO_PACKAGE_${pyversion_upper}_PACKAGE_NAME "python3-openvino")
|
||||
set(python_package "${CPACK_RPM_PYOPENVINO_PACKAGE_${pyversion_upper}_PACKAGE_NAME} = ${cpack_full_ver}")
|
||||
set(${python_component}_copyright "generic")
|
||||
|
||||
# we can have a single python installed, so we need to generate conflicts for all other versions
|
||||
ov_rpm_generate_conflicts(${python_component} ${conflicting_versions})
|
||||
|
||||
ov_rpm_add_rpmlint_suppression("${python_component}"
|
||||
# all directories
|
||||
"non-standard-dir-perm /usr/lib64/${pyversion}/site-packages/openvino/*"
|
||||
"non-standard-dir-perm /usr/lib64/${pyversion}/site-packages/ngraph/*"
|
||||
)
|
||||
endif()
|
||||
|
||||
#
|
||||
@@ -317,12 +320,20 @@ macro(ov_cpack_settings)
|
||||
set(samples_copyright "generic")
|
||||
|
||||
# python_samples
|
||||
if(ENABLE_PYTHON)
|
||||
if(ENABLE_PYTHON_PACKAGING)
|
||||
set(CPACK_COMPONENT_PYTHON_SAMPLES_DESCRIPTION "Intel(R) Distribution of OpenVINO(TM) Toolkit Python Samples")
|
||||
set(CPACK_RPM_PYTHON_SAMPLES_PACKAGE_REQUIRES "${python_package}, python3")
|
||||
set(CPACK_RPM_PYTHON_SAMPLES_PACKAGE_NAME "openvino-samples-python-${cpack_name_ver}")
|
||||
set(python_samples_package "${CPACK_RPM_PYTHON_SAMPLES_PACKAGE_NAME} = ${cpack_full_ver}")
|
||||
set(CPACK_RPM_PYTHON_SAMPLES_PACKAGE_ARCHITECTURE "noarch")
|
||||
set(python_samples_copyright "generic")
|
||||
|
||||
ov_rpm_add_rpmlint_suppression(${OV_CPACK_COMP_PYTHON_SAMPLES}
|
||||
# all files
|
||||
"non-executable-script /usr/share/openvino/samples/python/*"
|
||||
# similar requirements.txt files
|
||||
"files-duplicate /usr/share/openvino/samples/python/*"
|
||||
)
|
||||
endif()
|
||||
|
||||
#
|
||||
@@ -353,6 +364,9 @@ macro(ov_cpack_settings)
|
||||
# all openvino
|
||||
set(CPACK_COMPONENT_OPENVINO_DESCRIPTION "Intel(R) Distribution of OpenVINO(TM) Toolkit Libraries and Development files")
|
||||
set(CPACK_RPM_OPENVINO_PACKAGE_REQUIRES "${libraries_dev_package}, ${samples_package}")
|
||||
if(ENABLE_PYTHON_PACKAGING)
|
||||
set(CPACK_DEBIAN_OPENVINO_PACKAGE_DEPENDS "${CPACK_RPM_OPENVINO_PACKAGE_REQUIRES}, ${python_package}, ${python_samples_package}")
|
||||
endif()
|
||||
set(CPACK_RPM_OPENVINO_PACKAGE_NAME "openvino-${cpack_name_ver}")
|
||||
set(CPACK_RPM_OPENVINO_PACKAGE_ARCHITECTURE "noarch")
|
||||
ov_rpm_generate_conflicts(openvino ${conflicting_versions})
|
||||
|
||||
@@ -238,7 +238,15 @@ macro(_ov_find_pugixml)
|
||||
if(_ov_pugixml_pkgconfig_interface AND NOT ANDROID)
|
||||
_ov_find_dependency(PkgConfig)
|
||||
elseif(_ov_pugixml_cmake_interface)
|
||||
_ov_find_dependency(PugiXML REQUIRED)
|
||||
_ov_find_dependency(PugiXML NAMES PugiXML pugixml)
|
||||
endif()
|
||||
|
||||
# see https://cmake.org/cmake/help/latest/command/add_library.html#alias-libraries
|
||||
# cmake older than 3.18 cannot create an alias for imported non-GLOBAL targets
|
||||
# so, we have to use 'GLOBAL' property for cases when we call from OpenVINODeveloperPackage
|
||||
# because the alias openvino::pugixml is created later
|
||||
if(CMAKE_VERSION VERSION_LESS 3.18 AND OpenVINODeveloperPackage_DIR)
|
||||
set(_ov_pugixml_visibility GLOBAL)
|
||||
endif()
|
||||
|
||||
if(PugiXML_FOUND)
|
||||
@@ -248,9 +256,15 @@ macro(_ov_find_pugixml)
|
||||
set(_ov_pugixml_target pugixml::pugixml)
|
||||
endif()
|
||||
if(OpenVINODeveloperPackage_DIR)
|
||||
set_property(TARGET ${_ov_pugixml_target} PROPERTY IMPORTED_GLOBAL ON)
|
||||
# align with build tree
|
||||
add_library(openvino::pugixml ALIAS ${_ov_pugixml_target})
|
||||
# align with build tree and create alias
|
||||
if(_ov_pugixml_visibility STREQUAL "GLOBAL")
|
||||
set_target_properties(${_ov_pugixml_target} PROPERTIES IMPORTED_GLOBAL TRUE)
|
||||
endif()
|
||||
# check whether openvino::pugixml is already defined in case of
|
||||
# OpenVINODeveloperPackageConfig.cmake is found multiple times
|
||||
if(NOT TARGET openvino::pugixml)
|
||||
add_library(openvino::pugixml ALIAS ${_ov_pugixml_target})
|
||||
endif()
|
||||
endif()
|
||||
unset(_ov_pugixml_target)
|
||||
elseif(PkgConfig_FOUND)
|
||||
@@ -265,7 +279,7 @@ macro(_ov_find_pugixml)
|
||||
${pkg_config_quiet_arg}
|
||||
${pkg_config_required_arg}
|
||||
IMPORTED_TARGET
|
||||
GLOBAL
|
||||
${_ov_pugixml_visibility}
|
||||
pugixml)
|
||||
|
||||
unset(pkg_config_quiet_arg)
|
||||
@@ -273,7 +287,11 @@ macro(_ov_find_pugixml)
|
||||
|
||||
if(pugixml_FOUND)
|
||||
if(OpenVINODeveloperPackage_DIR)
|
||||
add_library(openvino::pugixml ALIAS PkgConfig::pugixml)
|
||||
# check whether openvino::pugixml is already defined in case of
|
||||
# OpenVINODeveloperPackageConfig.cmake is found multiple times
|
||||
if(NOT TARGET openvino::pugixml)
|
||||
add_library(openvino::pugixml ALIAS PkgConfig::pugixml)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# PATCH: on Ubuntu 18.04 pugixml.pc contains incorrect include directories
|
||||
@@ -295,6 +313,8 @@ macro(_ov_find_pugixml)
|
||||
message(FATAL_ERROR "Failed to find system pugixml in OpenVINO Developer Package")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
unset(_ov_pugixml_visibility)
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
@@ -314,7 +334,7 @@ macro(_ov_find_ade)
|
||||
# whether 'ade' is found via find_package
|
||||
set(_ENABLE_SYSTEM_ADE "@ade_FOUND@")
|
||||
if(_OV_ENABLE_GAPI_PREPROCESSING AND _ENABLE_SYSTEM_ADE)
|
||||
_ov_find_dependency(ade 0.1.2)
|
||||
_ov_find_dependency(ade @ade_VERSION@)
|
||||
endif()
|
||||
unset(_OV_ENABLE_GAPI_PREPROCESSING)
|
||||
unset(_ENABLE_SYSTEM_ADE)
|
||||
@@ -325,20 +345,15 @@ macro(_ov_find_intel_cpu_dependencies)
|
||||
if(_OV_ENABLE_CPU_ACL)
|
||||
if(_ov_as_external_package)
|
||||
set_and_check(ARM_COMPUTE_LIB_DIR "@PACKAGE_ARM_COMPUTE_LIB_DIR@")
|
||||
set(_ov_find_acl_options NO_DEFAULT_PATH)
|
||||
set(_ov_find_acl_path "${CMAKE_CURRENT_LIST_DIR}")
|
||||
set(ACL_DIR "${CMAKE_CURRENT_LIST_DIR}")
|
||||
else()
|
||||
set_and_check(_ov_find_acl_path "@PACKAGE_FIND_ACL_PATH@")
|
||||
set_and_check(ACL_DIR "@PACKAGE_FIND_ACL_PATH@")
|
||||
endif()
|
||||
|
||||
_ov_find_dependency(ACL
|
||||
NO_MODULE
|
||||
PATHS "${_ov_find_acl_path}"
|
||||
${_ov_find_acl_options})
|
||||
_ov_find_dependency(ACL)
|
||||
|
||||
unset(ARM_COMPUTE_LIB_DIR)
|
||||
unset(_ov_find_acl_path)
|
||||
unset(_ov_find_acl_options)
|
||||
endif()
|
||||
unset(_OV_ENABLE_CPU_ACL)
|
||||
endmacro()
|
||||
@@ -375,9 +390,8 @@ endmacro()
|
||||
|
||||
macro(_ov_find_protobuf_frontend_dependency)
|
||||
set(_OV_ENABLE_SYSTEM_PROTOBUF "@ENABLE_SYSTEM_PROTOBUF@")
|
||||
# TODO: remove check for target existence
|
||||
if(_OV_ENABLE_SYSTEM_PROTOBUF AND NOT TARGET protobuf::libprotobuf)
|
||||
_ov_find_dependency(Protobuf @Protobuf_VERSION@ EXACT)
|
||||
if(_OV_ENABLE_SYSTEM_PROTOBUF)
|
||||
_ov_find_dependency(Protobuf @Protobuf_VERSION@ EXACT NAMES Protobuf protobuf)
|
||||
endif()
|
||||
unset(_OV_ENABLE_SYSTEM_PROTOBUF)
|
||||
endmacro()
|
||||
@@ -385,8 +399,7 @@ endmacro()
|
||||
macro(_ov_find_tensorflow_frontend_dependencies)
|
||||
set(_OV_ENABLE_SYSTEM_SNAPPY "@ENABLE_SYSTEM_SNAPPY@")
|
||||
set(_ov_snappy_lib "@ov_snappy_lib@")
|
||||
# TODO: remove check for target existence
|
||||
if(_OV_ENABLE_SYSTEM_SNAPPY AND NOT TARGET ${_ov_snappy_lib})
|
||||
if(_OV_ENABLE_SYSTEM_SNAPPY)
|
||||
_ov_find_dependency(Snappy @Snappy_VERSION@ EXACT)
|
||||
endif()
|
||||
unset(_OV_ENABLE_SYSTEM_SNAPPY)
|
||||
|
||||
@@ -15,7 +15,7 @@ list(APPEND ov_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER
|
||||
CMAKE_CXX_LINKER_LAUNCHER CMAKE_C_LINKER_LAUNCHER
|
||||
CMAKE_BUILD_TYPE CMAKE_SKIP_RPATH CMAKE_INSTALL_PREFIX
|
||||
CMAKE_OSX_ARCHITECTURES CMAKE_OSX_DEPLOYMENT_TARGET
|
||||
CMAKE_CONFIGURATION_TYPES CMAKE_DEFAULT_BUILD_TYPE)
|
||||
CPACK_GENERATOR)
|
||||
file(TO_CMAKE_PATH "${CMAKE_CURRENT_LIST_DIR}" cache_path)
|
||||
|
||||
message(STATUS "The following CMake options are exported from OpenVINO Developer package")
|
||||
@@ -33,7 +33,11 @@ set(ENABLE_PLUGINS_XML ON)
|
||||
|
||||
# for samples in 3rd party projects
|
||||
if(ENABLE_SAMPLES)
|
||||
set_and_check(gflags_DIR "@gflags_BINARY_DIR@")
|
||||
if("@gflags_FOUND@")
|
||||
set_and_check(gflags_DIR "@gflags_DIR@")
|
||||
else()
|
||||
set_and_check(gflags_DIR "@gflags_BINARY_DIR@")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
set(CMAKE_SYSTEM_NAME Linux)
|
||||
set(CMAKE_SYSTEM_PROCESSOR i386)
|
||||
|
||||
set(CMAKE_STRIP i686-linux-gnu-strip)
|
||||
set(PKG_CONFIG_EXECUTABLE i686-linux-gnu-pkg-config CACHE PATH "Path to 32-bits pkg-config")
|
||||
|
||||
set(CMAKE_CXX_FLAGS_INIT "-m32")
|
||||
|
||||
@@ -14,82 +14,6 @@
|
||||
set(CMAKE_SYSTEM_NAME Windows)
|
||||
set(CMAKE_SYSTEM_PROCESSOR x86_64)
|
||||
|
||||
set(CMAKE_C_COMPILER x86_64-w64-mingw32-gcc-posix)
|
||||
set(CMAKE_CXX_COMPILER x86_64-w64-mingw32-g++-posix)
|
||||
set(CMAKE_C_COMPILER x86_64-w64-mingw32-gcc)
|
||||
set(CMAKE_CXX_COMPILER x86_64-w64-mingw32-g++)
|
||||
set(PKG_CONFIG_EXECUTABLE x86_64-w64-mingw32-pkg-config CACHE PATH "Path to Windows x86_64 pkg-config")
|
||||
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
|
||||
|
||||
macro(__cmake_find_root_save_and_reset)
|
||||
foreach(v
|
||||
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
|
||||
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
|
||||
)
|
||||
set(__save_${v} ${${v}})
|
||||
set(${v} NEVER)
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
macro(__cmake_find_root_restore)
|
||||
foreach(v
|
||||
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
|
||||
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
|
||||
)
|
||||
set(${v} ${__save_${v}})
|
||||
unset(__save_${v})
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
|
||||
# macro to find programs on the host OS
|
||||
macro(find_host_program)
|
||||
__cmake_find_root_save_and_reset()
|
||||
if(CMAKE_HOST_WIN32)
|
||||
SET(WIN32 1)
|
||||
SET(UNIX)
|
||||
SET(APPLE)
|
||||
elseif(CMAKE_HOST_APPLE)
|
||||
SET(APPLE 1)
|
||||
SET(UNIX)
|
||||
SET(WIN32)
|
||||
elseif(CMAKE_HOST_UNIX)
|
||||
SET(UNIX 1)
|
||||
SET(WIN32)
|
||||
SET(APPLE)
|
||||
endif()
|
||||
find_program(${ARGN})
|
||||
SET(WIN32 1)
|
||||
SET(APPLE)
|
||||
SET(UNIX)
|
||||
__cmake_find_root_restore()
|
||||
endmacro()
|
||||
|
||||
# macro to find packages on the host OS
|
||||
macro(find_host_package)
|
||||
__cmake_find_root_save_and_reset()
|
||||
if(CMAKE_HOST_WIN32)
|
||||
SET(WIN32 1)
|
||||
SET(UNIX)
|
||||
SET(APPLE)
|
||||
elseif(CMAKE_HOST_APPLE)
|
||||
SET(APPLE 1)
|
||||
SET(WIN32)
|
||||
SET(UNIX)
|
||||
elseif(CMAKE_HOST_UNIX)
|
||||
SET(UNIX 1)
|
||||
SET(WIN32)
|
||||
SET(APPLE)
|
||||
endif()
|
||||
find_package(${ARGN})
|
||||
SET(WIN32 1)
|
||||
SET(APPLE)
|
||||
SET(UNIX)
|
||||
__cmake_find_root_restore()
|
||||
endmacro()
|
||||
|
||||
@@ -35,66 +35,3 @@ set(CMAKE_MODULE_LINKER_FLAGS_INIT "-L${CMAKE_SYSROOT}/lib")
|
||||
|
||||
set(CMAKE_C_STANDARD_LIBRARIES_INIT "-latomic" CACHE STRING "" FORCE)
|
||||
set(CMAKE_CXX_STANDARD_LIBRARIES_INIT "-latomic" CACHE STRING "" FORCE)
|
||||
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
|
||||
|
||||
macro(__cmake_find_root_save_and_reset)
|
||||
foreach(v
|
||||
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
|
||||
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
|
||||
)
|
||||
set(__save_${v} ${${v}})
|
||||
set(${v} NEVER)
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
macro(__cmake_find_root_restore)
|
||||
foreach(v
|
||||
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
|
||||
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
|
||||
)
|
||||
set(${v} ${__save_${v}})
|
||||
unset(__save_${v})
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
# macro to find programs on the host OS
|
||||
macro(find_host_program)
|
||||
__cmake_find_root_save_and_reset()
|
||||
if(CMAKE_HOST_WIN32)
|
||||
SET(WIN32 1)
|
||||
SET(UNIX)
|
||||
elseif(CMAKE_HOST_APPLE)
|
||||
SET(APPLE 1)
|
||||
SET(UNIX)
|
||||
endif()
|
||||
find_program(${ARGN})
|
||||
SET(WIN32)
|
||||
SET(APPLE)
|
||||
SET(UNIX 1)
|
||||
__cmake_find_root_restore()
|
||||
endmacro()
|
||||
|
||||
# macro to find packages on the host OS
|
||||
macro(find_host_package)
|
||||
__cmake_find_root_save_and_reset()
|
||||
if(CMAKE_HOST_WIN32)
|
||||
SET(WIN32 1)
|
||||
SET(UNIX)
|
||||
elseif(CMAKE_HOST_APPLE)
|
||||
SET(APPLE 1)
|
||||
SET(UNIX)
|
||||
endif()
|
||||
find_package(${ARGN})
|
||||
SET(WIN32)
|
||||
SET(APPLE)
|
||||
SET(UNIX 1)
|
||||
__cmake_find_root_restore()
|
||||
endmacro()
|
||||
|
||||
@@ -2,74 +2,13 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
# Install compiler on debian using:
|
||||
# apt-get install -y gcc-x86-64-linux-gnu g++-x86-64-linux-gnu binutils-x86-64-linux-gnu pkg-config-x86-64-linux-gnu
|
||||
|
||||
set(CMAKE_SYSTEM_NAME Linux)
|
||||
set(CMAKE_SYSTEM_PROCESSOR amd64)
|
||||
|
||||
set(CMAKE_C_COMPILER x86_64-linux-gnu-gcc)
|
||||
set(CMAKE_CXX_COMPILER x86_64-linux-gnu-g++)
|
||||
set(CMAKE_STRIP x86_64-linux-gnu-strip)
|
||||
set(PKG_CONFIG_EXECUTABLE "NOT-FOUND" CACHE PATH "Path to amd64 pkg-config")
|
||||
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
|
||||
|
||||
macro(__cmake_find_root_save_and_reset)
|
||||
foreach(v
|
||||
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
|
||||
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
|
||||
)
|
||||
set(__save_${v} ${${v}})
|
||||
set(${v} NEVER)
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
macro(__cmake_find_root_restore)
|
||||
foreach(v
|
||||
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
|
||||
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
|
||||
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
|
||||
)
|
||||
set(${v} ${__save_${v}})
|
||||
unset(__save_${v})
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
|
||||
# macro to find programs on the host OS
|
||||
macro(find_host_program)
|
||||
__cmake_find_root_save_and_reset()
|
||||
if(CMAKE_HOST_WIN32)
|
||||
SET(WIN32 1)
|
||||
SET(UNIX)
|
||||
elseif(CMAKE_HOST_APPLE)
|
||||
SET(APPLE 1)
|
||||
SET(UNIX)
|
||||
endif()
|
||||
find_program(${ARGN})
|
||||
SET(WIN32)
|
||||
SET(APPLE)
|
||||
SET(UNIX 1)
|
||||
__cmake_find_root_restore()
|
||||
endmacro()
|
||||
|
||||
# macro to find packages on the host OS
|
||||
macro(find_host_package)
|
||||
__cmake_find_root_save_and_reset()
|
||||
if(CMAKE_HOST_WIN32)
|
||||
SET(WIN32 1)
|
||||
SET(UNIX)
|
||||
elseif(CMAKE_HOST_APPLE)
|
||||
SET(APPLE 1)
|
||||
SET(UNIX)
|
||||
endif()
|
||||
find_package(${ARGN})
|
||||
SET(WIN32)
|
||||
SET(APPLE)
|
||||
SET(UNIX 1)
|
||||
__cmake_find_root_restore()
|
||||
endmacro()
|
||||
set(PKG_CONFIG_EXECUTABLE x86_64-linux-gnu-pkg-config CACHE PATH "Path to amd64 pkg-config")
|
||||
|
||||
@@ -5,9 +5,9 @@ pugixml/[>=1.10]
|
||||
protobuf/[>=3.20.3]
|
||||
ittapi/[>=3.23.0]
|
||||
zlib/[>=1.2.8]
|
||||
opencl-icd-loader/[>=2022.09.30]
|
||||
# opencl-clhpp-headers/[>=2022.09.30]
|
||||
opencl-headers/[>=2022.09.30]
|
||||
opencl-icd-loader/2023.04.17
|
||||
opencl-clhpp-headers/2023.04.17
|
||||
opencl-headers/2023.04.17
|
||||
xbyak/[>=6.62]
|
||||
snappy/[>=1.1.7]
|
||||
gflags/2.2.2
|
||||
@@ -24,8 +24,6 @@ flatbuffers/[>=22.9.24]
|
||||
|
||||
[options]
|
||||
protobuf/*:lite=True
|
||||
onetbb/*:tbbmalloc=True
|
||||
onetbb/*:tbbproxy=True
|
||||
flatbuffers/*:header_only=True
|
||||
|
||||
[generators]
|
||||
|
||||
85
cspell.json
Normal file
85
cspell.json
Normal file
@@ -0,0 +1,85 @@
|
||||
{
|
||||
"version": "0.2",
|
||||
"ignorePaths": [],
|
||||
"dictionaryDefinitions": [],
|
||||
"dictionaries": [],
|
||||
"words": [
|
||||
"armhf",
|
||||
"autoremove",
|
||||
"bblayers",
|
||||
"bitbake",
|
||||
"buildtools",
|
||||
"caffe",
|
||||
"chrpath",
|
||||
"devel",
|
||||
"devtoolset",
|
||||
"dgpu",
|
||||
"diffstat",
|
||||
"Dockerfiles",
|
||||
"dpkg",
|
||||
"DWORD",
|
||||
"endsphinxdirective",
|
||||
"epel",
|
||||
"ERRORLEVEL",
|
||||
"executionpolicy",
|
||||
"fafe",
|
||||
"globbing",
|
||||
"gmmlib",
|
||||
"GNAs",
|
||||
"googlenet",
|
||||
"gpgcheck",
|
||||
"gpgkey",
|
||||
"hashfile",
|
||||
"HKLM",
|
||||
"HOSTTOOLS",
|
||||
"iigd",
|
||||
"insmod",
|
||||
"intelocl",
|
||||
"kaldi",
|
||||
"Khronos",
|
||||
"ldconfig",
|
||||
"libopencl",
|
||||
"libpython",
|
||||
"linmac",
|
||||
"LTSC",
|
||||
"maxdepth",
|
||||
"mklink",
|
||||
"monodepth",
|
||||
"mxnet",
|
||||
"nocache",
|
||||
"noglob",
|
||||
"nohup",
|
||||
"norestart",
|
||||
"ocloc",
|
||||
"onnx",
|
||||
"opencl",
|
||||
"openembedded",
|
||||
"openvino",
|
||||
"PACKAGECONFIG",
|
||||
"patchelf",
|
||||
"pkgdata",
|
||||
"pkgs",
|
||||
"pypi",
|
||||
"pzstd",
|
||||
"quantizer",
|
||||
"Redistributable",
|
||||
"remotesigned",
|
||||
"repolist",
|
||||
"rmmod",
|
||||
"servercore",
|
||||
"setupvars",
|
||||
"SETX",
|
||||
"skylake",
|
||||
"sphinxdirective",
|
||||
"toctree",
|
||||
"Uninstallation",
|
||||
"userspace",
|
||||
"venv",
|
||||
"WDDM",
|
||||
"WORKDIR",
|
||||
"yocto",
|
||||
"zstd"
|
||||
],
|
||||
"ignoreWords": [],
|
||||
"import": []
|
||||
}
|
||||
@@ -53,5 +53,5 @@ This section describes how to obtain and prepare your model for work with OpenVI
|
||||
* :doc:`See the supported formats and how to use them in your project <Supported_Model_Formats>`.
|
||||
* :doc:`Convert different model formats to the ov.Model format <openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide>`.
|
||||
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
OpenVINO™ is not just one tool. It is an expansive ecosystem of utilities, providing a comprehensive workflow for deep learning solution development. Learn more about each of them to reach the full potential of OpenVINO™ Toolkit.
|
||||
|
||||
|
||||
|
||||
**Neural Network Compression Framework (NNCF)**
|
||||
|
||||
A suite of advanced algorithms for Neural Network inference optimization with minimal accuracy drop. NNCF applies quantization, filter pruning, binarization and sparsity algorithms to PyTorch and TensorFlow models during training.
|
||||
@@ -37,7 +36,6 @@ More resources:
|
||||
* `GitHub <https://github.com/openvinotoolkit/training_extensions>`__
|
||||
* `Documentation <https://openvinotoolkit.github.io/training_extensions/stable/guide/get_started/introduction.html>`__
|
||||
|
||||
|
||||
**OpenVINO™ Security Add-on**
|
||||
|
||||
A solution for Model Developers and Independent Software Vendors to use secure packaging and secure model execution.
|
||||
@@ -63,21 +61,17 @@ More resources:
|
||||
|
||||
Compile tool is now deprecated. If you need to compile a model for inference on a specific device, use the following script:
|
||||
|
||||
.. tab-set::
|
||||
.. tab:: python
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
.. doxygensnippet:: docs/snippets/export_compiled_model.py
|
||||
:language: python
|
||||
:fragment: [export_compiled_model]
|
||||
|
||||
.. doxygensnippet:: docs/snippets/export_compiled_model.py
|
||||
:language: python
|
||||
:fragment: [export_compiled_model]
|
||||
.. tab:: cpp
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/export_compiled_model.cpp
|
||||
:language: cpp
|
||||
:fragment: [export_compiled_model]
|
||||
.. doxygensnippet:: docs/snippets/export_compiled_model.cpp
|
||||
:language: cpp
|
||||
:fragment: [export_compiled_model]
|
||||
|
||||
|
||||
To learn which device supports the import / export functionality, see the :doc:`feature support matrix <openvino_docs_OV_UG_Working_with_devices>`.
|
||||
@@ -92,5 +86,6 @@ A web-based tool for deploying deep learning models. Built on the core of OpenVI
|
||||
|
||||
OpenVINO™ Integration with TensorFlow will no longer be supported as of OpenVINO release 2023.0. As part of the 2023.0 release, OpenVINO will feature a significantly enhanced TensorFlow user experience within native OpenVINO without needing offline model conversions. :doc:`Learn more <openvino_docs_MO_DG_TensorFlow_Frontend>`.
|
||||
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
|
||||
@@ -194,4 +194,4 @@ See Also
|
||||
* :doc:`Using OpenVINO Runtime Samples <openvino_docs_OV_UG_Samples_Overview>`
|
||||
* :doc:`Hello Shape Infer SSD sample <openvino_inference_engine_samples_hello_reshape_ssd_README>`
|
||||
|
||||
@endsphinxdirective
|
||||
@endsphinxdirective
|
||||
|
||||
@@ -96,7 +96,7 @@ In this case, you can directly say that 'MyRelu' -> ``Relu`` mapping should be u
|
||||
:fragment: [frontend_extension_MyRelu]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
:sync: python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.py
|
||||
:language: python
|
||||
@@ -181,7 +181,7 @@ input name at position ``i`` maps to OpenVINO operation input at position ``i``
|
||||
|
||||
Let's see the following example. Like previously, we'd like to map ``CustomOperation`` in the original model,
|
||||
to OpenVINO ``CustomOperation`` as is (so their name and attributes names match). This time, that framework operation
|
||||
inputs and outputs are not stricly ordered and can be identified by their names ``A``, ``B``, ``C`` for inputs
|
||||
inputs and outputs are not strictly ordered and can be identified by their names ``A``, ``B``, ``C`` for inputs
|
||||
and ``X``, ``Y`` for outputs. Those inputs and outputs can be mapped to OpenVINO operation, such that inputs
|
||||
``A``, ``B``, ``C`` map to OpenVINO ``CustomOperation`` first, second and third input and ``X`` and ``Y``
|
||||
outputs map to OpenVINO ``CustomOperation`` first and second output respectively.
|
||||
@@ -337,7 +337,7 @@ from ONNX according to the formula: ``ThresholdedRelu(x, alpha) -> Multiply(x, C
|
||||
:fragment: [frontend_extension_ThresholdedReLU_header]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
:sync: python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.py
|
||||
:language: python
|
||||
@@ -353,7 +353,7 @@ from ONNX according to the formula: ``ThresholdedRelu(x, alpha) -> Multiply(x, C
|
||||
:fragment: [frontend_extension_ThresholdedReLU]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
:sync: python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.py
|
||||
:language: python
|
||||
@@ -384,7 +384,7 @@ corresponding outputs of the original framework operation in the same order.
|
||||
Some frameworks require output names of the operation to be provided during conversion.
|
||||
For PaddlePaddle operations, it is generally necessary to provide names for all outputs using the ``NamedOutputs`` container.
|
||||
Usually those names can be found in source code of the individual operation in PaddlePaddle code.
|
||||
The next example shows such conversion for the ``top_k_v2`` operation.
|
||||
The following example shows such conversion for the ``top_k_v2`` operation.
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.cpp
|
||||
:language: cpp
|
||||
|
||||
@@ -33,7 +33,7 @@ AsyncInferRequest()
|
||||
|
||||
The main goal of the ``AsyncInferRequest`` constructor is to define a device pipeline ``m_pipeline``. The example below demonstrates ``m_pipeline`` creation with the following stages:
|
||||
|
||||
* ``infer_preprocess_and_start_pipeline`` is a CPU ligthweight task to submit tasks to a remote device.
|
||||
* ``infer_preprocess_and_start_pipeline`` is a CPU lightweight task to submit tasks to a remote device.
|
||||
* ``wait_pipeline`` is a CPU non-compute task that waits for a response from a remote device.
|
||||
* ``infer_postprocess`` is a CPU compute task.
|
||||
|
||||
|
||||
@@ -83,6 +83,7 @@ Detailed Guides
|
||||
* :doc:`Quantized networks <openvino_docs_ov_plugin_dg_quantized_models>`
|
||||
* :doc:`Low precision transformations <openvino_docs_OV_UG_lpt>` guide
|
||||
* :doc:`Writing OpenVINO™ transformations <openvino_docs_transformations>` guide
|
||||
* `Integration with AUTO Plugin <https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/auto/docs/integration.md>`__
|
||||
|
||||
API References
|
||||
##############
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
@sphinxdirective
|
||||
|
||||
|
||||
Performance varies by use, configuration and other factors. Learn more at `www.intel.com/PerformanceIndex <https://www.intel.com/PerformanceIndex>`__.
|
||||
Performance varies by use, configuration and other factors. Learn more at [www.intel.com/PerformanceIndex](https://www.intel.com/PerformanceIndex).
|
||||
|
||||
Performance results are based on testing as of dates shown in configurations and may not reflect all publicly available updates. See backup for configuration details. No product or component can be absolutely secure.
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ Overview of Artificial Neural Networks Representation
|
||||
A deep learning network is usually represented as a directed graph describing the flow of data from the network input data to the inference results.
|
||||
Input data can be in the form of images, video, text, audio, or preprocessed information representing objects from the target area of interest.
|
||||
|
||||
Here is an illustration sof a small graph representing a model that consists of a single Convolutional layer and activation function:
|
||||
Here is an illustration of a small graph representing a model that consists of a single Convolutional layer and activation function:
|
||||
|
||||
.. image:: _static/images/small_IR_graph_demonstration.png
|
||||
|
||||
@@ -52,7 +52,7 @@ A set consists of several groups of operations:
|
||||
|
||||
* Generic element-wise arithmetic tensor operations such as ``Add``, ``Subtract``, and ``Multiply``.
|
||||
|
||||
* Comparison operations that compare two numeric tensors and produce boolean tensors, for example, ``Less``, ``Equeal``, ``Greater``.
|
||||
* Comparison operations that compare two numeric tensors and produce boolean tensors, for example, ``Less``, ``Equal``, ``Greater``.
|
||||
|
||||
* Logical operations that are dealing with boolean tensors, for example, ``And``, ``Xor``, ``Not``.
|
||||
|
||||
|
||||
@@ -128,7 +128,7 @@ Information about layer precision is also stored in the performance counters.
|
||||
resnet\_model/add\_5/fq\_input\_1 NOT\_RUN FakeQuantize undef 0 0
|
||||
=========================================================== ============= ============== ===================== ================= ==============
|
||||
|
||||
| The ``exeStatus`` column of the table includes the following possible values:
|
||||
| The ``execStatus`` column of the table includes the following possible values:
|
||||
| - ``EXECUTED`` - the layer was executed by standalone primitive.
|
||||
| - ``NOT_RUN`` - the layer was not executed by standalone primitive or was fused with another operation and executed in another layer primitive.
|
||||
|
|
||||
|
||||
@@ -18,7 +18,7 @@ Example of converting a PyTorch model directly from memory:
|
||||
.. code-block:: python
|
||||
|
||||
import torchvision
|
||||
|
||||
|
||||
model = torchvision.models.resnet50(pretrained=True)
|
||||
ov_model = convert_model(model)
|
||||
|
||||
@@ -34,7 +34,7 @@ Example of using native Python classes to set ``input_shape``, ``mean_values`` a
|
||||
.. code-block:: python
|
||||
|
||||
from openvino.runtime import PartialShape, Layout
|
||||
|
||||
|
||||
ov_model = convert_model(model, input_shape=PartialShape([1,3,100,100]), mean_values=[127, 127, 127], layout=Layout("NCHW"))
|
||||
|
||||
Example of using strings for setting ``input_shape``, ``mean_values`` and ``layout``:
|
||||
@@ -53,7 +53,7 @@ Example of using a tuple in the ``input`` parameter to cut a model:
|
||||
|
||||
ov_model = convert_model(model, input=("input_name", [3], np.float32))
|
||||
|
||||
For complex cases, when a value needs to be set in the ``input`` parameter, the ``InputCutInfo`` class can be used. ``InputCutInfo`` accepts four parameters: ``name``, ``shape``, ``type``, and ``value``.
|
||||
For complex cases, when a value needs to be set in the ``input`` parameter, the ``InputCutInfo`` class can be used. ``InputCutInfo`` accepts four parameters: ``name``, ``shape``, ``type``, and ``value``.
|
||||
|
||||
``InputCutInfo("input_name", [3], np.float32, [0.5, 2.1, 3.4])`` is equivalent of ``InputCutInfo(name="input_name", shape=[3], type=np.float32, value=[0.5, 2.1, 3.4])``.
|
||||
|
||||
@@ -69,15 +69,15 @@ Example of using ``InputCutInfo`` to freeze an input with value:
|
||||
.. code-block:: python
|
||||
|
||||
from openvino.tools.mo import convert_model, InputCutInfo
|
||||
|
||||
|
||||
ov_model = convert_model(model, input=InputCutInfo("input_name", [3], np.float32, [0.5, 2.1, 3.4]))
|
||||
|
||||
To set parameters for models with multiple inputs, use ``list`` of parameters.
|
||||
Parameters supporting ``list``:
|
||||
Parameters supporting ``list``:
|
||||
|
||||
* input
|
||||
* input_shape
|
||||
* layout
|
||||
* layout
|
||||
* source_layout
|
||||
* dest_layout
|
||||
* mean_values
|
||||
@@ -97,7 +97,7 @@ Example of using the ``Layout`` class to set the layout of a model input:
|
||||
|
||||
from openvino.runtime import Layout
|
||||
from openvino.tools.mo import convert_model
|
||||
|
||||
|
||||
ov_model = convert_model(model, source_layout=Layout("NCHW"))
|
||||
|
||||
To set both source and destination layouts in the ``layout`` parameter, use the ``LayoutMap`` class. ``LayoutMap`` accepts two parameters: ``source_layout`` and ``target_layout``.
|
||||
@@ -109,7 +109,7 @@ Example of using the ``LayoutMap`` class to change the layout of a model input:
|
||||
.. code-block:: python
|
||||
|
||||
from openvino.tools.mo import convert_model, LayoutMap
|
||||
|
||||
|
||||
ov_model = convert_model(model, layout=LayoutMap("NCHW", "NHWC"))
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
@@ -6,11 +6,8 @@
|
||||
|
||||
All of the issues below refer to :doc:`legacy functionalities <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer>`.
|
||||
|
||||
|
||||
If your question is not covered by the topics below, use the `OpenVINO Support page <https://software.intel.com/en-us/openvino-toolkit/documentation/get-started>`__, where you can participate on a free forum.
|
||||
|
||||
If your question is not covered by the topics below, use the
|
||||
`OpenVINO Support page <https://community.intel.com/t5/Intel-Distribution-of-OpenVINO/bd-p/distribution-openvino-toolkit>`__,
|
||||
If your question is not covered by the topics below, use the
|
||||
`OpenVINO Support page <https://community.intel.com/t5/Intel-Distribution-of-OpenVINO/bd-p/distribution-openvino-toolkit>`__,
|
||||
where you can participate in a free forum discussion.
|
||||
|
||||
.. warning::
|
||||
@@ -338,7 +335,7 @@ Q31. What does the message "Input port > 0 in --input is not supported if --inpu
|
||||
|
||||
**A:** When using the ``PORT:NODE`` notation for the ``--input`` command line argument and ``PORT`` > 0, you should specify ``--input_shape`` for this input. This is a limitation of the current Model Optimizer implementation.
|
||||
|
||||
.. note: It is no longer relevant message since the limitation on input port index for model truncation has been resolved.
|
||||
> **NOTE**: It is no longer relevant message since the limitation on input port index for model truncation has been resolved.
|
||||
|
||||
.. _question-32:
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
|
||||
.. warning::
|
||||
|
||||
Note that OpenVINO support for Caffe is currently being deprecated and will be removed entirely in the future.
|
||||
@@ -43,17 +44,17 @@ CLI Examples Using Caffe-Specific Parameters
|
||||
++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
* Launching model conversion for `bvlc_alexnet.caffemodel <https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet>`__ with a specified `prototxt` file. This is needed when the name of the Caffe model and the `.prototxt` file are different or are placed in different directories. Otherwise, it is enough to provide only the path to the input `model.caffemodel` file.
|
||||
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
|
||||
mo --input_model bvlc_alexnet.caffemodel --input_proto bvlc_alexnet.prototxt
|
||||
|
||||
|
||||
* Launching model conversion for `bvlc_alexnet.caffemodel <https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet>`__ with a specified `CustomLayersMapping` file. This is the legacy method of quickly enabling model conversion if your model has custom layers. This requires the Caffe system on the computer. Example of ``CustomLayersMapping.xml`` can be found in ``<OPENVINO_INSTALLATION_DIR>/mo/front/caffe/CustomLayersMapping.xml.example``. The optional parameters without default values and not specified by the user in the ``.prototxt`` file are removed from the Intermediate Representation, and nested parameters are flattened:
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
mo --input_model bvlc_alexnet.caffemodel -k CustomLayersMapping.xml --disable_omitting_optional --enable_flattening_nested_params
|
||||
|
||||
|
||||
This example shows a multi-input model with input layers: ``data``, ``rois``
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
@@ -2,18 +2,19 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
|
||||
.. warning::
|
||||
|
||||
Note that OpenVINO support for Apache MXNet is currently being deprecated and will be removed entirely in the future.
|
||||
|
||||
To convert an MXNet model, run model conversion with the path to the ``.params`` file of the input model:
|
||||
To convert an MXNet model, run Model Optimizer with the path to the ``.params`` file of the input model:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
mo --input_model model-file-0000.params
|
||||
|
||||
|
||||
Using MXNet-Specific Conversion Parameters
|
||||
Using MXNet-Specific Conversion Parameters
|
||||
##########################################
|
||||
|
||||
The following list provides the MXNet-specific parameters.
|
||||
@@ -39,7 +40,7 @@ The following list provides the MXNet-specific parameters.
|
||||
Use only if your topology is one of ssd gluoncv topologies
|
||||
|
||||
|
||||
.. note::
|
||||
.. note::
|
||||
|
||||
By default, model conversion API does not use the Apache MXNet loader. It transforms the topology to another format which is compatible with the latest version of Apache MXNet. However, the Apache MXNet loader is required for models trained with lower version of Apache MXNet. If your model was trained with an Apache MXNet version lower than 1.0.0, specify the ``--legacy_mxnet_model`` key to enable the Apache MXNet loader. Note that the loader does not support models with custom layers. In this case, you must manually recompile Apache MXNet with custom layers and install it in your environment.
|
||||
|
||||
@@ -76,3 +77,4 @@ See the :doc:`Model Conversion Tutorials <openvino_docs_MO_DG_prepare_model_conv
|
||||
* :doc:`Convert MXNet Style Transfer Model <openvino_docs_MO_DG_prepare_model_convert_model_mxnet_specific_Convert_Style_Transfer_From_MXNet>`
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
|
||||
@@ -2,13 +2,20 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
This page provides general instructions on how to convert a model from a PaddlePaddle format to the OpenVINO IR format using Model Optimizer. The instructions are different depending on PaddlePaddle model format.
|
||||
|
||||
Converting PaddlePaddle Model Inference Format
|
||||
##############################################
|
||||
|
||||
PaddlePaddle inference model includes ``.pdmodel`` (storing model structure) and ``.pdiparams`` (storing model weight). For how to export PaddlePaddle inference model, please refer to the `Exporting PaddlePaddle Inference Model <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/beginner/model_save_load_cn.html>`__ Chinese guide.
|
||||
|
||||
To convert a PaddlePaddle model, use the ``mo`` script and specify the path to the input ``.pdmodel`` model file:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
mo --input_model <INPUT_MODEL>.pdmodel
|
||||
|
||||
**For example,** this command converts a yolo v3 PaddlePaddle network to OpenVINO IR network:
|
||||
**For example**, this command converts a yolo v3 PaddlePaddle network to OpenVINO IR network:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
|
||||
@@ -112,7 +112,7 @@ TensorFlow 2 SavedModel format has a specific graph structure due to eager execu
|
||||
pruning, find custom input nodes in the ``StatefulPartitionedCall/*`` subgraph.
|
||||
|
||||
Since the 2023.0 release, direct pruning of models in SavedModel format is not supported.
|
||||
It is essential to freeze the model before pruning. Use the following code snippet for model freezing:
|
||||
It is essential to freeze the model before pruning. Use the following code snippet for model freezing:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
|
||||
@@ -111,7 +111,7 @@ The patch modifies the framework code by adding a special command-line argument
|
||||
+ else:
|
||||
+ state_dict = torch.load(path, map_location=torch.device('cpu'))
|
||||
|
||||
# For backward compatability, remove these (the new variable is called layers)
|
||||
# For backward compatibility, remove these (the new variable is called layers)
|
||||
for key in list(state_dict.keys()):
|
||||
@@ -673,8 +679,11 @@ class Yolact(nn.Module):
|
||||
else:
|
||||
|
||||
@@ -19,11 +19,14 @@
|
||||
|
||||
**OpenVINO IR (Intermediate Representation)** - the proprietary format of OpenVINO™, benefiting from the full extent of its features.
|
||||
|
||||
**ONNX, PaddlePaddle, TensorFlow, TensorFlow Lite** - formats supported directly, which means they can be used with
|
||||
OpenVINO Runtime without any prior conversion. For a guide on how to run inference on ONNX, PaddlePaddle, or TensorFlow,
|
||||
**ONNX, PaddlePaddle, TensorFlow, TensorFlow Lite** - formats supported directly, which means they can be used with
|
||||
OpenVINO Runtime without any prior conversion. For a guide on how to run inference on ONNX, PaddlePaddle, or TensorFlow,
|
||||
see how to :doc:`Integrate OpenVINO™ with Your Application <openvino_docs_OV_UG_Integrate_OV_with_your_application>`.
|
||||
|
||||
**MXNet, Caffe, Kaldi** - formats supported indirectly, which means they need to be converted to OpenVINO IR before running inference. The conversion is done with Model Conversion API and in some cases may involve intermediate steps.
|
||||
**MXNet, Caffe, Kaldi** - legacy formats that need to be converted to OpenVINO IR before running inference.
|
||||
The model conversion in some cases may involve intermediate steps. OpenVINO is currently proceeding
|
||||
**to deprecate these formats** and **remove their support entirely in the future**.
|
||||
|
||||
|
||||
Refer to the following articles for details on conversion for different formats and models:
|
||||
|
||||
|
||||
@@ -149,7 +149,7 @@ Converting a GNMT Model to the IR
|
||||
|
||||
**Step 1**. Clone the GitHub repository and check out the commit:
|
||||
|
||||
1. Clone the NMT reposirory:
|
||||
1. Clone the NMT repository:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ To download the model for IR conversion, follow the instructions:
|
||||
|
||||
1. Create new directory to store the model:
|
||||
|
||||
.. code-block:: shell
|
||||
.. code-block:: sh
|
||||
|
||||
mkdir lm_1b
|
||||
|
||||
|
||||
@@ -322,4 +322,4 @@ Additional Resources
|
||||
* :doc:`Model Optimizer Extensions <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions>`
|
||||
* :doc:`Extending Model Optimizer with Caffe Python Layers <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers>`
|
||||
|
||||
@endsphinxdirective
|
||||
@endsphinxdirective
|
||||
|
||||
@@ -104,4 +104,4 @@ Additional Resources
|
||||
* :doc:`Graph Traversal and Modification Using Ports and Connections <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer_Model_Optimizer_Ports_Connections>`
|
||||
* :doc:`Model Optimizer Extensions <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions>`
|
||||
|
||||
@endsphinxdirective
|
||||
@endsphinxdirective
|
||||
|
||||
@@ -54,4 +54,4 @@ Additional Resources
|
||||
* :doc:`Graph Traversal and Modification Using Ports and Connections <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer_Model_Optimizer_Ports_Connections>`
|
||||
* :doc:`Extending Model Optimizer with Caffe Python Layers <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers>`
|
||||
|
||||
@endsphinxdirective
|
||||
@endsphinxdirective
|
||||
|
||||
@@ -104,4 +104,4 @@ Additional Resources
|
||||
* :doc:`Model Optimizer Extensions <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions>`
|
||||
* :doc:`Extending Model Optimizer with Caffe Python Layers <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers>`
|
||||
|
||||
@endsphinxdirective
|
||||
@endsphinxdirective
|
||||
|
||||
@@ -104,4 +104,4 @@ Additional Resources
|
||||
* :doc:`Model Optimizer Extensions <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions>`
|
||||
* :doc:`Extending Model Optimizer with Caffe Python Layers <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers>`
|
||||
|
||||
@endsphinxdirective
|
||||
@endsphinxdirective
|
||||
|
||||
@@ -180,4 +180,4 @@ Additional Resources
|
||||
* :doc:`Model Optimizer Extensions <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions>`
|
||||
* :doc:`Extending Model Optimizer with Caffe Python Layers <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers>`
|
||||
|
||||
@endsphinxdirective
|
||||
@endsphinxdirective
|
||||
|
||||
@@ -596,4 +596,4 @@ Additional Resources
|
||||
* :doc:`Model Optimizer Extensions <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions>`
|
||||
* :doc:`Extending Model Optimizer with Caffe Python Layers <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers>`
|
||||
|
||||
@endsphinxdirective
|
||||
@endsphinxdirective
|
||||
|
||||
@@ -1,126 +1,115 @@
|
||||
# Debugging Auto-Device Plugin {#openvino_docs_OV_UG_supported_plugins_AUTO_debugging}
|
||||
|
||||
## Using Debug Log
|
||||
In case of execution problems, just like all other plugins, Auto-Device provides the user with information on exceptions and error values. If the returned data is not enough for debugging purposes, more information may be acquired by means of `ov::log::Level`.
|
||||
|
||||
There are six levels of logs, which can be called explicitly or set via the `OPENVINO_LOG_LEVEL` environment variable (can be overwritten by `compile_model()` or `set_property()`):
|
||||
|
||||
0 - ov::log::Level::NO
|
||||
1 - ov::log::Level::ERR
|
||||
2 - ov::log::Level::WARNING
|
||||
3 - ov::log::Level::INFO
|
||||
4 - ov::log::Level::DEBUG
|
||||
5 - ov::log::Level::TRACE
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
Using Debug Log
|
||||
###############
|
||||
.. tab:: C++
|
||||
|
||||
In case of execution problems, just like all other plugins, Auto-Device provides the user with information on exceptions and error values. If the returned data is not enough for debugging purposes, more information may be acquired by means of ``ov::log::Level``.
|
||||
.. doxygensnippet:: docs/snippets/AUTO6.cpp
|
||||
:language: cpp
|
||||
:fragment: [part6]
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
There are six levels of logs, which can be called explicitly or set via the ``OPENVINO_LOG_LEVEL`` environment variable (can be overwritten by ``compile_model()`` or ``set_property()``):
|
||||
.. doxygensnippet:: docs/snippets/ov_auto.py
|
||||
:language: python
|
||||
:fragment: [part6]
|
||||
|
||||
0 - `ov::log::Level::NO <https://docs.openvino.ai/2023.0/enumov_1_1log_1_1Level.html#doxid-group-ov-runtime-cpp-prop-api-1gga9868e1ed6b0286d17cdb0ab85b2cc66bac2f3f489a00553e7a01d369c103c7251>`__
|
||||
.. tab:: OS environment variable
|
||||
|
||||
1 - `ov::log::Level::ERR <https://docs.openvino.ai/2023.0/enumov_1_1log_1_1Level.html#doxid-group-ov-runtime-cpp-prop-api-1gga9868e1ed6b0286d17cdb0ab85b2cc66bac2f3f489a00553e7a01d369c103c7251>`__
|
||||
.. code-block:: sh
|
||||
|
||||
2 - `ov::log::Level::WARNING <https://docs.openvino.ai/2023.0/enumov_1_1log_1_1Level.html#doxid-group-ov-runtime-cpp-prop-api-1gga9868e1ed6b0286d17cdb0ab85b2cc66bac2f3f489a00553e7a01d369c103c7251>`__
|
||||
|
||||
3 - `ov::log::Level::INFO <https://docs.openvino.ai/2023.0/enumov_1_1log_1_1Level.html#doxid-group-ov-runtime-cpp-prop-api-1gga9868e1ed6b0286d17cdb0ab85b2cc66bac2f3f489a00553e7a01d369c103c7251>`__
|
||||
|
||||
4 - `ov::log::Level::DEBUG <https://docs.openvino.ai/2023.0/enumov_1_1log_1_1Level.html#doxid-group-ov-runtime-cpp-prop-api-1gga9868e1ed6b0286d17cdb0ab85b2cc66bac2f3f489a00553e7a01d369c103c7251>`__
|
||||
|
||||
5 - `ov::log::Level::TRACE <https://docs.openvino.ai/2023.0/enumov_1_1log_1_1Level.html#doxid-group-ov-runtime-cpp-prop-api-1gga9868e1ed6b0286d17cdb0ab85b2cc66bac2f3f489a00553e7a01d369c103c7251>`__
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/AUTO6.cpp
|
||||
:language: cpp
|
||||
:fragment: [part6]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto.py
|
||||
:language: python
|
||||
:fragment: [part6]
|
||||
|
||||
.. tab-item:: OS environment variable
|
||||
:sync: os_env_variable
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
When defining it via the variable,
|
||||
a number needs to be used instead of a log level name, e.g.:
|
||||
|
||||
Linux
|
||||
export OPENVINO_LOG_LEVEL=0
|
||||
|
||||
Windows
|
||||
set OPENVINO_LOG_LEVEL=0
|
||||
When defining it via the variable,
|
||||
a number needs to be used instead of a log level name, e.g.:
|
||||
|
||||
Linux
|
||||
export OPENVINO_LOG_LEVEL=0
|
||||
|
||||
Windows
|
||||
set OPENVINO_LOG_LEVEL=0
|
||||
@endsphinxdirective
|
||||
|
||||
The property returns information in the following format:
|
||||
|
||||
@sphinxdirective
|
||||
.. code-block:: sh
|
||||
|
||||
[time]LOG_LEVEL[file] [PLUGIN]: message
|
||||
@endsphinxdirective
|
||||
|
||||
in which the ``LOG_LEVEL`` is represented by the first letter of its name (ERROR being an exception and using its full name). For example:
|
||||
in which the `LOG_LEVEL` is represented by the first letter of its name (ERROR being an exception and using its full name). For example:
|
||||
|
||||
@sphinxdirective
|
||||
.. code-block:: sh
|
||||
|
||||
[17:09:36.6188]D[plugin.cpp:167] deviceName:GPU, defaultDeviceID:, uniqueName:GPU_
|
||||
[17:09:36.6242]I[executable_network.cpp:181] [AUTOPLUGIN]:select device:GPU
|
||||
[17:09:36.6809]ERROR[executable_network.cpp:384] [AUTOPLUGIN] load failed, GPU:[ GENERAL_ERROR ]
|
||||
@endsphinxdirective
|
||||
|
||||
|
||||
Instrumentation and Tracing Technology
|
||||
######################################
|
||||
## Instrumentation and Tracing Technology
|
||||
|
||||
All major performance calls of both OpenVINO™ Runtime and the AUTO plugin are instrumented with Instrumentation and Tracing Technology (ITT) APIs. To enable ITT in OpenVINO™ Runtime, compile it with the following option:
|
||||
|
||||
@sphinxdirective
|
||||
.. code-block:: sh
|
||||
|
||||
-DENABLE_PROFILING_ITT=ON
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
For more information, you can refer to:
|
||||
* [Intel® VTune™ Profiler User Guide](https://www.intel.com/content/www/us/en/develop/documentation/vtune-help/top/api-support/instrumentation-and-tracing-technology-apis.html)
|
||||
|
||||
* `Intel® VTune™ Profiler User Guide <https://www.intel.com/content/www/us/en/develop/documentation/vtune-help/top/api-support/instrumentation-and-tracing-technology-apis.html>`__
|
||||
### Analyze Code Performance on Linux
|
||||
|
||||
Analyze Code Performance on Linux
|
||||
+++++++++++++++++++++++++++++++++
|
||||
|
||||
You can analyze code performance using Intel® VTune™ Profiler. For more information and installation instructions refer to the `installation guide (PDF) <https://software.intel.com/content/www/us/en/develop/download/intel-vtune-install-guide-linux-os.html>`__
|
||||
You can analyze code performance using Intel® VTune™ Profiler. For more information and installation instructions refer to the [installation guide (PDF)](https://software.intel.com/content/www/us/en/develop/download/intel-vtune-install-guide-linux-os.html)
|
||||
With Intel® VTune™ Profiler installed you can configure your analysis with the following steps:
|
||||
|
||||
1. Open Intel® VTune™ Profiler GUI on the host machine with the following command:
|
||||
@sphinxdirective
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cd /vtune install dir/intel/oneapi/vtune/2021.6.0/env
|
||||
source vars.sh
|
||||
vtune-gui
|
||||
|
||||
|
||||
2. Select **Configure Analysis**
|
||||
|
||||
3. In the **where** pane, select **Local Host**
|
||||
|
||||
.. image:: _static/images/OV_UG_supported_plugins_AUTO_debugging-img01-localhost.png
|
||||
:align: center
|
||||
|
||||
4. In the **what** pane, specify your target application/script on the local system.
|
||||
|
||||
.. image:: _static/images/OV_UG_supported_plugins_AUTO_debugging-img02-launch.png
|
||||
:align: center
|
||||
|
||||
5. In the **how** pane, choose and configure the analysis type you want to perform, for example, **Hotspots Analysis**: identify the most time-consuming functions and drill down to see time spent on each line of source code. Focus optimization efforts on hot code for the greatest performance impact.
|
||||
|
||||
.. image:: _static/images/OV_UG_supported_plugins_AUTO_debugging-img03-hotspots.png
|
||||
:align: center
|
||||
|
||||
6. Start the analysis by clicking the start button. When it is done, you will get a summary of the run, including top hotspots and top tasks in your application:
|
||||
|
||||
.. image:: _static/images/OV_UG_supported_plugins_AUTO_debugging-img04-vtunesummary.png
|
||||
:align: center
|
||||
|
||||
7. To analyze ITT info related to the Auto plugin, click on the **Bottom-up** tab, choose the **Task Domain/Task Type/Function/Call Stack** from the dropdown list - Auto plugin-related ITT info is under the MULTIPlugin task domain:
|
||||
|
||||
.. image:: _static/images/OV_UG_supported_plugins_AUTO_debugging-img05-vtunebottomup.png
|
||||
:align: center
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cd /vtune install dir/intel/oneapi/vtune/2021.6.0/env
|
||||
source vars.sh
|
||||
vtune-gui
|
||||
@endsphinxdirective
|
||||
|
||||
2. select **Configure Analysis**
|
||||
3. In the **where** pane, select **Local Host**
|
||||
@sphinxdirective
|
||||
.. image:: _static/images/OV_UG_supported_plugins_AUTO_debugging-img01-localhost.png
|
||||
:align: center
|
||||
@endsphinxdirective
|
||||
4. In the **what** pane, specify your target application/script on the local system.
|
||||
@sphinxdirective
|
||||
.. image:: _static/images/OV_UG_supported_plugins_AUTO_debugging-img02-launch.png
|
||||
:align: center
|
||||
@endsphinxdirective
|
||||
5. In the **how** pane, choose and configure the analysis type you want to perform, for example, **Hotspots Analysis**:
|
||||
identify the most time-consuming functions and drill down to see time spent on each line of source code. Focus optimization efforts on hot code for the greatest performance impact.
|
||||
@sphinxdirective
|
||||
.. image:: _static/images/OV_UG_supported_plugins_AUTO_debugging-img03-hotspots.png
|
||||
:align: center
|
||||
@endsphinxdirective
|
||||
6. Start the analysis by clicking the start button. When it is done, you will get a summary of the run, including top hotspots and top tasks in your application:
|
||||
@sphinxdirective
|
||||
.. image:: _static/images/OV_UG_supported_plugins_AUTO_debugging-img04-vtunesummary.png
|
||||
:align: center
|
||||
@endsphinxdirective
|
||||
7. To analyze ITT info related to the Auto plugin, click on the **Bottom-up** tab, choose the **Task Domain/Task Type/Function/Call Stack** from the dropdown list - Auto plugin-related ITT info is under the MULTIPlugin task domain:
|
||||
@sphinxdirective
|
||||
.. image:: _static/images/OV_UG_supported_plugins_AUTO_debugging-img05-vtunebottomup.png
|
||||
:align: center
|
||||
@endsphinxdirective
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
|
||||
Abs-1 <openvino_docs_ops_arithmetic_Abs_1>
|
||||
Acos-1 <openvino_docs_ops_arithmetic_Acos_1>
|
||||
Acosh-3 <openvino_docs_ops_arithmetic_Acosh_3>
|
||||
@@ -82,6 +82,7 @@
|
||||
Greater-1 <openvino_docs_ops_comparison_Greater_1>
|
||||
GroupConvolutionBackpropData-1 <openvino_docs_ops_convolution_GroupConvolutionBackpropData_1>
|
||||
GroupConvolution-1 <openvino_docs_ops_convolution_GroupConvolution_1>
|
||||
GroupNormalization-12 <openvino_docs_ops_normalization_GroupNormalization_12>
|
||||
HardSigmoid-1 <openvino_docs_ops_activation_HardSigmoid_1>
|
||||
HSigmoid-5 <openvino_docs_ops_activation_HSigmoid_5>
|
||||
HSwish-4 <openvino_docs_ops_activation_HSwish_4>
|
||||
@@ -136,6 +137,7 @@
|
||||
PReLU-1 <openvino_docs_ops_activation_PReLU_1>
|
||||
PSROIPooling-1 <openvino_docs_ops_detection_PSROIPooling_1>
|
||||
Pad-1 <openvino_docs_ops_movement_Pad_1>
|
||||
Pad-12 <openvino_docs_ops_movement_Pad_12>
|
||||
Parameter-1 <openvino_docs_ops_infrastructure_Parameter_1>
|
||||
Power-1 <openvino_docs_ops_arithmetic_Power_1>
|
||||
PriorBoxClustered-1 <openvino_docs_ops_detection_PriorBoxClustered_1>
|
||||
|
||||
@@ -120,6 +120,10 @@ The ``start_async`` function call is not required to be synchronized - it waits
|
||||
:language: python
|
||||
:fragment: [asyncinferqueue]
|
||||
|
||||
.. warning::
|
||||
|
||||
``InferRequest`` objects that are acquired by iterating over an ``AsyncInferQueue`` object, or via ``[id]``, are guaranteed to work with read-only methods like getting tensors.
|
||||
Any mutating methods (e.g. start_async, set_callback) of a single request will put the parent AsyncInferQueue object in an invalid state.
|
||||
|
||||
Acquiring Results from Requests
|
||||
-------------------------------
|
||||
|
||||
@@ -0,0 +1,288 @@
|
||||
# OpenVINO Conditional Compilation for Optimal Binary Size{#opevino_conditional_compilation_deploying_guide}
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
Conditional compilation can significantly reduce the binary size of the OpenVINO package by excluding unnecessary components for inference of particular models. This is particularly useful for edge and client deployment scenarios, where reducing the size of application binary is important.
|
||||
|
||||
.. important::
|
||||
|
||||
The tradeoff of conditional compilation is that the produced OpenVINO runtime can only run inference for the models and platforms for which conditional compilation was applied.
|
||||
|
||||
|
||||
Learn more in the `conditional_compilation_guide <https://github.com/openvinotoolkit/openvino/blob/master/docs/dev/conditional_compilation.md>`__ and the `Conditional_compilation_developer_guide <https://github.com/openvinotoolkit/openvino/blob/master/src/common/conditional_compilation/docs/develop_cc_for_new_component.md>`__
|
||||
|
||||
There are two steps to reduce binary size of the OpenVINO runtime library with conditional compilation:
|
||||
|
||||
- Apply ``SELECTIVE_BUILD=COLLECT`` and ``ENABLE_PROFILING_ITT=ON`` build options to enable the analysis mode of conditional compilation, which collects statistics data using ``itt``.
|
||||
|
||||
- Apply ``SELECTIVE_BUILD=ON`` and ``SELECTIVE_BUILD_STAT=<statistics_data.csv>`` build options to exclude inactive code region with the help of previous statistics data and get the final OpenVINO package.
|
||||
|
||||
.. note::
|
||||
|
||||
Install ``Python``, which is needed to collect the statistics data.
|
||||
|
||||
|
||||
Conditional Compilation for Multiple Models
|
||||
############################################
|
||||
|
||||
Stage 1: collecting statistics information about code usage
|
||||
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
- Build OpenVINO with ``SELECTIVE_BUILD=COLLECT`` option
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
git submodule init
|
||||
git submodule update
|
||||
mkdir build && cd build
|
||||
cmake -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT ..
|
||||
cmake --build .
|
||||
|
||||
- Build ITT collector for code usage analysis
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cmake --build . --target sea_itt_lib
|
||||
|
||||
- Run the target application under the ITT collector for code usage analysis of each model
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
python thirdparty/itt_collector/runtool/sea_runtool.py --bindir ${OPENVINO_LIBRARY_DIR} -o ${MY_MODEL_RESULT} ! ./benchmark_app -niter 1 -nireq 1 -m ${MY_MODEL}.xml
|
||||
|
||||
Then, statistics information is generated and stored in ``.csv`` format files under the ``{MY_MODEL_RESULT}`` directory.
|
||||
|
||||
.. tip::
|
||||
|
||||
If you want to run an application other than benchmark_app to get statistics data, make sure to limit the number of inference requests and iterations, to avoid excessively long profiling time and overly large statistics data.
|
||||
|
||||
|
||||
You can run this `script <https://github.com/openvinotoolkit/openvino/blob/master/src/common/conditional_compilation/scripts/ccheader.py>`__ to get the generated header file from the CSV files and to confirm that the statistics are correct. This step is done implicitly in stage 2 of conditional compilation, so you can skip it if not needed.
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
python3.8 ../../src/common/conditional_compilation/scripts/ccheader.py --stat ${csv_files} --out conditional_compilation_gen.h
|
||||
|
||||
The conditional_compilation_gen.h looks like this:
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
#pragma once
|
||||
|
||||
#define tbb_bind_TBBbindSystemTopology 1
|
||||
#define tbb_bind_task_arena_initialize 1
|
||||
|
||||
#define ov_opset_opset1_Parameter 1
|
||||
#define ov_opset_opset1_Constant 1
|
||||
#define ov_opset_opset1_Convolution 1
|
||||
#define ov_opset_opset1_Add 1
|
||||
#define ov_opset_opset1_Relu 1
|
||||
#define ov_opset_opset1_GroupConvolution 1
|
||||
#define ov_opset_opset3_ShapeOf 1
|
||||
#define ov_opset_opset1_Squeeze 1
|
||||
#define ov_opset_opset4_Range 1
|
||||
#define ov_opset_opset1_ReduceMean 1
|
||||
#define ov_opset_opset1_Softmax 1
|
||||
#define ov_opset_opset1_Result 1
|
||||
|
||||
#define ov_op_v0_Parameter_visit_attributes 1
|
||||
#define ov_op_v0_Parameter_validate_and_infer_types 1
|
||||
#define ov_op_v0_Parameter_clone_with_new_inputs 1
|
||||
#define ov_op_v0_Constant_visit_attributes 1
|
||||
#define ov_op_v0_Constant_clone_with_new_inputs 1
|
||||
#define ov_op_v1_Convolution_visit_attributes 1
|
||||
#define ov_op_v1_Convolution_validate_and_infer_types 1
|
||||
#define ov_op_v1_Convolution_clone_with_new_inputs 1
|
||||
#define ov_op_v0_util_BinaryElementwiseArithmetic_visit_attributes 1
|
||||
#define ov_op_v1_Add_visit_attributes 1
|
||||
#define ov_op_v0_util_BinaryElementwiseArithmetic_validate_and_infer_types 1
|
||||
#define ov_op_v1_Add_clone_with_new_inputs 1
|
||||
#define ov_op_v0_Relu_visit_attributes 1
|
||||
#define ov_op_util_UnaryElementwiseArithmetic_validate_and_infer_types 1
|
||||
#define ov_op_v0_Relu_clone_with_new_inputs 1
|
||||
#define ov_op_v1_GroupConvolution_visit_attributes 1
|
||||
#define ov_op_v1_GroupConvolution_validate_and_infer_types 1
|
||||
#define ov_op_v1_GroupConvolution_clone_with_new_inputs 1
|
||||
#define ov_op_v3_ShapeOf_visit_attributes 1
|
||||
#define ov_op_v3_ShapeOf_validate_and_infer_types 1
|
||||
#define ov_op_v3_ShapeOf_clone_with_new_inputs 1
|
||||
#define ov_op_v0_Squeeze_visit_attributes 1
|
||||
...
|
||||
|
||||
|
||||
Stage 2: build resulting OpenVINO package
|
||||
++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
Based on the statistics information, re-build OpenVINO to generate OpenVINO binaries with optimal size.
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cmake -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=${ABSOLUTE_PATH_TO_STATISTICS_FILES}/*.csv -DENABLE_PROFILING_ITT=OFF ..
|
||||
cmake --build .
|
||||
|
||||
.. tip::
|
||||
|
||||
The recommended scenario for conditional compilation is a static build of OpenVINO. In this case, you can add ``-DBUILD_SHARED_LIBS=OFF`` to enable a static build and get the optimal binary size benefit.
|
||||
|
||||
|
||||
Conditional Compilation for Different Instruction Set Architectures(ISAs)
|
||||
#########################################################################
|
||||
|
||||
The steps are almost the same as when building for different models, except that different statistics data are collected on different ``ISAs``.
|
||||
Run the target application under the ITT collector for code usage analysis on each ``ISA``:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
python thirdparty/itt_collector/runtool/sea_runtool.py --bindir ${OPENVINO_LIBRARY_DIR} -o ${MY_MODEL_RESULT} ! ./benchmark_app -niter 1 -nireq 1 -m ${MY_MODEL}.xml
|
||||
|
||||
Put all CSV files together for ``stage 2`` to generate resulting OpenVINO binaries:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cmake -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=${ABSOLUTE_PATH_TO_STATISTICS_FILES}/*.csv -DENABLE_PROFILING_ITT=OFF ..
|
||||
cmake --build .
|
||||
|
||||
|
||||
Device-agnostic Conditional Compilation (POC)
|
||||
#############################################
|
||||
In some cases, adopting conditional compilation is necessary to support multiple different ``SKUs`` (Stock Keeping Units — a SKU is usually a string of numbers and letters used by the manufacturer to identify a product), but there may be limitations in collecting statistics information for every target hardware. To achieve this, conditional compilation must be capable of running a model on an accelerator with all previous SKUs.
|
||||
|
||||
Conditional compilation requires the initial collection of statistical information to exclude unused code regions, such as ops and kernels. To do this, all included ops and kernels must be executed at least once. For multiple SKUs, it is necessary for all ops and kernels that will be used by any of the SKUs to be encountered at least once in the profiling data. If the profiling is done on a CPU platform, it is impossible without using an emulator.
|
||||
|
||||
A simple method is to leverage `SDE <https://www.intel.com/content/www/us/en/developer/articles/license/pre-release-license-agreement-for-software-development-emulator.html>`__ to emulate different CPU's SKU to generate multiple statistics CSV files for different SKUs, for example:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
for cpu in spr adl tgl icl skl; do
|
||||
python ../thirdparty/itt_collector/runtool/sea_runtool.py --bindir ${OPENVINO_LIBRARY_DIR} -o ${MY_MODEL_RESULT} ! sde -$cpu -- ./benchmark_app -niter 1 -nireq 1 -m ${MY_MODEL}.xml
|
||||
done
|
||||
|
||||
Considering that JIT kernels can be affected by the L1/L2/L3 cache size and the number of CPU cores, there is also a simple method to emulate the L2/L3 cache size and the number of CPU cores.
|
||||
|
||||
- L2/L3 cache emulation
|
||||
|
||||
Hack the function that gets the cache size
|
||||
|
||||
``unsigned int dnnl::impl::cpu::platform::get_per_core_cache_size(int level)``
|
||||
|
||||
to make it return an emulated cache size in the analysis stage. The simplest way is to use an environment variable to pass the emulated cache size, for example:
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
#if defined(SELECTIVE_BUILD_ANALYZER)
|
||||
if (level == 2) {
|
||||
const char* L2_cache_size = std::getenv("OV_CC_L2_CACHE_SIZE");
|
||||
if (L2_cache_size) {
|
||||
int size = std::atoi(L2_cache_size);
|
||||
if (size > 0) {
|
||||
return size;
|
||||
}
|
||||
}
|
||||
} else if (level == 3) {
|
||||
const char* L3_cache_size = std::getenv("OV_CC_L3_CACHE_SIZE");
|
||||
if (L3_cache_size) {
|
||||
int size = std::atoi(L3_cache_size);
|
||||
if (size > 0) {
|
||||
return size;
|
||||
}
|
||||
}
|
||||
} else if (level == 1) {
|
||||
const char* L1_cache_size = std::getenv("OV_CC_L1_CACHE_SIZE");
|
||||
if (L1_cache_size) {
|
||||
int size = std::atoi(L1_cache_size);
|
||||
if (size > 0) {
|
||||
return size;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
- CPU core number emulation
|
||||
|
||||
Leverage ``numactl`` tool to control core number.
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
python thirdparty/itt_collector/runtool/sea_runtool.py --bindir ${OPENVINO_LIBRARY_DIR} -o ${MY_MODEL_RESULT} ! numactl -C 0-$core_num ./benchmark_app -niter 1 -nireq 1 -m ${MY_MODEL}.xml
|
||||
|
||||
|
||||
Once the SKUs are decided, you can collect the CPU information (CPUID, L1/L2/L3 cache size, core number) and then profile each combination of (CPUID, L1/L2/L3 cache size, core number) to get the profiling CSV files, then apply all CSV files to generate the final conditional compilation package.
|
||||
|
||||
Example of generating a conditional compilation package on a single system:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
export OV_CC_L1_CACHE_SIZE=<L1 cache size>
|
||||
export OV_CC_L2_CACHE_SIZE=<L2 cache size>
|
||||
export OV_CC_L3_CACHE_SIZE=<L3 cache size>
|
||||
python thirdparty/itt_collector/runtool/sea_runtool.py --bindir ${OPENVINO_LIBRARY_DIR} -o ${MY_MODEL_RESULT} ! sde -spr -- numactl -C 0-$core_num ./benchmark_app -niter 1 -nireq 1 -m ${MY_MODEL}.xml
|
||||
|
||||
Perform the above steps for each SKU's information (CPUID, L1/L2/L3 cache size, core number), collect all generated statistics CSV files together, and provide them to build the resulting OpenVINO package.
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cmake -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=${ABSOLUTE_PATH_TO_STATISTICS_FILES}/*.csv -DENABLE_PROFILING_ITT=OFF ..
|
||||
cmake --build .
|
||||
|
||||
|
||||
How to Enable Conditional Compilation on Windows
|
||||
################################################
|
||||
|
||||
Find detailed information in the Building OpenVINO static libraries `document <https://github.com/openvinotoolkit/openvino/blob/master/docs/dev/static_libaries.md>`__ .
|
||||
|
||||
|
||||
Stage 1: Selective build analyzed stage
|
||||
++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
Build OpenVINO with conditional compilation enabled:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
call C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvars64.bat
|
||||
set OPENVINO_HOME=D:\work_path\openvino
|
||||
cd %OPENVINO_HOME%
|
||||
md build_cc
|
||||
cd build_cc
|
||||
cmake -G Ninja -Wno-dev -DCMAKE_BUILD_TYPE=Debug -DENABLE_CPPLINT=OFF -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF -DENABLE_FASTER_BUILD=ON -DENABLE_SANITIZER=OFF -DTHREADING=TBB -DBUILD_SHARED_LIBS=OFF -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT -DENABLE_INTEL_GPU=OFF -DENABLE_INTEL_GNA=OFF -DENABLE_MULTI=OFF -DENABLE_AUTO=OFF -DENABLE_AUTO_BATCH=OFF -DENABLE_HETERO=OFF -DENABLE_TEMPLATE=OFF -DENABLE_OV_ONNX_FRONTEND=OFF -DENABLE_OV_PADDLE_FRONTEND=OFF -DENABLE_OV_PYTORCH_FRONTEND=OFF -DENABLE_OV_TF_FRONTEND=OFF -DCMAKE_INSTALL_PREFIX=install ..
|
||||
|
||||
cmake --build . --config Debug
|
||||
|
||||
|
||||
Collect statistics data
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cd %OPENVINO_HOME%\build_cc
|
||||
cmake --build . --config Debug --target sea_itt_lib
|
||||
cd %OPENVINO_HOME%
|
||||
set PATH=%PATH%;%OPENVINO_HOME%\\temp\tbb\bin
|
||||
mkdir cc_data
|
||||
cd %OPENVINO_HOME%\cc_data
|
||||
python3 ..\thirdparty\itt_collector\runtool\sea_runtool.py --bindir ..\bin\intel64\Debug -o %OPENVINO_HOME%\cc_data\data ! ..\bin\intel64\Debug\benchmark_app.exe -niter 1 -nireq 1 -m <your_model.xml>
|
||||
|
||||
.. note::
|
||||
|
||||
This stage is for profiling data. The choice of whether to build with Debug or Release depends on your specific requirements.
|
||||
|
||||
|
||||
Stage 2: build resulting OpenVINO package
|
||||
+++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
Generate the final OpenVINO package with optimal binary size
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cd %OPENVINO_HOME%
|
||||
md build
|
||||
cd build
|
||||
|
||||
cmake -G "Visual Studio 16 2019" -A x64 -DENABLE_CPPLINT=OFF -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF -DCMAKE_BUILD_TYPE=Release -DENABLE_FASTER_BUILD=ON -DENABLE_PROFILING_ITT=OFF -DSELECTIVE_BUILD=ON -DENABLE_INTEL_GPU=OFF -DENABLE_INTEL_GNA=OFF -DENABLE_MULTI=OFF -DENABLE_AUTO=OFF -DENABLE_AUTO_BATCH=OFF -DENABLE_HETERO=OFF -DENABLE_TEMPLATE=OFF -DENABLE_OV_ONNX_FRONTEND=OFF -DENABLE_OV_PADDLE_FRONTEND=OFF -DENABLE_OV_PYTORCH_FRONTEND=OFF -DENABLE_OV_TF_FRONTEND=OFF -DSELECTIVE_BUILD_STAT=%OPENVINO_HOME%\cc_data\*.csv -DBUILD_SHARED_LIBS=OFF -DENABLE_LTO=ON -DENABLE_ONEDNN_FOR_GPU=OFF -DENABLE_GAPI_PREPROCESSING=OFF -DENABLE_OV_TF_LITE_FRONTEND=OFF -DENABLE_CLDNN=OFF -DENABLE_PROFILING_FIRST_INFERENCE=OFF -DENABLE_INTEL_MYRIAD_COMMON=OFF -DENABLE_INTEL_MYRIAD=OFF ..
|
||||
|
||||
cmake --build . --config Release
|
||||
|
||||
|
||||
.. tip::
|
||||
|
||||
``-DSELECTIVE_BUILD=ON`` and ``-DSELECTIVE_BUILD_STAT=%OPENVINO_HOME%\cc_data\*.csv`` are required, and ``-DBUILD_SHARED_LIBS=OFF`` is recommended.
|
||||
|
||||
@endsphinxdirective
|
||||
@@ -39,31 +39,27 @@ Running Deployment Manager in Interactive Mode
|
||||
|
||||
To launch the Deployment Manager in interactive mode, open a new terminal window, go to the Deployment Manager tool directory, and run the tool script without parameters:
|
||||
|
||||
.. tab-set::
|
||||
.. tab:: Linux
|
||||
|
||||
.. tab-item:: Linux
|
||||
:sync: linux
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cd <INSTALL_DIR>/tools/deployment_manager
|
||||
./deployment_manager.py
|
||||
|
||||
.. tab-item:: Windows
|
||||
:sync: windows
|
||||
|
||||
.. code-block:: bat
|
||||
|
||||
cd <INSTALL_DIR>\tools\deployment_manager
|
||||
.\deployment_manager.py
|
||||
|
||||
.. tab-item:: macOS
|
||||
:sync: macos
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cd <INSTALL_DIR>/tools/deployment_manager
|
||||
./deployment_manager.py
|
||||
.. code-block:: sh
|
||||
|
||||
cd <INSTALL_DIR>/tools/deployment_manager
|
||||
|
||||
./deployment_manager.py
|
||||
|
||||
.. tab:: Windows
|
||||
|
||||
.. code-block:: bat
|
||||
|
||||
cd <INSTALL_DIR>\tools\deployment_manager
|
||||
.\deployment_manager.py
|
||||
|
||||
.. tab:: macOS
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cd <INSTALL_DIR>/tools/deployment_manager
|
||||
./deployment_manager.py
|
||||
|
||||
|
||||
The target device selection dialog is displayed:
|
||||
@@ -101,31 +97,26 @@ Running Deployment Manager in Standard CLI Mode
|
||||
|
||||
To launch the Deployment Manager tool in the standard mode: open a new terminal window, go to the Deployment Manager tool directory, and run the tool command with the following syntax:
|
||||
|
||||
.. tab-set::
|
||||
.. tab:: Linux
|
||||
|
||||
.. tab-item:: Linux
|
||||
:sync: linux
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cd <INSTALL_DIR>/tools/deployment_manager
|
||||
./deployment_manager.py <--targets> [--output_dir] [--archive_name] [--user_data]
|
||||
|
||||
.. tab-item:: Windows
|
||||
:sync: windows
|
||||
|
||||
.. code-block:: bat
|
||||
|
||||
cd <INSTALL_DIR>\tools\deployment_manager
|
||||
.\deployment_manager.py <--targets> [--output_dir] [--archive_name] [--user_data]
|
||||
|
||||
.. tab-item:: macOS
|
||||
:sync: macos
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cd <INSTALL_DIR>/tools/deployment_manager
|
||||
./deployment_manager.py <--targets> [--output_dir] [--archive_name] [--user_data]
|
||||
.. code-block:: sh
|
||||
|
||||
cd <INSTALL_DIR>/tools/deployment_manager
|
||||
./deployment_manager.py <--targets> [--output_dir] [--archive_name] [--user_data]
|
||||
|
||||
.. tab:: Windows
|
||||
|
||||
.. code-block:: bat
|
||||
|
||||
cd <INSTALL_DIR>\tools\deployment_manager
|
||||
.\deployment_manager.py <--targets> [--output_dir] [--archive_name] [--user_data]
|
||||
|
||||
.. tab:: macOS
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cd <INSTALL_DIR>/tools/deployment_manager
|
||||
./deployment_manager.py <--targets> [--output_dir] [--archive_name] [--user_data]
|
||||
|
||||
|
||||
The following options are available:
|
||||
@@ -153,28 +144,23 @@ To deploy the OpenVINO Runtime components from the development machine to the ta
|
||||
|
||||
2. Extract the archive to the destination directory on the target system. If the name of your archive is different from the default one shown below, replace ``openvino_deployment_package`` with your specified name.
|
||||
|
||||
.. tab-set::
|
||||
.. tab:: Linux
|
||||
|
||||
.. tab-item:: Linux
|
||||
:sync: linux
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
tar xf openvino_deployment_package.tar.gz -C <destination_dir>
|
||||
|
||||
.. tab-item:: Windows
|
||||
:sync: windows
|
||||
|
||||
.. code-block:: bat
|
||||
|
||||
Use the archiver of your choice to unzip the file.
|
||||
|
||||
.. tab-item:: macOS
|
||||
:sync: macos
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
tar xf openvino_deployment_package.tar.gz -C <destination_dir>
|
||||
.. code-block:: sh
|
||||
|
||||
tar xf openvino_deployment_package.tar.gz -C <destination_dir>
|
||||
|
||||
.. tab:: Windows
|
||||
|
||||
.. code-block:: bat
|
||||
|
||||
Use the archiver of your choice to unzip the file.
|
||||
|
||||
.. tab:: macOS
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
tar xf openvino_deployment_package.tar.gz -C <destination_dir>
|
||||
|
||||
|
||||
Now, the package is extracted to the destination directory. The following files and subdirectories are created:
|
||||
@@ -194,31 +180,26 @@ To deploy the OpenVINO Runtime components from the development machine to the ta
|
||||
|
||||
4. Set up the environment variables:
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Linux
|
||||
:sync: linux
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cd <destination_dir>/openvino/
|
||||
source ./setupvars.sh
|
||||
|
||||
.. tab-item:: Windows
|
||||
:sync: windows
|
||||
|
||||
.. code-block:: bat
|
||||
|
||||
cd <destination_dir>\openvino\
|
||||
.\setupvars.bat
|
||||
|
||||
.. tab-item:: macOS
|
||||
:sync: macos
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cd <destination_dir>/openvino/
|
||||
source ./setupvars.sh
|
||||
.. tab:: Linux
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cd <destination_dir>/openvino/
|
||||
source ./setupvars.sh
|
||||
|
||||
.. tab:: Windows
|
||||
|
||||
.. code-block:: bat
|
||||
|
||||
cd <destination_dir>\openvino\
|
||||
.\setupvars.bat
|
||||
|
||||
.. tab:: macOS
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cd <destination_dir>/openvino/
|
||||
source ./setupvars.sh
|
||||
|
||||
|
||||
Now, you have finished the deployment of the OpenVINO Runtime components to the target system.
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
Deploy Application with Deployment Manager <openvino_docs_install_guides_deployment_manager_tool>
|
||||
Local Distribution Libraries <openvino_docs_deploy_local_distribution>
|
||||
Optimize Binaries Size <opevino_conditional_compilation_deploying_guide>
|
||||
|
||||
|
||||
.. note::
|
||||
@@ -23,7 +24,7 @@ Local Deployment Options
|
||||
- using PIP package manager on PyPI - the default approach for Python-based applications;
|
||||
- using Docker images - if the application should be deployed as a Docker image, use a pre-built OpenVINO™ Runtime Docker image as a base image in the Dockerfile for the application container image. For more information about OpenVINO Docker images, refer to :doc:`Installing OpenVINO on Linux from Docker <openvino_docs_install_guides_installing_openvino_docker_linux>`
|
||||
|
||||
Furthermore, to customize your OpenVINO Docker image, use the `Docker CI Framework <https://github.com/openvinotoolkit/docker_ci>`__ to generate a Dockerfile and built the image.
|
||||
Furthermore, to customize your OpenVINO Docker image, use the `Docker CI Framework <https://github.com/openvinotoolkit/docker_ci>` to generate a Dockerfile and built the image.
|
||||
|
||||
- Grab a necessary functionality of OpenVINO together with your application, also called "local distribution":
|
||||
|
||||
|
||||
@@ -2,22 +2,22 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
With a local distribution, each C or C++ application/installer will have its own copies of OpenVINO Runtime binaries. However, OpenVINO has a scalable plugin-based architecture, which means that some components can be loaded in runtime only when they are really needed. Therefore, it is important to understand which minimal set of libraries is really needed to deploy the application. This guide helps you to achieve that goal.
|
||||
With local distribution, each C or C++ application/installer has its own copies of OpenVINO Runtime binaries. However, OpenVINO has a scalable plugin-based architecture, which means that some components can be loaded in runtime only when they are really needed. This guide helps you understand what minimal set of libraries is required to deploy the application.
|
||||
|
||||
Local distribution is also appropriate for OpenVINO binaries built from sources using `Build instructions <https://github.com/openvinotoolkit/openvino/wiki#how-to-build>`__,
|
||||
but the guide below supposes OpenVINO Runtime is built dynamically. For case of `Static OpenVINO Runtime <https://github.com/openvinotoolkit/openvino/blob/master/docs/dev/static_libaries.md>`__ select the required OpenVINO capabilities on CMake configuration stage using `CMake Options for Custom Compilation <https://github.com/openvinotoolkit/openvino/blob/master/docs/dev/cmake_options_for_custom_comiplation.md>`__, the build and link the OpenVINO components into the final application.
|
||||
Local distribution is also suitable for OpenVINO binaries built from source using `Build instructions <https://github.com/openvinotoolkit/openvino/wiki#how-to-build>`__,
|
||||
but this guide assumes that OpenVINO Runtime is built dynamically. For `Static OpenVINO Runtime <https://github.com/openvinotoolkit/openvino/blob/master/docs/dev/static_libaries.md>`__, select the required OpenVINO capabilities at the CMake configuration stage using `CMake Options for Custom Compilation <https://github.com/openvinotoolkit/openvino/blob/master/docs/dev/cmake_options_for_custom_compilation.md>`__, then build and link the OpenVINO components to the final application.
|
||||
|
||||
.. note::
|
||||
|
||||
The steps below are operating system independent and refer to a library file name without any prefixes (like ``lib`` on Unix systems) or suffixes (like ``.dll`` on Windows OS). Do not put ``.lib`` files on Windows OS to the distribution, because such files are needed only on a linker stage.
|
||||
The steps below are independent of the operating system and refer to the library file name without any prefixes (like ``lib`` on Unix systems) or suffixes (like ``.dll`` on Windows OS). Do not put ``.lib`` files on Windows OS to the distribution because such files are needed only at a linker stage.
|
||||
|
||||
|
||||
Library Requirements for C++ and C Languages
|
||||
############################################
|
||||
|
||||
Independent on the language used to write the application, the ``openvino`` library must always be put to the final distribution, since it's a core library which orchestrates with all the inference and frontend plugins. In Intel® Distribution of OpenVINO™ toolkit, ``openvino`` depends on the TBB libraries which are used by OpenVINO Runtime to optimally saturate the devices with computations, so it must be put to the distribution package.
|
||||
Regardless of the programming language of an application, the ``openvino`` library must always be included in its final distribution. This core library manages all inference and frontend plugins. The ``openvino`` library depends on the TBB libraries which are used by OpenVINO Runtime to optimally saturate devices with computations.
|
||||
|
||||
If your application is written with C language, you need to put the ``openvino_c`` library additionally.
|
||||
If your application is in C language, you need to additionally include the ``openvino_c`` library.
|
||||
|
||||
The ``plugins.xml`` file with information about inference devices must also be taken as a support file for ``openvino``.
|
||||
|
||||
@@ -39,71 +39,71 @@ For each inference device, OpenVINO Runtime has its own plugin library:
|
||||
- ``openvino_intel_gna_plugin`` for :doc:`Intel® GNA devices <openvino_docs_OV_UG_supported_plugins_GNA>`.
|
||||
- ``openvino_arm_cpu_plugin`` for :doc:`ARM CPU devices <openvino_docs_OV_UG_supported_plugins_CPU>`.
|
||||
|
||||
Depending on what devices are used in the app, the appropriate libraries need to be put to the distribution package.
|
||||
Depending on which devices are used in the app, the corresponding libraries should be included in the distribution package.
|
||||
|
||||
As it is shown on the picture above, some plugin libraries may have OS-specific dependencies which are either backend libraries or additional support files with firmware, etc. Refer to the table below for details:
|
||||
As shown in the picture above, some plugin libraries may have OS-specific dependencies, which are either backend libraries or additional support files with firmware, etc. Refer to the table below for details:
|
||||
|
||||
.. dropdown:: Windows OS:
|
||||
.. tab-set::
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
.. tab-item:: Windows OS
|
||||
|
||||
* - Device
|
||||
- Dependency
|
||||
* - CPU
|
||||
- ``-``
|
||||
* - GPU
|
||||
- ``OpenCL.dll``, ``cache.json``
|
||||
* - GNA
|
||||
- ``gna.dll``
|
||||
* - Arm® CPU
|
||||
- ``-``
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - Device
|
||||
- Dependency
|
||||
* - CPU
|
||||
- ``-``
|
||||
* - GPU
|
||||
- ``OpenCL.dll``, ``cache.json``
|
||||
* - GNA
|
||||
- ``gna.dll``
|
||||
* - Arm® CPU
|
||||
- ``-``
|
||||
|
||||
.. dropdown:: Linux OS:
|
||||
.. tab-item:: Linux OS
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - Device
|
||||
- Dependency
|
||||
* - CPU
|
||||
- ``-``
|
||||
* - GPU
|
||||
- ``libOpenCL.so``, ``cache.json``
|
||||
* - GNA
|
||||
- ``libgna.so``
|
||||
* - Arm® CPU
|
||||
- ``-``
|
||||
* - Device
|
||||
- Dependency
|
||||
* - CPU
|
||||
- ``-``
|
||||
* - GPU
|
||||
- ``libOpenCL.so``, ``cache.json``
|
||||
* - GNA
|
||||
- ``libgna.so``
|
||||
* - Arm® CPU
|
||||
- ``-``
|
||||
|
||||
.. tab-item:: MacOS
|
||||
|
||||
.. dropdown:: MacOS:
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - Device
|
||||
- Dependency
|
||||
* - CPU
|
||||
- ``-``
|
||||
* - Arm® CPU
|
||||
- ``-``
|
||||
* - Device
|
||||
- Dependency
|
||||
* - CPU
|
||||
- ``-``
|
||||
* - Arm® CPU
|
||||
- ``-``
|
||||
|
||||
|
||||
Libraries for Execution Modes
|
||||
+++++++++++++++++++++++++++++
|
||||
|
||||
The ``HETERO``, ``MULTI``, ``BATCH`` and ``AUTO`` execution modes can also be used explicitly or implicitly by the application. Use the following recommendation scheme to decide whether to put the appropriate libraries to the distribution package:
|
||||
The ``HETERO``, ``MULTI``, ``BATCH`` and ``AUTO`` execution modes can also be used by the application explicitly or implicitly. Use the following recommendation scheme to decide whether to add the appropriate libraries to the distribution package:
|
||||
|
||||
- If :doc:`AUTO <openvino_docs_OV_UG_supported_plugins_AUTO>` is used explicitly in the application or `ov::Core::compile_model <classov_1_1Core.html#doxid-classov-1-1-core-1a46555f0803e8c29524626be08e7f5c5a>`__ is used without specifying a device, put ``openvino_auto_plugin`` to the distribution.
|
||||
|
||||
.. note::
|
||||
|
||||
Automatic Device Selection relies on :doc:`[inference device plugins <openvino_docs_OV_UG_Working_with_devices>`. If you are not sure about what inference devices are available on target system, put all the inference plugin libraries to the distribution. If `ov::device::priorities <groupov_runtime_cpp_prop_api.html#doxid-group-ov-runtime-cpp-prop-api-1gae88af90a18871677f39739cb0ef0101e>`__ is used for `AUTO` to specify a limited device list, grab the corresponding device plugins only.
|
||||
Automatic Device Selection relies on :doc:`inference device plugins <openvino_docs_OV_UG_Working_with_devices>`. If you are not sure which inference devices are available on the target system, put all inference plugin libraries in the distribution. If `ov::device::priorities <groupov_runtime_cpp_prop_api.html#doxid-group-ov-runtime-cpp-prop-api-1gae88af90a18871677f39739cb0ef0101e>`__ is used for `AUTO` to specify a limited device list, grab the corresponding device plugins only.
|
||||
|
||||
- If :doc:`MULTI <openvino_docs_OV_UG_Running_on_multiple_devices>` is used explicitly, put ``openvino_auto_plugin`` to the distribution.
|
||||
- If :doc:`HETERO <openvino_docs_OV_UG_Hetero_execution>` is either used explicitly or `ov::hint::performance_mode <groupov_runtime_cpp_prop_api.html#doxid-group-ov-runtime-cpp-prop-api-1ga2691fe27acc8aa1d1700ad40b6da3ba2>`__ is used with GPU, put ``openvino_hetero_plugin`` to the distribution.
|
||||
- If :doc:`BATCH <openvino_docs_OV_UG_Automatic_Batching>` is either used explicitly or ``ov::hint::performance_mode`` is used with GPU, put ``openvino_batch_plugin`` to the distribution.
|
||||
- If :doc:`MULTI <openvino_docs_OV_UG_Running_on_multiple_devices>` is used explicitly, put ``openvino_auto_plugin`` in the distribution.
|
||||
- If :doc:`HETERO <openvino_docs_OV_UG_Hetero_execution>` is either used explicitly or `ov::hint::performance_mode <groupov_runtime_cpp_prop_api.html#doxid-group-ov-runtime-cpp-prop-api-1ga2691fe27acc8aa1d1700ad40b6da3ba2>`__ is used with GPU, put ``openvino_hetero_plugin`` in the distribution.
|
||||
- If :doc:`BATCH <openvino_docs_OV_UG_Automatic_Batching>` is either used explicitly or ``ov::hint::performance_mode`` is used with GPU, put ``openvino_batch_plugin`` in the distribution.
|
||||
|
||||
Frontend Libraries for Reading Models
|
||||
+++++++++++++++++++++++++++++++++++++
|
||||
@@ -111,16 +111,16 @@ Frontend Libraries for Reading Models
|
||||
OpenVINO Runtime uses frontend libraries dynamically to read models in different formats:
|
||||
|
||||
- ``openvino_ir_frontend`` is used to read OpenVINO IR.
|
||||
- ``openvino_tensorflow_frontend`` is used to read TensorFlow file format.
|
||||
- ``openvino_tensorflow_lite_frontend`` is used to read TensorFlow Lite file format.
|
||||
- ``openvino_onnx_frontend`` is used to read ONNX file format.
|
||||
- ``openvino_paddle_frontend`` is used to read Paddle file format.
|
||||
- ``openvino_tensorflow_frontend`` is used to read the TensorFlow file format.
|
||||
- ``openvino_tensorflow_lite_frontend`` is used to read the TensorFlow Lite file format.
|
||||
- ``openvino_onnx_frontend`` is used to read the ONNX file format.
|
||||
- ``openvino_paddle_frontend`` is used to read the Paddle file format.
|
||||
|
||||
Depending on the model format types that are used in the application in `ov::Core::read_model <classov_1_1Core.html#doxid-classov-1-1-core-1ae0576a95f841c3a6f5e46e4802716981>`__, pick up the appropriate libraries.
|
||||
Depending on the model format types that are used in the application in `ov::Core::read_model <classov_1_1Core.html#doxid-classov-1-1-core-1ae0576a95f841c3a6f5e46e4802716981>`__, select the appropriate libraries.
|
||||
|
||||
.. note::
|
||||
|
||||
To optimize the size of final distribution package, you are recommended to convert models to OpenVINO IR by using :doc:`model conversion API <openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide>`. This way you don't have to keep TensorFlow, TensorFlow Lite, ONNX, PaddlePaddle, and other frontend libraries in the distribution package.
|
||||
To optimize the size of the final distribution package, it is recommended to convert models to OpenVINO IR by using :doc:`model conversion API <openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide>`. This way you do not have to keep TensorFlow, TensorFlow Lite, ONNX, PaddlePaddle, and other frontend libraries in the distribution package.
|
||||
|
||||
(Legacy) Preprocessing via G-API
|
||||
++++++++++++++++++++++++++++++++
|
||||
@@ -136,27 +136,34 @@ Examples
|
||||
|
||||
**CPU + OpenVINO IR in C application**
|
||||
|
||||
In this example, the application is written in C language, performs inference on CPU, and reads models stored as the OpenVINO IR format. The following libraries are used:
|
||||
- The ``openvino_c`` library is a main dependency of the application. It links against this library.
|
||||
In this example, the application is written in C, performs inference on CPU, and reads models stored in the OpenVINO IR format.
|
||||
|
||||
The following libraries are used: ``openvino_c``, ``openvino``, ``openvino_intel_cpu_plugin``, and ``openvino_ir_frontend``.
|
||||
|
||||
- The ``openvino_c`` library is a main dependency of the application. The app links against this library.
|
||||
- The ``openvino`` library is used as a private dependency for ``openvino_c`` and is also used in the deployment.
|
||||
- ``openvino_intel_cpu_plugin`` is used for inference.
|
||||
- ``openvino_ir_frontend`` is used to read source models.
|
||||
|
||||
**MULTI execution on GPU and CPU in tput mode**
|
||||
|
||||
In this example, the application is written in C++, performs inference :doc:`simultaneously on GPU and CPU devices <openvino_docs_OV_UG_Running_on_multiple_devices>` with the `ov::hint::PerformanceMode::THROUGHPUT <enumov_1_1hint_1_1PerformanceMode.html#doxid-group-ov-runtime-cpp-prop-api-1gga032aa530efa40760b79af14913d48d73a50f9b1f40c078d242af7ec323ace44b3>`__ property set, and reads models stored in the ONNX format. The following libraries are used:
|
||||
In this example, the application is written in C++, performs inference :doc:`simultaneously on GPU and CPU devices <openvino_docs_OV_UG_Running_on_multiple_devices>` with the `ov::hint::PerformanceMode::THROUGHPUT <enumov_1_1hint_1_1PerformanceMode.html#doxid-group-ov-runtime-cpp-prop-api-1gga032aa530efa40760b79af14913d48d73a50f9b1f40c078d242af7ec323ace44b3>`__ property set, and reads models stored in the ONNX format.
|
||||
|
||||
- The ``openvino`` library is a main dependency of the application. It links against this library.
|
||||
The following libraries are used: ``openvino``, ``openvino_intel_gpu_plugin``, ``openvino_intel_cpu_plugin``, ``openvino_auto_plugin``, ``openvino_auto_batch_plugin``, and ``openvino_onnx_frontend``.
|
||||
|
||||
- The ``openvino`` library is a main dependency of the application. The app links against this library.
|
||||
- ``openvino_intel_gpu_plugin`` and ``openvino_intel_cpu_plugin`` are used for inference.
|
||||
- ``openvino_auto_plugin`` is used for Multi-Device Execution.
|
||||
- ``openvino_auto_batch_plugin`` can be also put to the distribution to improve the saturation of :doc:`Intel® GPU <openvino_docs_OV_UG_supported_plugins_GPU>` device. If there is no such plugin, :doc:`Automatic Batching <openvino_docs_OV_UG_Automatic_Batching>` is turned off.
|
||||
- ``openvino_auto_batch_plugin`` can be also put in the distribution to improve the saturation of :doc:`Intel® GPU <openvino_docs_OV_UG_supported_plugins_GPU>` device. If there is no such plugin, :doc:`Automatic Batching <openvino_docs_OV_UG_Automatic_Batching>` is turned off.
|
||||
- ``openvino_onnx_frontend`` is used to read source models.
|
||||
|
||||
**Auto-Device Selection between GPU and CPU**
|
||||
|
||||
In this example, the application is written in C++, performs inference with the :doc:`Automatic Device Selection <openvino_docs_OV_UG_supported_plugins_AUTO>` mode, limiting device list to GPU and CPU, and reads models :doc:`created using C++ code <openvino_docs_OV_UG_Model_Representation>`. The following libraries are used:
|
||||
In this example, the application is written in C++, performs inference with the :doc:`Automatic Device Selection <openvino_docs_OV_UG_supported_plugins_AUTO>` mode, limiting device list to GPU and CPU, and reads models :doc:`created using C++ code <openvino_docs_OV_UG_Model_Representation>`.
|
||||
|
||||
- The ``openvino`` library is a main dependency of the application. It links against this library.
|
||||
The following libraries are used: ``openvino``, ``openvino_auto_plugin``, ``openvino_intel_gpu_plugin``, and ``openvino_intel_cpu_plugin``.
|
||||
|
||||
- The ``openvino`` library is a main dependency of the application. The app links against this library.
|
||||
- ``openvino_auto_plugin`` is used to enable Automatic Device Selection.
|
||||
- ``openvino_intel_gpu_plugin`` and ``openvino_intel_cpu_plugin`` are used for inference. AUTO selects between CPU and GPU devices according to their physical existence on the deployed machine.
|
||||
- No frontend library is needed because ``ov::Model`` is created in code.
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:5adda5656edc4bbe13c6aea87465f19d6e9457ef07fd9fb48fe4a25c674937e8
|
||||
size 165861
|
||||
@@ -118,7 +118,7 @@ Compile the model for a specific device using ``ov::Core::compile_model()``:
|
||||
:fragment: [part2_4]
|
||||
|
||||
.. tab-item:: TensorFlow Lite
|
||||
:sync: tf_lite
|
||||
:sync: tflite
|
||||
|
||||
.. doxygensnippet:: docs/snippets/src/main.cpp
|
||||
:language: cpp
|
||||
@@ -165,7 +165,7 @@ Compile the model for a specific device using ``ov::Core::compile_model()``:
|
||||
:fragment: [part2_4]
|
||||
|
||||
.. tab-item:: TensorFlow Lite
|
||||
:sync: tf_lite
|
||||
:sync: tflite
|
||||
|
||||
.. doxygensnippet:: docs/snippets/src/main.py
|
||||
:language: python
|
||||
@@ -212,7 +212,7 @@ Compile the model for a specific device using ``ov::Core::compile_model()``:
|
||||
:fragment: [part2_4]
|
||||
|
||||
.. tab-item:: TensorFlow Lite
|
||||
:sync: tf_lite
|
||||
:sync: tflite
|
||||
|
||||
.. doxygensnippet:: docs/snippets/src/main.c
|
||||
:language: cpp
|
||||
|
||||
@@ -1,140 +0,0 @@
|
||||
# [DEPRECATED] The LowLatency Transformation {#openvino_docs_OV_UG_lowlatency_deprecated}
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
The deprecated LowLatency transformation changes the structure of the network containing :doc:`TensorIterator <openvino_docs_ops_infrastructure_TensorIterator_1>` and :doc:`Loop <openvino_docs_ops_infrastructure_Loop_5>` operations by adding the ability to work with the state, inserting the :doc:`Assign <openvino_docs_ops_infrastructure_Assign_3>` / :doc:`ReadValue <openvino_docs_ops_infrastructure_ReadValue_3>` layers, as shown in the picture below.
|
||||
|
||||
.. image:: _static/images/applying_low_latency.svg
|
||||
|
||||
After applying the transformation, ``ReadValue`` operations can receive other operations as an input, as shown in the picture above. These inputs should set the initial value for initialization of ``ReadValue`` operations. However, such initialization is not supported in the current State API implementation. Input values are ignored and the initial values for the ``ReadValue`` operations are set to 0 unless otherwise specified by the user via :ref:`State API <openvino-state-api>`.
|
||||
|
||||
Steps to Apply LowLatency
|
||||
#########################
|
||||
|
||||
1. Get CNNNetwork. Either way is acceptable:
|
||||
|
||||
* :doc:`from IR or ONNX model <openvino_docs_OV_UG_Integrate_OV_with_your_application>`
|
||||
* :doc:`from ov::Model <openvino_docs_OV_UG_Model_Representation>`
|
||||
|
||||
2. :doc:`Reshape <openvino_docs_OV_UG_ShapeInference>` the CNNNetwork network if necessary.
|
||||
|
||||
An example of such a **necessary case** is when the ``sequence_lengths`` dimension of input > 1, and it means that ``TensorIterator`` layer will have ``number_iterations`` > 1. The inputs of the network should be reshaped to set ``sequence_dimension`` to exactly 1.
|
||||
|
||||
Usually, the following exception, which occurs after applying a transform when trying to infer the network in a plugin, indicates the need to apply the reshape feature:
|
||||
``C++ exception with description "Function is incorrect. The Assign and ReadValue operations must be used in pairs in the network."``
|
||||
This means that there are several pairs of ``Assign``/``ReadValue`` operations with the same ``variable_id`` in the network and operations were inserted into each iteration of the ``TensorIterator``.
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
// Network before reshape: Parameter (name: X, shape: [2 (sequence_lengths), 1, 16]) -> TensorIterator (num_iteration = 2, axis = 0) -> ...
|
||||
|
||||
    cnnNetwork.reshape({{"X", {1, 1, 16}}});
|
||||
|
||||
// Network after reshape: Parameter (name: X, shape: [1 (sequence_lengths), 1, 16]) -> TensorIterator (num_iteration = 1, axis = 0) -> ...
|
||||
|
||||
|
||||
3. Apply the LowLatency transformation.
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
#include "ie_transformations.hpp"
|
||||
|
||||
...
|
||||
|
||||
InferenceEngine::LowLatency(cnnNetwork);
|
||||
|
||||
|
||||
**State naming rule**: A name of a state is a concatenation of names: original ``TensorIterator`` operation, parameter of the body, and additional suffix ``variable_`` + ``id`` (0-based indexing, new indexing for each ``TensorIterator``). Use these rules to predict the name of the inserted state after the transformation is applied. For example:
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
// Precondition in ngraph::function.
|
||||
// Created TensorIterator and Parameter in body of TensorIterator with names
|
||||
std::string tensor_iterator_name = "TI_name"
|
||||
std::string body_parameter_name = "param_name"
|
||||
std::string idx = "0"; // it's a first variable in the network
|
||||
|
||||
// The State will be named "TI_name/param_name/variable_0"
|
||||
    auto state_name = tensor_iterator_name + "/" + body_parameter_name + "/" + "variable_" + idx;
|
||||
|
||||
InferenceEngine::CNNNetwork cnnNetwork = InferenceEngine::CNNNetwork{function};
|
||||
InferenceEngine::LowLatency(cnnNetwork);
|
||||
|
||||
InferenceEngine::ExecutableNetwork executableNetwork = core->LoadNetwork(/*cnnNetwork, targetDevice, configuration*/);
|
||||
|
||||
// Try to find the Variable by name
|
||||
auto states = executableNetwork.QueryState();
|
||||
for (auto& state : states) {
|
||||
auto name = state.GetName();
|
||||
if (name == state_name) {
|
||||
// some actions
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
4. Use state API. See the :ref:`OpenVINO state API <openvino-state-api>` and the :ref:`Example of stateful network inference <example-of-stateful-model-inference>` sections.
|
||||
|
||||
Known Limitations for the LowLatency
|
||||
####################################
|
||||
|
||||
1. Parameters connected directly to ``ReadValues`` (states) after the transformation is applied are not allowed.
|
||||
|
||||
Unnecessary parameters may remain on the graph after applying the transformation. The automatic handling of this case inside the transformation is currently not possible. Such parameters should be removed manually from `ngraph::Function <classngraph.html#doxid-classngraph-1a14d7fe7c605267b52c145579e12d2a5f>`__ or replaced with a constant.
|
||||
|
||||
.. image:: _static/images/low_latency_limitation_1.svg
|
||||
:scale: 70 %
|
||||
|
||||
**Current solutions:**
|
||||
|
||||
* Replace a parameter with a constant (freeze) with the ``[0, 0, 0 … 0]`` value via :doc:`ModelOptimizer CLI <openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model>`: the ``--input`` or ``--freeze_placeholder_with_value`` parameters.
|
||||
* Use nGraph API to replace a parameter with a constant, as shown in the example below:
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
// nGraph example. How to replace Parameter with Constant.
|
||||
auto func = cnnNetwork.getFunction();
|
||||
// Creating the new Constant with zero values.
|
||||
auto new_const = std::make_shared<ngraph::opset6::Constant>( /*type, shape, std::vector with zeros*/ );
|
||||
for (const auto& param : func->get_parameters()) {
|
||||
// Trying to find the problematic Constant by name.
|
||||
if (param->get_friendly_name() == "param_name") {
|
||||
// Replacing the problematic Param with a Constant.
|
||||
ngraph::replace_node(param, new_const);
|
||||
// Removing problematic Parameter from ngraph::function
|
||||
func->remove_parameter(param);
|
||||
}
|
||||
}
|
||||
|
||||
2. Unable to execute reshape precondition to apply the transformation correctly.
|
||||
|
||||
Networks can be non-reshapable. The most common reason is that the value of shapes is hardcoded in the constant somewhere in the network.
|
||||
|
||||
.. image:: _static/images/low_latency_limitation_2.svg
|
||||
:scale: 70 %
|
||||
|
||||
|
||||
**Current solutions:**
|
||||
|
||||
* Trim non-reshapable layers via :doc:`ModelOptimizer CLI <openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model>` : the ``--input`` and ``--output`` parameters. For example, the parameter and the problematic constant (as shown in the picture above) can be trimmed using the ``--input Reshape_layer_name`` command-line option.
|
||||
* Use nGraph API to replace the problematic constant, as shown in the example below:
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
// nGraph example. How to replace a Constant with hardcoded values of shapes in the network with another one with the new values.
|
||||
// Assume we know which Constant (const_with_hardcoded_shape) prevents the reshape from being applied.
|
||||
// Then we can find this Constant by name on the network and replace it with a new one with the correct shape.
|
||||
auto func = cnnNetwork.getFunction();
|
||||
// Creating the new Constant with a correct shape.
|
||||
// For the example shown in the picture above, the new values of the Constant should be 1, 1, 10 instead of 1, 49, 10
|
||||
auto new_const = std::make_shared<ngraph::opset6::Constant>( /*type, shape, value_with_correct_shape*/ );
|
||||
for (const auto& node : func->get_ops()) {
|
||||
// Trying to find the problematic Constant by name.
|
||||
if (node->get_friendly_name() == "name_of_non_reshapable_const") {
|
||||
auto const_with_hardcoded_shape = std::dynamic_pointer_cast<ngraph::opset6::Constant>(node);
|
||||
// Replacing the problematic Constant with a new one. Do this for all the problematic Constants in the network, then
|
||||
// you can apply the reshape feature.
|
||||
ngraph::replace_node(const_with_hardcoded_shape, new_const);
|
||||
}
|
||||
}
|
||||
|
||||
@endsphinxdirective
|
||||
@@ -514,7 +514,7 @@ API 2.0 fills inputs with data of the ``I64`` precision (aligned with the origin
|
||||
|
||||
|
||||
.. tab-item:: Model created in code
|
||||
:sync: model_created_in_code
|
||||
:sync: model
|
||||
|
||||
.. tab-set::
|
||||
|
||||
|
||||
@@ -102,79 +102,67 @@ It is possible to build applications without the CMake interface by using: MSVC
|
||||
|
||||
**With Inference Engine of previous versions**:
|
||||
|
||||
.. tab-set::
|
||||
.. tab:: Include dirs
|
||||
|
||||
.. tab-item:: Include dirs
|
||||
:sync: inc-dirs
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
<INSTALL_DIR>/deployment_tools/inference_engine/include
|
||||
<INSTALL_DIR>/deployment_tools/ngraph/include
|
||||
|
||||
.. tab-item:: Path to libs
|
||||
:sync: path-libs
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
<INSTALL_DIR>/deployment_tools/inference_engine/lib/intel64/Release
|
||||
<INSTALL_DIR>/deployment_tools/ngraph/lib/
|
||||
|
||||
.. tab-item:: Shared libs
|
||||
:sync: shared-libs
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
// UNIX systems
|
||||
inference_engine.so ngraph.so
|
||||
|
||||
// Windows
|
||||
inference_engine.dll ngraph.dll
|
||||
|
||||
.. tab-item:: (Windows) .lib files
|
||||
:sync: windows-lib-files
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ngraph.lib
|
||||
inference_engine.lib
|
||||
.. code-block:: sh
|
||||
|
||||
<INSTALL_DIR>/deployment_tools/inference_engine/include
|
||||
<INSTALL_DIR>/deployment_tools/ngraph/include
|
||||
|
||||
.. tab:: Path to libs
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
<INSTALL_DIR>/deployment_tools/inference_engine/lib/intel64/Release
|
||||
<INSTALL_DIR>/deployment_tools/ngraph/lib/
|
||||
|
||||
.. tab:: Shared libs
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
// UNIX systems
|
||||
inference_engine.so ngraph.so
|
||||
|
||||
// Windows
|
||||
inference_engine.dll ngraph.dll
|
||||
|
||||
.. tab:: (Windows) .lib files
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ngraph.lib
|
||||
inference_engine.lib
|
||||
|
||||
**With OpenVINO Runtime 2022.1 (API 2.0)**:
|
||||
|
||||
.. tab-set::
|
||||
.. tab:: Include dirs
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
<INSTALL_DIR>/runtime/include
|
||||
|
||||
.. tab:: Path to libs
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
<INSTALL_DIR>/runtime/lib/intel64/Release
|
||||
|
||||
.. tab:: Shared libs
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
// UNIX systems
|
||||
openvino.so
|
||||
|
||||
// Windows
|
||||
openvino.dll
|
||||
|
||||
.. tab:: (Windows) .lib files
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
openvino.lib
|
||||
|
||||
.. tab-item:: Include dirs
|
||||
:sync: inc-dirs
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
<INSTALL_DIR>/runtime/include
|
||||
|
||||
.. tab-item:: Path to libs
|
||||
:sync: path-libs
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
<INSTALL_DIR>/runtime/lib/intel64/Release
|
||||
|
||||
.. tab-item:: Shared libs
|
||||
:sync: shared-libs
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
// UNIX systems
|
||||
openvino.so
|
||||
|
||||
// Windows
|
||||
openvino.dll
|
||||
|
||||
.. tab-item:: (Windows) .lib files
|
||||
:sync: windows-lib-files
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
openvino.lib
|
||||
|
||||
|
||||
Clearer Library Structure for Deployment
|
||||
########################################
|
||||
|
||||
@@ -227,34 +227,6 @@ Using Image Scaling
|
||||
Converting Color Space
|
||||
++++++++++++++++++++++
|
||||
|
||||
**Inference Engine API**
|
||||
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp
|
||||
:language: cpp
|
||||
:fragment: color_space
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_preprocessing_migration.py
|
||||
:language: python
|
||||
:fragment: color_space
|
||||
|
||||
.. tab-item:: C
|
||||
:sync: c
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c
|
||||
:language: c
|
||||
:fragment: c_api_ppp
|
||||
|
||||
|
||||
|
||||
**API 2.0**
|
||||
|
||||
|
||||
|
||||
@@ -31,7 +31,7 @@ OpenVINO™ Runtime enables you to use different approaches to work with model i
|
||||
:fragment: [all_inputs_ouputs]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
:sync: python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_model_snippets.py
|
||||
:language: cpp
|
||||
@@ -63,7 +63,7 @@ OpenVINO™ Runtime provides two types for shape representation:
|
||||
:fragment: [ov:partial_shape]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
:sync: python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_model_snippets.py
|
||||
:language: cpp
|
||||
@@ -108,7 +108,7 @@ To build an ``:ref:`ov::Model <doxid-classov_1_1_model>``` instance from ``opset
|
||||
:fragment: [ov:include]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
:sync: python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_model_snippets.py
|
||||
:language: cpp
|
||||
@@ -127,7 +127,7 @@ The following code demonstrates how to create a simple model:
|
||||
:fragment: [ov:create_simple_model]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
:sync: python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_model_snippets.py
|
||||
:language: cpp
|
||||
@@ -146,7 +146,7 @@ The following code creates a model with several outputs:
|
||||
:fragment: [ov:create_advanced_model]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
:sync: python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_model_snippets.py
|
||||
:language: cpp
|
||||
@@ -172,7 +172,7 @@ OpenVINO™ provides several debug capabilities:
|
||||
:fragment: [ov:visualize]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
:sync: python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_model_snippets.py
|
||||
:language: cpp
|
||||
@@ -208,7 +208,7 @@ OpenVINO™ provides several debug capabilities:
|
||||
:fragment: [ov:serialize]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
:sync: python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_model_snippets.py
|
||||
:language: cpp
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
:hidden:
|
||||
|
||||
openvino_docs_OV_UG_lowlatency2
|
||||
openvino_docs_OV_UG_lowlatency_deprecated
|
||||
|
||||
|
||||
Several use cases require processing of data sequences. When length of a sequence is known and small enough,
|
||||
@@ -197,11 +196,7 @@ refer to the speech sample and a demo in the :doc:`Samples Overview <openvino_do
|
||||
LowLatency Transformations
|
||||
##########################
|
||||
|
||||
If the original framework does not have a special API for working with states, OpenVINO representation will not contain ``Assign``/``ReadValue`` layers after importing the model. For example, if the original ONNX model contains RNN operations, OpenVINO IR will contain :doc:`TensorIterator <openvino_docs_ops_infrastructure_TensorIterator_1>` operations and the values will be obtained only after execution of the whole ``TensorIterator`` primitive. Intermediate values from each iteration will not be available. Working with these intermediate values of each iteration is enabled by special :doc:`LowLatency <openvino_docs_OV_UG_lowlatency_deprecated>` and :doc:`LowLatency2 <openvino_docs_OV_UG_lowlatency2>` transformations, which also help receive these values with a low latency after each infer request.
|
||||
|
||||
.. note::
|
||||
|
||||
It is recommended to use LowLatency2, as LowLatency transformation has already been deprecated.
|
||||
If the original framework does not have a special API for working with states, OpenVINO representation will not contain ``Assign``/``ReadValue`` layers after importing the model. For example, if the original ONNX model contains RNN operations, OpenVINO IR will contain :doc:`TensorIterator <openvino_docs_ops_infrastructure_TensorIterator_1>` operations and the values will be obtained only after execution of the whole ``TensorIterator`` primitive. Intermediate values from each iteration will not be available. Working with these intermediate values of each iteration is enabled by special and :doc:`LowLatency2 <openvino_docs_OV_UG_lowlatency2>` transformation, which also help receive these values with a low latency after each infer request.
|
||||
|
||||
TensorIterator/Loop operations
|
||||
++++++++++++++++++++++++++++++
|
||||
|
||||
@@ -45,21 +45,18 @@ Note that OpenVINO™ Runtime enables you to use “GPU” as an alias for “GP
|
||||
|
||||
The following commands are accepted by the API:
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/MULTI0.cpp
|
||||
:language: cpp
|
||||
:fragment: [part0]
|
||||
.. tab:: C++
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_multi.py
|
||||
:language: python
|
||||
:fragment: [MULTI_0]
|
||||
.. doxygensnippet:: docs/snippets/MULTI0.cpp
|
||||
:language: cpp
|
||||
:fragment: [part0]
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_multi.py
|
||||
:language: python
|
||||
:fragment: [MULTI_0]
|
||||
|
||||
|
||||
To check what devices are present in the system, you can use the Device API. For information on how to do it, check :doc:`Query device properties and configuration <openvino_docs_OV_UG_query_api>`.
|
||||
@@ -70,21 +67,18 @@ Configuring Individual Devices and Creating the Multi-Device On Top
|
||||
|
||||
As mentioned previously, executing inference with MULTI may be set up by configuring individual devices before creating the "MULTI" device on top. It may be considered for performance reasons.
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/MULTI4.cpp
|
||||
:language: cpp
|
||||
:fragment: [part4]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_multi.py
|
||||
:language: python
|
||||
:fragment: [MULTI_4]
|
||||
.. tab:: C++
|
||||
|
||||
.. doxygensnippet:: docs/snippets/MULTI4.cpp
|
||||
:language: cpp
|
||||
:fragment: [part4]
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_multi.py
|
||||
:language: python
|
||||
:fragment: [MULTI_4]
|
||||
|
||||
|
||||
Alternatively, you can combine all the individual device settings into a single config file and load it for MULTI to parse. See the code example in the next section.
|
||||
@@ -95,13 +89,12 @@ Querying the Optimal Number of Inference Requests
|
||||
When using MULTI, you don't need to sum over included devices yourself, you can query the optimal number of requests directly,
|
||||
using the :doc:`configure devices <openvino_docs_OV_UG_query_api>` property:
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
|
||||
.. doxygensnippet:: docs/snippets/MULTI5.cpp
|
||||
:language: cpp
|
||||
:fragment: [part5]
|
||||
.. tab:: C++
|
||||
|
||||
.. doxygensnippet:: docs/snippets/MULTI5.cpp
|
||||
:language: cpp
|
||||
:fragment: [part5]
|
||||
|
||||
|
||||
Using the Multi-Device with OpenVINO Samples and Benchmarking Performance
|
||||
|
||||
@@ -255,7 +255,7 @@ To determine if the output has dynamic dimensions, the ``partial_shape`` propert
|
||||
:fragment: ov_dynamic_shapes:print_dynamic
|
||||
|
||||
|
||||
If the output has any dynamic dimensions, they will be reported as ``?`` or as a range (e.g. ``1..10``).
|
||||
If the output has any dynamic dimensions, they will be reported as ``?`` or as a range (e.g.``1..10``).
|
||||
|
||||
Output layers can also be checked for dynamic dimensions using the ``partial_shape.is_dynamic()`` property. This can be used on an entire output layer, or on an individual dimension, as shown in these examples:
|
||||
|
||||
|
||||
@@ -49,21 +49,18 @@ Using the Performance Hints: Basic API
|
||||
|
||||
In the example code snippet below, ``ov::hint::PerformanceMode::THROUGHPUT`` is specified for the ``ov::hint::performance_mode`` property for ``compile_model``:
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.cpp
|
||||
:language: cpp
|
||||
:fragment: [compile_model]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.py
|
||||
:language: python
|
||||
:fragment: [compile_model]
|
||||
.. tab:: C++
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.cpp
|
||||
:language: cpp
|
||||
:fragment: [compile_model]
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.py
|
||||
:language: python
|
||||
:fragment: [compile_model]
|
||||
|
||||
|
||||
Additional (Optional) Hints from the App
|
||||
@@ -72,21 +69,18 @@ Additional (Optional) Hints from the App
|
||||
For an application that processes 4 video streams, the most future-proof way to communicate the limitation of the parallel slack is to equip the performance hint with the optional ``ov::hint::num_requests`` configuration key set to 4.
|
||||
As mentioned earlier, this will limit the batch size for the GPU and the number of inference streams for the CPU. Thus, each device uses the ``ov::hint::num_requests`` while converting the hint to the actual device configuration options:
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.cpp
|
||||
:language: cpp
|
||||
:fragment: [hint_num_requests]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.py
|
||||
:language: python
|
||||
:fragment: [hint_num_requests]
|
||||
.. tab:: C++
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.cpp
|
||||
:language: cpp
|
||||
:fragment: [hint_num_requests]
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.py
|
||||
:language: python
|
||||
:fragment: [hint_num_requests]
|
||||
|
||||
|
||||
Optimal Number of Inference Requests
|
||||
@@ -94,21 +88,18 @@ Optimal Number of Inference Requests
|
||||
|
||||
The hints are used on the presumption that the application queries ``ov::optimal_number_of_infer_requests`` to create and run the returned number of requests simultaneously:
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.cpp
|
||||
:language: cpp
|
||||
:fragment: [query_optimal_num_requests]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.py
|
||||
:language: python
|
||||
:fragment: [query_optimal_num_requests]
|
||||
.. tab:: C++
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.cpp
|
||||
:language: cpp
|
||||
:fragment: [query_optimal_num_requests]
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.py
|
||||
:language: python
|
||||
:fragment: [query_optimal_num_requests]
|
||||
|
||||
|
||||
While an application is free to create more requests if needed (for example to support asynchronous inputs population) **it is very important to at least run the** ``ov::optimal_number_of_infer_requests`` **of the inference requests in parallel**. It is recommended for efficiency, or device utilization, reasons.
|
||||
@@ -135,21 +126,18 @@ Combining the Hints and Individual Low-Level Settings
|
||||
While sacrificing the portability to some extent, it is possible to combine the hints with individual device-specific settings.
|
||||
For example, use ``ov::hint::PerformanceMode::THROUGHPUT`` to prepare a general configuration and override any of its specific values:
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.cpp
|
||||
:language: cpp
|
||||
:fragment: [hint_plus_low_level]
|
||||
.. tab:: C++
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.py
|
||||
:language: python
|
||||
:fragment: [hint_plus_low_level]
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.cpp
|
||||
:language: cpp
|
||||
:fragment: [hint_plus_low_level]
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_auto_batching.py
|
||||
:language: python
|
||||
:fragment: [hint_plus_low_level]
|
||||
|
||||
|
||||
Testing Performance of the Hints with the Benchmark_App
|
||||
|
||||
@@ -11,7 +11,7 @@ Previously, these 2 precisions were interrelated, and model storage precision co
|
||||
|
||||
With the ``2023.0`` release this behavior has been changed and the inference precision no longer depends on the precision of IR. Now users have several knobs to find the balance between model performance and accuracy.
|
||||
|
||||
Essentially, the IR precision becomes a way of compressing the model by reducing the precision of the weights, and it does not affect how the devices execute the model. This change clears up a lot of confusion where, for example, you couldn't execute a high-performance model on the GPU by default, and the behavior between devicess was different.
|
||||
Essentially, the IR precision becomes a way of compressing the model by reducing the precision of the weights, and it does not affect how the devices execute the model. This change clears up a lot of confusion where, for example, you couldn't execute a high-performance model on the GPU by default, and the behavior between devices was different.
|
||||
|
||||
This guide will focus on how to control inference precision. And using lower precision is important for performance because compute bandwidth tends to be higher for smaller data types, and hardware often has special blocks for efficient multiply-accumulate operations with smaller data types only (e.g. Intel Xᵉ Matrix Extensions (XMX) on GPU and Intel Advanced Matrix Extensions (AMX) on CPU do not support ``f32``). Also, I/O operations requires less memory due to the smaller tensor byte size. This guide will focus on how to control inference precision.
|
||||
|
||||
@@ -28,21 +28,17 @@ If the model has been quantized using :doc:`OpenVINO optimization tools <ptq_int
|
||||
|
||||
Code examples:
|
||||
|
||||
.. tab-set::
|
||||
.. tab:: C++
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/cpu/ov_execution_mode.cpp
|
||||
:language: cpp
|
||||
:fragment: [ov:execution_mode:part0]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/cpu/ov_execution_mode.py
|
||||
:language: python
|
||||
:fragment: [ov:execution_mode:part0]
|
||||
.. doxygensnippet:: docs/snippets/cpu/ov_execution_mode.cpp
|
||||
:language: cpp
|
||||
:fragment: [ov:execution_mode:part0]
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/cpu/ov_execution_mode.py
|
||||
:language: python
|
||||
:fragment: [ov:execution_mode:part0]
|
||||
|
||||
Inference Precision
|
||||
###################
|
||||
|
||||
@@ -103,9 +103,9 @@ Additional Resources
|
||||
* :doc:`Preprocessing Details <openvino_docs_OV_UG_Preprocessing_Details>`
|
||||
* :doc:`Layout API overview <openvino_docs_OV_UG_Layout_Overview>`
|
||||
* :doc:`Model Optimizer - Optimize Preprocessing Computation <openvino_docs_MO_DG_Additional_Optimization_Use_Cases>`
|
||||
* :doc:`Model Caching Overview <openvino_docs_OV_UG_Model_caching_overview>`
|
||||
* The ``:ref:`ov::preprocess::PrePostProcessor <doxid-classov-1-1preprocess-1-1-pre-post-processor>``` C++ class documentation
|
||||
* The ``:ref:`ov::pass::Serialize <doxid-classov-1-1pass-1-1-serialize>``` - pass to serialize model to XML/BIN
|
||||
* The ``:ref:`ov::set_batch <doxid-namespaceov-1a3314e2ff91fcc9ffec05b1a77c37862b>``` - update batch dimension for a given model
|
||||
* :doc:`Model Caching Overview<openvino_docs_OV_UG_Model_caching_overview>`
|
||||
* The `ov::preprocess::PrePostProcessor <classov_1_1preprocess_1_1PrePostProcessor.html#doxid-classov-1-1preprocess-1-1-pre-post-processor>` C++ class documentation
|
||||
* The `ov::pass::Serialize <classov_1_1pass_1_1Serialize.html#doxid-classov-1-1pass-1-1-serialize>` - pass to serialize model to XML/BIN
|
||||
* The `ov::set_batch <namespaceov.html#doxid-namespaceov-1a3314e2ff91fcc9ffec05b1a77c37862b>` - update batch dimension for a given model
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
@@ -46,18 +46,15 @@ Supported Inference Data Types
|
||||
|
||||
CPU plugin supports the following data types as inference precision of internal primitives:
|
||||
|
||||
- Floating-point data types:
|
||||
|
||||
- ``f32`` (Intel® x86-64, Arm®)
|
||||
- ``bf16``(Intel® x86-64)
|
||||
- Integer data types:
|
||||
|
||||
- ``i32`` (Intel® x86-64, Arm®)
|
||||
- Quantized data types:
|
||||
|
||||
- ``u8`` (Intel® x86-64)
|
||||
- ``i8`` (Intel® x86-64)
|
||||
- ``u1`` (Intel® x86-64)
|
||||
| - Floating-point data types:
|
||||
| - f32 (Intel® x86-64, Arm®)
|
||||
| - bf16 (Intel® x86-64)
|
||||
| - Integer data types:
|
||||
| - i32 (Intel® x86-64, Arm®)
|
||||
| - Quantized data types:
|
||||
| - u8 (Intel® x86-64)
|
||||
| - i8 (Intel® x86-64)
|
||||
| - u1 (Intel® x86-64)
|
||||
|
||||
:doc:`Hello Query Device C++ Sample <openvino_inference_engine_samples_hello_query_device_README>` can be used to print out supported data types for all detected devices.
|
||||
|
||||
@@ -85,8 +82,8 @@ Floating Point Data Types Specifics
|
||||
|
||||
CPU plugin supports the following floating-point data types as inference precision of internal primitives:
|
||||
|
||||
- ``f32`` (Intel® x86-64, Arm®)
|
||||
- ``bf16`` (Intel® x86-64)
|
||||
- f32 (Intel® x86-64, Arm®)
|
||||
- bf16 (Intel® x86-64)
|
||||
|
||||
The default floating-point precision of a CPU primitive is ``f32``. To support the ``f16`` OpenVINO IR the plugin internally converts
|
||||
all the ``f16`` values to ``f32`` and all the calculations are performed using the native precision of ``f32``.
|
||||
@@ -279,19 +276,19 @@ For more details, see :doc:`preprocessing API guide <openvino_docs_OV_UG_Preproc
|
||||
|
||||
.. dropdown:: The CPU plugin support for handling tensor precision conversion is limited to the following ov::element types:
|
||||
|
||||
* ``bf16``
|
||||
* ``f16``
|
||||
* ``f32``
|
||||
* ``f64``
|
||||
* ``i8``
|
||||
* ``i16``
|
||||
* ``i32``
|
||||
* ``i64``
|
||||
* ``u8``
|
||||
* ``u16``
|
||||
* ``u32``
|
||||
* ``u64``
|
||||
* ``boolean``
|
||||
* bf16
|
||||
* f16
|
||||
* f32
|
||||
* f64
|
||||
* i8
|
||||
* i16
|
||||
* i32
|
||||
* i64
|
||||
* u8
|
||||
* u16
|
||||
* u32
|
||||
* u64
|
||||
* boolean
|
||||
|
||||
|
||||
Model Caching
|
||||
@@ -359,9 +356,7 @@ Read-only properties
|
||||
|
||||
External Dependencies
|
||||
###########################################################
|
||||
|
||||
For some performance-critical DL operations, the CPU plugin uses third-party libraries:
|
||||
|
||||
- `oneDNN <https://github.com/oneapi-src/oneDNN>`__ (Intel® x86-64, Arm®)
|
||||
- `Compute Library <https://github.com/ARM-software/ComputeLibrary>`__ (Arm®)
|
||||
|
||||
@@ -383,48 +378,40 @@ User can use the following properties to limit available CPU resource for model
|
||||
- ``ov::hint::enable_hyper_threading`` limits the use of one or two logical processors per CPU core when platform has CPU hyperthreading enabled.
|
||||
If there is only one logical processor per CPU core, such as Efficient-cores, this property has no effect, and CPU inference uses all logical processors.
|
||||
|
||||
.. tab-set::
|
||||
.. tab:: C++
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/cpu/multi_threading.cpp
|
||||
:language: cpp
|
||||
:fragment: [ov:intel_cpu:multi_threading:part0]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/cpu/multi_threading.py
|
||||
:language: python
|
||||
:fragment: [ov:intel_cpu:multi_threading:part0]
|
||||
.. doxygensnippet:: docs/snippets/cpu/multi_threading.cpp
|
||||
:language: cpp
|
||||
:fragment: [ov:intel_cpu:multi_threading:part0]
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/cpu/multi_threading.py
|
||||
:language: python
|
||||
:fragment: [ov:intel_cpu:multi_threading:part0]
|
||||
|
||||
.. note::
|
||||
|
||||
|
||||
``ov::hint::scheduling_core_type`` and ``ov::hint::enable_hyper_threading`` only support Intel® x86-64 CPU on Linux and Windows in current release.
|
||||
|
||||
By default, OpenVINO Runtime will enable CPU threads pinning for better performance. User also can use property ``ov::hint::enable_cpu_pinning`` to switch it off. Disable threads pinning might be benefitial in complex applications with several workloads executed in parallel.
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/cpu/multi_threading.cpp
|
||||
:language: cpp
|
||||
:fragment: [ov:intel_cpu:multi_threading:part1]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/cpu/multi_threading.py
|
||||
:language: python
|
||||
:fragment: [ov:intel_cpu:multi_threading:part1]
|
||||
By default, OpenVINO Runtime will enable CPU threads pinning for better performance. User also can use property ``ov::hint::enable_cpu_pinning`` to switch it off. Disable threads pinning might be beneficial in complex applications with several workloads executed in parallel.
|
||||
|
||||
.. tab:: C++
|
||||
|
||||
.. doxygensnippet:: docs/snippets/cpu/multi_threading.cpp
|
||||
:language: cpp
|
||||
:fragment: [ov:intel_cpu:multi_threading:part1]
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/cpu/multi_threading.py
|
||||
:language: python
|
||||
:fragment: [ov:intel_cpu:multi_threading:part1]
|
||||
|
||||
user can check the :doc:`optimization guide <openvino_docs_deployment_optimization_guide_tput_advanced>` for details on multi-stream execution
|
||||
|
||||
.. note::
|
||||
|
||||
|
||||
``ov::hint::enable_cpu_pinning`` only support Linux in current release.
|
||||
|
||||
Denormals Optimization
|
||||
@@ -455,21 +442,18 @@ effectiveness and safety of the settings.
|
||||
|
||||
To enable denormals optimization in the application, the ``denormals_optimization`` property must be set to ``True``:
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_denormals.cpp
|
||||
:language: cpp
|
||||
:fragment: [ov:intel_cpu:denormals_optimization:part0]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_denormals.py
|
||||
:language: python
|
||||
:fragment: [ov:intel_cpu:denormals_optimization:part0]
|
||||
.. tab:: C++
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_denormals.cpp
|
||||
:language: cpp
|
||||
:fragment: [ov:intel_cpu:denormals_optimization:part0]
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_denormals.py
|
||||
:language: python
|
||||
:fragment: [ov:intel_cpu:denormals_optimization:part0]
|
||||
|
||||
|
||||
Sparse weights decompression (Intel® x86-64)
|
||||
@@ -499,21 +483,17 @@ which means the option is disabled.
|
||||
|
||||
Code examples of how to use ``sparse_weights_decompression_rate``:
|
||||
|
||||
.. tab-set::
|
||||
.. tab:: C++
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/cpu/ov_sparse_weights_decompression.cpp
|
||||
:language: cpp
|
||||
:fragment: [ov:intel_cpu:sparse_weights_decompression:part0]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/cpu/ov_sparse_weights_decompression.py
|
||||
:language: python
|
||||
:fragment: [ov:intel_cpu:sparse_weights_decompression:part0]
|
||||
.. doxygensnippet:: docs/snippets/cpu/ov_sparse_weights_decompression.cpp
|
||||
:language: cpp
|
||||
:fragment: [ov:intel_cpu:sparse_weights_decompression:part0]
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/cpu/ov_sparse_weights_decompression.py
|
||||
:language: python
|
||||
:fragment: [ov:intel_cpu:sparse_weights_decompression:part0]
|
||||
|
||||
|
||||
.. note::
|
||||
@@ -528,7 +508,6 @@ from perf counters log. The "exec type" field will contain the implementation ty
|
||||
|
||||
MatMul_1800 EXECUTED layerType: FullyConnected execType: brgemm_avx512_amx_sparse_I8 realTime (ms): 0.050000 cpuTime (ms): 0.050000
|
||||
|
||||
|
||||
Limitations
|
||||
-----------------------------------------------------------
|
||||
|
||||
|
||||
@@ -29,20 +29,20 @@ Feature Support Matrix
|
||||
|
||||
The table below demonstrates support of key features by OpenVINO device plugins.
|
||||
|
||||
========================================================================================= =============== =============== =============== ========================
|
||||
Capability CPU GPU GNA Arm® CPU
|
||||
========================================================================================= =============== =============== =============== ========================
|
||||
:doc:`Heterogeneous execution <openvino_docs_OV_UG_Hetero_execution>` Yes Yes No Yes
|
||||
:doc:`Multi-device execution <openvino_docs_OV_UG_Running_on_multiple_devices>` Yes Yes Partial Yes
|
||||
:doc:`Automatic batching <openvino_docs_OV_UG_Automatic_Batching>` No Yes No No
|
||||
:doc:`Multi-stream execution <openvino_docs_deployment_optimization_guide_tput>` Yes Yes No Yes
|
||||
:doc:`Models caching <openvino_docs_OV_UG_Model_caching_overview>` Yes Partial Yes No
|
||||
:doc:`Dynamic shapes <openvino_docs_OV_UG_DynamicShapes>` Yes Partial No No
|
||||
:doc:`Import/Export <openvino_ecosystem>` Yes No Yes No
|
||||
:doc:`Preprocessing acceleration <openvino_docs_OV_UG_Preprocessing_Overview>` Yes Yes No Partial
|
||||
:doc:`Stateful models <openvino_docs_OV_UG_model_state_intro>` Yes No Yes No
|
||||
:doc:`Extensibility <openvino_docs_Extensibility_UG_Intro>` Yes Yes No No
|
||||
========================================================================================= =============== =============== =============== ========================
|
||||
========================================================================================= ============================ =============== ===============
|
||||
Capability CPU GPU GNA
|
||||
========================================================================================= ============================ =============== ===============
|
||||
:doc:`Heterogeneous execution <openvino_docs_OV_UG_Hetero_execution>` Yes Yes No
|
||||
:doc:`Multi-device execution <openvino_docs_OV_UG_Running_on_multiple_devices>` Yes Yes Partial
|
||||
:doc:`Automatic batching <openvino_docs_OV_UG_Automatic_Batching>` No Yes No
|
||||
:doc:`Multi-stream execution <openvino_docs_deployment_optimization_guide_tput>` Yes (Intel® x86-64 only) Yes No
|
||||
:doc:`Models caching <openvino_docs_OV_UG_Model_caching_overview>` Yes Partial Yes
|
||||
:doc:`Dynamic shapes <openvino_docs_OV_UG_DynamicShapes>` Yes Partial No
|
||||
:doc:`Import/Export <openvino_ecosystem>` Yes No Yes
|
||||
:doc:`Preprocessing acceleration <openvino_docs_OV_UG_Preprocessing_Overview>` Yes Yes No
|
||||
:doc:`Stateful models <openvino_docs_OV_UG_model_state_intro>` Yes No Yes
|
||||
:doc:`Extensibility <openvino_docs_Extensibility_UG_Intro>` Yes Yes No
|
||||
========================================================================================= ============================ =============== ===============
|
||||
|
||||
For more details on plugin-specific feature limitations, see the corresponding plugin pages.
|
||||
|
||||
@@ -66,14 +66,11 @@ The OpenVINO Runtime API features dedicated methods of enumerating devices and t
|
||||
|
||||
A simple programmatic way to enumerate the devices and use with the multi-device is as follows:
|
||||
|
||||
.. tab-set::
|
||||
.. tab:: C++
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/MULTI2.cpp
|
||||
:language: cpp
|
||||
:fragment: [part2]
|
||||
.. doxygensnippet:: docs/snippets/MULTI2.cpp
|
||||
:language: cpp
|
||||
:fragment: [part2]
|
||||
|
||||
|
||||
|
||||
@@ -90,14 +87,12 @@ For example, this is how two GPUs can be listed (iGPU is always GPU.0):
|
||||
|
||||
So, the explicit configuration to use both would be "MULTI:GPU.1,GPU.0". Accordingly, the code that loops over all available devices of the "GPU" type only is as follows:
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/MULTI3.cpp
|
||||
:language: cpp
|
||||
:fragment: [part3]
|
||||
.. tab:: C++
|
||||
|
||||
.. doxygensnippet:: docs/snippets/MULTI3.cpp
|
||||
:language: cpp
|
||||
:fragment: [part3]
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -213,17 +213,14 @@ Profiling
|
||||
The GNA plugin allows turning on profiling, using the ``ov::enable_profiling`` property.
|
||||
With the following methods, you can collect profiling information with various performance data about execution on GNA:
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
``ov::InferRequest::get_profiling_info``
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
``openvino.runtime.InferRequest.get_profiling_info``
|
||||
.. tab:: C++
|
||||
|
||||
``ov::InferRequest::get_profiling_info``
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
``openvino.runtime.InferRequest.get_profiling_info``
|
||||
|
||||
|
||||
The current GNA implementation calculates counters for the whole utterance scoring and does not provide per-layer information.
|
||||
@@ -259,33 +256,31 @@ Read-write Properties
|
||||
|
||||
In order to take effect, the following parameters must be set before model compilation or passed as additional arguments to ``ov::Core::compile_model()``:
|
||||
|
||||
- ``ov::cache_dir``
|
||||
- ``ov::enable_profiling``
|
||||
- ``ov::hint::inference_precision``
|
||||
- ``ov::hint::num_requests``
|
||||
- ``ov::intel_gna::compile_target``
|
||||
- ``ov::intel_gna::firmware_model_image_path``
|
||||
- ``ov::intel_gna::execution_target``
|
||||
- ``ov::intel_gna::pwl_design_algorithm``
|
||||
- ``ov::intel_gna::pwl_max_error_percent``
|
||||
- ``ov::intel_gna::scale_factors_per_input``
|
||||
- ov::cache_dir
|
||||
- ov::enable_profiling
|
||||
- ov::hint::inference_precision
|
||||
- ov::hint::num_requests
|
||||
- ov::intel_gna::compile_target
|
||||
- ov::intel_gna::firmware_model_image_path
|
||||
- ov::intel_gna::execution_target
|
||||
- ov::intel_gna::pwl_design_algorithm
|
||||
- ov::intel_gna::pwl_max_error_percent
|
||||
- ov::intel_gna::scale_factors_per_input
|
||||
|
||||
These parameters can be changed after model compilation ``ov::CompiledModel::set_property``:
|
||||
|
||||
- ``ov::hint::performance_mode``
|
||||
- ``ov::intel_gna::execution_mode``
|
||||
- ``ov::log::level``
|
||||
- ov::hint::performance_mode
|
||||
- ov::intel_gna::execution_mode
|
||||
- ov::log::level
|
||||
|
||||
Read-only Properties
|
||||
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
- ``ov::available_devices``
|
||||
- ``ov::device::capabilities``
|
||||
- ``ov::device::full_name``
|
||||
- ``ov::intel_gna::library_full_version``
|
||||
- ``ov::optimal_number_of_infer_requests``
|
||||
- ``ov::range_for_async_infer_requests``
|
||||
- ``ov::supported_properties``
|
||||
- ov::available_devices
|
||||
- ov::device::capabilities
|
||||
- ov::device::full_name
|
||||
- ov::intel_gna::library_full_version
|
||||
- ov::optimal_number_of_infer_requests
|
||||
- ov::range_for_async_infer_requests
|
||||
- ov::supported_properties
|
||||
|
||||
Limitations
|
||||
###########################################################
|
||||
@@ -315,8 +310,8 @@ a convolution kernel moves in a single direction. Initially, a limited subset of
|
||||
previous feature set including:
|
||||
|
||||
* **2D VALID Convolution With Small 2D Kernels:** Two-dimensional convolutions with the following kernel dimensions
|
||||
[``H``, ``W``] are supported: [1,1], [2,2], [3,3], [2,1], [3,1], [4,1], [5,1], [6,1], [7,1], [1,2], or [1,3].
|
||||
Input tensor dimensions are limited to [1,8,16,16] <= [``N``, ``C``, ``H``, ``W``] <= [1,120,384,240]. Up to 384 ``C``
|
||||
[``H``,``W``] are supported: [1,1], [2,2], [3,3], [2,1], [3,1], [4,1], [5,1], [6,1], [7,1], [1,2], or [1,3].
|
||||
Input tensor dimensions are limited to [1,8,16,16] <= [``N``,``C``,``H``,``W``] <= [1,120,384,240]. Up to 384 ``C``
|
||||
channels may be used with a subset of kernel sizes (see the table below). Up to 256 kernels (output channels)
|
||||
are supported. Pooling is limited to pool shapes of [1,1], [2,2], or [3,3]. Not all combinations of kernel
|
||||
shape and input tensor shape are supported (see the tables below for exact limitations).
|
||||
|
||||
@@ -44,112 +44,87 @@ Creation of RemoteContext from Native Handle
|
||||
To create the ``ov::RemoteContext`` object for user context, explicitly provide the context to the plugin using constructor for one
|
||||
of ``ov::RemoteContext`` derived classes.
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Linux/C++
|
||||
:sync: linux-cpp
|
||||
.. tab:: Linux/C++
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Create from cl_context
|
||||
:sync: create-from-cl-context
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [context_from_cl_context]
|
||||
|
||||
.. tab-item:: Create from cl_queue
|
||||
:sync: create-from-cl-queue
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [context_from_cl_queue]
|
||||
|
||||
.. tab-item:: Create from VADisplay
|
||||
:sync: create-from-vadisplay
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [context_from_va_display]
|
||||
|
||||
.. tab-item:: Windows/C++
|
||||
:sync: windows-cpp
|
||||
.. tab:: Create from cl_context
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [context_from_cl_context]
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Create from cl_context
|
||||
:sync: create-from-cl-context
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [context_from_cl_context]
|
||||
|
||||
.. tab-item:: Create from cl_queue
|
||||
:sync: create-from-cl-queue
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [context_from_cl_queue]
|
||||
|
||||
.. tab-item:: Create from ID3D11Device
|
||||
:sync: create-from-id3d11device
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [context_from_d3d_device]
|
||||
|
||||
.. tab-item:: Linux/C
|
||||
:sync: linux-c
|
||||
.. tab:: Create from cl_queue
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Create from cl_context
|
||||
:sync: create-from-cl-context
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [context_from_cl_context]
|
||||
|
||||
.. tab-item:: Create from cl_queue
|
||||
:sync: create-from-cl-queue
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [context_from_cl_queue]
|
||||
|
||||
.. tab-item:: Create from VADisplay
|
||||
:sync: create-from-vadisplay
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [context_from_va_display]
|
||||
|
||||
.. tab-item:: Windows/C
|
||||
:sync: windows-c
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [context_from_cl_queue]
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Create from cl_context
|
||||
:sync: create-from-cl-context
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [context_from_cl_context]
|
||||
|
||||
.. tab-item:: Create from cl_queue
|
||||
:sync: create-from-cl-queue
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [context_from_cl_queue]
|
||||
|
||||
.. tab-item:: Create from ID3D11Device
|
||||
:sync: create-from-id3d11device
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [context_from_d3d_device]
|
||||
.. tab:: Create from VADisplay
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [context_from_va_display]
|
||||
|
||||
.. tab:: Windows/C++
|
||||
|
||||
.. tab:: Create from cl_context
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [context_from_cl_context]
|
||||
|
||||
.. tab:: Create from cl_queue
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [context_from_cl_queue]
|
||||
|
||||
.. tab:: Create from ID3D11Device
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [context_from_d3d_device]
|
||||
|
||||
.. tab:: Linux/C
|
||||
|
||||
.. tab:: Create from cl_context
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [context_from_cl_context]
|
||||
|
||||
.. tab:: Create from cl_queue
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [context_from_cl_queue]
|
||||
|
||||
.. tab:: Create from VADisplay
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [context_from_va_display]
|
||||
|
||||
.. tab:: Windows/C
|
||||
|
||||
.. tab:: Create from cl_context
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [context_from_cl_context]
|
||||
|
||||
.. tab:: Create from cl_queue
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [context_from_cl_queue]
|
||||
|
||||
.. tab:: Create from ID3D11Device
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [context_from_d3d_device]
|
||||
|
||||
Getting RemoteContext from the Plugin
|
||||
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
@@ -160,46 +135,33 @@ Once the plugin options have been changed, the internal context is replaced by t
|
||||
|
||||
To request the current default context of the plugin, use one of the following methods:
|
||||
|
||||
.. tab-set::
|
||||
.. tab:: C++
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
.. tab:: Get context from Core
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Get context from Core
|
||||
:sync: context-core
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [default_context_from_core]
|
||||
|
||||
.. tab-item:: Get context from compiled model
|
||||
:sync: context-compiled-model
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [default_context_from_model]
|
||||
|
||||
.. tab-item:: C
|
||||
:sync: c
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [default_context_from_core]
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Get context from Core
|
||||
:sync: context-core
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [default_context_from_core]
|
||||
|
||||
.. tab-item:: Get context from compiled model
|
||||
:sync: context-compiled-model
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [default_context_from_model]
|
||||
|
||||
.. tab:: Get context from compiled model
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
:fragment: [default_context_from_model]
|
||||
|
||||
.. tab:: C
|
||||
|
||||
.. tab:: Get context from Core
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [default_context_from_core]
|
||||
|
||||
.. tab:: Get context from compiled model
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [default_context_from_model]
|
||||
|
||||
Memory Sharing Between Application and GPU Plugin
|
||||
###########################################################
|
||||
@@ -229,7 +191,7 @@ For more details, see the code snippets below:
|
||||
:fragment: [wrap_usm_pointer]
|
||||
|
||||
.. tab-item:: cl_mem
|
||||
:sync: cl-mem
|
||||
:sync: cl_mem
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
@@ -276,7 +238,6 @@ For more details, see the code snippets below:
|
||||
:fragment: [allocate_usm_device]
|
||||
|
||||
.. tab-item:: cl::Buffer
|
||||
:sync: buffer
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation.cpp
|
||||
:language: cpp
|
||||
@@ -297,7 +258,7 @@ For more details, see the code snippets below:
|
||||
:fragment: [wrap_usm_pointer]
|
||||
|
||||
.. tab-item:: cl_mem
|
||||
:sync: cl-mem
|
||||
:sync: cl_mem
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
@@ -306,9 +267,9 @@ For more details, see the code snippets below:
|
||||
.. tab-item:: cl::Buffer
|
||||
:sync: buffer
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [wrap_cl_buffer]
|
||||
.. doxygensnippet:: docs/snippets/gpu/remote_objects_creation_c.cpp
|
||||
:language: c
|
||||
:fragment: [wrap_cl_buffer]
|
||||
|
||||
.. tab-item:: cl::Image2D
|
||||
:sync: image2D
|
||||
@@ -362,111 +323,85 @@ To support the direct consumption of a hardware video decoder output, the GPU pl
|
||||
To ensure that the plugin generates a correct execution graph, static preprocessing
|
||||
should be added before model compilation:
|
||||
|
||||
.. tab-set::
|
||||
.. tab:: two-plane
|
||||
|
||||
.. tab-item:: two-plane
|
||||
:sync: two_plane
|
||||
.. tab:: C++
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_two_planes.cpp
|
||||
:language: cpp
|
||||
:fragment: [init_preproc]
|
||||
|
||||
.. tab-item:: C
|
||||
:sync: c
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_two_planes_c.cpp
|
||||
:language: c
|
||||
:fragment: [init_preproc]
|
||||
|
||||
.. tab-item:: single-plane
|
||||
:sync: single_plane
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_single_plane.cpp
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_two_planes.cpp
|
||||
:language: cpp
|
||||
:fragment: [init_preproc]
|
||||
|
||||
.. tab-item:: NV12 to Grey
|
||||
:sync: nv12-grey
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_to_gray.cpp
|
||||
:language: cpp
|
||||
|
||||
.. tab:: C
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_two_planes_c.cpp
|
||||
:language: c
|
||||
:fragment: [init_preproc]
|
||||
|
||||
.. tab:: single-plane
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_single_plane.cpp
|
||||
:language: cpp
|
||||
:fragment: [init_preproc]
|
||||
|
||||
.. tab:: NV12 to Grey
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_to_gray.cpp
|
||||
:language: cpp
|
||||
:fragment: [init_preproc]
|
||||
|
||||
|
||||
Since the ``ov::intel_gpu::ocl::ClImage2DTensor`` and its derived classes do not support batched surfaces,
|
||||
if batching and surface sharing are required at the same time,
|
||||
inputs need to be set via the ``ov::InferRequest::set_tensors`` method with vector of shared surfaces for each plane:
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Single Batch
|
||||
:sync: single-batch
|
||||
|
||||
.. tab-set::
|
||||
.. tab:: Single Batch
|
||||
|
||||
.. tab-item:: two-plane
|
||||
:sync: two-plane
|
||||
.. tab:: two-plane
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_two_planes.cpp
|
||||
:language: cpp
|
||||
:fragment: [single_batch]
|
||||
|
||||
.. tab-item:: C
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_two_planes_c.cpp
|
||||
:language: c
|
||||
:fragment: [single_batch]
|
||||
|
||||
.. tab-item:: single-plane
|
||||
:sync: single-plane
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_single_plane.cpp
|
||||
:language: cpp
|
||||
:fragment: [single_batch]
|
||||
|
||||
.. tab-item:: NV12 to Grey
|
||||
:sync: nv12-grey
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_to_gray.cpp
|
||||
:language: cpp
|
||||
:fragment: [single_batch]
|
||||
|
||||
.. tab-item:: Multiple Batches
|
||||
:sync: multiple-batches
|
||||
.. tab:: C++
|
||||
|
||||
.. tab-set::
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_two_planes.cpp
|
||||
:language: cpp
|
||||
:fragment: [single_batch]
|
||||
|
||||
.. tab:: C
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_two_planes_c.cpp
|
||||
:language: c
|
||||
:fragment: [single_batch]
|
||||
|
||||
.. tab:: single-plane
|
||||
|
||||
.. tab-item:: two-plane
|
||||
:sync: two-plane
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_two_planes.cpp
|
||||
:language: cpp
|
||||
:fragment: [batched_case]
|
||||
|
||||
.. tab-item:: single-plane
|
||||
:sync: single-plane
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_single_plane.cpp
|
||||
:language: cpp
|
||||
:fragment: [batched_case]
|
||||
|
||||
.. tab-item:: NV12 to Grey
|
||||
:sync: single-plane
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_to_gray.cpp
|
||||
:language: cpp
|
||||
:fragment: [batched_case]
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_single_plane.cpp
|
||||
:language: cpp
|
||||
:fragment: [single_batch]
|
||||
|
||||
.. tab:: NV12 to Grey
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_to_gray.cpp
|
||||
:language: cpp
|
||||
:fragment: [single_batch]
|
||||
|
||||
.. tab:: Multiple Batches
|
||||
|
||||
.. tab:: two-plane
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_two_planes.cpp
|
||||
:language: cpp
|
||||
:fragment: [batched_case]
|
||||
|
||||
.. tab:: single-plane
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_single_plane.cpp
|
||||
:language: cpp
|
||||
:fragment: [batched_case]
|
||||
|
||||
.. tab:: NV12 to Grey
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/preprocessing_nv12_to_gray.cpp
|
||||
:language: cpp
|
||||
:fragment: [batched_case]
|
||||
|
||||
|
||||
I420 color format can be processed in a similar way
|
||||
@@ -539,22 +474,18 @@ To see pseudo-code of usage examples, refer to the sections below.
|
||||
:fragment: [context_sharing_user_handle]
|
||||
|
||||
.. dropdown:: Direct Consuming of the NV12 VAAPI Video Decoder Surface on Linux
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/context_sharing_va.cpp
|
||||
:language: cpp
|
||||
:fragment: [context_sharing_va]
|
||||
|
||||
.. tab-item:: C
|
||||
:sync: c
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/context_sharing_va_c.cpp
|
||||
:language: c
|
||||
:fragment: [context_sharing_va]
|
||||
|
||||
.. tab:: C++
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/context_sharing_va.cpp
|
||||
:language: cpp
|
||||
:fragment: [context_sharing_va]
|
||||
|
||||
.. tab:: C
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/context_sharing_va_c.cpp
|
||||
:language: c
|
||||
:fragment: [context_sharing_va]
|
||||
|
||||
See Also
|
||||
#######################################
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user