Compare commits
766 Commits
2022.1.0.d...releases/2
SHA1:

3635ceb3eb ba67256119 78d8a84dbc c32095d699 b19b108f27 bbfc189339
31fdc1ad6c fc0d88ca8a 905a782c6a 4d4bd1d8ae 672c041e2d 1c4fbc2588
7f0cfe7219 76d7cbcc33 8798bbfee7 b55818d83a f44c5e2e26 fffba9f885
6ff316a1eb b390f384b6 521df07e44 0fca7bb95e 5621a7a2e6 665783ba65
3d027c5e1b f70a0660a5 10150788ce 01a567fe65 e57a4aa9ee d58ba236b7
d6eebe8c23 d89d5d5320 ea6226a84d a0b45124ea 0a466cdbd3 36b95c253a
f6acfcc4b7 8914d22a8e a711fce5ac 24b4182452 ad1c879a03 32662165f6
ace527e1d3 172ffa6cb9 1936ca551e 64997d6c72 380c8656f3 1f229bc569
f5d9e1d050 658cf17d5e 3249e61bfb 21218617b5 2a6805610b a88a214190
e4f2d0c5a7 736bfae074 34b7005a36 51f8b681d7 a2689298b9 31e35fb4a9
fea54ccf19 c70b3bc7e8 d13fef48b3 1741e979ae a0a27c8849 5236c2c310
ffcea2a273 b579c325d9 b90baac902 f3c8f48c80 f9f6f505ec 77c9da71ee
6aa1150c34 a53bb64ac1 ed80e2eee8 80390cc89d 72aee062cb 3291d78845
1693047422 07e9fb4047 a12e529b9c ef2d84a585 f1807ad102 4d023ddc54
61abcdf7e6 c02e7d825e fdabdc934a cede276561 68bba406b6 8369e93208
83321da639 cfdd7d8bae 47a73b49de 0ba2774cf0 889f2b23b0 a689cf5524
dd0038b856 340ee1ec6c 173c8c4dc5 3b62b5bd8b 5a1bcc09e3 18bde21245
15d92f6866 45067713ee 856575939d 26d3895331 f601dc714c 4ea182c744
b7cdc83449 ecf363c72e 21d88da4b5 fb64fd38bb ad2eaeb773 c625d226b2
f91e863d41 703368ce85 2bdf51429c 76753f1b51 bd48a3882f 16237cc731
2bf0c8a8da 3caa77eb30 1c616d4ed1 b6479bec08 1e65668aa4 33c0ee3bd2
1e3f50ef2d cf8ccb590a c3b05978e2 56e626d4b1 ea4d42d61f 1fbc377d89
95223fa876 cdb9bec721 baf4b23d9a 43fa3183dc 63ca94179e 8723d1cc7e
cbfb8a1678 1ed828982e c670e4cc2b e124d4f5df 09462af266 0b08b9a14c
a98059daea 27b5722944 c1fc602c7c e65fc4c849 994b06b744 6cf81ad6a3
a7f1710edf e20e828a1f 5835cac31c b4b5f3333e a423a2b802 8890e2906a
e4fcfa74c2 6474d2c94e bf11b965e6 af5b31c413 1d3fab80a8 5891a79249
c790aa85cb f756d55dc6 81ffb7a3bc 205e6ba573 b8d23e04f1 a43369c152
0b4b627e02 76c82ae844 939c420435 7d7af2a9bf 829c8c98c5 5f19d22323
cb635050fb 68863478d3 8dacbf789d 8f9c368aae 5f755d5e4a 22a8e75bb7
d44cad85ed 0047db7377 4b677dd5b3 390ca9f45f 5f4f27cd73 617160492f
8308b1e122 07322aa5aa d64c5d8c7c c31129c7cd db05e54483 c80e70a917
4d6b43d76f cdd4f56ba1 3c75a4fd16 6354ac6b5d b51bc06077 93320f4fd6
28889c4833 fdf12c9537 8121de731c d1630c9ac1 75f7bced65 59cfdce73b
1fec99afa3 974ae136a6 1c5e76c4db 7ba71f9c20 3318dd6c68 4f6ca1b85f
d670e77d97 21185189d8 24a5aab501 4b55ef9911 bea352f272 180f15e84c
42d3893833 7cd3c8e86e d3ded2fc36 40fc5334d8 cd52cc6767 c54926ecb8
969060c8db 86b175534a d1bcb6d0fc 9cd3bff7df e75ee60bec 81cd9d86d1
5e023ebdd9 6b067bc0ed 18035209a0 0f409ccea9 3f941e3c5f 9eca8515b8
6c6aa8fa95 1d469a2b87 8e0978818c 64fca57af4 5f40ba9a23 6c78715749
9da124544a 4b29eed013 173f328c53 b319acc672 4a8b142fef 33ad1b96d4
7d0d950b9a f6fbef1f66 bed0adf5ef 1ceb9729e9 b9ef57112e d4f77f1d3e
f55e69d656 5724c5ac44 52b450a5fb 7b58f931b5 18ff8afe63 94cbbe063b
e9e59cb954 54f39294de 14d11a8998 bdee939fe0 38d87dd9de a32ed5a07a
bacf597516 9e3610c028 6062e3d4b7 53d3ef8eab ffd63f9758 806ce96899
f2bbd5bbb8 e906b3581f 163a79b232 1c18733ade a2f9963045 85707198b3
3de428c713 4c01d6c50c 506303cc79 23b74840c1 e544f5e66f 9dec8db964
c1919a0f1d 7ff8ada805 75cca1e9e9 817550fa0a 3f4e384d5d 5b3b48aa17
37923a9183 14d31d59af b12c3389ee e2df6d149b dab1a34aa2 e59739ce88
71a0a6d261 bc0a84a1c1 aced89a655 5bb8f77c3f 435584bb91 487bb67995
850f93f21b 51ef938385 6dc8b8b047 c80a872f73 a3004e7d80 991c9db1c1
3f15afb926 3d223ebc2a efd3c119fa 6500ec775d a3887f3328 b7ead46943
d57fb75ba6 171ad9536f 3f56438d06 472ebc0cd9 5247fdfcaf 100fff83bf
4afd8667cf 4075f8ed51 746b77c74a 1891967ad3 33062bef7a aea0532d76
5be402750a d7ad1bd9cd e7145bd343 d26fd3aa22 f82533005b 65d1575642
1d33c37970 b7fede89c8 6bb8701651 4decf16927 09379dca86 ae42bf1e86
f53f09f020 68e873c6c8 0ce255e56a 11bf540018 5dbf2f7088 206442fb19
828d9d810a cd77b33f3a 430e898c33 1fa5d44769 33ab7f9063 31f517a3b4
e5d6f18366 2cc6629624 f7a85c59fe e89c7ed8e5 73a6d50dbc 0ee6959537
a7fff7447c 575ded54a9 ea3bd087c4 7c93902dac a52c755d21 982942fa5d
a312dd4a9f 5c7be85435 5671ca2cf5 af62ff22b1 661002689f 71fdcdf899
2e164b4ddc fb6359586d 6b22d0d109 c9bfd3bf8b d2177cf177 b22585a696
7985c92095 d9b1f10074 f52f129ed8 ba9d18f181 a18c8076cc e8ff31f4fb
20266dd0c3 7a82bb2acb dea35b8e6e 03862e780f 3a89c87f52 dcd6e3e961
2ac15eae3d 17311c46b3 b8ac041da9 76ade7a7d0 9b36daf23b 2d88e67616
10ac5b280b 215db2dad8 b6a75d7d91 7fa9d07a1f f482f9765e ac880f601c
b8bbe056b1 73caba0f67 a090abbc92 6e5eb87340 ade4c6c7f9 61f657795c
198f44fdc7 306b7611d9 3144c5fab8 ccd7104108 fc1157cf68 8ae4bc95fd
0882f863d6 7ce9801ec3 d1378d94b8 ff4e97ab09 e444715c8d 83a8ac800c
61f915b4f6 43784e2cec 8abb949af9 5ace7bb96f a7b28953e2 8148921fa7
68f523010e ed323afc93 d35335193a 861d43e06d be6a3c34f1 29883a152a
ff293f5560 541627d319 3597ae61f9 926460e603 ab4a11b3bd 1fc61299c8
90a100d5f6 00abcbacc4 5cadee20eb abeb910ce2 4f000b780d 5bf9631073
05650551b7 434d7bbecc 5b8b698f88 7a24f53b57 e2948a807c fc5a416423
2e71fccd82 483b3828ca ba69bae055 4d954d0c13 2a1d8d7e99 0c4d50239a
709084888a 0b27fb80b1 c8ce93290e e22a2b3076 0a056857c5 c0d54e48bb
fa4246d531 cbb5dff9c1 06eb74b77f e71f23fc7e d14f1e54a5 eda4cbf30e
317b956d2e d5e8e0fb88 dc905f972a fa6865d569 0793a56260 f150e2ad09
498d865ea6 b837b7e32c 121d59aa80 f1557c06de e168c9b1c3 68c390f679
788a5bb9f2 ccc38d22a8 2b8e1ec49a f5283300f0 a875f6ed9c 523adff17a
64812fd635 0099755434 004daca1fa ded2d00711 39c90e9d48 2b03d5fe66
d48dd1f26c e85c473d59 acf6185bf3 13c024b7a3 8020a7abcc f75e50cc88
c3c52bae63 84ee38d89e a0ad849c19 1ab9c07ccd 2f9c5df271 2f876e3b5b
d3712a148b 0050643e9b 3a5d821219 310eb81403 3fcff15166 2d3bd40c3d
e1197065fe 9b41aa707d a3d5b6501d d1477b8569 08eb4766f2 25bd2c8aee
9ac542c455 56be1a5438 a9b6eaf5c0 9b1e4b801b 3cb7592607 be4464ca2b
9e89ee2478 7ff5f5ea70 c5b26bc10c 931f4c077d be8e15c180 d13e04a693
bb0d82f724 d85715f991 ba19551b13 ac2e639ff8 80a901e103 ea00eae922
8e43987cd7 976a20cedf 78281fef74 451453c4ce e49370c008 74475e216d
9989db5ae0 e3cc4833f4 653ed4a34c 897e2acd91 7b288d125a c2a9036482
14c1e98e8c 7abd61f867 cc602ac6fd 4d61600077 bcd192e882 f36d3303d2
03566b4e4d 20d2633af0 04c1b9760c dc1e9aa9bd 013b5f5b5f d758a21d6e
31501a7992 6e1bc49862 f03590d245 5535fdefa9 c186449735 8bbabf8720
cf805b17b9 281e38bd83 1621a5a0b5 437bc3280d dedcbeafa8 fa69ee9596
fd79ca91a1 e41e1f51a0 510e5fb746 7b1b6f22e5 7c455c7f23 efbfd957ff
50dffb80bb 87f8ff5918 9af8d9339c bc21e52912 d94cff59a3 54f56be077
8b7aeb7f52 9ad09f2120 d5c837cc1b 80be557605 ea26ec32b3 d484411f39
51c89dff26 89c3a18f83 3f0e532dce 334e9e994e 36de4e8e28 87c6e09cae
36afedd93d fce49e6d80 a002b26294 d28f8b7857 aedd902cd8 840d2fb80d
6ea20340d1 a37195492c e81ca9f975 c0a375f844 f56c640550 a60c110b96
c186069025 c93c9ec3d5 21601398d6 051724f0d5 603ea50277 79fceddd7e
60011b6eb2 654b025a26 25ca17e789 4fdf71cdc1 0168bda833 320c64de24
04194b292d 52374a4b8b 6dd6fb6c12 c4e54d882b 9d40c5184f 532a1da548
acf8cacfbc 0d64afc2c8 8f0e974ee6 1970baeb1c d951433b12 a18069926e
0dfdadb531 b47b8ad4bf dfc738b493 0c855ee8b2 f17c26506f 24c4ccc621
47b8c77a59 42a0ce0514 7406b1ffc3 f9eaaa9ff6 863c74471f 0a316216f3
c4c46beb6b 67e2bdfc28 2215440188 65701e12ef 9d3028a9f7 2d9a248912
c6c9a06d41 e34ff009e0 d62d185ac5 bde1d5edb0 857c0bd9dd 14fcd196a3
707a1b9377 89f071f5fa 57b08583cc 3d6e90b8f9 abda6eb4af 74fa60cf86
b365e67561 3c13cea02b 38f470c184 ac28063b19 9f9df184c4 ae4c727b31
12746efbe5 a2ca1d4499 788fb5c010 eff6084ec9 7d1ad47611 a365ee768b
502c89e4a7 ced90de0a5 213e02f3b0 5f5bea2c5a 18fd46a447 e0114fd22d
6ac54df960 768f353300 c83d265416 a8c520878d 69b118ed7b 1abc6e2a16
12a310636d b3a990b0a7 265ab03314 8a85bfa312 9743784f91 72216a9b95
b7c62fcfbc 797b2221be 7478915ef3 da02951d67 ed6bb8ab2d 70ca4b6e40
7b5a4e8c5e 54678f47cf f9b88c385c e8b88b9021 64aabc74d1 f2f281e60b
86faa25724 d30365f3d5 8993c4c18a 6677079821 5c9b6915dc 168bfe58c4
3c35cf73c2 ca45bf430a f57be8fdd8 711d6de33b ccf4f4e420 b34cb55081
0b75589e27 3d9da2901e 8d27103f06 db334efbbd 38ed0de9cf b4206fe0a1
e7d8284e4d cf69c97765 03c38ca3fd 9219242dbd 552454a3f0 5406839e3f
fbe8aa94a4 7a88daa8f7 8a05ef2514 0700ba781b 53af687a0c 04f5b233f2
9dd4476c58 176bc2d83d 0dd8d895a0 70f65bdb74 336fc37b94 83b1a247ec
bf908e9bdf 4cbcf4b4e3 c715fde8f0 172cbe7340 ff8c217e03 ba736e2bcd
89fe26e3db 56759d9cdc 5e8f997262 c848e55f5e 6845392aa6 ca09ddd123
09f53b56e6 52d53d187d 2ce7becc6b 8892b7b327 f25c450534 9bb7697b2f
e0af970d62 b8a4b0742b 8ca6aeae83 6866ced978 e1e467f23f a3f2a4ef99
298cced3b3 4717e7639c 75abee2500 ab3207a81b ff784ed6ab 44362c97be
cc19ff74f1 8c7e0d9479 bcdf7b0cad 73e9eb4c61
@@ -1,9 +1,34 @@
trigger:
  branches:
    include:
    - master
    - releases/*
  paths:
    exclude:
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

pr:
  branches:
    include:
    - master
    - releases/*
  paths:
    exclude:
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

resources:
  repositories:
  - repository: openvino_contrib
    type: github
    endpoint: openvinotoolkit
    name: openvinotoolkit/openvino_contrib
    ref: releases/2022/1

jobs:
- job: android_arm64

@@ -5,7 +5,22 @@ trigger:
    - releases/*
  paths:
    exclude:
    - docs/*
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

pr:
  branches:
    include:
    - master
    - releases/*
  paths:
    exclude:
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

resources:
  repositories:
@@ -13,19 +28,21 @@ resources:
    type: github
    endpoint: openvinotoolkit
    name: openvinotoolkit/openvino_contrib
    ref: releases/2022/1

  - repository: testdata
    type: github
    endpoint: openvinotoolkit
    name: openvinotoolkit/testdata
    ref: releases/2022/1

jobs:
- job: Lin
  strategy:
    matrix:
      Dynamic:
        CMAKE_BUILD_SHARED_LIBS: 'ON'
        PYTHON_STATIC_ARGS:
      # Dynamic:
      #   CMAKE_BUILD_SHARED_LIBS: 'ON'
      #   PYTHON_STATIC_ARGS:
      Static:
        CMAKE_BUILD_SHARED_LIBS: 'OFF'
        PYTHON_STATIC_ARGS: -m "not dynamic_library and not template_plugin"
@@ -147,7 +164,6 @@ jobs:
        -DCMAKE_BUILD_TYPE=$(BUILD_TYPE)
        -DENABLE_PYTHON=ON
        -DBUILD_SHARED_LIBS=$(CMAKE_BUILD_SHARED_LIBS)
        -DENABLE_INTEL_GNA=$(CMAKE_BUILD_SHARED_LIBS)
        -DENABLE_ONEDNN_FOR_GPU=$(CMAKE_BUILD_SHARED_LIBS)
        -DPYTHON_EXECUTABLE=/usr/bin/python3.8
        -DENABLE_WHEEL=ON
@@ -237,8 +253,16 @@ jobs:
  - script: |
      export DATA_PATH=$(MODELS_PATH)
      export MODELS_PATH=$(MODELS_PATH)
      . $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_TEST_DIR)/pyngraph $(PYTHON_STATIC_ARGS) --junitxml=TEST-Pyngraph.xml --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_utils/test_utils.py --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_zoo_models.py --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_backend.py
    displayName: 'nGraph Python Bindings Tests'
      . $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_TEST_DIR)/pyngraph $(PYTHON_STATIC_ARGS) --junitxml=TEST-Pyngraph.xml --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_zoo_models.py --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_backend.py
    displayName: 'nGraph and IE Python Bindings Tests'
    continueOnError: false

  # Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time
  - script: |
      export DATA_PATH=$(MODELS_PATH)
      export MODELS_PATH=$(MODELS_PATH)
      . $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_TEST_DIR)/pyopenvino $(PYTHON_STATIC_ARGS) --junitxml=TEST-Pyngraph.xml --ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_utils/test_utils.py --ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_onnx/test_zoo_models.py --ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_onnx/test_backend.py
    displayName: 'Python API 2.0 Tests'
    continueOnError: false

  - script: |
@@ -246,7 +270,6 @@ jobs:
      . $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_DIR)/tests/mo/unit_tests --junitxml=TEST-ModelOptimizer.xml
    displayName: 'Model Optimizer UT'
    continueOnError: false
    enabled: true

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
    workingDirectory: $(INSTALL_TEST_DIR)
@@ -277,7 +300,6 @@ jobs:
  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/gnaUnitTests --gtest_output=xml:TEST-gnaUnitTests.xml
    displayName: 'GNA UT'
    continueOnError: false
    condition: eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'ON')

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml
    displayName: 'VPU UT'
@@ -338,16 +360,6 @@ jobs:
    workingDirectory: $(INSTALL_DIR)/samples_bin
    displayName: 'Samples Smoke Tests'
    continueOnError: false
    condition: eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'ON')
    enabled: true

  - script: |
      export DATA_PATH=$(MODELS_PATH)
      export MODELS_PATH=$(MODELS_PATH)
      cd $(REPO_DIR)/src/bindings/python/tests_compatibility/test_inference_engine
      . $(SETUPVARS) -pyver 3.8 && python3 -m pytest --junitxml=TEST-PythonAPI.xml $(PYTHON_STATIC_ARGS)
    displayName: 'Python API Tests'
    continueOnError: false

  - script: |
      . $(SETUPVARS)
@@ -358,7 +370,6 @@ jobs:
    workingDirectory: $(LAYER_TESTS_DIR)
    displayName: 'Layer Tests'
    continueOnError: false
    enabled: true

  - task: PublishTestResults@2
    condition: always()

@@ -1,9 +1,34 @@
trigger:
  branches:
    include:
    - master
    - releases/*
  paths:
    exclude:
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

pr:
  branches:
    include:
    - master
    - releases/*
  paths:
    exclude:
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

resources:
  repositories:
  - repository: openvino_contrib
    type: github
    endpoint: openvinotoolkit
    name: openvinotoolkit/openvino_contrib
    ref: releases/2022/1

jobs:
- job: linux_arm64
@@ -17,16 +42,28 @@ jobs:
    system.debug: true
    VSTS_HTTP_RETRY: 5
    VSTS_HTTP_TIMEOUT: 200
    PYTHON_ARM_VERSION: "3.8.12"
    PYTHON_EXEC: "python3.8"
    OPENVINO_ARCH: 'aarch64'
    NUM_PROC: 1
    BUILD_TYPE: Release
    OPENVINO_REPO_DIR: $(Build.Repository.LocalPath)
    OPENVINO_CONTRIB_REPO_DIR: $(OPENVINO_REPO_DIR)/../openvino_contrib
    OPENCV_REPO_DIR: $(OPENVINO_REPO_DIR)/../opencv
    BUILD_PYTHON: $(WORK_DIR)/build_python
    BUILD_OPENCV: $(WORK_DIR)/build_opencv
    BUILD_OPENVINO: $(WORK_DIR)/build
    BUILD_OPENVINO_PYTHON: $(WORK_DIR)/build_python
    BUILD_OPEN_MODEL_ZOO: $(WORK_DIR)/build_open_model_zoo
    INSTALL_OPENVINO: $(WORK_DIR)/install_openvino
    INSTALL_PYTHON: $(INSTALL_OPENVINO)/extras/python
    INSTALL_OPENCV: $(INSTALL_OPENVINO)/extras/opencv
    INSTALL_OPEN_MODEL_ZOO: $(INSTALL_OPENVINO)/extras/open_model_zoo
    WORK_DIR: $(Pipeline.Workspace)/_w
    BUILD_DIR: $(WORK_DIR)/build
    BUILD_DIR_OPENCV: $(WORK_DIR)/build_opencv
    TMP_DIR: /mnt/tmp
    SHARE_DIR: /mount/cinfsshare/onnxtestdata
    CCACHE_DIR: $(SHARE_DIR)/ccache/master/linux_arm64
    TMP_DIR: /mnt/tmp
    OPENVINO_CCACHE_DIR: $(SHARE_DIR)/ccache/master/linux_arm64
    OPENCV_CCACHE_DIR: $(SHARE_DIR)/ccache/master/linux_arm64_opencv

  steps:
  - script: |
@@ -47,17 +84,21 @@ jobs:
      df
      lsblk -o NAME,HCTL,SIZE,MOUNTPOINT | grep -i "sd"
      free -h
      echo "##vso[task.setvariable variable=NUM_PROC]$(nproc --all)"
      echo "NUM_PROC=$(NUM_PROC)"
    displayName: 'System information'

  - script: |
      rm -rf $(WORK_DIR) ; mkdir $(WORK_DIR)
      rm -rf $(BUILD_DIR) ; mkdir $(BUILD_DIR)
      mkdir -p $(BUILD_OPENCV) $(BUILD_OPENVINO) $(BUILD_OPENVINO_PYTHON) $(BUILD_PYTHON) $(BUILD_OPEN_MODEL_ZOO)
      mkdir -p $(INSTALL_OPENVINO) $(INSTALL_PYTHON) $(INSTALL_OPENCV) $(INSTALL_OPEN_MODEL_ZOO)
      sudo rm -rf $(TMP_DIR) ; sudo mkdir $(TMP_DIR) ; sudo chmod 777 -R $(TMP_DIR)
      sudo mkdir -p $(SHARE_DIR)
      sudo apt --assume-yes update && sudo apt --assume-yes install nfs-common
      sudo mount -vvv -t nfs cinfsshare.file.core.windows.net:/cinfsshare/onnxtestdata $(SHARE_DIR) -o vers=4,minorversion=1,sec=sys
      mkdir -p $(CCACHE_DIR)
    displayName: 'Make directory'
      mkdir -p $(OPENVINO_CCACHE_DIR)
      mkdir -p $(OPENCV_CCACHE_DIR)
    displayName: 'Make directories'

  - checkout: self
    clean: true
@@ -74,16 +115,25 @@ jobs:
  - script: |
      set -e
      $(OPENVINO_REPO_DIR)/install_build_dependencies.sh
      # Move into contrib install_build_dependencies.sh
      sudo apt --assume-yes install scons crossbuild-essential-arm64 libprotoc-dev protobuf-compiler
      # OpenCV should provide install_build_dependencies.sh as well
      # Move into resources
      git clone https://github.com/opencv/opencv.git --depth 1 $(OPENCV_REPO_DIR)
      # Speed up build
      wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip
      unzip ninja-linux.zip
      sudo cp -v ninja /usr/local/bin/
    workingDirectory: $(WORK_DIR)
      export CCACHE_DIR=$(OPENCV_CCACHE_DIR)
      export CCACHE_TEMPDIR=$(TMP_DIR)/ccache
      export CCACHE_BASEDIR=$(Pipeline.Workspace)
      export CCACHE_MAXSIZE=50G
      export USE_CCACHE=1
      export PYTHON_ARM_VERSION=$(PYTHON_ARM_VERSION)
      export NUM_PROC=$(NUM_PROC)
      export BUILD_PYTHON=$(BUILD_PYTHON)
      export WORK_DIR=$(WORK_DIR)
      export INSTALL_PYTHON=$(INSTALL_PYTHON)
      export BUILD_TYPE=$(BUILD_TYPE)
      export OPENVINO_REPO_DIR=$(OPENVINO_REPO_DIR)
      export INSTALL_OPENCV=$(INSTALL_OPENCV)
      export PYTHON_EXEC=$(PYTHON_EXEC)
      export OPENCV_REPO_DIR=$(OPENCV_REPO_DIR)
      export BUILD_OPENCV=$(BUILD_OPENCV)
      export INSTALL_OPENVINO=$(INSTALL_OPENVINO)
      $(OPENVINO_CONTRIB_REPO_DIR)/modules/arm_plugin/scripts/install_build_dependencies.sh
    workingDirectory: $(BUILD_OPENVINO)
    displayName: 'Install dependencies'

  - task: CMake@1
@@ -91,30 +141,21 @@ jobs:
      cmakeArgs: >
        -GNinja
        -DVERBOSE_BUILD=ON
        -DCMAKE_BUILD_TYPE=$(BUILD_TYPE)
        -DBUILD_LIST=imgcodecs,videoio,highgui
        -DCMAKE_TOOLCHAIN_FILE=$(OPENCV_REPO_DIR)/platforms/linux/aarch64-gnu.toolchain.cmake
        $(OPENCV_REPO_DIR)
    workingDirectory: $(BUILD_DIR_OPENCV)

  - script: ninja
    workingDirectory: $(BUILD_DIR_OPENCV)
    displayName: 'Build OpenCV Linux ARM64'

  - script: ninja install
    workingDirectory: $(BUILD_DIR_OPENCV)
    displayName: 'Install OpenCV Linux ARM64'

  - task: CMake@1
    inputs:
      cmakeArgs: >
        -GNinja
        -DVERBOSE_BUILD=ON
        -DCMAKE_BUILD_TYPE=$(BUILD_TYPE)
        -DCMAKE_TOOLCHAIN_FILE=$(OPENVINO_REPO_DIR)/cmake/arm64.toolchain.cmake
        -DOpenCV_DIR=$(BUILD_DIR_OPENCV)/install/lib/cmake/opencv4
        -DOpenCV_DIR=$(INSTALL_OPENCV)/cmake
        -DENABLE_OPENCV=OFF
        -DPYTHON_INCLUDE_DIRS=$(INSTALL_PYTHON)/include/python3.8
        -DPYTHON_LIBRARY=$(INSTALL_PYTHON)/lib/libpython3.8.so
        -DENABLE_PYTHON=ON
        -DPYTHON_MODULE_EXTENSION=".so"
        -DENABLE_TESTS=ON
        -DENABLE_FUNCTIONAL_TESTS=ON
        -DENABLE_GAPI_TESTS=OFF
        -DENABLE_GAPI_PREPROCESSING=OFF
        -DENABLE_DATA=OFF
        -DCMAKE_EXE_LINKER_FLAGS=-Wl,-rpath-link,$(INSTALL_OPENCV)/lib
        -DTHREADING=SEQ -DENABLE_LTO=ON
        -DCMAKE_TOOLCHAIN_FILE=$(OPENVINO_REPO_DIR)/cmake/arm64.toolchain.cmake
        -DCMAKE_BUILD_TYPE=$(BUILD_TYPE)
        -DENABLE_SAMPLES=ON
        -DBUILD_java_api=OFF
        -DENABLE_INTEL_MYRIAD=OFF
@@ -122,26 +163,102 @@ jobs:
        -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules
        -DCMAKE_CXX_COMPILER_LAUNCHER=ccache
        -DCMAKE_C_COMPILER_LAUNCHER=ccache
        -DARM_COMPUTE_SCONS_JOBS=$(NUM_PROC)
        -DOUTPUT_ROOT=$(INSTALL_OPENVINO)
        -DCMAKE_INSTALL_PREFIX=$(INSTALL_OPENVINO)
        $(OPENVINO_REPO_DIR)
    workingDirectory: $(BUILD_DIR)

  - script: ls -alR $(OPENVINO_REPO_DIR)/temp/
    displayName: 'List temp SDKs'

  - script: ccache --zero-stats --max-size=50G --show-config
    displayName: 'Clean ccache stats'
    workingDirectory: $(BUILD_OPENVINO)
    displayName: 'CMake OpenVINO ARM plugin'

  - script: |
      export CCACHE_DIR=$(CCACHE_DIR)
      export CCACHE_DIR=$(OPENVINO_CCACHE_DIR)
      export CCACHE_TEMPDIR=$(TMP_DIR)/ccache
      export CCACHE_BASEDIR=$(Pipeline.Workspace)
      export CCACHE_MAXSIZE=50G
      export USE_CCACHE=1
      ninja
    workingDirectory: $(BUILD_DIR)
    displayName: 'Build Linux ARM64'
    workingDirectory: $(BUILD_OPENVINO)
    displayName: 'Build OpenVINO ARM plugin'

  - script: ccache --show-stats
    displayName: 'Show ccache stats'
  - script: ninja install
    workingDirectory: $(BUILD_OPENVINO)
    displayName: 'Install OpenVINO ARM plugin'

  - script: ls -alR $(OPENVINO_REPO_DIR)/bin/
    displayName: 'List binary files'
  - task: CMake@1
    inputs:
      cmakeArgs: >
        -GNinja
        -DInferenceEngineDeveloperPackage_DIR=$(BUILD_OPENVINO)
        -DENABLE_PYTHON=ON
        -DPYTHON_EXECUTABLE=$(INSTALL_PYTHON)/bin/python3.8
        -DPYTHON_INCLUDE_DIRS=$(INSTALL_PYTHON)/include/python3.8
        -DPYTHON_LIBRARIES=$(INSTALL_PYTHON)/lib
        -DPYTHON3_NUMPY_INCLUDE_DIRS=/usr/local/lib/python3.8/site-packages/numpy/core/include
        -DPYTHON_MODULE_EXTENSION=".so"
        -DPYBIND11_FINDPYTHON=OFF
        -DPYBIND11_NOPYTHON=OFF
        -DPYTHONLIBS_FOUND=TRUE
        -DCMAKE_BUILD_TYPE=$(BUILD_TYPE)
        -DENABLE_DATA=OFF
        -DCMAKE_EXE_LINKER_FLAGS=-Wl,-rpath-link,$(INSTALL_OPENCV)/lib
        -DCMAKE_TOOLCHAIN_FILE=$(OPENVINO_REPO_DIR)/cmake/arm64.toolchain.cmake
        -DCMAKE_CXX_COMPILER_LAUNCHER=ccache
        -DCMAKE_C_COMPILER_LAUNCHER=ccache
        -DCMAKE_INSTALL_PREFIX=$(INSTALL_OPENVINO)
        $(OPENVINO_REPO_DIR)/src/bindings/python
    workingDirectory: $(BUILD_OPENVINO_PYTHON)
    displayName: 'CMake OpenVINO python binding'

  - script: |
      export CCACHE_DIR=$(OPENVINO_CCACHE_DIR)
      export CCACHE_TEMPDIR=$(TMP_DIR)/ccache
      export CCACHE_BASEDIR=$(Pipeline.Workspace)
      export CCACHE_MAXSIZE=50G
      export USE_CCACHE=1
      ninja
    workingDirectory: $(BUILD_OPENVINO_PYTHON)
    displayName: 'Build OpenVINO python binding'

  - script: ninja install
    workingDirectory: $(BUILD_OPENVINO_PYTHON)
    displayName: 'Install OpenVINO python binding'

  - task: CMake@1
    inputs:
      cmakeArgs: >
        -GNinja
        -DCMAKE_BUILD_TYPE=$(BUILD_TYPE)
        -DENABLE_PYTHON=ON
        -DPYTHON_EXECUTABLE=/usr/local/bin/python3.8
        -DPYTHON_INCLUDE_DIR=$(INSTALL_PYTHON)/include/python3.8
        -DPYTHON_LIBRARY=$(INSTALL_PYTHON)/lib
        -DCMAKE_TOOLCHAIN_FILE=$(OPENVINO_REPO_DIR)/cmake/arm64.toolchain.cmake
        -DOpenVINO_DIR=$(BUILD_OPENVINO)
        -DInferenceEngine_DIR=$(BUILD_OPENVINO)
        -DOpenCV_DIR=$(INSTALL_OPENCV)/cmake
        -Dngraph_DIR=$(BUILD_OPENVINO)
        -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules
        -DCMAKE_INSTALL_PREFIX=$(INSTALL_OPEN_MODEL_ZOO)
        $(OPENVINO_REPO_DIR)/thirdparty/open_model_zoo/demos
    workingDirectory: $(BUILD_OPEN_MODEL_ZOO)
    displayName: 'CMake Open Model Zoo demos'

  - script: ninja
    workingDirectory: $(BUILD_OPEN_MODEL_ZOO)
    displayName: 'Build Open Model Zoo demos'

  - script: ninja install
    workingDirectory: $(BUILD_OPEN_MODEL_ZOO)
    displayName: 'Install Open Model Zoo demos'

  - script: |
      cp -r $(BUILD_OPEN_MODEL_ZOO)/$(OPENVINO_ARCH)/$(BUILD_TYPE)/* $(INSTALL_OPEN_MODEL_ZOO)/
      zip -9 -r $(Build.ArtifactStagingDirectory)/openvino_$(OPENVINO_ARCH)_linux.zip ./*
    workingDirectory: $(INSTALL_OPENVINO)
    displayName: 'Create OpenVINO ARM64 linux package'

  - task: PublishBuildArtifacts@1
    inputs:
      pathToPublish: $(Build.ArtifactStagingDirectory)
      artifactName: 'openvino_aarch64_linux'
    displayName: 'Publish OpenVINO AArch64 linux package'

@@ -5,7 +5,22 @@ trigger:
    - releases/*
  paths:
    exclude:
    - docs/*
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

pr:
  branches:
    include:
    - master
    - releases/*
  paths:
    exclude:
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

jobs:
- job: LinCC

@@ -4,6 +4,7 @@ resources:
    type: github
    endpoint: openvinotoolkit
    name: openvinotoolkit/openvino_contrib
    ref: releases/2022/1

jobs:
- job: Lin
@@ -79,11 +80,12 @@ jobs:

  - task: CMake@1
    inputs:
      # Coverity has too many PARSE_ERROR errors with ENABLE_FASTER_BUILD=ON. Disabling FASTER_BUILD.
      cmakeArgs: >
        -GNinja
        -DVERBOSE_BUILD=ON
        -DCMAKE_BUILD_TYPE=$(BUILD_TYPE)
        -DENABLE_FASTER_BUILD=ON
        -DENABLE_FASTER_BUILD=OFF
        -DENABLE_STRICT_DEPENDENCIES=OFF
        -DENABLE_REQUIREMENTS_INSTALL=OFF
        -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules
@@ -112,11 +114,6 @@ jobs:
    workingDirectory: $(BUILD_DIR)
    displayName: 'Pack cov-int folder for submission'

  - publish: $(BUILD_DIR)/openvino.tgz
    artifact: openvino.tgz
    continueOnError: true
    displayName: 'Publish submission'

  - script: |
      curl --form token=$(COVERITY_TOKEN) \
        --form email=$(COVERITY_USER) \

@@ -69,9 +69,9 @@ jobs:
  - script: >
      env -C ~/work
      ./buildreleasenolto.sh
      libinference_engine_preproc.so
      ov_intel_cpu_plugin
      ov_intel_gpu_plugin
      libopenvino_gapi_preproc.so
      openvino_intel_cpu_plugin
      openvino_intel_gpu_plugin
      clDNN_unit_tests64
      gpuFuncTests
    displayName: Build Lin

@@ -5,7 +5,22 @@ trigger:
    - releases/*
  paths:
    exclude:
    - docs/*
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

pr:
  branches:
    include:
    - master
    - releases/*
  paths:
    exclude:
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

jobs:
- job: OpenVINO_ONNX_CI

@@ -5,7 +5,22 @@ trigger:
    - releases/*
  paths:
    exclude:
    - docs/*
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

pr:
  branches:
    include:
    - master
    - releases/*
  paths:
    exclude:
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

jobs:
- job: onnxruntime

@@ -5,7 +5,22 @@ trigger:
    - releases/*
  paths:
    exclude:
    - docs/*
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

pr:
  branches:
    include:
    - master
    - releases/*
  paths:
    exclude:
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

resources:
  repositories:
@@ -13,11 +28,13 @@ resources:
    type: github
    endpoint: openvinotoolkit
    name: openvinotoolkit/openvino_contrib
    ref: releases/2022/1

  - repository: testdata
    type: github
    endpoint: openvinotoolkit
    name: openvinotoolkit/testdata
    ref: releases/2022/1

jobs:
- job: Mac
@@ -40,6 +57,8 @@ jobs:
    INSTALL_DIR: $(WORK_DIR)/install_pkg
    INSTALL_TEST_DIR: $(INSTALL_DIR)/tests
    SETUPVARS: $(INSTALL_DIR)/setupvars.sh
    TMP_DIR: /tmp
    CCACHE_DIR: $(WORK_DIR)/ccache/mac

  steps:
  - script: |
@@ -87,6 +106,7 @@ jobs:
      python3 -m pip install -r $(REPO_DIR)/src/core/tests/requirements_test_onnx.txt
      # Speed up build
      brew install ninja
      brew install ccache
      # Speed up tests
      git clone https://github.com/google/gtest-parallel.git
    workingDirectory: $(WORK_DIR)
@@ -96,17 +116,36 @@ jobs:
      export PATH="/usr/local/opt/cython/bin:$PATH"
      export CC=gcc
      export CXX=g++
      cmake -GNinja -DVERBOSE_BUILD=ON -DENABLE_REQUIREMENTS_INSTALL=OFF -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DENABLE_TESTS=ON -DENABLE_STRICT_DEPENDENCIES=OFF -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules $(REPO_DIR)
      cmake -GNinja -DVERBOSE_BUILD=ON -DENABLE_REQUIREMENTS_INSTALL=OFF -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DENABLE_TESTS=OFF -DENABLE_STRICT_DEPENDENCIES=OFF -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules -DCMAKE_CXX_COMPILER_LAUNCHER=ccache -DCMAKE_C_COMPILER_LAUNCHER=ccache $(REPO_DIR)
    workingDirectory: $(BUILD_DIR)
    displayName: 'CMake'

  - script: ls -alR $(REPO_DIR)/temp/
    displayName: 'List temp SDKs'

  - script: ninja
  - task: Cache@2
    inputs:
      key: 'ccache | "$(Agent.OS)"'
      path: $(CCACHE_DIR)
      restoreKeys: |
        ccache | "$(Agent.OS)"
    displayName: Cache

  - script: ccache --zero-stats --max-size=10G --show-config
    displayName: 'Clean ccache stats'

  - script: |
      export CCACHE_DIR=$(CCACHE_DIR)
      export CCACHE_TEMPDIR=$(TMP_DIR)/ccache
      export CCACHE_BASEDIR=$(Pipeline.Workspace)
      export CCACHE_MAXSIZE=10G
      ninja
    workingDirectory: $(BUILD_DIR)
    displayName: 'Build Mac'

  - script: ccache --show-stats
    displayName: 'Show ccache stats'

  - script: ls -alR $(REPO_DIR)/bin/
    displayName: 'List bin files'

@@ -132,34 +171,42 @@ jobs:
    workingDirectory: $(INSTALL_TEST_DIR)
    displayName: 'OV Core UT'
    continueOnError: false
    enabled: false

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/InferenceEngineUnitTests --gtest_print_time=1 --gtest_filter=-MKLDNNGraphStructureTests.TestNoRedundantReordersBeforeDWConvolution:TestConvolution/MKLDNNGraphConvolutionTests.TestsConvolution/0:TestConvolutionDefaultPrimitivesPriority/MKLDNNGraphConvolutionTests.TestsConvolution/0 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml
    displayName: 'IE UT old'
    continueOnError: false
    enabled: false

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml
    displayName: 'IE UT'
    continueOnError: false
    enabled: false

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/cpuUnitTests --gtest_output=xml:TEST-cpuUnitTests.xml
    displayName: 'CPU UT'
    continueOnError: false
    enabled: false

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml
    displayName: 'VPU UT'
    continueOnError: false
    enabled: false

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/onnxImporterUnitTests --gtest_output=xml:TEST-onnxImporterUnitTests.xml
    displayName: 'ONNX Importer UT'
    continueOnError: false
    enabled: false

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ieMultiPluginUnitTests --gtest_output=xml:TEST-ieMultiPluginUnitTests.xml
    displayName: 'MULTI UT'
    continueOnError: false
    enabled: false

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ieFuncTests --gtest_output=xml:TEST-ieFuncTests.xml
    displayName: 'IE FuncTests'
    continueOnError: false
    enabled: false

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/cpuFuncTests --gtest_filter=*smoke*:-smoke_LPT/ReduceMinTransformation.CompareWithRefImpl/f32_Shape* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml
    displayName: 'CPU FuncTests'
@@ -172,6 +219,7 @@ jobs:
      . $(SETUPVARS) && $(INSTALL_TEST_DIR)/InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml
    displayName: 'IE CAPITests'
    continueOnError: false
    enabled: false

  - task: PublishTestResults@2
    condition: always()

@@ -5,7 +5,22 @@ trigger:
    - releases/*
  paths:
    exclude:
    - docs/*
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

pr:
  branches:
    include:
    - master
    - releases/*
  paths:
    exclude:
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

resources:
  repositories:
@@ -13,11 +28,13 @@ resources:
    type: github
    endpoint: openvinotoolkit
    name: openvinotoolkit/openvino_contrib
    ref: releases/2022/1

  - repository: testdata
    type: github
    endpoint: openvinotoolkit
    name: openvinotoolkit/testdata
    ref: releases/2022/1

jobs:
- job: Win
@@ -30,7 +47,7 @@ jobs:
    maxParallel: 2

  # About 150% of total time
  timeoutInMinutes: 120
  timeoutInMinutes: 180

  pool:
    name: WIN_VMSS_VENV_D8S_WU2
@@ -133,7 +150,7 @@ jobs:

  - script: |
      set PATH=$(WORK_DIR)\ninja-win;%PATH%
      call "$(MSVS_VARS_PATH)" && $(CMAKE_CMD) -G "Ninja Multi-Config" -DENABLE_WHEEL=ON -DENABLE_INTEL_GNA=$(CMAKE_BUILD_SHARED_LIBS) -DENABLE_INTEL_GPU=$(CMAKE_BUILD_SHARED_LIBS) -DENABLE_GAPI_PREPROCESSING=$(CMAKE_BUILD_SHARED_LIBS) -DBUILD_SHARED_LIBS=$(CMAKE_BUILD_SHARED_LIBS) -DENABLE_REQUIREMENTS_INSTALL=OFF -DENABLE_FASTER_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DENABLE_STRICT_DEPENDENCIES=OFF -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE="C:\hostedtoolcache\windows\Python\3.7.6\x64\python.exe" -DPYTHON_INCLUDE_DIR="C:\hostedtoolcache\windows\Python\3.7.6\x64\include" -DPYTHON_LIBRARY="C:\hostedtoolcache\windows\Python\3.7.6\x64\libs\python37.lib" -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)\modules -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR)
      call "$(MSVS_VARS_PATH)" && $(CMAKE_CMD) -G "Ninja Multi-Config" -DENABLE_WHEEL=ON -DENABLE_ONEDNN_FOR_GPU=$(CMAKE_BUILD_SHARED_LIBS) -DENABLE_GAPI_PREPROCESSING=$(CMAKE_BUILD_SHARED_LIBS) -DBUILD_SHARED_LIBS=$(CMAKE_BUILD_SHARED_LIBS) -DENABLE_REQUIREMENTS_INSTALL=OFF -DENABLE_FASTER_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DENABLE_STRICT_DEPENDENCIES=OFF -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE="C:\hostedtoolcache\windows\Python\3.7.6\x64\python.exe" -DPYTHON_INCLUDE_DIR="C:\hostedtoolcache\windows\Python\3.7.6\x64\include" -DPYTHON_LIBRARY="C:\hostedtoolcache\windows\Python\3.7.6\x64\libs\python37.lib" -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)\modules -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR)
    workingDirectory: $(BUILD_DIR)
    displayName: 'CMake'

@@ -198,8 +215,8 @@ jobs:
      python -m pytest $(INSTALL_DIR)\tests\smoke_tests\ --env_conf $(INSTALL_DIR)\tests\smoke_tests\env_config.yml -s --junitxml=TEST-SamplesSmokeTests.xml
    workingDirectory: $(INSTALL_DIR)
    displayName: 'Samples Smoke Tests'
    continueOnError: false
    condition: eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'ON')
    continueOnError: false

  - script: rd /Q /S $(BUILD_DIR)
    displayName: 'Clean build dir'
@@ -218,10 +235,10 @@ jobs:
    displayName: 'Tensorflow Frontend UT'
    continueOnError: false

  - script: |
      set PATH=$(IB_DIR);%PATH%
      call $(SETUPVARS) && "$(IB_TESTCONSOLE)" $(INSTALL_TEST_DIR)\InferenceEngineUnitTests.exe --gtest_output=xml:TEST-InferenceEngineUnitTests-IB.xml
    displayName: 'IE UT old - IB'
  # set PATH=$(IB_DIR);%PATH%
  # call $(SETUPVARS) && "$(IB_TESTCONSOLE)" $(INSTALL_TEST_DIR)\InferenceEngineUnitTests.exe --gtest_output=xml:TEST-InferenceEngineUnitTests-IB.xml
  - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\InferenceEngineUnitTests --gtest_output=xml:TEST-InferenceEngineUnitTests.xml
    displayName: 'IE UT old'
    continueOnError: false

  - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml
@@ -235,7 +252,6 @@ jobs:
  - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\gnaUnitTests --gtest_output=xml:TEST-gnaUnitTests.xml
    displayName: 'GNA UT'
    continueOnError: false
    condition: eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'ON')

  - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml
    displayName: 'VPU UT'
@@ -257,11 +273,10 @@ jobs:
    displayName: 'TEMPLATE FuncTests'
    continueOnError: false

  # call $(SETUPVARS) && $(INSTALL_TEST_DIR)\cpuFuncTests.exe --gtest_filter=*smoke* --gtest_output=xml:TEST-cpuFuncTests.xml
  - script: |
      set PATH=$(IB_DIR);%PATH%
      call $(SETUPVARS) && "$(IB_TESTCONSOLE)" $(INSTALL_TEST_DIR)\cpuFuncTests.exe --gtest_filter=*smoke*:-*CompareWithRefs/base_size=16_pre_nms_topn=100_post_nms_topn=100_nms_thresh=0.7_feat_stride=1_min_size=1_ratio*:*smoke_GRUSequenceCommonZeroClip/GRUSequenceTest.CompareWithRefs/mode=CONVERT_TO_TI_MAX_SEQ_LEN_CONST_seq_lengths* --gtest_output=xml:TEST-cpuFuncTests-IB.xml /testlevel=24
    displayName: 'CPU FuncTests - IB'
  # set PATH=$(IB_DIR);%PATH%
  # call $(SETUPVARS) && "$(IB_TESTCONSOLE)" $(INSTALL_TEST_DIR)\cpuFuncTests.exe --gtest_filter=*smoke*:-*CompareWithRefs/base_size=16_pre_nms_topn=100_post_nms_topn=100_nms_thresh=0.7_feat_stride=1_min_size=1_ratio*:*smoke_GRUSequenceCommonZeroClip/GRUSequenceTest.CompareWithRefs/mode=CONVERT_TO_TI_MAX_SEQ_LEN_CONST_seq_lengths* --gtest_output=xml:TEST-cpuFuncTests-IB.xml /testlevel=24
  - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\cpuFuncTests --gtest_filter=*smoke* --gtest_output=xml:TEST-cpuFuncTests.xml
    displayName: 'CPU FuncTests'
    continueOnError: false
    condition: eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'OFF')

@@ -5,7 +5,22 @@ trigger:
    - releases/*
  paths:
    exclude:
    - docs/*
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

pr:
  branches:
    include:
    - master
    - releases/*
  paths:
    exclude:
    - docs/
    - /**/docs/*
    - /**/*.md
    - README.md

jobs:
- job: WinCC

2  .github/workflows/build_doc.yml  vendored
@@ -90,7 +90,7 @@ jobs:
        path: build/docs/sphinx.log

    - name: 'Upload html'
      if: github.event_name == 'push'
      if: always()
      uses: actions/upload-artifact@v2
      with:
        name: openvino_html

1  .github/workflows/code_style.yml  vendored
@@ -82,6 +82,7 @@ jobs:
    - name: Install Clang dependency
      run: |
        sudo apt update
        sudo apt --assume-yes remove clang-7 clang-8 clang-9 clang-10 clang-11
        sudo apt --assume-yes install libclang-12-dev

    - name: Install Python-based dependencies

@@ -34,7 +34,9 @@ endif()
message (STATUS "PROJECT ............................... " ${PROJECT_NAME})
message (STATUS "CMAKE_VERSION ......................... " ${CMAKE_VERSION})
message (STATUS "CMAKE_BINARY_DIR ...................... " ${CMAKE_BINARY_DIR})
message (STATUS "CMAKE_SOURCE_DIR ...................... " ${CMAKE_SOURCE_DIR})
message (STATUS "OpenVINO_SOURCE_DIR ................... " ${OpenVINO_SOURCE_DIR})
message (STATUS "OpenVINO_BINARY_DIR ................... " ${OpenVINO_BINARY_DIR})
message (STATUS "CMAKE_GENERATOR ....................... " ${CMAKE_GENERATOR})
message (STATUS "CMAKE_C_COMPILER_ID ................... " ${CMAKE_C_COMPILER_ID})
message (STATUS "CMAKE_CXX_COMPILER_ID ................. " ${CMAKE_CXX_COMPILER_ID})
@@ -42,7 +44,7 @@ message (STATUS "CMAKE_BUILD_TYPE ...................... " ${CMAKE_BUILD_TYPE})
message (STATUS "CMAKE_TOOLCHAIN_FILE .................. " ${CMAKE_TOOLCHAIN_FILE})

# remove file with exported developer targets to force its regeneration
file(REMOVE "${CMAKE_BINARY_DIR}/ngraph/ngraphTargets.cmake")
file(REMOVE "${CMAKE_BINARY_DIR}/ngraphTargets.cmake")
file(REMOVE "${CMAKE_BINARY_DIR}/InferenceEngineTargets.cmake")
file(REMOVE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake")
foreach(component IN LISTS openvino_export_components)

@@ -47,6 +47,9 @@ Jenkinsfile @openvinotoolkit/openvino-admins
/src/inference/include/ie/cldnn/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/src/inference/include/openvino/runtime/intel_gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/src/plugins/intel_gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/docs/snippets/gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/docs/OV_Runtime_UG/supported_plugins/GPU.md @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/docs/OV_Runtime_UG/supported_plugins/GPU_RemoteTensor_API.md @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers

# IE VPU:
/src/plugins/intel_myriad @openvinotoolkit/openvino-ie-vpu-maintainers
@@ -63,6 +66,9 @@ Jenkinsfile @openvinotoolkit/openvino-admins
/src/plugins/intel_gna/ @openvinotoolkit/openvino-ie-gna-maintainers
/src/inference/include/ie/gna/ @openvinotoolkit/openvino-ie-gna-maintainers

# IE ARM CPU:
/docs/OV_Runtime_UG/supported_plugins/ARM_CPU.md @openvinotoolkit/openvino_contrib-arm_plugin-maintainers

# IE Auto (MULTI) plugin:
/src/plugins/auto/ @openvinotoolkit/openvino-ie-auto-multi-maintainers
/src/inference/include/ie/multi-device/ @openvinotoolkit/openvino-ie-auto-multi-maintainers

68  CONTRIBUTING.md  Normal file
@@ -0,0 +1,68 @@
# How to contribute to the OpenVINO repository

We assume that you are an enthusiastic coder who wants to contribute some code. For that purpose, the OpenVINO project has a repository on GitHub to simplify everybody's life! All bug fixes, new functionality, new tutorials, etc. should be submitted via GitHub's pull request mechanism.

If you are not familiar with the mechanism, do not worry: it is very simple. Keep reading.

## Before you start contributing you should

- Make sure you agree to contribute your code under the [OpenVINO (Apache 2.0)](https://github.com/openvinotoolkit/openvino/blob/master/LICENSE) license.
- If you are submitting a new module, submit it to the [openvino_contrib](https://github.com/openvinotoolkit/openvino_contrib) repository by default.
- If you are going to fix a bug, check that it still exists. This can be done by building the latest [releases/2020/3](https://github.com/openvinotoolkit/openvino/tree/releases/2020/3) branch (LTS release) or the latest master branch, and making sure that the error is still reproducible there. We do not fix bugs that only affect older non-LTS releases such as 2020.2 (more details about the [branching strategy](https://github.com/openvinotoolkit/openvino/wiki/Branches)).
- Make sure that nobody has beaten you to fixing or reporting the issue by searching the [GitHub OpenVINO issues](https://github.com/openvinotoolkit/openvino/issues) page and checking that no one is already working on it. If someone is, you can offer support or suggestions in the issue or in the linked pull request.
- If you have a question about the software, this is **NOT** the right place. Open a question at the [OpenVINO forum](https://community.intel.com/t5/Intel-Distribution-of-OpenVINO/bd-p/distribution-openvino-toolkit) instead. To post a decent question from the start, feel free to read the official forum guidelines.

Before you open up anything on the OpenVINO GitHub page, be sure that you are at the right place with your problem.

## "Fork & Pull Request model" for code contribution

### The instruction in brief

- Register at GitHub. Create your fork of the OpenVINO repository [https://github.com/openvinotoolkit/openvino](https://github.com/openvinotoolkit/openvino) (see [https://help.github.com/articles/fork-a-repo](https://help.github.com/articles/fork-a-repo) for details).
- Install Git.
- Set your user name and email address in the Git configuration according to your GitHub account (see [https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup](https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup) for details).
- Choose a task for yourself. It could be a bugfix or some new code.
- Choose a base branch for your work. More details about branches and policies are here: [Branches](https://github.com/openvinotoolkit/openvino/wiki/Branches)
- Clone your fork to your computer.
- Create a new branch (with a meaningful name) from the base branch you chose.
- Modify / add the code following our [Coding Style Guide](https://github.com/openvinotoolkit/openvino/wiki/CodingStyleGuideLines) and [Documentation guidelines](https://github.com/openvinotoolkit/openvino/wiki/CodingStyleGuideLinesDocumentation).
- If you want to add a new sample, please look at this [Guide for contributing to C++/C/Python IE samples](https://github.com/openvinotoolkit/openvino/wiki/SampleContribute).
- Run the testsuite locally:
  - execute each test binary from the artifacts directory, e.g. `<source dir>/bin/intel64/Release/ieFuncTests`
- If you contribute to the documentation and want to add a new guide:
  - Create a new markdown file in an appropriate folder.
  - **REQUIRED:** The document title must contain a document label in the form `{#openvino_docs_<name>}`. For example: `Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™ {#openvino_docs_MO_DG_IR_and_opsets}`.
  - Add your file to the documentation structure. Open the documentation structure file [`docs/doxygen/ie_docs.xml`](https://github.com/openvinotoolkit/openvino/blob/master/docs/doxygen/ie_docs.xml) and add your file path to the appropriate section.
- When you are done, make sure that your branch is up to date with the latest state of the branch you want to contribute to (e.g. `git fetch upstream && git merge upstream/master`), push your branch to your GitHub fork, and then create a pull request from your branch to the base branch (see [https://help.github.com/articles/using-pull-requests](https://help.github.com/articles/using-pull-requests) for details). A minimal command sketch of this flow follows the list.
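For newcomers, the steps above boil down to a short command sequence. The sketch below is a minimal, illustrative walk-through of the fork-and-PR flow, not an official script; the fork URL, branch name, and test binary path are placeholders to substitute with your own.

```sh
# Minimal fork & PR flow (illustrative; substitute your own fork, branch, and base branch).
git clone https://github.com/<your-user>/openvino.git    # clone your fork
cd openvino
git remote add upstream https://github.com/openvinotoolkit/openvino.git
git config user.name "Your Name"                          # match your GitHub account
git config user.email "you@example.com"
git fetch upstream
git checkout -b my-meaningful-branch upstream/master      # branch off the chosen base

# ... modify / add code following the coding style guide ...

# Run the testsuite locally, e.g. a built test binary from the artifacts directory:
./bin/intel64/Release/ieFuncTests

# Bring the branch up to date, publish it, and open a pull request from the fork.
git fetch upstream && git merge upstream/master
git push -u origin my-meaningful-branch
```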
## Making a good pull request
|
||||
|
||||
Following these guidelines will increase the likelihood of your pull request being accepted:
|
||||
|
||||
- Before pushing your PR to the repository, make sure that it builds perfectly fine on your local system.
|
||||
- Add enough information, like a meaningful title, the reason why you made the commit and a link to the issue page if you opened one for this PR.
|
||||
- Scope your PR to one issue. Before submitting, make sure the diff contains no unrelated changes. If you want to cover more than one issue, submit your changes for each as separate pull requests.
|
||||
- If you have added new functionality, you should update/create the relevant documentation, as well as add tests for it to the testsuite.
|
||||
- Try not to include "oops" commits - ones that just fix an error in the previous commit. If you have those, then before submitting [squash](https://github.com/openvinotoolkit/openvino/wiki/Contribute#https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History#Squashing-Commits) those fixes directly into the commits where they belong.
|
||||
- Make sure to choose the right base branch and to follow the [Coding Style Guide](https://github.com/openvinotoolkit/openvino/wiki/CodingStyleGuideLines) for your code or [Documentation guidelines](https://github.com/openvinotoolkit/openvino/wiki/CodingStyleGuideLinesDocumentation) you are changing documentation files.
|
||||
- Make sure to add test for new functionality or test that reproduces fixed bug with related test data. Please do not add extra images or videos, if some of existing media files are suitable.
|
||||
|
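
A sketch of squashing such a fix before submitting, assuming the fix-up repairs a specific earlier commit; the commit count in `HEAD~3` is illustrative:

```sh
# Record the fix as a fixup of the commit it repairs...
git commit --fixup <sha-of-commit-to-fix>
# ...then fold it into that commit with an interactive autosquash rebase.
git rebase -i --autosquash HEAD~3
```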

## Testing and merging pull requests

- Your pull request will be automatically tested by OpenVINO's precommit (testing statuses are automatically reported as "green" or "red" circles in the precommit steps on the PR's page). If any builders have failed, you should fix the issue. To rerun the automatic builds, just push changes to your branch on GitHub. There is no need to close the pull request and open a new one!
- Once all the builders are "green", one of the OpenVINO developers will review your code. The reviewer may ask you to modify your pull request. Please respond to reviewers in a timely manner (within weeks, not months), otherwise your submission could be postponed or even rejected.

## PR review good practices

- The originator is responsible for driving the review of changes and should ping reviewers periodically.
- The originator should close a comment from the reviewer when it is resolved. The reviewer may re-open the comment if they do not agree with the resolution.
- The originator should request a re-review from the reviewer when all comments are resolved, by pushing the button in the "Reviewers" section.
- If your work is still in progress and you want to check CI test results early, use a _Draft_ PR.
- Do **NOT** rewrite history (`push -f`) once you have converted a draft PR into a regular one; add new commits instead (a sketch follows this list). Looking at diffs makes review easier.
- Write meaningful descriptions for commits resulting from review. _"Addressing review comments"_ is **NOT** a good description! A quick look at good descriptions can tell you much about what is going on in a PR, without the need to go through all the resolved comments.
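
A sketch of addressing review feedback without rewriting history, as recommended above; the file and branch names are illustrative:

```sh
# Push the review fix as a new commit so reviewers can diff it against the history.
git add src/plugins/my_plugin/fix.cpp
git commit -m "Handle empty input tensors reported in review"
git push origin my-feature   # plain push, no --force
```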

## Merging PR

As soon as the reviewer is fine with the pull request and precommit shows a "green" status, the "Approved" review status is set, which signals OpenVINO maintainers that they can merge your pull request.

© Copyright 2018-2022, OpenVINO team

33 README.md
@@ -1,35 +1,33 @@
# OpenVINO™ Toolkit
[](https://github.com/openvinotoolkit/openvino/releases/tag/2021.4.2)
[](https://github.com/openvinotoolkit/openvino/releases/tag/2022.1)
[](LICENSE)



[](https://pepy.tech/project/openvino)

This toolkit allows developers to deploy pre-trained deep learning models
through a high-level C++ Inference Engine API integrated with application logic.
through a high-level OpenVINO™ Runtime C++ and Python APIs integrated with application logic.

This open source version includes several components: namely [Model Optimizer], [nGraph] and
[Inference Engine], as well as CPU, GPU, MYRIAD, multi device and heterogeneous plugins to accelerate deep learning inferencing on Intel® CPUs and Intel® Processor Graphics.
This open source version includes several components: namely [Model Optimizer], [OpenVINO™ Runtime], [Post-Training Optimization Tool], as well as CPU, GPU, MYRIAD, multi device and heterogeneous plugins to accelerate deep learning inferencing on Intel® CPUs and Intel® Processor Graphics.
It supports pre-trained models from the [Open Model Zoo], along with 100+ open
source and public models in popular formats such as Caffe\*, TensorFlow\*,
MXNet\* and ONNX\*.
source and public models in popular formats such as TensorFlow, ONNX, PaddlePaddle, MXNet, Caffe, Kaldi.

## Repository components:
* [Inference Engine]
* [nGraph]
## Repository components
* [OpenVINO™ Runtime]
* [Model Optimizer]
* [Post-Training Optimization Tool]
* [Samples]

## License
Deep Learning Deployment Toolkit is licensed under [Apache License Version 2.0](LICENSE).
By contributing to the project, you agree to the license and copyright terms therein
and release your contribution under these terms.
OpenVINO™ Toolkit is licensed under [Apache License Version 2.0](LICENSE).
By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms.

## Resources:
## Resources
* Docs: https://docs.openvino.ai/
* Wiki: https://github.com/openvinotoolkit/openvino/wiki
* Issue tracking: https://github.com/openvinotoolkit/openvino/issues
* Storage: https://storage.openvinotoolkit.org/
* Additional OpenVINO™ modules: https://github.com/openvinotoolkit/openvino_contrib
* Additional OpenVINO™ toolkit modules: https://github.com/openvinotoolkit/openvino_contrib
* [Intel® Distribution of OpenVINO™ toolkit Product Page](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit.html)
* [Intel® Distribution of OpenVINO™ toolkit Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes)

@@ -44,8 +42,9 @@ Please report questions, issues and suggestions using:

\* Other names and brands may be claimed as the property of others.

[Open Model Zoo]:https://github.com/openvinotoolkit/open_model_zoo
[Inference Engine]:https://software.intel.com/en-us/articles/OpenVINO-InferEngine
[Model Optimizer]:https://software.intel.com/en-us/articles/OpenVINO-ModelOptimizer
[nGraph]:https://docs.openvino.ai/latest/openvino_docs_nGraph_DG_DevGuide.html
[OpenVINO™ Runtime]:https://docs.openvino.ai/latest/openvino_docs_OV_UG_OV_Runtime_User_Guide.html
[Model Optimizer]:https://docs.openvino.ai/latest/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html
[Post-Training Optimization Tool]:https://docs.openvino.ai/latest/pot_introduction.html
[Samples]:https://github.com/openvinotoolkit/openvino/tree/master/samples
[tag on StackOverflow]:https://stackoverflow.com/search?q=%23openvino
@@ -23,14 +23,14 @@ ie_coverage_extract(INPUT "openvino" OUTPUT "legacy"
ie_coverage_genhtml(INFO_FILE "legacy"
PREFIX "${OV_COVERAGE_BASE_DIRECTORY}")

ie_coverage_extract(INPUT "openvino" OUTPUT "ov_hetero_plugin"
ie_coverage_extract(INPUT "openvino" OUTPUT "hetero_plugin"
PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/src/plugins/hetero/*")
ie_coverage_genhtml(INFO_FILE "ov_hetero_plugin"
ie_coverage_genhtml(INFO_FILE "hetero_plugin"
PREFIX "${OV_COVERAGE_BASE_DIRECTORY}")

ie_coverage_extract(INPUT "openvino" OUTPUT "ov_auto_plugin"
ie_coverage_extract(INPUT "openvino" OUTPUT "auto_plugin"
PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/src/plugins/auto/*")
ie_coverage_genhtml(INFO_FILE "ov_auto_plugin"
ie_coverage_genhtml(INFO_FILE "auto_plugin"
PREFIX "${OV_COVERAGE_BASE_DIRECTORY}")

ie_coverage_extract(INPUT "openvino" OUTPUT "preprocessing"
@@ -73,9 +73,9 @@ if (ENABLE_INTEL_GPU)
endif()

if(ENABLE_INTEL_GNA)
ie_coverage_extract(INPUT "openvino" OUTPUT "ov_intel_gna_plugin"
ie_coverage_extract(INPUT "openvino" OUTPUT "intel_gna_plugin"
PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/src/plugins/intel_gna/*")
ie_coverage_genhtml(INFO_FILE "ov_intel_gna_plugin"
ie_coverage_genhtml(INFO_FILE "intel_gna_plugin"
PREFIX "${OV_COVERAGE_BASE_DIRECTORY}")
endif()
@@ -28,12 +28,12 @@ if(COMMAND get_linux_name)
endif()

if(CMAKE_CROSSCOMPILING AND CMAKE_HOST_SYSTEM_NAME MATCHES Linux AND CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
set(protoc_version "3.9.2")
set(protoc_version "3.18.2")

RESOLVE_DEPENDENCY(SYSTEM_PROTOC_ROOT
ARCHIVE_LIN "protoc-${protoc_version}-linux-x86_64.tar.gz"
TARGET_PATH "${TEMP}/protoc-${protoc_version}-linux-x86_64"
SHA256 "1d6da1d97d0cbfcd333558afe24533eb3cb48dc1e0ab5e971aa1e50ede8bcf45"
SHA256 "42fde2b6044c1f74c7e86d4e03b43aac87128ddf57ac6ed8c4eab7a1e21bbf21"
)
debug_message(STATUS "host protoc-${protoc_version} root path = " ${SYSTEM_PROTOC_ROOT})
@@ -269,7 +269,7 @@ include(${OpenVINO_SOURCE_DIR}/src/cmake/ie_parallel.cmake)

if(ENABLE_INTEL_GNA)
reset_deps_cache(
GNA
GNA_EXT_DIR
GNA_PLATFORM_DIR
GNA_KERNEL_LIB_NAME
GNA_LIBS_LIST
@@ -286,12 +286,26 @@ if(ENABLE_INTEL_GNA)
LIST(APPEND FILES_TO_EXTRACT_LIST gna_${GNA_VERSION}/linux)
endif()

RESOLVE_DEPENDENCY(GNA
RESOLVE_DEPENDENCY(GNA_EXT_DIR
ARCHIVE_UNIFIED "GNA/GNA_${GNA_VERSION}.zip"
TARGET_PATH "${TEMP}/gna_${GNA_VERSION}"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+.[0-9]+).*"
FILES_TO_EXTRACT FILES_TO_EXTRACT_LIST
SHA256 ${GNA_HASH})
update_deps_cache(GNA "${GNA}" "Path to GNA root folder")
debug_message(STATUS "gna=" ${GNA})
update_deps_cache(GNA_EXT_DIR "${GNA_EXT_DIR}" "Path to GNA root folder")
debug_message(STATUS "gna=" ${GNA_EXT_DIR})

if (WIN32)
set(GNA_PLATFORM_DIR win64 CACHE STRING "" FORCE)
elseif (UNIX)
set(GNA_PLATFORM_DIR linux CACHE STRING "" FORCE)
else ()
message(FATAL_ERROR "GNA not supported on this platform, only linux, and windows")
endif ()
set(GNA_LIB_DIR x64 CACHE STRING "" FORCE)
set(GNA_PATH ${GNA_EXT_DIR}/${GNA_PLATFORM_DIR}/${GNA_LIB_DIR} CACHE STRING "" FORCE)

if(NOT BUILD_SHARED_LIBS)
list(APPEND PATH_VARS "GNA_PATH")
endif()
endif()
@@ -129,7 +129,7 @@ set(IE_DEBUG_POSTFIX_WIN "d")
set(IE_RELEASE_POSTFIX_WIN "")
set(IE_DEBUG_POSTFIX_LIN "")
set(IE_RELEASE_POSTFIX_LIN "")
set(IE_DEBUG_POSTFIX_MAC "")
set(IE_DEBUG_POSTFIX_MAC "d")
set(IE_RELEASE_POSTFIX_MAC "")

if(WIN32)
@@ -158,16 +158,22 @@ else ()
endif()
add_definitions(-DIE_BUILD_POSTFIX=\"${IE_BUILD_POSTFIX}\")

macro(ov_set_if_not_defined var value)
if(NOT DEFINED ${var})
set(${var} ${value})
endif()
endmacro()

if(NOT UNIX)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
ov_set_if_not_defined(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
ov_set_if_not_defined(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
else()
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/lib)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/lib)
ov_set_if_not_defined(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/lib)
ov_set_if_not_defined(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/lib)
endif()
set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
set(CMAKE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
ov_set_if_not_defined(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
ov_set_if_not_defined(CMAKE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
ov_set_if_not_defined(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})

if(APPLE)
set(CMAKE_MACOSX_RPATH ON)
@@ -206,6 +212,10 @@ endif()

macro(ov_install_static_lib target comp)
if(NOT BUILD_SHARED_LIBS)
get_target_property(target_type ${target} TYPE)
if(${target_type} STREQUAL "STATIC_LIBRARY")
set_target_properties(${target} PROPERTIES EXCLUDE_FROM_ALL FALSE)
endif()
install(TARGETS ${target} EXPORT OpenVINOTargets
ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ${comp} ${ARGN})
endif()
@@ -51,12 +51,6 @@ endfunction()
set(VALIDATED_LIBRARIES "" CACHE INTERNAL "")

function(_ov_add_api_validator_post_build_step)
if(NOT BUILD_SHARED_LIBS)
# since _ov_add_api_validator_post_build_step
# is currently run only on shared libraries, we have nothing to test
return()
endif()

set(UWP_API_VALIDATOR_APIS "${PROGRAMFILES}/Windows Kits/10/build/universalDDIs/x64/UniversalDDIs.xml")
set(UWP_API_VALIDATOR_EXCLUSION "${UWP_SDK_PATH}/BinaryExclusionlist.xml")
@@ -28,9 +28,26 @@ if (ENABLE_UB_SANITIZER)
if (WIN32)
message(FATAL_ERROR "UndefinedBehavior sanitizer is not supported in Windows")
endif()

# TODO: Remove -fno-sanitize=null as thirdparty/ocl/clhpp_headers UBSAN compatibility resolved:
# https://github.com/KhronosGroup/OpenCL-CLHPP/issues/17
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize=undefined -fno-sanitize=null")
# Mute -fsanitize=function Indirect call of a function through a function pointer of the wrong type.
# Sample cases:
# call to function GetAPIVersion through pointer to incorrect function type 'void *(*)()'
# Mute -fsanitize=alignment Use of a misaligned pointer or creation of a misaligned reference. Also sanitizes assume_aligned-like attributes.
# Sample cases:
# VPU_FixedMaxHeapTest.DefaultConstructor test case load of misaligned address 0x62000000187f for type 'const DataType', which requires 4 byte alignment
# Mute -fsanitize=bool Load of a bool value which is neither true nor false.
# Samples cases:
# ie_c_api_version.apiVersion test case load of value 32, which is not a valid value for type 'bool'
# Mute -fsanitize=enum Load of a value of an enumerated type which is not in the range of representable values for that enumerated type.
# Samples cases:
# load of value 4294967295, which is not a valid value for type 'const (anonymous namespace)::onnx::Field'
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize=undefined -fno-sanitize=null -fno-sanitize=alignment -fno-sanitize=bool -fno-sanitize=enum")
if(OV_COMPILER_IS_CLANG)
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fno-sanitize=function")
endif()

if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
# TODO: Remove -Wno-maybe-uninitialized after CVS-61143 fix
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -Wno-maybe-uninitialized")
2 cmake/developer_package/cpplint/cpplint.py (vendored)
@@ -3592,7 +3592,7 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error):
elif not Match(r'#.*include', line):
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
# technically should should flag if at least one side is missing a
# technically should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
if match:
@@ -146,8 +146,6 @@ function (DownloadOrExtractInternal URL archive_path unpacked_path folder fattal

endfunction(DownloadOrExtractInternal)

file(REMOVE ${CMAKE_BINARY_DIR}/dependencies_64.txt)

function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked_path result_path folder fattal resultExt use_alternatives sha256 files_to_extract)
set (archive_path ${TEMP}/download/${archive_name})
set (status "ON")
@@ -164,7 +162,6 @@ function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked
if (${use_alternatives})
set(DEP_INFO "${component}=${URL}")
debug_message (STATUS "DEPENDENCY_URL: ${DEP_INFO}")
file(APPEND ${CMAKE_BINARY_DIR}/dependencies_64.txt "${DEP_INFO}\n")
endif()

debug_message ("checking that unpacked directory exist: ${unpacked_path}")
@@ -3,7 +3,7 @@
#

set(FRONTEND_INSTALL_INCLUDE "runtime/include/")
set(FRONTEND_NAME_PREFIX "ov_")
set(FRONTEND_NAME_PREFIX "openvino_")
set(FRONTEND_NAME_SUFFIX "_frontend")

set(FRONTEND_NAMES "" CACHE INTERNAL "")
@@ -35,7 +35,7 @@ function(ov_generate_frontends_hpp)
endif()

# add frontends to libraries including ov_frontends.hpp
ov_target_link_frontends(ov_runtime)
ov_target_link_frontends(openvino)

set(ov_frontends_hpp "${CMAKE_BINARY_DIR}/src/frontends/common/src/ov_frontends.hpp")
set(frontends_hpp_in "${IEDevScripts_DIR}/frontends/ov_frontends.hpp.in")
@@ -23,7 +23,7 @@ execute_process(
ERROR_VARIABLE error_var)

if(NOT clang_find_result EQUAL "0")
message(WARNING "Please, install libclang-[N]-dev package (required for ncc naming style check)")
message(WARNING "Please, install clang-[N] libclang-[N]-dev package (required for ncc naming style check)")
message(WARNING "find_package(Clang) output: ${output_var}")
message(WARNING "find_package(Clang) error: ${error_var}")
set(ENABLE_NCC_STYLE OFF)
@@ -107,8 +107,11 @@ function(ov_ncc_naming_style)

list(APPEND NCC_STYLE_ADDITIONAL_INCLUDE_DIRECTORIES "${NCC_STYLE_SOURCE_DIRECTORY}")

# without it sources with same name from different directories will map to same .ncc_style target
file(RELATIVE_PATH source_dir_rel ${CMAKE_SOURCE_DIR} ${NCC_STYLE_SOURCE_DIRECTORY})

foreach(source IN LISTS sources)
set(output_file "${ncc_style_bin_dir}/${source}.ncc_style")
set(output_file "${ncc_style_bin_dir}/${source_dir_rel}/${source}.ncc_style")
set(full_source_path "${NCC_STYLE_SOURCE_DIRECTORY}/${source}")

add_custom_command(
@@ -1,8 +1,8 @@
# custom OpenVINO values
CppMethod: '^(operator\W+|[a-z_\d]+|signaling_NaN|quiet_NaN)$'
CppMethod: '^(operator\W+|[a-z_\d]+|signaling_NaN|quiet_NaN|OPENVINO_OP)$'
ClassName: '^([A-Z][\w]+|b?float16|numeric_limits|ngraph_error|stopwatch|unsupported_op)$'
StructName: '^([A-Z][\w]+|element_type_traits|hash|oi_pair)$'
FunctionName: '^(operator\W+|[a-z_\d]+)$'
FunctionName: '^(operator\W+|[a-z_\d]+)|PrintTo$'
Namespace: '^([a-z\d_]+|InferenceEngine)$'
NamespaceAlias: '^([a-z\d_]+|InferenceEngine)$'
UnionName: '[A-Z][\w]+$'
@@ -99,7 +99,7 @@ CxxCatchStatement: '^.*$'
CxxTryStatement: '^.*$'
CxxForRangeStatement: '^.*$'
MsAsmStatement: 'XXXX'
NullStatement: 'XXXX'
NullStatement: '^.*$'
DeclarationStatement: '^.*$'
TranslationUnit: 'XXXX'
UnexposedAttribute: '^.*$'
@@ -15,6 +15,10 @@ function(ie_cpack_set_library_dir)
set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE)
set(IE_CPACK_RUNTIME_PATH runtime/bin/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE)
set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE)
elseif(APPLE)
set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE)
set(IE_CPACK_RUNTIME_PATH runtime/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE)
set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE)
else()
set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE)
set(IE_CPACK_RUNTIME_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE)
@@ -102,32 +102,33 @@ function(ie_add_plugin)
endif()

add_dependencies(ie_plugins ${IE_PLUGIN_NAME})
if(TARGET inference_engine_preproc)
if(TARGET openvino_gapi_preproc)
if(BUILD_SHARED_LIBS)
add_dependencies(${IE_PLUGIN_NAME} inference_engine_preproc)
add_dependencies(${IE_PLUGIN_NAME} openvino_gapi_preproc)
else()
target_link_libraries(${IE_PLUGIN_NAME} PRIVATE inference_engine_preproc)
target_link_libraries(${IE_PLUGIN_NAME} PRIVATE openvino_gapi_preproc)
endif()
endif()

# fake dependencies to build in the following order:
# IE -> IE readers -> IE inference plugins -> IE-based apps
if(BUILD_SHARED_LIBS)
if(TARGET ov_ir_frontend)
add_dependencies(${IE_PLUGIN_NAME} ov_ir_frontend)
if(TARGET openvino_ir_frontend)
add_dependencies(${IE_PLUGIN_NAME} openvino_ir_frontend)
endif()
if(TARGET openvino_onnx_frontend)
add_dependencies(${IE_PLUGIN_NAME} openvino_onnx_frontend)
endif()
if(TARGET openvino_paddle_frontend)
add_dependencies(${IE_PLUGIN_NAME} openvino_paddle_frontend)
endif()
if(TARGET openvino_tensorflow_frontend)
add_dependencies(${IE_PLUGIN_NAME} openvino_tensorflow_frontend)
endif()
# TODO: remove with legacy CNNNLayer API / IR v7
if(TARGET inference_engine_ir_v7_reader)
add_dependencies(${IE_PLUGIN_NAME} inference_engine_ir_v7_reader)
endif()
if(TARGET ov_onnx_frontend)
add_dependencies(${IE_PLUGIN_NAME} ov_onnx_frontend)
endif()
if(TARGET ov_paddle_frontend)
add_dependencies(${IE_PLUGIN_NAME} ov_paddle_frontend)
endif()
if(TARGET ov_tensorflow_frontend)
add_dependencies(${IE_PLUGIN_NAME} ov_tensorflow_frontend)
endif()
endif()

# install rules
@@ -319,7 +320,7 @@ function(ie_generate_plugins_hpp)
endforeach()

# add plugins to libraries including ie_plugins.hpp
ie_target_link_plugins(ov_runtime)
ie_target_link_plugins(openvino)
if(TARGET inference_engine_s)
ie_target_link_plugins(inference_engine_s)
endif()
@@ -346,7 +347,7 @@ function(ie_generate_plugins_hpp)
# for some reason dependency on source files does not work
# so, we have to use explicit target and make it dependency for inference_engine
add_custom_target(_ie_plugins_hpp DEPENDS ${ie_plugins_hpp})
add_dependencies(inference_engine _ie_plugins_hpp)
add_dependencies(inference_engine_obj _ie_plugins_hpp)

# add dependency for object files
get_target_property(sources inference_engine_obj SOURCES)
@@ -82,8 +82,8 @@ function(register_extra_modules)
endif()
endforeach()
if ("${NS}" STREQUAL "openvino")
file(APPEND "${devconfig_file}" "add_library(${NS}::runtime ALIAS ov_runtime)\n")
file(APPEND "${devconfig_file}" "add_library(${NS}::runtime::dev ALIAS ov_runtime_dev)\n")
file(APPEND "${devconfig_file}" "add_library(${NS}::runtime ALIAS openvino)\n")
file(APPEND "${devconfig_file}" "add_library(${NS}::runtime::dev ALIAS openvino_dev)\n")
endif()
endfunction()

@@ -44,7 +44,7 @@ find_dependency(InferenceEngine
NO_DEFAULT_PATH)

find_dependency(ngraph
PATHS "${CMAKE_CURRENT_LIST_DIR}/src/core"
PATHS "${CMAKE_CURRENT_LIST_DIR}"
NO_CMAKE_FIND_ROOT_PATH
NO_DEFAULT_PATH)

@@ -168,7 +168,19 @@ endif()

_ov_find_dependency(Threads)

if(NOT TARGET ov_runtime)
set(ENABLE_INTEL_GNA "@ENABLE_INTEL_GNA@")
set(ENABLE_INTEL_GNA_SHARED "@BUILD_SHARED_LIBS@")
if(ENABLE_INTEL_GNA AND NOT ENABLE_INTEL_GNA_SHARED AND NOT libGNA_FOUND)
set_and_check(GNA_PATH "@PACKAGE_GNA_PATH@")
_ov_find_dependency(libGNA
COMPONENTS KERNEL
CONFIG
PATHS ${CMAKE_CURRENT_LIST_DIR}
NO_CMAKE_FIND_ROOT_PATH
NO_DEFAULT_PATH)
endif()

if(NOT TARGET openvino)
set(_ov_as_external_package ON)
include("${CMAKE_CURRENT_LIST_DIR}/OpenVINOTargets.cmake")

@@ -224,6 +236,7 @@ if(_need_package_name_reset)
unset(_need_package_name_reset)
endif()

unset(${CMAKE_FIND_PACKAGE_NAME}_IR_FOUND)
unset(${CMAKE_FIND_PACKAGE_NAME}_Paddle_FOUND)
unset(${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND)
unset(${CMAKE_FIND_PACKAGE_NAME}_TensorFlow_FOUND)
@@ -26,11 +26,16 @@
#
# Frontends:
#
# ngraph_ov_onnx_frontend_FOUND - True if the system has ov_onnx_frontend library
# ngraph::ov_onnx_frontend - ONNX FrontEnd target (optional)
# ngraph_onnx_frontend_FOUND - True if the system has ngraph::onnx_frontend library
# ngraph::onnx_frontend - ONNX FrontEnd target (optional)
#
# ngraph_paddle_frontend_FOUND - True if the system has Paddle frontend
# ngraph::ov_paddle_frontend - nGraph Paddle frontend (optional)
# ngraph_paddle_frontend_FOUND - True if the system has Paddle frontend
# ngraph::paddle_frontend - nGraph Paddle frontend (optional)
#
# ngraph_ir_frontend_FOUND - True if the system has OpenVINO IR frontend
#
# ngraph_tensorflow_frontend_FOUND - True if the system has TensorFlow frontend
# ngraph::tensorflow_frontend - nGraph TensorFlow frontend (optional)
#

@PACKAGE_INIT@
@@ -50,43 +55,46 @@ if(TARGET openvino::runtime AND NOT TARGET ngraph::ngraph)
INTERFACE_LINK_LIBRARIES openvino::runtime)
endif()

if(TARGET openvino::frontend::onnx AND NOT TARGET ngraph::ov_onnx_frontend)
add_library(ngraph::ov_onnx_frontend INTERFACE IMPORTED)
set_target_properties(ngraph::ov_onnx_frontend PROPERTIES
if(TARGET openvino::frontend::onnx AND NOT TARGET ngraph::onnx_frontend)
add_library(ngraph::onnx_frontend INTERFACE IMPORTED)
set_target_properties(ngraph::onnx_frontend PROPERTIES
INTERFACE_LINK_LIBRARIES openvino::frontend::onnx)
endif()

if(TARGET openvino::frontend::paddle AND NOT TARGET ngraph::ov_paddle_frontend)
add_library(ngraph::ov_paddle_frontend INTERFACE IMPORTED)
set_target_properties(ngraph::ov_paddle_frontend PROPERTIES
if(TARGET openvino::frontend::paddle AND NOT TARGET ngraph::paddle_frontend)
add_library(ngraph::paddle_frontend INTERFACE IMPORTED)
set_target_properties(ngraph::paddle_frontend PROPERTIES
INTERFACE_LINK_LIBRARIES openvino::frontend::paddle)
endif()

if(TARGET openvino::frontend::tensorflow AND NOT TARGET ngraph::ov_tensorflow_frontend)
add_library(ngraph::ov_tensorflow_frontend INTERFACE IMPORTED)
set_target_properties(ngraph::ov_tensorflow_frontend PROPERTIES
if(TARGET openvino::frontend::tensorflow AND NOT TARGET ngraph::tensorflow_frontend)
add_library(ngraph::tensorflow_frontend INTERFACE IMPORTED)
set_target_properties(ngraph::tensorflow_frontend PROPERTIES
INTERFACE_LINK_LIBRARIES openvino::frontend::tensorflow)
endif()

set(ngraph_ngraph_FOUND ON)
set(NGRAPH_LIBRARIES ngraph::ngraph)

set(ngraph_ov_onnx_frontend_FOUND ${OpenVINO_Frontend_ONNX_FOUND})
set(ngraph_onnx_frontend_FOUND ${OpenVINO_Frontend_ONNX_FOUND})
set(ngraph_tensorflow_frontend_FOUND ${OpenVINO_Frontend_TensorFlow_FOUND})
set(ngraph_paddle_frontend_FOUND ${OpenVINO_Frontend_Paddle_FOUND})
set(ngraph_onnx_importer_FOUND ${OpenVINO_Frontend_ONNX_FOUND})

if(ngraph_onnx_importer_FOUND)
set(ONNX_IMPORTER_LIBRARIES ngraph::ov_onnx_frontend)
set(ONNX_IMPORTER_LIBRARIES ngraph::onnx_frontend)
# ngraph::onnx_importer target and variables are deprecated
# but need to create a dummy target for BW compatibility
if(NOT TARGET ngraph::onnx_importer)
add_library(ngraph::onnx_importer INTERFACE IMPORTED)
set_target_properties(ngraph::onnx_importer PROPERTIES
INTERFACE_LINK_LIBRARIES ngraph::ov_onnx_frontend)
INTERFACE_LINK_LIBRARIES ngraph::onnx_frontend)
endif()
endif()

set(ngraph_paddle_frontend_FOUND ${OpenVINO_Frontend_Paddle_FOUND})
set(ngraph_tensorflow_frontend_FOUND ${OpenVINO_Frontend_TensorFlow_FOUND})
set(ngraph_onnx_frontend_FOUND ${OpenVINO_Frontend_ONNX_FOUND})
set(ngraph_ir_frontend_FOUND ${OpenVINO_Frontend_IR_FOUND})

check_required_components(ngraph)
@@ -86,11 +86,6 @@ ov_model_convert("${OpenVINO_SOURCE_DIR}/${rel_path}"
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test_model_zoo/onnx_import"
ie_onnx_import_out_files)

set(rel_path "docs/onnx_custom_op")
ov_model_convert("${OpenVINO_SOURCE_DIR}/${rel_path}"
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test_model_zoo/docs/models"
docs_onnx_out_files)

if(ENABLE_TESTS)
if(ENABLE_OV_ONNX_FRONTEND AND ENABLE_REQUIREMENTS_INSTALL)
find_package(PythonInterp 3 REQUIRED)

@@ -25,7 +25,7 @@ endif()
if(use_static_runtime)
foreach(lang C CXX)
foreach(build_type "" "_DEBUG" "_MINSIZEREL" "_RELEASE" "_RELWITHDEBINFO")
set(flag_var "CMAKE_${lang}_FLAGS${build_type}")
set(flag_var "CMAKE_${lang}_FLAGS${build_type}_INIT")
string(REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
endforeach()
endforeach()
@@ -1,41 +0,0 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

if(DEFINED OECORE_BASE_DIR)
# OECORE_BASE_DIR was passed via CMake command line, nothing to do
elseif(DEFINED ENV{OECORE_BASE_DIR})
# User sets OECORE_BASE_DIR environment variable
set(OECORE_BASE_DIR $ENV{OECORE_BASE_DIR})
elseif(DEFINED ENV{OECORE_NATIVE_SYSROOT})
# OECORE_NATIVE_SYSROOT is a default environment variable for the OECore toolchain
set(OECORE_BASE_DIR "$ENV{OECORE_NATIVE_SYSROOT}/../..")
else()
# Use default value
set(OECORE_BASE_DIR "/usr/local/oecore-x86_64")
endif()

set(OECORE_TARGET_NAME "aarch64-ese-linux")
set(OECORE_TARGET_SYSROOT "${OECORE_BASE_DIR}/sysroots/${OECORE_TARGET_NAME}")
set(OECORE_HOST_SYSROOT "${OECORE_BASE_DIR}/sysroots/x86_64-esesdk-linux")
set(OECORE_HOST_COMPILER_BIN_DIR "${OECORE_HOST_SYSROOT}/usr/bin/${OECORE_TARGET_NAME}")

set(CMAKE_SYSTEM_NAME "Linux")
set(CMAKE_SYSTEM_PROCESSOR "aarch64")

set(CMAKE_SYSROOT "${OECORE_TARGET_SYSROOT}")

set(CMAKE_C_COMPILER "${OECORE_HOST_COMPILER_BIN_DIR}/aarch64-ese-linux-gcc")
set(CMAKE_CXX_COMPILER "${OECORE_HOST_COMPILER_BIN_DIR}/aarch64-ese-linux-g++")

set(CMAKE_C_FLAGS_INIT "-mcpu=cortex-a53 -mtune=cortex-a53 --sysroot=${OECORE_TARGET_SYSROOT}")
set(CMAKE_CXX_FLAGS_INIT "-mcpu=cortex-a53 -mtune=cortex-a53 --sysroot=${OECORE_TARGET_SYSROOT}")

set(CMAKE_EXE_LINKER_FLAGS_INIT "-Wl,-O1 -Wl,--hash-style=gnu -Wl,--as-needed --sysroot=${OECORE_TARGET_SYSROOT}")
set(CMAKE_SHARED_LINKER_FLAGS_INIT "-Wl,-O1 -Wl,--hash-style=gnu -Wl,--as-needed --sysroot=${OECORE_TARGET_SYSROOT}")
set(CMAKE_MODULE_LINKER_FLAGS_INIT "-Wl,-O1 -Wl,--hash-style=gnu -Wl,--as-needed --sysroot=${OECORE_TARGET_SYSROOT}")

set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
@@ -35,14 +35,14 @@ if(_onecoreuap_arch STREQUAL "x64")
# Forcefull make VS search for C++ libraries in these folders prior to other c++ standard libraries localizations.
add_link_options("/LIBPATH:\"\$\(VC_LibraryPath_VC_x64_OneCore\)\"")

set(CMAKE_C_STANDARD_LIBRARIES "\$\(UCRTContentRoot\)lib/\$\(TargetUniversalCRTVersion\)/um/\$\(Platform\)/OneCoreUap.lib" CACHE STRING "" FORCE)
set(CMAKE_CXX_STANDARD_LIBRARIES "\$\(UCRTContentRoot\)lib/\$\(TargetUniversalCRTVersion\)/um/\$\(Platform\)/OneCoreUap.lib" CACHE STRING "" FORCE)
set(CMAKE_C_STANDARD_LIBRARIES_INIT "\$\(UCRTContentRoot\)lib/\$\(TargetUniversalCRTVersion\)/um/\$\(Platform\)/OneCoreUap.lib" CACHE STRING "" FORCE)
set(CMAKE_CXX_STANDARD_LIBRARIES_INIT "\$\(UCRTContentRoot\)lib/\$\(TargetUniversalCRTVersion\)/um/\$\(Platform\)/OneCoreUap.lib" CACHE STRING "" FORCE)
elseif(_onecoreuap_arch STREQUAL "X86")
add_link_options("/LIBPATH:\"\$\(VCInstallDir\)lib/onecore\"")
add_link_options("/LIBPATH:\"\$\(VC_LibraryPath_VC_x86_OneCore\)\"")

set(CMAKE_C_STANDARD_LIBRARIES "\$\(UCRTContentRoot\)lib/\$\(TargetUniversalCRTVersion\)/um/x86/OneCoreUap.lib" CACHE STRING "" FORCE)
set(CMAKE_CXX_STANDARD_LIBRARIES "\$\(UCRTContentRoot\)lib/\$\(TargetUniversalCRTVersion\)/um/x86/OneCoreUap.lib" CACHE STRING "" FORCE)
set(CMAKE_C_STANDARD_LIBRARIES_INIT "\$\(UCRTContentRoot\)lib/\$\(TargetUniversalCRTVersion\)/um/x86/OneCoreUap.lib" CACHE STRING "" FORCE)
set(CMAKE_CXX_STANDARD_LIBRARIES_INIT "\$\(UCRTContentRoot\)lib/\$\(TargetUniversalCRTVersion\)/um/x86/OneCoreUap.lib" CACHE STRING "" FORCE)
else()
message(FATAL_ERROR "Unsupported architecture ${_onecoreuap_arch}. Only X86 or X86_64 are supported")
endif()
@@ -52,8 +52,8 @@ unset(_onecoreuap_arch)
# compile flags

set(includes "/I\"\$\(UniversalCRT_IncludePath\)\"")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${includes}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${includes}")
set(CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS_INIT} ${includes}")
set(CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT} ${includes}")
unset(includes)

# linker flags
@@ -62,9 +62,9 @@ foreach(lib kernel32 user32 advapi32 ole32 mscoree combase)
set(linker_flags "/NODEFAULTLIB:${lib}.lib ${linker_flags}")
endforeach()

set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${linker_flags}")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${linker_flags}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${linker_flags}")
set(CMAKE_SHARED_LINKER_FLAGS_INIT "${CMAKE_SHARED_LINKER_FLAGS_INIT} ${linker_flags}")
set(CMAKE_MODULE_LINKER_FLAGS_INIT "${CMAKE_MODULE_LINKER_FLAGS_INIT} ${linker_flags}")
set(CMAKE_EXE_LINKER_FLAGS_INIT "${CMAKE_EXE_LINKER_FLAGS_INIT} ${linker_flags}")
unset(linker_flags)

#
@@ -7,8 +7,6 @@ if(NOT ENABLE_DOCKER)
ie_add_compiler_flags(-Wall)
endif()

add_subdirectory(snippets)

# Detect OpenVINO
find_package(OpenVINO QUIET
PATHS "${CMAKE_BINARY_DIR}"
@@ -17,14 +15,13 @@ if(NOT ENABLE_DOCKER)
set(OpenVINO_DIR ${CMAKE_BINARY_DIR})
endif()

if(ENABLE_OV_ONNX_FRONTEND)
add_subdirectory(onnx_custom_op)
endif()
add_subdirectory(snippets)

add_subdirectory(template_extension)

set(all_docs_targets
ie_docs_snippets ov_template_func_tests
template_extension ov_template_extension ov_template_plugin)
template_extension openvino_template_extension openvino_template_plugin)
foreach(target_name IN LISTS all_docs_targets)
if(TARGET ${target_name})
set_target_properties(${target_name} PROPERTIES FOLDER docs)
@@ -36,7 +33,7 @@ if(NOT ENABLE_DOCKER)

# install

foreach(target ov_template_plugin template_extension ov_template_extension)
foreach(target openvino_template_plugin template_extension openvino_template_extension)
if(TARGET ${target})
install(TARGETS ${target}
LIBRARY DESTINATION ${IE_CPACK_RUNTIME_PATH}
@@ -49,9 +46,9 @@ endif()
set(LINKCHECKER_PY "" CACHE FILEPATH "Path to linkchecker.py for documentation check dir.")
set(ENABLE_OPENVINO_NOTEBOOKS OFF CACHE BOOL "Build with openvino notebooks")
set(OMZ_DOCS_DIR "" CACHE PATH "Path to open_model_zoo documentation dir.")
set(OTE_DOCS_DIR "" CACHE PATH "Path to training_extensions documentation dir.")
set(WORKBENCH_DOCS_DIR "" CACHE PATH "Path to workbench documentation dir.")
set(OVMS_DOCS_DIR "" CACHE PATH "Path to model server documentation dir.")
set(GST_DOCS_DIR "" CACHE PATH "Path to gst-video-analytics documentation dir.")
set(GRAPH_CSV_DIR "" CACHE PATH "Path to the folder containing csv data for rendering graphs.")

function(build_docs)
@@ -89,6 +86,8 @@ function(build_docs)

# Sphinx folders, doxyrest templates and config
set(SPHINX_CONF_IN "${DOCS_SOURCE_DIR}/conf.py")
set(SPHINX_TEMPLATES_IN "${DOCS_SOURCE_DIR}/_templates")
set(SPHINX_TEMPLATES_OUT "${RST_OUTPUT}/_templates")
set(SPHINX_CONF_OUT "${RST_OUTPUT}/conf.py")
set(SPHINX_STATIC_IN "${DOCS_SOURCE_DIR}/_static")
set(SPHINX_STATIC_OUT "${RST_OUTPUT}/_static")
@@ -132,6 +131,16 @@ function(build_docs)
)
endif()

list(APPEND commands
COMMAND ${CMAKE_COMMAND} -E copy ${API_DOCS_IN}/api_reference.rst ${API_DOCS_OUT}/api_reference.rst
)

if(ENABLE_PYTHON)
list(APPEND commands
COMMAND ${CMAKE_COMMAND} -E copy_directory ${API_DOCS_IN}/ie_python_api ${API_DOCS_OUT}/ie_python_api
)
endif()

# omz doc files
if(EXISTS "${OMZ_DOCS_DIR}")
get_filename_component(OMZ_DOCS_DIR "${OMZ_DOCS_DIR}" ABSOLUTE)
@@ -151,6 +160,15 @@ function(build_docs)
--output_dir=${DOCS_BUILD_DIR}/workbench)
endif()

# ote doc files
if(EXISTS "${OTE_DOCS_DIR}")
get_filename_component(WORKBENCH_DOCS_DIR "${OTE_DOCS_DIR}" ABSOLUTE)

list(APPEND commands COMMAND ${PYTHON_EXECUTABLE} ${DOXY_MD_FILTER}
--input_dir=${OTE_DOCS_DIR}
--output_dir=${DOCS_BUILD_DIR}/ote)
endif()

# ovms doc files
if(EXISTS "${OVMS_DOCS_DIR}")
get_filename_component(OVMS_DOCS_DIR "${OVMS_DOCS_DIR}" ABSOLUTE)
@@ -160,14 +178,6 @@ function(build_docs)
--output_dir=${DOCS_BUILD_DIR}/ovms)
endif()

# gst doc files
if(EXISTS "${GST_DOCS_DIR}")
get_filename_component(GST_DOCS_DIR "${GST_DOCS_DIR}" ABSOLUTE)
list(APPEND commands COMMAND ${PYTHON_EXECUTABLE} ${DOXY_MD_FILTER}
--input_dir=${GST_DOCS_DIR}
--output_dir=${DOCS_BUILD_DIR}/gst)
endif()

add_custom_target(preprocess_docs
COMMENT "Preprocess documentation"
VERBATIM)
@@ -197,7 +207,7 @@ function(build_docs)
COMMAND ${PYTHON_EXECUTABLE} ${COPY_IMAGES_SCRIPT} ${XML_OUTPUT} ${RST_OUTPUT}
COMMAND ${PYTHON_EXECUTABLE} ${DOXYGEN_MAPPING_SCRIPT} ${XML_OUTPUT} ${DOCS_BUILD_DIR} ${OpenVINO_SOURCE_DIR}/../
COMMAND ${CMAKE_COMMAND} -E copy ${SPHINX_INDEX_IN} ${SPHINX_INDEX_OUT}
COMMAND ${CMAKE_COMMAND} -E copy_directory ${API_DOCS_IN} ${API_DOCS_OUT}
COMMAND ${CMAKE_COMMAND} -E copy_directory ${SPHINX_TEMPLATES_IN} ${SPHINX_TEMPLATES_OUT}
COMMAND ${CMAKE_COMMAND} -E copy_directory ${DOXYREST_IN} ${DOXYREST_OUT}
COMMAND ${CMAKE_COMMAND} -E copy_directory ${DOXYREST_SPHINX_IN} ${DOXYREST_SPHINX_OUT}
COMMAND ${CMAKE_COMMAND} -E copy_directory ${SPHINX_STATIC_IN} ${SPHINX_STATIC_OUT}
@@ -264,6 +264,10 @@ TAB_SIZE = 4
ALIASES = "ref_ie{1}=@ref InferenceEngine::\1 \"\1\""
ALIASES += sphinxdirective="\n\xmlonly<sphinxdirective>"
ALIASES += endsphinxdirective="</sphinxdirective>\endxmlonly"
ALIASES += sphinxtabset="\n\xmlonly<sphinxtabset></sphinxtabset>\endxmlonly\n"
ALIASES += endsphinxtabset="\n\xmlonly<endsphinxtabset></endsphinxtabset>\endxmlonly\n"
ALIASES += sphinxtab{1}="\n\xmlonly<sphinxtab>\1</sphinxtab>\endxmlonly\n"
ALIASES += endsphinxtab="\n\xmlonly<endsphinxtab></endsphinxtab>\endxmlonly\n"

# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
@@ -719,7 +723,7 @@ SHOW_NAMESPACES = YES
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from
# the version control system). Doxygen will invoke the program by executing (via
# popen()) the command command input-file, where command is the value of the
# popen()) the command input-file, where command is the value of the
# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
# by doxygen. Whatever the program writes to standard output is used as the file
# version. For an example see the documentation.
@@ -843,16 +847,6 @@ INPUT = "@MARKDOWN_INPUT@" \
"@OpenVINO_SOURCE_DIR@/src/common/transformations/include/" \
"@OpenVINO_SOURCE_DIR@/src/common/util/include/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/descriptor" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/op/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/op/util" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/opsets/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/pass/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/pattern/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/pattern/op/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/runtime/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/type/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/openvino/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/openvino/core/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/openvino/core/descriptor/" \
@@ -917,7 +911,9 @@ RECURSIVE = YES
# Note that relative paths are relative to the directory from which doxygen is
# run.

EXCLUDE =
EXCLUDE = "@OpenVINO_SOURCE_DIR@/thirdparty" \
"@OpenVINO_SOURCE_DIR@/temp" \
"@OpenVINO_SOURCE_DIR@/bin"

# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
@@ -936,7 +932,6 @@ EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS = */temp/* \
*/bin/* \
*/tests/* \
*/openvx/* \
*/thirdparty/* \
"@DOXYREST_OUT@" \
"@XML_OUTPUT@" \
@@ -1045,7 +1040,6 @@ EXCLUDE_SYMBOLS = InferenceEngine::details \
EXAMPLE_PATH = "@OpenVINO_SOURCE_DIR@" \
"@OpenVINO_SOURCE_DIR@/docs/HOWTO/" \
"@OpenVINO_SOURCE_DIR@/docs/" \
"@OpenVINO_SOURCE_DIR@/docs/onnx_custom_op/" \
"@OpenVINO_SOURCE_DIR@/docs/template_extension/" \
"@OpenVINO_SOURCE_DIR@/docs/template_extension/old/" \
"@OpenVINO_SOURCE_DIR@/docs/template_extension/new/" \
@@ -1,17 +1,27 @@
# How to Implement Custom GPU Operations {#openvino_docs_IE_DG_Extensibility_DG_GPU_Kernel}
# How to Implement Custom GPU Operations {#openvino_docs_Extensibility_UG_GPU}

To enable operations not supported by OpenVINO™ out of the box, you need a custom extension for Model Optimizer, a custom nGraph operation set, and a custom kernel for the device you will target. This page describes custom kernel support for the GPU device.
To enable operations not supported by OpenVINO out of the box, you may need an extension for an OpenVINO operation set, and a custom kernel for the device you will target. This page describes custom kernel support for the GPU device.

The GPU codepath abstracts many details about OpenCL\*. You need to provide the kernel code in OpenCL C and an XML configuration file that connects the kernel and its parameters to the parameters of the operation.
The GPU codepath abstracts many details about OpenCL. You need to provide the kernel code in OpenCL C and an XML configuration file that connects the kernel and its parameters to the parameters of the operation.

There are two options for using the custom operation configuration file:

* Include a section with your kernels into the global automatically-loaded `cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml` file, which is hosted in the `<INSTALL_DIR>/runtime/bin` folder
* Call the `InferenceEngine::Core::SetConfig()` method from your application with the `InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE` key and the configuration file name as a value before loading the network that uses custom operations to the plugin:
* Include a section with your kernels into the automatically-loaded `<lib_path>/cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml` file.
* Call the `ov::Core::set_property()` method from your application with the `"CONFIG_FILE"` key and the configuration file name as a value before loading the network that uses custom operations to the plugin:

@snippet snippets/GPU_Kernel.cpp part0
@sphinxtabset

@sphinxtab{C++}
@snippet docs/snippets/gpu/custom_kernels_api.cpp part0
@endsphinxtab

@sphinxtab{Python}
@snippet docs/snippets/gpu/custom_kernels_api.py part0
@endsphinxtab

@endsphinxtabset

All Inference Engine samples, except the trivial `hello_classification`, and most Open Model Zoo demos
All OpenVINO samples, except the trivial `hello_classification`, and most Open Model Zoo demos
feature a dedicated command-line option `-c` to load custom kernels. For example, to load custom operations for the classification sample, run the command below:
```sh
$ ./classification_sample -m <path_to_model>/bvlc_alexnet_fp16.xml -i ./validation_set/daily/227x227/apron.bmp -d GPU
@@ -21,7 +31,7 @@ $ ./classification_sample -m <path_to_model>/bvlc_alexnet_fp16.xml -i ./validati
## Configuration File Format <a name="config-file-format"></a>

The configuration file is expected to follow the `.xml` file structure
with a node of the type `CustomLayer` for every custom operation you provide.
with a node of the `CustomLayer` type for every custom operation you provide.

The definitions described in the sections below use the following notations:

@@ -47,8 +57,7 @@ Notation | Description

### Kernel Node and Sub-Node Structure

`Kernel` node contains all kernel source code configuration. No kernel
node structure exists.
`Kernel` node contains all kernel source code configuration.

**Sub-nodes**: `Source` (1+), `Define` (0+)

@@ -134,7 +143,7 @@ queuing an OpenCL program for execution.

## Example Configuration File

The following code sample provides an example configuration file in XML
The following code sample provides an example configuration file in XML
format. For information on the configuration file structure, see
[Configuration File Format](#config-file-format).
```xml
@@ -155,8 +164,7 @@ format. For information on the configuration file structure, see
## Built-In Definitions for Custom Layers

The following table includes definitions that are attached before
user sources, where `<TENSOR>` is the actual input and output, for
example, `INPUT0` or `OUTPUT0`.
user sources.

For an example, see [Example Kernel](#example-kernel).

@@ -170,19 +178,20 @@ For an example, see [Example Kernel](#example-kernel).
| `<TENSOR>_DIMS`| An array of the tensor dimension sizes. Always ordered as `BFYX` |
| `<TENSOR>_DIMS_SIZE`| The size of the `<TENSOR>_DIMS` array.|
| `<TENSOR>_TYPE`| The datatype of the tensor: `float`, `half`, or `char`|
| `<TENSOR>_FORMAT_` | The format of the tensor, BFYX, BYXF, YXFB, FYXB, or ANY. The format is concatenated to the defined name. You can use the tensor format to define codepaths in your code with `#‍ifdef/#‍endif`. |
| `<TENSOR>_FORMAT_<TENSOR_FORMAT>` | The format of the tensor, BFYX, BYXF, YXFB, FYXB, or ANY. The format is concatenated to the defined name. You can use the tensor format to define codepaths in your code with `#‍ifdef/#‍endif`. |
| `<TENSOR>_LOWER_PADDING` | An array of padding elements used for the tensor dimensions before they start. Always ordered as BFYX.|
| `<TENSOR>_ LOWER_PADDING_SIZE` | The size of the `<TENSOR>_LOWER_PADDING` array |
| `<TENSOR>_LOWER_PADDING_SIZE` | The size of the `<TENSOR>_LOWER_PADDING` array |
| `<TENSOR>_UPPER_PADDING` | An array of padding elements used for the tensor dimensions after they end. Always ordered as BFYX. |
| `<TENSOR>_UPPER_PADDING_SIZE` | The size of the `<TENSOR>_UPPER_PADDING` array |
| `<TENSOR>_PITCHES` | The number of elements between adjacent elements in each dimension. Always ordered as BFYX.|
| `<TENSOR>_PITCHES` | The offset (in elements) between adjacent elements in each dimension. Always ordered as BFYX.|
| `<TENSOR>_PITCHES_SIZE`| The size of the `<TENSOR>_PITCHES` array |
| `<TENSOR>_OFFSET`| The number of elements from the start of the tensor to the first valid element, bypassing the lower padding. |

All `<TENSOR>` values are automatically defined for every tensor
bound to this operation, such as `INPUT0`, `INPUT1`, and `OUTPUT0`, as shown
in the following example:

```sh
```c
#define INPUT0_DIMS_SIZE 4
#define INPUT0_DIMS (int []){ 1,96,55,55, }
```
@@ -197,28 +206,25 @@ __kernel void example_relu_kernel(
{
    const uint idx = get_global_id(0);
    const uint idy = get_global_id(1);
    const uint idbf = get_global_id(2);//batches*features, as OpenCL supports 3D nd-ranges only
    const uint feature = idbf%OUTPUT0_DIMS[1];
    const uint batch = idbf/OUTPUT0_DIMS[1];
    const uint idbf = get_global_id(2); // batches*features, as OpenCL supports 3D nd-ranges only
    const uint feature = idbf % OUTPUT0_DIMS[1];
    const uint batch = idbf / OUTPUT0_DIMS[1];
    //notice that pitches are in elements, not in bytes!
    const uint in_id = batch*INPUT0_PITCHES[0] + feature*INPUT0_PITCHES[1] + idy*INPUT0_PITCHES[2] + idx*INPUT0_PITCHES[3] + INPUT0_OFFSET;
    const uint out_id = batch*OUTPUT0_PITCHES[0] + feature*OUTPUT0_PITCHES[1] + idy*OUTPUT0_PITCHES[2] + idx*OUTPUT0_PITCHES[3] + OUTPUT0_OFFSET;

    INPUT0_TYPE value = input0[in_id];
    //neg_slope (which is non-zero for leaky ReLU) is put automatically as #define, refer to the config xml
    // neg_slope (which is non-zero for leaky ReLU) is put automatically as #define, refer to the config xml
    output[out_id] = value < 0 ? value * neg_slope : value;
}
```

> **NOTE**: As described in the previous section, all items like
> `INPUT0_TYPE` are actually defined as OpenCL (pre-)compiler inputs by
> the Inference Engine for efficiency reasons. See [Debugging
> OpenVINO for efficiency reasons. See [Debugging
> Tips](#debugging-tips) for information on debugging the results.

> **NOTE**: Several GPU-targeted kernels are also added to the binaries upon compilation of samples
> so that the sample application can easily load them.
> Refer to the `cldnn_global_custom_kernels` folder in the GPU plugin installation directory.

## Debugging Tips<a name="debugging-tips"></a>

* **Using `printf` in the OpenCL™ Kernels**.
171 docs/Extensibility_UG/Intro.md (new file)
@@ -0,0 +1,171 @@
# OpenVINO Extensibility Mechanism {#openvino_docs_Extensibility_UG_Intro}

@sphinxdirective

.. toctree::
   :maxdepth: 1
   :hidden:

   openvino_docs_Extensibility_UG_add_openvino_ops
   openvino_docs_Extensibility_UG_Frontend_Extensions
   openvino_docs_Extensibility_UG_GPU
   openvino_docs_Extensibility_UG_VPU_Kernel
   openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer

@endsphinxdirective

The Intel® Distribution of OpenVINO™ toolkit supports neural network models trained with various frameworks, including
TensorFlow, PyTorch, ONNX, PaddlePaddle, MXNet, Caffe, and Kaldi. The list of supported operations differs for
each of the supported frameworks. To see the operations supported by your framework, refer to
[Supported Framework Operations](../MO_DG/prepare_model/Supported_Frameworks_Layers.md).

Custom operations, that is, those not included in the list, are not recognized by OpenVINO™ out of the box. The need for a custom operation may appear in two main cases:

1. A regular framework operation that is new or rarely used, which is why it has not been implemented in OpenVINO yet.

2. A new user operation that was created for some specific model topology by a model author using framework extension capabilities.

Importing models with such operations requires additional steps. This guide illustrates the workflow for running inference on models featuring custom operations, allowing you to plug in your own implementation for them. The OpenVINO™ Extensibility API lets you add support for those custom operations and use one implementation for Model Optimizer and OpenVINO™ Runtime.

Defining a new custom operation basically consists of two parts:

1. Definition of the operation semantics in OpenVINO: the code that describes how this operation should be inferred, consuming input tensor(s) and producing output tensor(s). How to implement execution kernels for [GPU](./GPU_Extensibility.md) and [VPU](./VPU_Extensibility.md) is described in separate guides.

2. A mapping rule that facilitates conversion of the framework operation representation to the OpenVINO-defined operation semantics.

The first part is required for inference; the second part is required for successful import of a model containing such operations from the original framework model format. There are several options to implement each part, and the next sections describe them in detail.

## Definition of Operation Semantics

If the custom operation can be mathematically represented as a combination of existing OpenVINO operations and such a decomposition gives the desired performance, then a low-level operation implementation is not required. When deciding on the feasibility of such a decomposition, refer to the latest OpenVINO operation set. You can use any valid combination of existing operations. How to map a custom operation is described in the next section of this document.

If such a decomposition is not possible, or appears too bulky with many constituent operations that do not perform well, then a new class for the custom operation should be implemented, as described in the [Custom Operation Guide](add_openvino_ops.md).

Prefer implementing a custom operation class if you already have a generic C++ implementation of the operation kernel. Otherwise, try to decompose the operation first, as described above, and then, after verifying correctness of inference and the resulting performance, optionally invest in a bare-metal C++ implementation.

## Mapping from Framework Operation

Depending on the model format used for import, the mapping of a custom operation is implemented differently; choose one of the following:

1. If the model is represented in ONNX (including models exported to ONNX from PyTorch) or PaddlePaddle formats, then one of the classes from the [Frontend Extension API](frontend_extensions.md) should be used. It consists of several classes available in C++, which can be used with the Model Optimizer `--extensions` option or when the model is imported directly to the OpenVINO runtime using the `read_model` method. A Python API is also available for run-time model importing.

2. If the model is represented in TensorFlow, Caffe, Kaldi or MXNet formats, then [Model Optimizer Extensions](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) should be used. This approach is available for model conversion in Model Optimizer only.

The existence of two approaches is explained by the two different types of frontends used for model conversion in OpenVINO: new frontends (ONNX, PaddlePaddle) and legacy frontends (TensorFlow, Caffe, Kaldi and MXNet). Model Optimizer can use both kinds of frontends, in contrast to direct import of a model with the `read_model` method, which can use new frontends only. Follow one of the appropriate guides referenced above to implement mappings, depending on the framework frontend.

If you are implementing extensions for the new ONNX or PaddlePaddle frontends and plan to use the Model Optimizer `--extension` option for model conversion, then the extensions should be:

1. Implemented in C++ only.

2. Compiled as a separate shared library (see details on how to do that later in this guide); an illustrative invocation is sketched below.

You cannot write new frontend extensions using the Python API if you plan to use them with Model Optimizer.
|
||||
|
||||
Remaining part of this guide uses Frontend Extension API applicable for new frontends.
|
||||
|
||||
## Registering Extensions

A custom operation class and a new mapping frontend extension class object should be registered to be usable in OpenVINO runtime.

> **NOTE**: This documentation is written based on the [Template extension](https://github.com/openvinotoolkit/openvino/tree/master/docs/template_extension/new), which demonstrates extension development details based on the minimalistic `Identity` operation that is a placeholder for your real custom operation. You can review the complete code, which is fully compilable, to see how it works.

To load the extensions to the `ov::Core` object, use the `ov::Core::add_extension` method. This method can load a library with extensions or register extensions defined in code.

### Load extensions to core

Extensions can be loaded from code with the `ov::Core::add_extension` method:

@sphinxtabset

@sphinxtab{C++}

@snippet docs/snippets/ov_extensions.cpp add_extension

@endsphinxtab

@sphinxtab{Python}

@snippet docs/snippets/ov_extensions.py add_extension

@endsphinxtab

@endsphinxtabset

`Identity` is the custom operation class defined in the [Custom Operation Guide](add_openvino_ops.md). This is enough to enable reading an IR which uses the `Identity` extension operation emitted by Model Optimizer. To be able to load the original model directly to the runtime, you also need to add a mapping extension:

@sphinxdirective

.. tab:: C++

   .. doxygensnippet:: docs/snippets/ov_extensions.cpp
      :language: cpp
      :fragment: add_frontend_extension

.. tab:: Python

   .. doxygensnippet:: docs/snippets/ov_extensions.py
      :language: python
      :fragment: add_frontend_extension

@endsphinxdirective

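For illustration, the two registration calls above boil down to something like the following sketch. It assumes the `TemplateExtension::Identity` class from the template extension; the header names and the model path are assumptions, not exact snippet content:

```cpp
#include <openvino/runtime/core.hpp>
#include <openvino/frontend/extension.hpp>  // header name is an assumption

#include "identity.hpp"  // hypothetical header defining TemplateExtension::Identity

int main() {
    ov::Core core;
    // Register operation semantics, so IRs containing Identity can be read.
    core.add_extension<TemplateExtension::Identity>();
    // Map the framework operation type "Identity" to the custom class, so the
    // original (for example, ONNX) model can be read directly.
    core.add_extension(ov::frontend::OpExtension<TemplateExtension::Identity>("Identity"));
    auto model = core.read_model("model_with_identity.onnx");  // hypothetical model path
}
```
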
When the Python API is used, there is no way to implement a custom OpenVINO operation. Moreover, even if a custom OpenVINO operation is implemented in C++ and loaded to the runtime through a shared library, there is still no way to add a frontend mapping extension that refers to this custom operation from Python. Use the C++ shared library approach to implement both the operation semantics and the framework mapping in this case.

You can still use Python for operation mapping and decomposition when only operations from the standard OpenVINO operation set are used.

### Create library with extensions

You need to create an extension library in the following cases:

- Converting a model with custom operations in Model Optimizer.
- Loading a model with custom operations in a Python application. This applies to both framework models and IR.
- Loading models with custom operations in tools that support loading extensions from a library, for example `benchmark_app`.

If you want to create an extension library, for example, in order to load the extensions to Model Optimizer, follow these steps:
Create an entry point for the extension library. OpenVINO™ provides the `OPENVINO_CREATE_EXTENSIONS()` macro, which allows defining an entry point to a library with OpenVINO™ Extensions.
This macro should have a vector of all OpenVINO™ Extensions as an argument.

Based on that, the declaration of an extension class can look as follows:

@snippet template_extension/new/ov_extension.cpp ov_extension:entry_point
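In outline, the entry point looks roughly like the sketch below. It assumes the same `TemplateExtension::Identity` class; the header names are assumptions:

```cpp
#include <openvino/core/extension.hpp>
#include <openvino/core/op_extension.hpp>
#include <openvino/frontend/extension.hpp>  // header names are assumptions

#include "identity.hpp"  // hypothetical header defining TemplateExtension::Identity

// The macro defines the single entry point of the shared library and takes a
// vector of all extensions exported by the library.
OPENVINO_CREATE_EXTENSIONS(std::vector<ov::Extension::Ptr>({
    // Operation semantics...
    std::make_shared<ov::OpExtension<TemplateExtension::Identity>>(),
    // ...and the frontend mapping for the same operation.
    std::make_shared<ov::frontend::OpExtension<TemplateExtension::Identity>>()
}));
```
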
To configure the build of your extension library, use the following CMake script:

@snippet template_extension/new/CMakeLists.txt cmake:extension

This CMake script finds OpenVINO™ using the `find_package` CMake command.

To build the extension library, run the commands below:

```sh
$ cd docs/template_extension/new
$ mkdir build
$ cd build
$ cmake -DOpenVINO_DIR=<OpenVINO_DIR> ../
$ cmake --build .
```

After the build, you can use the path to your extension library to load your extensions to OpenVINO™ Runtime:

@sphinxtabset

@sphinxtab{C++}

@snippet docs/snippets/ov_extensions.cpp add_extension_lib

@endsphinxtab

@sphinxtab{Python}

@snippet docs/snippets/ov_extensions.py add_extension_lib

@endsphinxtab

@endsphinxtabset
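A minimal sketch of loading the compiled library, with a hypothetical library name:

```cpp
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    // Platform-dependent file name: .so on Linux, .dll on Windows, .dylib on macOS.
    core.add_extension("libopenvino_template_extension.so");  // hypothetical library name
    auto model = core.read_model("model_with_custom_op.xml");  // hypothetical model path
}
```
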
## See Also

* [OpenVINO Transformations](./ov_transformations.md)
* [Using OpenVINO Runtime Samples](../OV_Runtime_UG/Samples_Overview.md)
* [Hello Shape Infer SSD sample](../../samples/cpp/hello_reshape_ssd/README.md)

@@ -1,32 +1,29 @@
# How to Implement Custom Layers for VPU (Intel® Neural Compute Stick 2) {#openvino_docs_Extensibility_UG_VPU_Kernel}

To enable operations not supported by OpenVINO™ out of the box, you need a custom extension for Model Optimizer, a custom nGraph operation set, and a custom kernel for the device you will target. This page describes custom kernel support for one VPU, the Intel® Neural Compute Stick 2 device, which uses the MYRIAD device plugin.

> **NOTES:**
> * OpenCL\* custom layer support is available in preview mode.
> * This section assumes you are familiar with developing kernels using OpenCL.

To customize your topology with an OpenCL layer, carry out the tasks described on this page:

1. Write and compile your OpenCL code with the standalone offline OpenCL compiler (`clc`).
2. Write a configuration file to bind the OpenCL kernel to the topology file (`.xml`) of the model IR.
3. Pass the configuration file to the OpenVINO™ Runtime with the model IR.

## Compile OpenCL code for VPU (Intel® Neural Compute Stick 2)

> **NOTE**: The OpenCL compiler, targeting Intel® Neural Compute Stick 2 for the SHAVE\* processor only, is redistributed with OpenVINO.
> OpenCL support is provided by ComputeAorta\* and is distributed under a license agreement between Intel® and Codeplay\* Software Ltd.

The OpenCL toolchain for the Intel® Neural Compute Stick 2 supports offline compilation only, so first compile OpenCL C code using the standalone `clc` compiler. You can find the compiler binary at `<INSTALL_DIR>/tools/cl_compiler`.

> **NOTE**: By design, custom OpenCL layers support any OpenCL kernels written assuming OpenCL version 1.2. They also support the half-float extension and are optimized for this type, because it is a native type for Intel® Movidius™ VPUs.

1. Prior to running a compilation, make sure that the following variables are set:
   * `SHAVE_MA2X8XLIBS_DIR=<INSTALL_DIR>/tools/cl_compiler/lib/`
   * `SHAVE_LDSCRIPT_DIR=<INSTALL_DIR>/tools/cl_compiler/ldscripts/`
   * `SHAVE_MYRIAD_LD_DIR=<INSTALL_DIR>/tools/cl_compiler/bin/`
   * `SHAVE_MOVIASM_DIR=<INSTALL_DIR>/tools/cl_compiler/bin/`
2. Run the compilation with the command below. You should use `--strip-binary-header` to make an OpenCL runtime-agnostic binary runnable with the OpenVINO™ Runtime.
```bash
cd <INSTALL_DIR>/tools/cl_compiler/bin
./clc --strip-binary-header custom_layer.cl -o custom_layer.bin
```

@@ -34,7 +31,7 @@ The OpenCL toolchain for the Intel® Neural Compute Stick 2 supports offline com

## Write a Configuration File

To tie the topology IR to a layer you customize, prepare a configuration file so that the OpenVINO™ Runtime can find the parameters for your kernel and the description of the execution work grid.
For example, consider the following OpenCL kernel signature:
```cpp
__kernel void reorg_nhwc(__global const half *src, __global half *out, int w, int h, int c, int stride);
```

@@ -58,7 +55,7 @@ A configuration file for this kernel might be the following:

Each custom layer is described with the `CustomLayer` node. It has the following nodes and attributes:
- Root node `CustomLayer` contains the following attributes:
  - `name` – (Required) The name of the OpenVINO™ Runtime layer to bind the kernel with.
  - `type` and `version` – (Required) Reserved for future use. Set them to `MVCL` and `1` respectively.
  - `max-shaves` – (Optional) The maximum number of SHAVE cores that should be dedicated to the layer. It is useful for debugging concurrency issues, or for saving resources when a memory-bound kernel does not scale well with the number of cores, so that more resources can be left for the rest of the topology.
- Sub-node `Kernel` must contain the following attributes:

@@ -158,25 +155,12 @@ Each custom layer is described with the `CustomLayer` node. It has the following

## Pass Configuration File to OpenVINO™ Runtime

> **NOTE**: If both native and custom layer implementations are present, the custom kernel has priority over the native one.

Before loading a network that features custom layers, provide a separate configuration file and load it using the `ov::Core::set_property()` method with the `CONFIG_FILE` configuration key and the configuration file name as the value:

@snippet docs/snippets/vpu/custom_op.cpp part0

## Optimizing Kernels with OpenCL for VPU (Intel® Neural Compute Stick 2)

@@ -233,15 +217,11 @@ __kernel void ocl_grn(__global const half* restrict src_data, __global half* res
```cpp
    int W = get_global_size(0);
    int y = get_global_id(1);
    int H = get_global_size(1);

    float variance = bias + 1e-9f;

    #pragma unroll 4
    for (int c = 0; c < C; c++)
        variance += (float)(src_data[c*H*W + y*W + x] * src_data[c*H*W + y*W + x]);

    variance = 1.f / native_sqrt(variance);

    #pragma unroll 4
    for (int c = 0; c < C; c++)
        dst_data[c*H*W + y*W + x] = (half)((float)src_data[c*H*W + y*W + x] * variance);
```
@@ -253,11 +233,9 @@ __kernel void ocl_grn_line(__global const half* restrict src_data, __global hal
```cpp
{
    int y = get_global_id(1);
    int H = get_global_size(1);

    for (int x = 0; x < W/8; x++)
    {
        float8 variance = (float8)(bias+1e-9f);

        #pragma unroll 4
        for (int c = 0; c < C; c++)
        {
```
@@ -265,15 +243,12 @@ __kernel void ocl_grn_line(__global const half* restrict src_data, __global hal
```cpp
            half8 sh = src_line[x];
            variance += convert_float8(sh*sh);
        }

        variance = 1.f/native_sqrt(variance);

        #pragma unroll 4
        for (int c = 0; c < C; c++)
        {
            __global const half8* restrict src_line = ((__global const half8 * restrict)(src_data + c*H*W + y*W));
            __global half8* restrict dst_line = ((__global half8 * restrict)(dst_data + c*H*W + y*W));

            dst_line[x] = convert_half8(convert_float8(src_line[x])*variance);
        }
    }
```
@@ -283,9 +258,7 @@ __kernel void ocl_grn_line(__global const half* restrict src_data, __global hal
```cpp
    #pragma unroll 4
    for (int c = 0; c < C; c++)
        variance += (float)(src_data[c*H*W + y*W + x]*src_data[c*H*W + y*W + x]);

    variance = 1.f/native_sqrt(variance);

    #pragma unroll 4
    for (int c = 0; c < C; c++)
        dst_data[c*H*W + y*W + x] = (float)src_data[c*H*W + y*W + x]*variance;
```

@@ -314,23 +287,17 @@ The kernel example below demonstrates the impact of early exits on kernel perfor
```cpp
{
    int w = get_global_id(0);
    int W = get_global_size(0);

    int h = get_global_id(1);
    int H = get_global_size(1);

    int c = get_global_id(2);
    int C = get_global_size(2);

    int C2 = C/(stride*stride);
    int offset = c / C2;
    int c2 = c - C2 * offset;

    int H2 = H*stride;
    int W2 = W*stride;

    int h2 = h*stride + offset / stride;
    int w2 = w*stride + offset - stride * (offset / stride);

    out[W*H*c + W*h + w] = src[W2*H2*c2 + W2*h2 + w2];
}
```

@@ -343,23 +310,17 @@ Since the auto-vectorized version is faster, it makes sense to enable it for the
```cpp
{
    int w = get_global_id(0);
    w = min(w, W-1);

    int h = get_global_id(1);
    int H = get_global_size(1);

    int c = get_global_id(2);
    int C = get_global_size(2);

    int C2 = C/(stride*stride);
    int offset = c / C2;
    int c2 = c - C2 * offset;

    int H2 = H*stride;
    int W2 = W*stride;

    int h2 = h*stride + offset / stride;
    int w2 = w*stride + offset - stride * (offset / stride);

    out[W*H*c + W*h + w] = src[W2*H2*c2 + W2*h2 + w2];
}
```

@@ -370,21 +331,17 @@ If branching is inevitable for your element-based kernel, it is recommended to c
```cpp
__kernel void reorg(const __global half* restrict src, __global half* restrict out, int H, int W, int stride)
{
    int h = min((int)get_global_id(0), H-1);

    int c = get_global_id(1);
    int C = get_global_size(1);
    int C2 = C/(stride*stride);
    int offset = c / C2;
    int c2 = c - C2 * offset;

    int H2 = H*stride;
    int W2 = W*stride;

    for (int w = 0; w < W; ++w)
    {
        int h2 = h*stride + offset / stride;
        int w2 = w*stride + offset - stride * (offset / stride);

        out[W*H*c + W*h + w] = src[W2*H2*c2 + W2*h2 + w2];
    }
}
```

@@ -398,14 +355,11 @@ This decreases the execution time up to 40% against the best performing vectoriz
```cpp
    int H, int W, int stride)
{
    int h = min((int)get_global_id(0), H-1);

    int c2 = get_global_id(1);
    int C2 = get_global_size(1);
    int C = C2*stride*stride;

    int H2 = H*stride;
    int W2 = W*stride;

    for (int stride_y = 0; stride_y < stride; stride_y++)
        for (int stride_x = 0; stride_x < stride; stride_x++)
            for (int w2 = 0, w = 0; w < W; w2 += stride, w++)
```

@@ -428,16 +382,13 @@ from/to a `__global` pointer since work-group copying could be done in a vector
```cpp
    float bias)
{
    float variance = bias + 1e-9f;

    #pragma unroll 4
    for (int c = 0; c < C; c++)
    {
        float val = (float) src_data[c*get_global_size(1)*get_global_size(0) + get_global_id(1)*get_global_size(0) + get_global_id(0)];
        variance += val*val;
    }

    half hvariance = (half)(native_rsqrt((half)(variance/16.f))*0.25f);

    #pragma unroll 4
    for (int c = 0; c < C; c++)
    {
```
@@ -446,7 +397,7 @@ from/to a `__global` pointer since work-group copying could be done in a vector
```cpp
    }
}
```

This kernel can be rewritten to introduce the special data binding `__dma_preload` and `__dma_postwrite` intrinsics. This means that instead of one kernel, a group of three kernels should be implemented: `kernelName`, `__dma_preload_kernelName`, and `__dma_postwrite_kernelName`. `__dma_preload_kernelName` for a particular work group `n` is guaranteed to be executed before the `n`-th work group itself, while `__dma_postwrite_kernelName` is guaranteed to be executed after the corresponding work group. You can define any of these functions, which are intended to copy data between `__global` and `__local` memory. The syntax requires an exact function signature match. The example below illustrates how to prepare your kernel for manual DMA.

@@ -498,8 +449,6 @@ event_t WorkGroupDmaCreateStrideTransaction(
```cpp
    size_t dst_stride, // stride between corresponding 2 consecutive lines of destination in bytes
    size_t size, // total number of bytes loaded for all lines from source to destination
    event_t event) __OVERLOAD;

event_t WorkGroupDmaCreateStrideTransaction(
    const global T *src,
    local T *dst,
```
@@ -509,7 +458,6 @@ event_t WorkGroupDmaCreateStrideTransaction(
```cpp
    size_t dst_stride, // stride between corresponding 2 consecutive lines of destination in bytes
    size_t size, // total number of bytes loaded for all lines from source to destination
    event_t event) __OVERLOAD;

// 3D sub-tensor copy
event_t WorkGroupDmaCreate3DTransaction(
    const local T *src,
```
@@ -523,7 +471,6 @@ event_t WorkGroupDmaCreate3DTransaction(
```cpp
    size_t dst_plane_stride, // stride between corresponding 2 consecutive planes of destination in bytes
    size_t size, // size of the loaded plane in bytes, analogous to the size in the 2D case
    event_t event) __OVERLOAD;

event_t WorkGroupDmaCreate3DTransaction(
    const global T *src,
    local T *dst,
```
@@ -563,7 +510,6 @@ __kernel void __dma_preload_grn_NCHW(
```cpp
        get_local_size(0) * get_local_size(1) * sizeof(half), // plane size
        0);
}

__kernel void __dma_postwrite_grn_NCHW(
    __global const half* restrict src,
    __global half* restrict dst,
```
@@ -586,7 +532,6 @@ __kernel void __dma_postwrite_grn_NCHW(
```cpp
        get_local_size(0) * get_local_size(1) * sizeof(half), // plane size
        0);
}

__kernel void grn_NCHW(
    __global const half* restrict src_data,
    __global half* restrict dst_data,
```
@@ -596,16 +541,13 @@ __kernel void grn_NCHW(
```cpp
    float bias)
{
    float variance = bias + 1e-9f;

    #pragma unroll 8
    for (int c = 0; c < C; c++)
    {
        float val = (float) src[c*get_local_size(1)*get_local_size(0) + get_local_id(1)*get_local_size(0) + get_local_id(0)];
        variance += val*val;
    }

    half hvariance = (half)(native_rsqrt((half)(variance/16.f))*0.25f);

    #pragma unroll 8
    for (int c = 0; c < C; c++)
    {
```
@@ -626,13 +568,11 @@ item_dma_event_t WorkItemDmaCreateTransaction(
```cpp
    private T *dst,
    size_t size,
    item_dma_event_t event) __OVERLOAD;

item_dma_event_t WorkItemDmaCreateTransaction(
    const private T *src,
    global T *dst,
    size_t size,
    item_dma_event_t event) __OVERLOAD;

item_dma_event_t WorkItemDmaCreateStrideTransaction(
    const global T *src,
    private T *dst,
```
@@ -642,7 +582,6 @@ item_dma_event_t WorkItemDmaCreateStrideTransaction(
```cpp
    size_t dst_stride,
    size_t size,
    item_dma_event_t event) __OVERLOAD;

item_dma_event_t WorkItemDmaCreateStrideTransaction(
    const private T *src,
    global T *dst,
```
@@ -652,7 +591,6 @@ item_dma_event_t WorkItemDmaCreateStrideTransaction(
```cpp
    size_t dst_stride,
    size_t size,
    item_dma_event_t event) __OVERLOAD;

item_dma_event_t WorkItemDmaCreate3DTransaction(
    const global T *src,
    private T *dst,
```
@@ -665,7 +603,6 @@ item_dma_event_t WorkItemDmaCreate3DTransaction(
```cpp
    size_t dst_plane_stride,
    size_t size,
    item_dma_event_t event) __OVERLOAD;

item_dma_event_t WorkItemDmaCreate3DTransaction(
    const private T *src,
    global T *dst,
```

docs/Extensibility_UG/add_openvino_ops.md (Normal file, 59 lines)
@@ -0,0 +1,59 @@
# Custom OpenVINO™ Operations {#openvino_docs_Extensibility_UG_add_openvino_ops}

The OpenVINO™ Extension API allows you to register custom operations to support models with operations which OpenVINO™ does not support out-of-the-box.

## Operation Class

To add your custom operation, create a new class that extends `ov::Op`, which is in turn derived from `ov::Node`, the base class for all graph operations in OpenVINO™. To use `ov::Op`, include the following file:

@snippet template_extension/new/identity.hpp op:common_include

Follow the steps below to add a custom operation:

1. Add the `OPENVINO_OP` macro, which defines a `NodeTypeInfo` object that identifies the type of the operation to graph users and helps with dynamic type resolution. The type info of an operation currently consists of a string operation identifier and a string for the operation version.

2. Implement a default constructor and constructors that optionally take the operation inputs and attributes as parameters.

3. Override the shape inference method `validate_and_infer_types`. This method is called multiple times during graph manipulations to determine the shapes and element types of the operation's outputs. To access the input shapes and input element types, use the `get_input_partial_shape()` and `get_input_element_type()` methods of `ov::Node`. Set the inferred shape and element type of the output using `set_output_type`.

4. Override the `clone_with_new_inputs` method, which enables graph manipulation routines to create copies of this operation and connect it to different nodes during optimization.

5. Override the `visit_attributes` method, which enables serialization and deserialization of operation attributes. An `AttributeVisitor` is passed to the method, and the implementation is expected to walk over all the attributes in the op using the type-aware `on_attribute` helper. Helpers are already implemented for standard C++ types like `int64_t`, `float`, `bool`, `vector`, and for existing OpenVINO defined types.

6. Override `evaluate`, which is an optional method that enables fallback of some devices to this implementation and the application of constant folding if there is a custom operation on the constant branch. If your operation has an `evaluate` method, you also need to override the `has_evaluate` method, which reports whether `evaluate` is available for the operation.

Based on that, the declaration of an operation class can look as follows:
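The original snippet reference is missing here; as a stand-in, a declaration modeled on the template extension's `Identity` operation might look like the sketch below (method bodies omitted; the exact `evaluate` signature differs between releases, so treat it as an assumption):

```cpp
#include <openvino/op/op.hpp>

namespace TemplateExtension {

class Identity : public ov::op::Op {
public:
    OPENVINO_OP("Identity");

    Identity() = default;
    explicit Identity(const ov::Output<ov::Node>& arg);

    // Shape and type inference (step 3).
    void validate_and_infer_types() override;
    // Copying during graph manipulations (step 4).
    std::shared_ptr<ov::Node> clone_with_new_inputs(const ov::OutputVector& new_args) const override;
    // Attribute (de)serialization (step 5).
    bool visit_attributes(ov::AttributeVisitor& visitor) override;
    // Optional reference implementation (step 6); signature is an assumption.
    bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override;
    bool has_evaluate() const override;
};

}  // namespace TemplateExtension
```
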
### Operation Constructors

An OpenVINO™ operation has two constructors:
* A default constructor, which enables you to create an operation without attributes
* A constructor that creates and validates an operation with specified inputs and attributes

@snippet template_extension/new/identity.cpp op:ctor

### `validate_and_infer_types()`

The `ov::Node::validate_and_infer_types` method validates operation attributes and calculates output shapes using the attributes of the operation.

@snippet template_extension/new/identity.cpp op:validate

### `clone_with_new_inputs()`

The `ov::Node::clone_with_new_inputs` method creates a copy of the operation with new inputs.

@snippet template_extension/new/identity.cpp op:copy

### `visit_attributes()`

The `ov::Node::visit_attributes` method enables you to visit all operation attributes.

@snippet template_extension/new/identity.cpp op:visit_attributes

### `evaluate()` and `has_evaluate()`

The `ov::Node::evaluate` method enables you to apply constant folding to an operation.

@snippet template_extension/new/identity.cpp op:evaluate

docs/Extensibility_UG/frontend_extensions.md (Normal file, 105 lines)
@@ -0,0 +1,105 @@
# Frontend Extensions {#openvino_docs_Extensibility_UG_Frontend_Extensions}

The goal of this chapter is to explain how to use Frontend extension classes to facilitate mapping of custom operations from the framework model representation to the OpenVINO representation. Refer to [Introduction to OpenVINO Extension](Intro.md) to understand the entire flow.

This API is applicable to new frontends only, which exist for ONNX and PaddlePaddle. If a different model format is used, follow the legacy [Model Optimizer Extensions](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) guide.

> **NOTE**: This documentation is written based on the [Template extension](https://github.com/openvinotoolkit/openvino/tree/master/docs/template_extension/new), which demonstrates extension development details based on the minimalistic `Identity` operation that is a placeholder for your real custom operation. You can review the complete code, which is fully compilable, to see how it works.

## Single Operation Mapping with OpExtension

This section covers the case when a single operation in the framework representation is mapped to a single operation in the OpenVINO representation. This is called *one-to-one mapping*. The `OpExtension` class works well if all the following conditions are satisfied:

1. The number of inputs to the operation in the framework representation is the same as in the OpenVINO representation.

2. The number of outputs is also the same in both representations.

3. Inputs can be indexed and are mapped in order correspondingly, e.g. the input with index 0 in the framework representation maps to the input with index 0 in the OpenVINO representation, and so on.

4. The same holds for outputs.

5. Each attribute in the OpenVINO operation can be initialized from one of the attributes of the original operation or by some predefined constant value. Values of copied attributes cannot contain expressions; a value is accepted as-is, so the type of the value should be compatible.

> **NOTE**: The `OpExtension` class is currently available for the ONNX frontend only. The PaddlePaddle frontend has named (not indexed) inputs and outputs for operations, therefore `OpExtension` mapping is not applicable in this case.

The next example maps the ONNX operation with type [“Identity”](https://github.com/onnx/onnx/blob/main/docs/Operators.md#Identity) to the OpenVINO template extension `Identity` class.

@snippet ov_extensions.cpp frontend_extension_Identity_header
@snippet ov_extensions.cpp frontend_extension_Identity

The mapping doesn't involve any attributes, as the operation Identity doesn't have them.

Extension objects, like the just-constructed `extension`, can be added to the OpenVINO runtime just before loading a model that contains custom operations:

@snippet ov_extensions.cpp frontend_extension_read_model
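Put together, and assuming the `TemplateExtension::Identity` class and header layout from the template extension, the flow is roughly:

```cpp
#include <openvino/runtime/core.hpp>
#include <openvino/frontend/extension.hpp>  // header name is an assumption

#include "identity.hpp"  // hypothetical header defining TemplateExtension::Identity

int main() {
    ov::Core core;
    // One-to-one mapping of the ONNX "Identity" type to the custom class.
    core.add_extension(ov::frontend::OpExtension<TemplateExtension::Identity>("Identity"));
    auto model = core.read_model("model_with_identity.onnx");  // hypothetical model path
}
```
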
Alternatively, extensions can be constructed in a separately compiled shared library. Such a library can be used in Model Optimizer or `benchmark_app`. Read about how to build and load such a library in the chapter “Create library with extensions” in [Introduction to OpenVINO Extension](Intro.md).

If the operation has multiple inputs and/or outputs, they are mapped in order. The type of elements in input/output tensors should match the expected types in the surrounding operations. For example, if a custom operation produces the `f32` data type, the operation that consumes this output should also support `f32`. Otherwise, model conversion fails with an error; no automatic type conversion happens.

### Converting to Standard OpenVINO Operation

The `OpExtension` class can be used when mapping to one of the operations from the standard OpenVINO operation set is what you need and there is no class like `TemplateExtension::Identity` implemented.

Here is an example for a custom framework operation “MyRelu”. Suppose it is mathematically equivalent to the standard `Relu` that exists in the OpenVINO operation set, but for some reason has the type name “MyRelu”. In this case you can directly say that the “MyRelu” -> `Relu` mapping should be used:

@snippet ov_extensions.cpp frontend_extension_MyRelu
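A sketch of that mapping in context (the constructor form is an assumption based on the snippet referenced above):

```cpp
#include <openvino/runtime/core.hpp>
#include <openvino/frontend/extension.hpp>  // header name is an assumption

int main() {
    ov::Core core;
    // No template argument: the target operation is looked up by its type name
    // ("Relu") in the latest standard operation set; "MyRelu" is the framework type.
    core.add_extension(ov::frontend::OpExtension<>("Relu", "MyRelu"));
    auto model = core.read_model("model_with_myrelu.onnx");  // hypothetical model path
}
```
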
In the resulting converted OpenVINO model, the “MyRelu” operation will be replaced by the standard operation `Relu` from the latest available OpenVINO operation set. Notice that when a standard operation is used, it can be specified using just a type string (“Relu”) instead of the `ov::opset8::Relu` class name as a template parameter for `OpExtension`. This method is available for operations from the standard operation set only. For a user's custom OpenVINO operation, the corresponding class should always be specified as a template parameter, as was demonstrated with `TemplateExtension::Identity`.

### Attributes Mapping

As described above, `OpExtension` is useful when attributes can be mapped one by one or initialized by a constant. If the set of attributes in the framework representation and the OpenVINO representation completely match by their names and types, nothing needs to be specified in the `OpExtension` constructor parameters. The attributes are discovered and mapped automatically based on the `visit_attributes` method, which should be defined for any OpenVINO operation.

Imagine you have a `CustomOperation` class implementation that has two attributes with names `attr1` and `attr2`:

@snippet ov_extensions.cpp frontend_extension_CustomOperation

And the original model in the framework representation also has an operation with the name “CustomOperation” with the same `attr1` and `attr2` attributes. Then, with the following code:

@snippet ov_extensions.cpp frontend_extension_CustomOperation_as_is

both `attr1` and `attr2` are copied from the framework representation to the OpenVINO representation automatically. If for some reason the names of the attributes are different but the values can still be copied “as-is”, you can pass an attribute-name mapping in the `OpExtension` constructor:

@snippet ov_extensions.cpp frontend_extension_CustomOperation_rename

Where `fw_attr1` and `fw_attr2` are the names of the corresponding attributes in the framework operation representation.

If copying an attribute is not what you need, `OpExtension` can also set an attribute to a predefined constant value. For the same `CustomOperation`, imagine you want to set `attr2` to the value 5 instead of copying it from `fw_attr2`. To achieve that, do the following:

@snippet ov_extensions.cpp frontend_extension_CustomOperation_rename_set
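In outline, the three variants could look as follows. `CustomOperation` is assumed to be your operation class, and the exact constructor signatures are assumptions based on the snippets referenced above:

```cpp
#include <openvino/runtime/core.hpp>
#include <openvino/frontend/extension.hpp>  // header name is an assumption

#include "custom_operation.hpp"  // hypothetical header defining CustomOperation

int main() {
    ov::Core core;

    // 1. All attribute names and types match: nothing extra to specify.
    core.add_extension(ov::frontend::OpExtension<CustomOperation>());

    // 2. Names differ, values are copied as-is: map OpenVINO names to framework names.
    core.add_extension(ov::frontend::OpExtension<CustomOperation>(
        std::map<std::string, std::string>{{"attr1", "fw_attr1"}, {"attr2", "fw_attr2"}}));

    // 3. attr1 is copied from fw_attr1; attr2 is forced to the constant 5.
    core.add_extension(ov::frontend::OpExtension<CustomOperation>(
        std::map<std::string, std::string>{{"attr1", "fw_attr1"}},
        std::map<std::string, ov::Any>{{"attr2", 5}}));
}
```
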
In summary, each attribute of the target OpenVINO operation is initialized in one of three ways:

1. Automatically, by name matching.

2. By mapping from a differently named framework attribute.

3. By setting it to a constant value.

This is achieved by specifying maps as arguments for the `OpExtension` constructor.

## Mapping to Multiple Operations with ConversionExtension

The previous sections cover the case when a single operation is mapped to a single operation with optional adjustment of names and attribute values. That is likely enough for your own custom operation with an existing C++ kernel implementation. In this case your framework representation and OpenVINO representation for the operation are under your control, and inputs/outputs/attributes can be aligned to make `OpExtension` usable.

If one-to-one mapping is not possible, *decomposition to multiple operations* should be considered. It is achieved by using the more verbose and less automated `ConversionExtension` class. It enables writing arbitrary code to replace a single framework operation with multiple connected OpenVINO operations, constructing a dependency graph of any complexity.

`ConversionExtension` maps a single operation to a function which builds a graph using OpenVINO operation classes. Follow the chapter [Build a Model in OpenVINO Runtime](@ref ov_ug_build_model) to learn how to use OpenVINO operation classes to build a fragment of the model for the replacement.

The next example illustrates using `ConversionExtension` for the conversion of “ThresholdedRelu” from ONNX according to the formula: `ThresholdedRelu(x, alpha) -> Multiply(x, Convert(Greater(x, alpha), type=float))`.

> **NOTE**: `ThresholdedRelu` is one of the standard ONNX operators which is supported by the ONNX frontend natively out-of-the-box. It is re-implemented here to illustrate how you can add similar support for your own custom operation instead of `ThresholdedRelu`.

@snippet ov_extensions.cpp frontend_extension_ThresholdedReLU_header
@snippet ov_extensions.cpp frontend_extension_ThresholdedReLU
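A sketch of such a conversion function, following the formula above (header names and the opset choice are assumptions):

```cpp
#include <openvino/runtime/core.hpp>
#include <openvino/frontend/extension.hpp>  // header name is an assumption
#include <openvino/opsets/opset8.hpp>

int main() {
    ov::Core core;
    core.add_extension(ov::frontend::ConversionExtension(
        "ThresholdedRelu",
        [](const ov::frontend::NodeContext& node) {
            auto x = node.get_input(0);
            // Scalar constant built from the framework attribute "alpha".
            auto alpha = ov::opset8::Constant::create(
                ov::element::f32, ov::Shape{}, {node.get_attribute<float>("alpha")});
            auto mask = std::make_shared<ov::opset8::Greater>(x, alpha);
            auto mask_f = std::make_shared<ov::opset8::Convert>(mask, ov::element::f32);
            auto result = std::make_shared<ov::opset8::Multiply>(x, mask_f);
            // Outputs are returned in the order of the original operation outputs.
            return ov::OutputVector{result};
        }));
}
```
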
To access the original framework operation attribute values and connect to the inputs, a `node` object of type `NodeContext` is used. It has two main methods:

* `NodeContext::get_input` to get an input with a given index,

* `NodeContext::get_attribute` to get an attribute value with a given name.

The conversion function should return a vector of node outputs that are mapped to the corresponding outputs of the original framework operation, in the same order.

docs/Extensibility_UG/graph_rewrite_pass.md (Normal file, 28 lines)
@@ -0,0 +1,28 @@
# OpenVINO Graph Rewrite Pass {#openvino_docs_Extensibility_UG_graph_rewrite_pass}

`ov::pass::GraphRewrite` is used to run multiple matcher passes on an `ov::Model` in a single graph traversal.
Example:

@snippet src/transformations/template_pattern_transformation.cpp matcher_pass:graph_rewrite
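A minimal usage sketch; the two matcher-pass class names are hypothetical stand-ins for your own `ov::pass::MatcherPass` subclasses:

```cpp
#include <openvino/pass/graph_rewrite.hpp>
#include <openvino/pass/manager.hpp>

// DecomposeDivide and ConvertSubtract are hypothetical MatcherPass subclasses
// assumed to be defined elsewhere in your code.
void run_rewrites(const std::shared_ptr<ov::Model>& model) {
    ov::pass::Manager manager;
    // All matchers registered in one GraphRewrite share a single graph traversal.
    auto rewrite = manager.register_pass<ov::pass::GraphRewrite>();
    rewrite->add_matcher<DecomposeDivide>();
    rewrite->add_matcher<ConvertSubtract>();
    manager.run_passes(model);
}
```
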
In addition, GraphRewrite handles nodes that were registered by MatcherPasses during their execution. These nodes will be added to the beginning of the sequence of nodes used for pattern matching.

> **NOTE**: When using `ov::pass::Manager`, a temporary GraphRewrite is used to execute a single MatcherPass.

GraphRewrite has two algorithms for MatcherPass execution. The first algorithm is straightforward: it applies each MatcherPass in registration order to the current node.

![graph_rewrite_execution]

However, this is not really efficient when there are many registered passes. So, first of all, GraphRewrite checks that all MatcherPass patterns have a type-based root node (meaning that the type of this node is not hidden inside a predicate).
It then creates a map from the registered MatcherPasses, which helps to avoid the additional cost of applying each MatcherPass to each node.

![graph_rewrite_efficient_search]

> **NOTE**: The GraphRewrite execution algorithm cannot be set manually and depends only on the root nodes registered inside the MatcherPasses.

## See Also

* [OpenVINO™ Transformations](./ov_transformations.md)

[graph_rewrite_execution]: ./img/graph_rewrite_execution.png
[graph_rewrite_efficient_search]: ./img/graph_rewrite_efficient_search.png
docs/Extensibility_UG/matcher_pass.md (Normal file, 101 lines)
@@ -0,0 +1,101 @@
# OpenVINO Matcher Pass {#openvino_docs_Extensibility_UG_matcher_pass}

`ov::pass::MatcherPass` is used for pattern-based transformations.

Template for a MatcherPass transformation class:
@snippet src/transformations/template_pattern_transformation.hpp graph_rewrite:template_transformation_hpp

@snippet src/transformations/template_pattern_transformation.cpp graph_rewrite:template_transformation_cpp

To use `ov::pass::MatcherPass`, you need to complete these steps:
1. Create a pattern
2. Implement a callback
3. Register the pattern and Matcher
4. Execute MatcherPass

So let's go through each of these steps.

## Create a pattern

A pattern is essentially a single-root `ov::Model`. The only difference is that you do not need to create a model object; you just need to create and connect opset or special pattern operations.
Then take the last created operation and use it as the root of the pattern. This root node will be used as the root node in pattern matching.
> **NOTE**: Any nodes in a pattern that have no consumers and are not registered as root will not be used in pattern matching.

@snippet ov_model_snippets.cpp pattern:simple_example
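A minimal sketch of such a pattern, matching any `Parameter -> Relu` pair with Relu as the root (header names assumed):

```cpp
#include <openvino/opsets/opset8.hpp>
#include <openvino/pass/pattern/matcher.hpp>

// Build a pattern that matches a Relu fed by a Parameter, with Relu as root.
auto make_relu_matcher() {
    // Type and shape are needed only to construct the node; they do not
    // participate in matching.
    auto param = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{1});
    auto relu = std::make_shared<ov::opset8::Relu>(param);
    return std::make_shared<ov::pass::pattern::Matcher>(relu, "ReluMatcher");
}
```
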
The `Parameter` operation in the example above has type and shape specified. These attributes are needed only to create the `Parameter` operation class and will not be used in pattern matching.

For more pattern examples, refer to the [pattern matching](#pattern_matching) section.

## Implement callback

A callback is an action applied to every pattern occurrence. In general, a callback is a lambda function that takes the Matcher object with the detected subgraph.

@snippet ov_model_snippets.cpp pattern:callback_example
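In outline, a callback looks like the sketch below (assuming the `ov::matcher_pass_callback` alias declared in the graph-rewrite header):

```cpp
#include <openvino/pass/graph_rewrite.hpp>

// Returning true tells GraphRewrite that the root node was replaced.
ov::matcher_pass_callback callback = [](ov::pass::pattern::Matcher& m) {
    auto root = m.get_match_root();  // node matched by the pattern root
    if (!root)
        return false;
    // ... analyze and replace the matched subgraph here ...
    return true;
};
```
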
The example above shows the callback structure and how the Matcher can be used for accessing nodes detected by the pattern.
The callback return value is `true` if the root node was replaced and another pattern cannot be applied to the same root node; otherwise, it is `false`.
> **NOTE**: It is not recommended to manipulate nodes that come after the root node. This may affect GraphRewrite execution, as it is expected that all nodes that come after the root node in topological order are valid and can be used in pattern matching.

MatcherPass also provides functionality that allows reporting of newly created nodes that can be used in additional pattern matching.
If the MatcherPass was registered in `ov::pass::Manager` or `ov::pass::GraphRewrite`, these registered nodes will be added for additional pattern matching.
That means that matcher passes registered in `ov::pass::GraphRewrite` will be applied to these nodes.

The example below shows how a single MatcherPass can fuse a sequence of operations using the `register_new_node` method.

@snippet src/transformations/template_pattern_transformation.cpp matcher_pass:relu_fusion

> **NOTE**: If you register multiple nodes, please add them in topological order. We do not topologically sort these nodes, as it is a time-consuming operation.

## Register pattern and Matcher

The last step is to register the Matcher and callback inside the MatcherPass. To do this, call the `register_matcher` method.
> **NOTE**: Only one matcher can be registered for a single MatcherPass class.

```cpp
// Register matcher and callback
register_matcher(m, callback);
```
## Execute MatcherPass

MatcherPass has multiple ways to be executed:
* Run on a single node - it can be useful if you want to run MatcherPass inside another transformation.
@snippet src/transformations/template_pattern_transformation.cpp matcher_pass:run_on_node
* Run on `ov::Model` using GraphRewrite - this approach gives the ability to run MatcherPass on a whole `ov::Model`. Moreover, multiple MatcherPass transformations can be registered in a single GraphRewrite to be executed in a single graph traversal.
@snippet src/transformations/template_pattern_transformation.cpp matcher_pass:graph_rewrite
* Run on `ov::Model` using `ov::pass::Manager` - this approach helps you to register MatcherPass for execution on `ov::Model` like any other transformation type.
@snippet src/transformations/template_pattern_transformation.cpp matcher_pass:manager
## Pattern Matching <a name="pattern_matching"></a>

Sometimes patterns cannot be expressed via regular operations, or doing so is too complicated.
For example, you may want to detect a **Convolution->Add** sub-graph without specifying a particular input type for the Convolution operation, or you may want to create a pattern where some operations can have different types.
For these cases, OpenVINO™ provides additional helpers to construct patterns for GraphRewrite transformations.

There are two main helpers:
1. `ov::pass::pattern::any_input` - helps to express inputs if their types are undefined.
2. `ov::pass::pattern::wrap_type<T>` - helps to express nodes of a pattern without specifying node attributes.

Let's go through an example to get a better understanding of how it works:

> **NOTE**: Node attributes do not participate in pattern matching and are needed only for operation creation. Only operation types participate in pattern matching.

The example below shows basic usage of `ov::pass::pattern::any_input`.
Here we construct a Multiply pattern with an arbitrary first input and a Constant as the second input.
Also, as Multiply is a commutative operation, it does not matter in which order we set the inputs (any_input/Constant or Constant/any_input), because both cases will be matched.

@snippet ov_model_snippets.cpp pattern:label_example
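A sketch of that pattern using both helpers (header names are assumptions):

```cpp
#include <openvino/opsets/opset8.hpp>
#include <openvino/pass/pattern/op/label.hpp>      // any_input; header name assumed
#include <openvino/pass/pattern/op/wrap_type.hpp>  // wrap_type; header name assumed

// Multiply(any_input, Constant): the first input is arbitrary, the second must
// be a Constant; node attributes are ignored during matching.
auto make_mul_pattern() {
    auto input = ov::pass::pattern::any_input();
    auto constant = ov::pass::pattern::wrap_type<ov::opset8::Constant>();
    return ov::pass::pattern::wrap_type<ov::opset8::Multiply>({input, constant});
}
```
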
This example shows how we can construct a pattern when an operation has an arbitrary number of inputs.

@snippet ov_model_snippets.cpp pattern:concat_example

This example shows how to use a predicate to construct a pattern. It also shows how to match a pattern manually on a given node.

@snippet ov_model_snippets.cpp pattern:predicate_example

> **NOTE**: Be careful with manual matching, because the Matcher object holds the matched nodes. To clear a match, use the `m->clear_state()` method.

## See Also

* [OpenVINO™ Transformations](./ov_transformations.md)
docs/Extensibility_UG/model_pass.md (Normal file, 17 lines)
@@ -0,0 +1,17 @@
# OpenVINO Model Pass {#openvino_docs_Extensibility_UG_model_pass}

`ov::pass::ModelPass` is used for transformations that take the entire `ov::Model` as input and process it.

Template for a ModelPass transformation class:

@snippet src/transformations/template_model_transformation.hpp model_pass:template_transformation_hpp

@snippet src/transformations/template_model_transformation.cpp model_pass:template_transformation_cpp

To use `ov::pass::ModelPass`, you need to override the `run_on_model` method, where you write the transformation code.
The return value is `true` if the original model has changed during the transformation (a new operation was added, an operation replacement was made, or node attributes were changed); otherwise, it is `false`.
`ov::pass::ModelPass`-based transformations can also be executed via `ov::pass::Manager`, as shown in the sketch below.
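A minimal sketch, assuming the single-argument form of the `OPENVINO_RTTI` macro is available in your release:

```cpp
#include <openvino/pass/manager.hpp>
#include <openvino/pass/pass.hpp>

// Hypothetical pass that only inspects the model; it returns false because
// the model is not modified.
class CountNodesPass : public ov::pass::ModelPass {
public:
    OPENVINO_RTTI("CountNodesPass");
    bool run_on_model(const std::shared_ptr<ov::Model>& model) override {
        size_t count = model->get_ops().size();
        (void)count;  // report it somewhere in a real pass
        return false;  // true only if the model was changed
    }
};

void run(const std::shared_ptr<ov::Model>& model) {
    ov::pass::Manager manager;
    manager.register_pass<CountNodesPass>();
    manager.run_passes(model);
}
```
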
## See Also

* [OpenVINO™ Transformations](./ov_transformations.md)
docs/Extensibility_UG/ov_transformations.md (Normal file, 173 lines)
@@ -0,0 +1,173 @@
# Overview of Transformations API {#openvino_docs_transformations}

@sphinxdirective

.. toctree::
   :maxdepth: 1
   :hidden:

   openvino_docs_Extensibility_UG_model_pass
   openvino_docs_Extensibility_UG_matcher_pass
   openvino_docs_Extensibility_UG_graph_rewrite_pass

@endsphinxdirective

The OpenVINO transformation mechanism allows developing transformation passes to modify an `ov::Model`. You can use this mechanism to apply additional optimizations to the original Model or to transform unsupported subgraphs and operations into new operations which are supported by the plugin.
This guide contains all the necessary information to start implementing OpenVINO™ transformations.

## Working with Model

Before moving to the transformations themselves, a few words are needed about the functions that allow modifying an `ov::Model`.
This chapter extends the [model representation guide](../OV_Runtime_UG/model_representation.md) and shows the API that allows manipulating an `ov::Model`.

### Working with node input and output ports

First of all, let's talk about `ov::Node` input/output ports. Each OpenVINO™ operation has input and output ports; the exceptions are operations of `Parameter` or `Constant` type.

Every port belongs to its node, so using a port we can access the parent node, get the shape and type of a particular input/output, get all consumers in the case of an output port, and get the producer node in the case of an input port.
With an output port we can set inputs for newly created operations.

Let's look at a code example.

@snippet ov_model_snippets.cpp ov:ports_example
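A sketch of what the port API offers; it assumes the node has at least one input and one output:

```cpp
#include <openvino/core/node.hpp>

// Inspect a node's first output port and its consumers (sketch).
void inspect(const std::shared_ptr<ov::Node>& node) {
    ov::Output<ov::Node> out = node->output(0);
    auto shape = out.get_partial_shape();  // shape of this output
    auto type = out.get_element_type();    // element type of this output
    for (ov::Input<ov::Node> consumer : out.get_target_inputs()) {
        auto consumer_node = consumer.get_node();  // raw pointer to the consumer
        (void)consumer_node;
    }
    // For an input port, the producing output is available as:
    ov::Output<ov::Node> producer = node->input(0).get_source_output();
    (void)shape; (void)type; (void)producer;
}
```
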
### Node replacement

OpenVINO™ provides two ways for node replacement: via an OpenVINO™ helper function and directly via port methods. We are going to review both of them.

Let's start with the OpenVINO™ helper functions. The most popular function is `ov::replace_node(old_node, new_node)`.

We will review a real replacement case, where a Negative operation is replaced with Multiply.

![ngraph_replace_node]

@snippet ov_model_snippets.cpp ov:replace_node
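A minimal sketch of that replacement, preserving the friendly name and runtime info as recommended later in this guide:

```cpp
#include <openvino/core/graph_util.hpp>  // ov::replace_node
#include <openvino/core/rt_info.hpp>     // ov::copy_runtime_info
#include <openvino/opsets/opset8.hpp>

// Replace Negative(x) with Multiply(x, -1) (sketch).
void replace_negative(const std::shared_ptr<ov::opset8::Negative>& neg) {
    auto minus_one = ov::opset8::Constant::create(
        neg->get_output_element_type(0), ov::Shape{}, {-1});
    auto mul = std::make_shared<ov::opset8::Multiply>(neg->input_value(0), minus_one);
    mul->set_friendly_name(neg->get_friendly_name());  // keep the original name
    ov::copy_runtime_info(neg, mul);                   // keep runtime attributes
    ov::replace_node(neg, mul);
}
```
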
`ov::replace_node` has a constraint that the number of output ports of both operations must be the same; otherwise, it raises an exception.

The alternative way to do the same replacement is the following:

@snippet ov_model_snippets.cpp ov:manual_replace

Another transformation example is insertion.

![ngraph_insert_node]

@snippet ov_model_snippets.cpp ov:insert_node

An alternative way to insert an operation is to make a node copy and use `ov::replace_node()`:

@snippet ov_model_snippets.cpp ov:insert_node_with_copy

### Node elimination

Another type of node replacement is its elimination.

To eliminate an operation, OpenVINO™ has a special method that takes into account all the limitations related to OpenVINO™ Runtime.

@snippet ov_model_snippets.cpp ov:eliminate_node
In case of successful replacement, `ov::replace_output_update_name()` automatically preserves the friendly name and runtime info.

## Transformations types <a name="transformations_types"></a>

OpenVINO™ Runtime has three main transformation types:

* [Model pass](./model_pass.md) - a straightforward way to work with `ov::Model` directly
* [Matcher pass](./matcher_pass.md) - a pattern-based transformation approach
* [Graph rewrite pass](./graph_rewrite_pass.md) - a container for matcher passes needed for efficient execution

![transformations_structure]

## Transformation conditional compilation

The transformation library has two internal macros to support the conditional compilation feature.

* `MATCHER_SCOPE(region)` - allows disabling the MatcherPass if the matcher isn't used. The region name should be unique. This macro creates a local variable `matcher_name` which you should use as the matcher name.
* `RUN_ON_MODEL_SCOPE(region)` - allows disabling the run_on_model pass if it isn't used. The region name should be unique.

## Transformation writing essentials <a name="transformation_writing_essentials"></a>

When developing a transformation, you need to follow these transformation rules:

### 1. Friendly Names

Each `ov::Node` has a unique name and a friendly name. In transformations we care only about the friendly name, because it represents the name from the model.
To avoid losing the friendly name when replacing a node with another node or a subgraph, set the original friendly name on the latest node in the replacing subgraph. See the example below.

@snippet ov_model_snippets.cpp ov:replace_friendly_name

In more advanced cases, when the replaced operation has several outputs and we add additional consumers to its outputs, we decide how to set the friendly name by arrangement.

### 2. Runtime Info

Runtime info is a map `std::map<std::string, ov::Any>` located inside the `ov::Node` class. It represents additional attributes of an `ov::Node`.
These attributes can be set by users or by plugins, and when executing a transformation that changes an `ov::Model`, we need to preserve these attributes, as they are not automatically propagated.
In most cases, transformations have the following types: 1:1 (replace a node with another node), 1:N (replace a node with a sub-graph), N:1 (fuse a sub-graph into a single node), N:M (any other transformation).
Currently, there is no mechanism that automatically detects the transformation type, so this runtime information needs to be propagated manually. See the examples below.

@snippet ov_model_snippets.cpp ov:copy_runtime_info

When a transformation has multiple fusions or decompositions, `ov::copy_runtime_info` must be called multiple times for each case.

> **Note**: `copy_runtime_info` removes `rt_info` from destination nodes. If you want to keep it, specify the destination nodes among the source nodes, like this: `copy_runtime_info({a, b, c}, {a, b})`.

### 3. Constant Folding

If your transformation inserts constant sub-graphs that need to be folded, do not forget to use `ov::pass::ConstantFolding()` after your transformation, or call constant folding directly for the operation.
The example below shows how a constant subgraph can be constructed.

@snippet ov_model_snippets.cpp ov:constant_subgraph
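A minimal sketch of running the folding pass over a model after such a transformation:

```cpp
#include <openvino/pass/constant_folding.hpp>
#include <openvino/pass/manager.hpp>

// Fold constant subgraphs left behind by a transformation (sketch).
void fold_constants(const std::shared_ptr<ov::Model>& model) {
    ov::pass::Manager manager;
    manager.register_pass<ov::pass::ConstantFolding>();
    manager.run_passes(model);
}
```
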
Manual constant folding is preferred over `ov::pass::ConstantFolding()` because it is much faster.

Below you can find an example of manual constant folding:

@snippet src/transformations/template_pattern_transformation.cpp manual_constant_folding
## Common mistakes in transformations <a name="common_mistakes"></a>

In the transformation development process:

* Do not use deprecated OpenVINO™ API. Deprecated methods have the `OPENVINO_DEPRECATED` macro in their definitions.
* Do not pass a `shared_ptr<Node>` as an input to another node if the type of the node is unknown or if it has multiple outputs. Use an explicit output port instead.
* If you replace a node with another node that produces a different shape, remember that the new shape will not be propagated until the first `validate_nodes_and_infer_types` call for the `ov::Model`. If you are using `ov::pass::Manager`, it will automatically call this method after each transformation execution.
* Do not forget to call the `ov::pass::ConstantFolding` pass if your transformation creates constant subgraphs.
* Use the latest OpSet if you are not developing a downgrade transformation pass.
* When developing a callback for `ov::pass::MatcherPass`, do not change nodes that come after the root node in topological order.

## Using pass manager <a name="using_pass_manager"></a>

`ov::pass::Manager` is a container class that can store a list of transformations and execute them. The main idea of this class is to have a high-level representation for a grouped list of transformations.
It can register and apply any [transformation pass](#transformations_types) on a model.
In addition, `ov::pass::Manager` has extended debug capabilities (find more information in the [how to debug transformations](#how_to_debug_transformations) section).

The example below shows basic usage of `ov::pass::Manager`

@snippet src/transformations/template_pattern_transformation.cpp matcher_pass:manager3

Another example shows how multiple matcher passes can be united into a single GraphRewrite.

@snippet src/transformations/template_pattern_transformation.cpp matcher_pass:manager2
## How to debug transformations <a name="how_to_debug_transformations"></a>

If you are using `ov::pass::Manager` to run a sequence of transformations, you can get additional debug capabilities by using the following environment variables:

```
OV_PROFILE_PASS_ENABLE=1 - enables performance measurement for each transformation and prints the execution status
OV_ENABLE_VISUALIZE_TRACING=1 - enables visualization after each transformation. By default, it saves dot and svg files.
```

> **Note**: Make sure that you have `dot` (Graphviz) installed on your machine; otherwise, only the dot file will be silently saved, without the svg file.

## See Also

* [OpenVINO™ Model Representation](../OV_Runtime_UG/model_representation.md)
* [OpenVINO™ Extensions](./Intro.md)

[ngraph_replace_node]: ./img/ngraph_replace_node.png
[ngraph_insert_node]: ./img/ngraph_insert_node.png
[transformations_structure]: ./img/transformations_structure.png
[register_new_node]: ./img/register_new_node.png
@@ -1,349 +0,0 @@

# Custom Operations Guide {#openvino_docs_HOWTO_Custom_Layers_Guide}

The Intel® Distribution of OpenVINO™ toolkit supports neural network models trained with multiple frameworks, including TensorFlow\*, Caffe\*, MXNet\*, Kaldi\*, and the ONNX\* file format. The list of supported operations (layers) differs for each of the supported frameworks. To see the operations supported by your framework, refer to [Supported Framework Layers](../MO_DG/prepare_model/Supported_Frameworks_Layers.md).

Custom operations, that is, operations not included in the list, are not recognized by Model Optimizer out-of-the-box. Therefore, creating an Intermediate Representation (IR) for a model using them requires additional steps. This guide illustrates the workflow for running inference on topologies featuring custom operations, allowing you to plug in your own implementation for existing or completely new operations.

> **NOTE**: *Layer* is a legacy term for *operation* that came from the Caffe\* framework. It is no longer used.
> Refer to the [Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™](../MO_DG/IR_and_opsets.md)
> for more information on the topic.

## Terms Used in This Guide

- *Intermediate Representation (IR)* — the OpenVINO™ neural network format used by the Inference Engine. It abstracts away the differences between frameworks and describes the model topology, operation parameters, and weights.

- *Operation* — an abstract concept of a math function selected for a specific purpose. The operations supported by OpenVINO™ are listed in the supported operation sets provided in [Available Operation Sets](../ops/opset.md). Examples of operations are [ReLU](../ops/activation/ReLU_1.md), [Convolution](../ops/convolution/Convolution_1.md), [Add](../ops/arithmetic/Add_1.md), etc.

- *Kernel* — the implementation of an operation function in an OpenVINO™ plugin; in this case, the math programmed (in C++ or OpenCL) to perform the operation for a target hardware (CPU or GPU).

- *Inference Engine Extension* — a device-specific module implementing custom operations (a set of kernels).

## Custom Operation Support Overview

There are three steps to support inference of a model with custom operation(s):
1. Add support for the custom operation in the [Model Optimizer](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) so that the Model Optimizer can generate the IR with the operation.
2. Create an operation set and implement a custom nGraph operation in it as described in [Custom nGraph Operation](../IE_DG/Extensibility_DG/AddingNGraphOps.md).
3. Implement the custom operation in one of the [Inference Engine](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) plugins to support inference of this operation using a particular target hardware (CPU, GPU or VPU).

To see the operations that are supported by each device plugin for the Inference Engine, refer to
[Supported Devices](../IE_DG/supported_plugins/Supported_Devices.md).

> **NOTE**: If a device doesn't support a particular operation, an alternative to creating a new operation is to target
> an additional device using the HETERO plugin. The [Heterogeneous Plugin](../IE_DG/supported_plugins/HETERO.md) may be
> used to run an inference model on multiple devices, allowing the unsupported operations on one device to "fall back" to
> run on another device (e.g., CPU) that does support those operations.

### Custom Operation Support for the Model Optimizer

The Model Optimizer model conversion pipeline is described in detail in the "Model Conversion Pipeline" section of [Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md). It is best to read that article first for a better understanding of the following material.

Model Optimizer provides an extension mechanism to support new operations and to implement custom model transformations that generate optimized IR. This mechanism is described in the "Model Optimizer Extensions" section of
[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md).

At a minimum, two types of Model Optimizer extensions should be implemented to support custom operations:
1. An operation class for the new operation. This class stores information about the operation: its attributes, shape inference function, attributes to be saved to an IR, and some other internally used attributes. Refer to the "Model Optimizer Operation" section of [Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for detailed instructions on how to implement it.
2. An operation attributes extractor. The extractor is responsible for parsing the framework-specific representation of the operation and uses the corresponding operation class to update graph node attributes with the necessary attributes of the operation. Refer to the "Operation Extractor" section of [Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for detailed instructions on how to implement it.

> **NOTE**: In some cases you may also need to implement a transformation to support the operation. This topic is covered in the "Graph Transformation Extensions" section of [Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md).

## Custom Operations Extensions for the Inference Engine

Inference Engine provides an extension mechanism to support new operations. This mechanism is described in [Inference Engine Extensibility Mechanism](../IE_DG/Extensibility_DG/Intro.md).

Each device plugin includes a library of optimized implementations to execute known operations; this library must be extended to execute a custom operation. The custom operation extension is implemented according to the target device:

- Custom Operation CPU Extension
    - A compiled shared library (`.so` or `.dll`) needed by the CPU Plugin for executing the custom operation on a CPU. Refer to [How to Implement Custom CPU Operations](../IE_DG/Extensibility_DG/CPU_Kernel.md) for more details.
- Custom Operation GPU Extension
    - OpenCL source code (`.cl`) for the custom operation kernel that will be compiled to execute on the GPU, along with an operation description file (`.xml`) needed by the GPU Plugin for the custom operation kernel. Refer to [How to Implement Custom GPU Operations](../IE_DG/Extensibility_DG/GPU_Kernel.md) for more details.
- Custom Operation VPU Extension
    - OpenCL source code (`.cl`) for the custom operation kernel that will be compiled to execute on the VPU, along with an operation description file (`.xml`) needed by the VPU Plugin for the custom operation kernel. Refer to [How to Implement Custom Operations for VPU](../IE_DG/Extensibility_DG/VPU_Kernel.md) for more details.

It is also necessary to implement an nGraph custom operation according to [Custom nGraph Operation](../IE_DG/Extensibility_DG/AddingNGraphOps.md) so that the Inference Engine can read an IR with this
operation and correctly infer output tensor shape and type.

## Enabling Magnetic Resonance Image Reconstruction Model
This chapter provides step-by-step instructions on how to enable the magnetic resonance image reconstruction model implemented in the [repository](https://github.com/rmsouza01/Hybrid-CS-Model-MRI/) using a custom operation on CPU. The example is prepared for a model generated from the repository with hash `2ede2f96161ce70dcdc922371fe6b6b254aafcc8`.

### Download and Convert the Model to a Frozen TensorFlow\* Model Format
The original pre-trained model is provided in the hdf5 format, which is not supported by OpenVINO directly and needs to be converted to the TensorFlow\* frozen model format first.

1. Clone the repository `https://github.com/rmsouza01/Hybrid-CS-Model-MRI` and check out the pinned commit:<br>
```bash
git clone https://github.com/rmsouza01/Hybrid-CS-Model-MRI
cd Hybrid-CS-Model-MRI
git checkout 2ede2f96161ce70dcdc922371fe6b6b254aafcc8
```

2. Convert the pre-trained `.hdf5` model to a frozen `.pb` graph using the following script (tested with TensorFlow==1.15.0 and
Keras==2.2.4), which should be executed from the root of the cloned repository:<br>
```py
import keras as K
import numpy as np
import Modules.frequency_spatial_network as fsnet
import tensorflow as tf

under_rate = '20'

stats = np.load("Data/stats_fs_unet_norm_" + under_rate + ".npy")
var_sampling_mask = np.load("Data/sampling_mask_" + under_rate + "perc.npy")

# Build the model and load the pre-trained weights
model = fsnet.wnet(stats[0], stats[1], stats[2], stats[3], kshape = (5,5), kshape2=(3,3))
model_name = "Models/wnet_" + under_rate + ".hdf5"
model.load_weights(model_name)

# Save a random input tensor so it can be reused later for testing
inp = np.random.standard_normal([1, 256, 256, 2]).astype(np.float32)
np.save('inp', inp)

# Freeze the graph: convert variables to constants and serialize to .pb
sess = K.backend.get_session()
sess.as_default()
graph_def = sess.graph.as_graph_def()
graph_def = tf.graph_util.convert_variables_to_constants(sess, graph_def, ['conv2d_44/BiasAdd'])
with tf.gfile.FastGFile('wnet_20.pb', 'wb') as f:
    f.write(graph_def.SerializeToString())
```

As a result, the TensorFlow\* frozen model file "wnet_20.pb" is generated.

### Convert the Frozen TensorFlow\* Model to Intermediate Representation

First, open the model in TensorBoard or another TensorFlow\* model visualization tool. The model supports a dynamic
batch dimension because the value for the batch dimension is not hardcoded in the model. Model Optimizer needs to set all
dynamic dimensions to some specific value to create the IR; therefore, specify the command line parameter `-b 1` to set
the batch dimension equal to 1. The actual batch size dimension can be changed at runtime using the Inference Engine API
described in [Using Shape Inference](../IE_DG/ShapeInference.md). Also refer to the General Conversion Parameters section in [Converting a Model to Intermediate Representation (IR)](../MO_DG/prepare_model/convert_model/Converting_Model.md) and [Convert Your TensorFlow* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md)
for more details and command line parameters used for the model conversion.

```sh
mo --input_model <PATH_TO_MODEL>/wnet_20.pb -b 1
```

> **NOTE**: This conversion guide is applicable to the 2021.3 release of OpenVINO; starting from 2021.4,
> OpenVINO supports this model out of the box.

Model Optimizer produces the following error:
```bash
[ ERROR ] List of operations that cannot be converted to Inference Engine IR:
[ ERROR ] Complex (1)
[ ERROR ] lambda_2/Complex
[ ERROR ] IFFT2D (1)
[ ERROR ] lambda_2/IFFT2D
[ ERROR ] ComplexAbs (1)
[ ERROR ] lambda_2/Abs
[ ERROR ] Part of the nodes was not converted to IR. Stopped.
```

The error means that the Model Optimizer doesn't know how to handle 3 types of TensorFlow\* operations: "Complex",
"IFFT2D" and "ComplexAbs". In order to see more details about the conversion process, run the model conversion with the
additional parameter `--log_level DEBUG`. It is worth mentioning the following lines from the detailed output:

```bash
[ INFO ] Called "tf_native_tf_node_infer" for node "lambda_2/Complex"
[ <TIMESTAMP> ] [ DEBUG ] [ tf:228 ] Added placeholder with name 'lambda_2/lambda_3/strided_slice_port_0_ie_placeholder'
[ <TIMESTAMP> ] [ DEBUG ] [ tf:228 ] Added placeholder with name 'lambda_2/lambda_4/strided_slice_port_0_ie_placeholder'
[ <TIMESTAMP> ] [ DEBUG ] [ tf:241 ] update_input_in_pbs: replace input 'lambda_2/lambda_3/strided_slice' with input 'lambda_2/lambda_3/strided_slice_port_0_ie_placeholder'
[ <TIMESTAMP> ] [ DEBUG ] [ tf:249 ] Replacing input '0' of the node 'lambda_2/Complex' with placeholder 'lambda_2/lambda_3/strided_slice_port_0_ie_placeholder'
[ <TIMESTAMP> ] [ DEBUG ] [ tf:241 ] update_input_in_pbs: replace input 'lambda_2/lambda_4/strided_slice' with input 'lambda_2/lambda_4/strided_slice_port_0_ie_placeholder'
[ <TIMESTAMP> ] [ DEBUG ] [ tf:249 ] Replacing input '1' of the node 'lambda_2/Complex' with placeholder 'lambda_2/lambda_4/strided_slice_port_0_ie_placeholder'
[ <TIMESTAMP> ] [ DEBUG ] [ tf:148 ] Inferred shape of the output tensor with index '0' of the node 'lambda_2/Complex': '[ 1 256 256]'
[ <TIMESTAMP> ] [ DEBUG ] [ infer:145 ] Outputs:
[ <TIMESTAMP> ] [ DEBUG ] [ infer:32 ] output[0]: shape = [ 1 256 256], value = <UNKNOWN>
[ <TIMESTAMP> ] [ DEBUG ] [ infer:129 ] --------------------
[ <TIMESTAMP> ] [ DEBUG ] [ infer:130 ] Partial infer for lambda_2/IFFT2D
[ <TIMESTAMP> ] [ DEBUG ] [ infer:131 ] Op: IFFT2D
[ <TIMESTAMP> ] [ DEBUG ] [ infer:132 ] Inputs:
[ <TIMESTAMP> ] [ DEBUG ] [ infer:32 ] input[0]: shape = [ 1 256 256], value = <UNKNOWN>
```

This is a part of the log from the partial inference phase of the model conversion. See the "Partial Inference" section of
[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for
more information about this phase. Model Optimizer inferred the output shape for the unknown operation of type "Complex"
using a "fallback" to TensorFlow\*. However, this is not enough to generate the IR because Model Optimizer doesn't know
which attributes of the operation should be saved to the IR. So it is necessary to implement Model Optimizer extensions to
support these operations.

Before going into the extension development, it is necessary to understand what these unsupported operations do, according
to the TensorFlow\* framework specification.
* "Complex" - returns a tensor of complex type constructed from two real input tensors specifying real and imaginary
|
||||
part of a complex number.
|
||||
* "IFFT2D" - returns a tensor with inverse 2-dimensional discrete Fourier transform over the inner-most 2 dimensions of
|
||||
an input.
|
||||
* "ComplexAbs" - returns a tensor with absolute values of input tensor with complex numbers.
|
||||
|
||||
The part of the model with all three unsupported operations is depicted below:
|
||||
|
||||

|
||||
|
||||
This model uses complex numbers during the inference but Inference Engine does not support tensors of this data type. So
|
||||
it is necessary to find a way how to avoid using tensors of such a type in the model. Fortunately, the complex tensor
|
||||
appear as a result of "Complex" operation, is used as input in the "IFFT2D" operation then is passed to "ComplexAbs"
|
||||
which produces real value tensor as output. So there are just 3 operations consuming/producing complex tensors in the
|
||||
model.
|
||||
|
||||

Let's design an OpenVINO operation "FFT" which gets a single real tensor describing the complex input and
produces a single real tensor describing the complex output. This way, the fact that the model uses complex
numbers is hidden inside the "FFT" operation implementation. The operation gets a tensor of shape `[N, H, W, 2]` and
produces an output tensor of the same shape, where the innermost dimension contains pairs of real numbers describing
a complex number (its real and imaginary parts). As we will see further, this operation will allow us to support the
model. The implementation of the Model Optimizer operation should be saved to the `mo_extensions/ops/FFT.py` file:

@snippet FFT.py fft:operation

The attribute `inverse` is a flag specifying the type of FFT to apply: forward or inverse.
See the "Model Optimizer Operation" section of [Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for detailed instructions on how to implement the operation.
|
||||
|
||||
Now it is necessary to implement extractor for the "IFFT2D" operation according to the
|
||||
"Operation Extractor" section of [Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md). The
|
||||
following snippet provides two extractors: one for "IFFT2D", another one for "FFT2D", however only on of them is used in this example. The implementation should be saved to the file `mo_extensions/front/tf/FFT_ext.py`.
|
||||
|
||||
@snippet FFT_ext.py fft_ext:extractor
|
||||
|
||||
> **NOTE**: The graph is in inconsistent state after extracting node attributes because according to original operation
|
||||
> "IFFT2D" semantic it should have an input consuming a tensor of complex numbers, but the extractor instantiated an
|
||||
> operation "FFT" which expects a real tensor with specific layout. But the inconsistency will be resolved during
|
||||
> applying front phase transformations discussed below.
|
||||
|
||||

The output shape of the operation "AddV2" in the picture above is `[N, H, W, 2]`, where the innermost dimension
contains pairs of real numbers describing a complex number (its real and imaginary parts). The following "StridedSlice"
operations split the input tensor into 2 parts to get a tensor of real parts and a tensor of imaginary parts, which are then
consumed by the "Complex" operation to produce a tensor of complex numbers. These "StridedSlice" and "Complex"
operations can be removed so that the "FFT" operation will get a real value tensor encoding complex numbers. To achieve this,
we implement a front phase transformation which searches for a pattern of two "StridedSlice" operations with specific
attributes producing data for a "Complex" operation and removes it from the graph. Refer to the
"Pattern-Defined Front Phase Transformations" section of [Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for more
information on how this type of transformation works. The code snippet should be saved to the file
`mo_extensions/front/tf/Complex.py`.

@snippet Complex.py complex:transformation

> **NOTE**: The graph is in an inconsistent state because the "ComplexAbs" operation consumes a complex value tensor, but
> "FFT" produces a real value tensor.

Now let's implement a transformation which replaces the "ComplexAbs" operation with a sub-graph of primitive operations
that calculate the result using the following formula: \f$module(z) = \sqrt{real(z) \cdot real(z) + imag(z) \cdot imag(z)}\f$.
The original "IFFT2D" operation produces a tensor of complex values, but the "FFT" operation produces a real value tensor with
the same format and shape as its input. So the input shape for "ComplexAbs" will be `[N, H, W, 2]`,
with the innermost dimension containing tuples of the real and imaginary parts of a complex number. In order to calculate
absolute values for the complex tensor, we do the following (see the worked equation after this list):
1. Raise all elements to the power of 2.
2. Calculate a reduced sum over the innermost dimension.
3. Calculate a square root.
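
On the `[N, H, W, 2]` layout, these three steps compute \f$y_{n,h,w} = \sqrt{x_{n,h,w,0}^2 + x_{n,h,w,1}^2}\f$, which matches the formula above with \f$real(z) = x_{...,0}\f$ and \f$imag(z) = x_{...,1}\f$.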

The implementation should be saved to the file `mo_extensions/front/tf/ComplexAbs.py` and is provided below:

@snippet ComplexAbs.py complex_abs:transformation

Now it is possible to convert the model using the following command line:
```sh
mo --input_model <PATH_TO_MODEL>/wnet_20.pb -b 1 --extensions mo_extensions/
```

The sub-graph corresponding to the originally unsupported one is depicted in the image below:

![](../img/converted_subgraph.png)

> **NOTE**: Model Optimizer performed a conversion of the model from NHWC to NCHW layout, which is why the dimension with
> the value 2 moved to another position.

### Inference Engine Extension Implementation
Now it is necessary to implement the extension for the CPU plugin with the operation "FFT" introduced previously. The code
below is based on the template extension described in [Inference Engine Extensibility Mechanism](../IE_DG/Extensibility_DG/Intro.md).

#### CMake Build File
The first step is to create a CMake configuration file which builds the extension. The content of the "CMakeLists.txt"
file is the following:

@snippet template_extension/old/CMakeLists.txt cmake:extension

The CPU FFT kernel implementation uses OpenCV to perform the FFT; that is why the extension library is linked with
`opencv_core`, which comes with OpenVINO.

#### Custom nGraph Operation "FFT" Implementation
The next step is to create the nGraph operation FFT. The header file "fft_op.hpp" has the following content:

@snippet template_extension/old/fft_op.hpp fft_op:header
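
For orientation, a minimal sketch of what such an operation header typically contains is shown below. This is an illustration based on the generic nGraph operation interface, not a verbatim copy of `fft_op.hpp`:

```cpp
#include <memory>

#include <ngraph/op/op.hpp>

// Sketch of a custom nGraph operation with one input and one "inverse" attribute.
class FFTOp : public ngraph::op::Op {
public:
    NGRAPH_RTTI_DECLARATION;

    FFTOp() = default;
    FFTOp(const ngraph::Output<ngraph::Node>& data, bool inverse);

    // Propagates the input shape and element type to the output.
    void validate_and_infer_types() override;
    // Serializes/deserializes the "inverse" attribute.
    bool visit_attributes(ngraph::AttributeVisitor& visitor) override;
    std::shared_ptr<ngraph::Node>
    clone_with_new_inputs(const ngraph::OutputVector& new_args) const override;

    bool inverse = false;
};
```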

The operation has just one boolean attribute, `inverse`. Implementations of the necessary nGraph operation functions are
in the `fft_op.cpp` file with the following content:

@snippet template_extension/old/fft_op.cpp fft_op:implementation

Refer to [Custom nGraph Operation](../IE_DG/Extensibility_DG/AddingNGraphOps.md) for more details.

#### CPU FFT Kernel Implementation
The operation implementation for the CPU plugin uses OpenCV to perform the FFT. The header file "fft_kernel.hpp" has the
following content:

@snippet template_extension/old/fft_kernel.hpp fft_kernel:header

The "fft_kernel.cpp" file with the CPU implementation has the following content:

@snippet template_extension/old/fft_kernel.cpp fft_kernel:implementation

Refer to [How to Implement Custom CPU Operations](../IE_DG/Extensibility_DG/CPU_Kernel.md) for more details.

#### Extension Library Implementation
The last step is to create an extension library, "extension.cpp" and "extension.hpp", which will expose the FFT
operation to the CPU plugin. The code of the library is described in [Extension Library](../IE_DG/Extensibility_DG/Extension.md).
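
As a rough orientation, a sketch of the interface surface such a library implements is shown below, assuming the pre-2022 `InferenceEngine::IExtension` API (the method names appear in the API changes history later in this document); the authoritative code is in the linked Extension Library page:

```cpp
#include <map>
#include <memory>
#include <string>
#include <vector>

#include <ie_iextension.h>
#include <ngraph/opsets/opset.hpp>

// Sketch: an extension exposes its operation set and kernel implementations.
class Extension : public InferenceEngine::IExtension {
public:
    void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override;
    void Unload() noexcept override {}

    // Makes the custom "FFT" operation known to the Inference Engine.
    std::map<std::string, ngraph::OpSet> getOpSets() override;
    // Reports which implementation types (e.g. "CPU") exist for a node.
    std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override;
    // Returns the kernel implementing the node for the requested type.
    InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node,
                                                       const std::string& implType) override;
};
```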

### Building and Running the Custom Extension
To build the extension, run the following:<br>
```bash
mkdir build && cd build
source /opt/intel/openvino_2022/setupvars.sh
cmake .. -DCMAKE_BUILD_TYPE=Release
make --jobs=$(nproc)
```

The result of this command is a compiled shared library (`.so` or `.dll`). It should be loaded in the
application using the `Core` class instance method `AddExtension` like this:
`core.AddExtension(std::make_shared<Extension>(compiled_library_file_name), "CPU");`.
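
Put together, a minimal sketch of loading the extension and the converted IR might look as follows (the file names follow this example; the variable names are placeholders):

```cpp
#include <memory>

#include <ie_core.hpp>
#include <ie_extension.h>

int main() {
    InferenceEngine::Core core;
    // Register the custom FFT kernel with the CPU plugin.
    core.AddExtension(std::make_shared<InferenceEngine::Extension>("libtemplate_extension.so"), "CPU");
    // Read the IR produced by Model Optimizer and compile it for CPU.
    auto network = core.ReadNetwork("wnet_20.xml");
    auto exec_network = core.LoadNetwork(network, "CPU");
    auto infer_request = exec_network.CreateInferRequest();
    return 0;
}
```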

To test that the extension is implemented correctly, we can run the "mri_reconstruction_demo.py" script with the following content:

@snippet mri_reconstruction_demo.py mri_demo:demo

The script can be executed using the following command line:
```bash
python3 mri_reconstruction_demo.py \
        -m <PATH_TO_IR>/wnet_20.xml \
        -i <PATH_TO_SAMPLE_MRI_IMAGE>.npy \
        -p <Hybrid-CS-Model-MRI_repo>/Data/sampling_mask_20perc.npy \
        -l <PATH_TO_BUILD_DIR>/libtemplate_extension.so \
        -d CPU
```

## Additional Resources

- Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit)
- OpenVINO™ toolkit online documentation: [https://docs.openvino.ai](https://docs.openvino.ai)
- [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
- [Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md)
- [Inference Engine Extensibility Mechanism](../IE_DG/Extensibility_DG/Intro.md)
- [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md)
- [Overview of OpenVINO™ Toolkit Pre-Trained Models](@ref omz_models_group_intel)
- For IoT Libraries and Code Samples, see the [Intel® IoT Developer Kit](https://github.com/intel-iot-devkit).

## Converting Models

- [Convert Your Caffe* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md)
- [Convert Your TensorFlow* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md)
- [Convert Your MXNet* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md)
- [Convert Your Kaldi* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md)
- [Convert Your ONNX* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md)
@@ -1,756 +0,0 @@

# Inference Engine API Changes History {#openvino_docs_IE_DG_API_Changes}

The sections below contain a detailed list of changes made to the Inference Engine API in recent releases.

## 2021.4

### New API

* InferenceEngine::Core::LoadNetwork(modelPath, deviceName, config) - simplified API to read and load a network in one call (see the example below)
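
For example, a minimal sketch of the one-call form (file and device names are placeholders):

```cpp
#include <ie_core.hpp>

int main() {
    InferenceEngine::Core core;
    // Reads the IR and compiles it for the device in a single call.
    auto exec_network = core.LoadNetwork("model.xml", "CPU");
    return 0;
}
```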

### Deprecated API

**InferenceEngine::Parameter**

* InferenceEngine::Parameter(const std::shared_ptr<ngraph::Variant>&)
* InferenceEngine::Parameter(std::shared_ptr<ngraph::Variant>& var)
* std::shared_ptr<ngraph::Variant> InferenceEngine::Parameter::asVariant() const
* InferenceEngine::Parameter::operator std::shared_ptr<ngraph::Variant>() const

**GPU plugin configuration keys**
* KEY_CLDNN_NV12_TWO_INPUTS GPU plugin option. Use KEY_GPU_NV12_TWO_INPUTS instead
* KEY_CLDNN_PLUGIN_PRIORITY GPU plugin option. Use KEY_GPU_PLUGIN_PRIORITY instead
* KEY_CLDNN_PLUGIN_THROTTLE GPU plugin option. Use KEY_GPU_PLUGIN_THROTTLE instead
* KEY_CLDNN_MEM_POOL GPU plugin option
* KEY_CLDNN_GRAPH_DUMPS_DIR GPU plugin option
* KEY_CLDNN_SOURCES_DUMPS_DIR GPU plugin option
* KEY_DUMP_KERNELS GPU plugin option
* KEY_TUNING_MODE GPU plugin option
* KEY_TUNING_FILE GPU plugin option

**InferenceEngine::IInferRequest**
* IInferRequest interface is deprecated, use the InferRequest wrapper:
   * Constructor for InferRequest from IInferRequest::Ptr is deprecated
   * Cast operator for InferRequest to IInferRequest shared pointer is deprecated

**InferenceEngine::ICNNNetwork**
* ICNNNetwork interface is deprecated by means of deprecation of all its methods, use the CNNNetwork wrapper
* CNNNetwork methods working with ICNNNetwork are deprecated:
   * Cast to ICNNNetwork shared pointer
   * Cast to reference to ICNNNetwork interface
   * Constructor from ICNNNetwork shared pointer

**InferenceEngine::IExecutableNetwork**
* IExecutableNetwork is deprecated, use the ExecutableNetwork wrapper:
   * Constructor of ExecutableNetwork from IExecutableNetwork shared pointer is deprecated
   * The following ExecutableNetwork methods are deprecated:
      * ExecutableNetwork::reset
      * Cast operator to IExecutableNetwork shared pointer
      * ExecutableNetwork::CreateInferRequestPtr - use ExecutableNetwork::CreateInferRequest instead

**Extensions API**
* InferenceEngine::make_so_pointer, which is used to create an Extensions library, is replaced by std::make_shared<Extension>(..)
* InferenceEngine::IExtension::Release is deprecated with no replacement
* Use the IE_DEFINE_EXTENSION_CREATE_FUNCTION helper macro instead of an explicit declaration of the CreateExtension function, which creates an extension

**Other changes**
* Version::ApiVersion structure is deprecated, Inference Engine does not have an API version anymore
* LowLatency - use lowLatency2 instead
* CONFIG_KEY(DUMP_EXEC_GRAPH_AS_DOT) - use InferenceEngine::ExecutableNetwork::GetExecGraphInfo::serialize() instead
* Core::ImportNetwork with no device - pass the device name explicitly
* details::InferenceEngineException - use InferenceEngine::Exception and its derivatives instead

## 2021.3

### New API

* InferenceEngine::InferRequest::Cancel to cancel inference request execution
* InferenceEngine::Layout::HWC to support HWC layout for input or output blobs
* InferenceEngine::Precision::F64 data precision for f64 data type
* InferenceEngine::CNNNetwork::getOVNameForTensor to map framework tensor names to OpenVINO internal tensor names

### Deprecated API

* InferenceEngine::IVariableState interface is deprecated, use the InferenceEngine::VariableState wrapper

## 2021.2

### New API

**State API**

* InferenceEngine::InferRequest::QueryState to query state values of a network on the current infer request (see the sketch below)
* InferenceEngine::IVariableState class instead of IMemoryState (rename)
* InferenceEngine::IVariableState::GetState instead of IMemoryState::GetLastState (rename)
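
A usage sketch of the new State API, assuming `infer_request` is an `InferenceEngine::InferRequest` for a stateful network (`Reset` predates this release):

```cpp
#include <ie_core.hpp>

// Minimal sketch: reset all variable states of a network between sequences.
void reset_states(InferenceEngine::InferRequest& infer_request) {
    for (auto&& state : infer_request.QueryState()) {
        // state.GetState() would return a Blob with the current state value.
        state.Reset();  // return the state to its initial value
    }
}
```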

**BatchedBlob** - InferenceEngine::BatchedBlob represents a blob containing other blobs - one per batch.

**Transformations API** - added a new header `ie_transformations.hpp` which contains transformations for the InferenceEngine::CNNNetwork object. Such transformations can be called prior to loading the network for compilation for a particular device:

* InferenceEngine::LowLatency

### Deprecated API

**State API**

* InferenceEngine::ExecutableNetwork::QueryState - use InferenceEngine::InferRequest::QueryState
* InferenceEngine::IVariableState::GetLastState - use InferenceEngine::IVariableState::GetState

## 2021.1

### Deprecated API

**Utility functions to convert Unicode paths**

* InferenceEngine::stringToFileName - use OS-specific native conversion functions
* InferenceEngine::fileNameToString - use OS-specific native conversion functions

### Removed API

**Plugin API:**

* InferenceEngine::InferencePlugin C++ plugin wrapper class
* InferenceEngine::IInferencePlugin plugin interface
* InferenceEngine::PluginDispatcher class
* InferenceEngine::InferenceEnginePluginPtr typedef
* InferenceEngine::ICNNNetReader reader interface
* InferenceEngine::CNNNetReader class

**Extensibility API:**

* InferenceEngine::ILayerImplFactory class
* InferenceEngine::IShapeInferImpl class
* InferenceEngine::IShapeInferExtension class
* InferenceEngine::IExtension::getFactoryFor(ILayerImplFactory\*& factory, const CNNLayer\* cnnLayer, ResponseDesc\* resp) noexcept method
* InferenceEngine::IExtension::getPrimitiveTypes(char\*\*& types, unsigned int& size, ResponseDesc\* resp) noexcept method
* InferenceEngine::ShapeInferImpl class
* InferenceEngine::Extension::getFactoryFor(ILayerImplFactory\*& factory, const CNNLayer\* cnnLayer, ResponseDesc\* resp) noexcept method
* InferenceEngine::Extension::getPrimitiveTypes(char\*\*& types, unsigned int& size, ResponseDesc\* resp) noexcept method

**Network API:**

* InferenceEngine::details::CNNNetworkIterator class
* InferenceEngine::CNNNetwork::getPrecision() const method
* InferenceEngine::CNNNetwork::getLayerByName(const char\* layerName) const method
* InferenceEngine::CNNNetwork::size() const method
* InferenceEngine::CNNNetwork::begin() const method
* InferenceEngine::CNNNetwork::end() const method
* InferenceEngine::CNNNetwork::AddExtension(const IShapeInferExtensionPtr& extension) method
* InferenceEngine::ICNNNetwork::getPrecision() const noexcept method
* InferenceEngine::ICNNNetwork::getName(char\* pName, size_t len) const noexcept method
* InferenceEngine::ICNNNetwork::getData(const char\* dname) noexcept method
* InferenceEngine::ICNNNetwork::addLayer(const CNNLayerPtr& layer) noexcept method
* InferenceEngine::ICNNNetwork::getLayerByName(const char\* layerName, CNNLayerPtr& out, ResponseDesc\* resp) const noexcept method
* InferenceEngine::ICNNNetwork::AddExtension(const IShapeInferExtensionPtr& extension, ResponseDesc\* resp) noexcept method
* InferenceEngine::ICNNNetwork::getStats(ICNNNetworkStats\*\* stats, ResponseDesc\* resp) const noexcept method
* InferenceEngine::ICNNNetworkStats class
* InferenceEngine::NetworkNodeStats class
* InferenceEngine::Data::getCreatorLayer() method
* InferenceEngine::Data::getInputTo() method
* InferenceEngine::LayerParams class

**Layer API:**

* InferenceEngine::CNNLayer class
* InferenceEngine::WeightableLayer class
* InferenceEngine::BatchNormalizationLayer class
* InferenceEngine::BatchToSpaceLayer class
* InferenceEngine::BinaryConvolutionLayer class
* InferenceEngine::BroadcastLayer class
* InferenceEngine::BucketizeLayer class
* InferenceEngine::ClampLayer class
* InferenceEngine::ConcatLayer class
* InferenceEngine::ConvolutionLayer class
* InferenceEngine::CropLayer class
* InferenceEngine::DeconvolutionLayer class
* InferenceEngine::DeformableConvolutionLayer class
* InferenceEngine::DepthToSpaceLayer class
* InferenceEngine::EltwiseLayer class
* InferenceEngine::ExperimentalDetectronPriorGridGenerator class
* InferenceEngine::ExperimentalDetectronPriorGridGeneratorLayer class
* InferenceEngine::ExperimentalSparseWeightedReduceLayer class
* InferenceEngine::FillLayer class
* InferenceEngine::FullyConnectedLayer class
* InferenceEngine::GRNLayer class
* InferenceEngine::GRUCell class
* InferenceEngine::GatherLayer class
* InferenceEngine::GemmLayer class
* InferenceEngine::LSTMCell class
* InferenceEngine::MVNLayer class
* InferenceEngine::MathLayer class
* InferenceEngine::NonMaxSuppression class
* InferenceEngine::NormLayer class
* InferenceEngine::OneHotLayer class
* InferenceEngine::PReLULayer class
* InferenceEngine::PadLayer class
* InferenceEngine::PoolingLayer class
* InferenceEngine::PowerLayer class
* InferenceEngine::QuantizeLayer class
* InferenceEngine::RNNCell class
* InferenceEngine::RNNCellBase class
* InferenceEngine::RNNSequenceLayer class
* InferenceEngine::RangeLayer class
* InferenceEngine::ReLU6Layer class
* InferenceEngine::ReLULayer class
* InferenceEngine::ReduceLayer class
* InferenceEngine::ReshapeLayer class
* InferenceEngine::ReverseSequenceLayer class
* InferenceEngine::ScaleShiftLayer class
* InferenceEngine::ScatterLayer class
* InferenceEngine::SelectLayer class
* InferenceEngine::ShuffleChannelsLayer class
* InferenceEngine::SoftMaxLayer class
* InferenceEngine::SpaceToBatchLayer class
* InferenceEngine::SpaceToDepthLayer class
* InferenceEngine::SparseFillEmptyRowsLayer class
* InferenceEngine::SparseSegmentReduceLayer class
* InferenceEngine::SparseToDenseLayer class
* InferenceEngine::SplitLayer class
* InferenceEngine::StridedSliceLayer class
* InferenceEngine::TensorIterator class
* InferenceEngine::TileLayer class
* InferenceEngine::TopKLayer class
* InferenceEngine::UniqueLayer class

## 2020.4

### New API

**CPU Plugin API:**

* InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16 config key

**Metrics and values for Query API:**

* METRIC_KEY(OPTIMIZATION_CAPABILITIES)
* METRIC_VALUE(BF16)

### Deprecated API

**MYRIAD Plugin API:**

* VPU_CONFIG_KEY(IGNORE_IR_STATISTIC)

### Removed API

**Inference Engine NN Builder API:**

* InferenceEngine::Builder::EltwiseLayer
* InferenceEngine::Builder::MemoryLayer
* InferenceEngine::Builder::ROIPoolingLayer
* InferenceEngine::Builder::DeconvolutionLayer
* InferenceEngine::Builder::ReLULayer
* InferenceEngine::Builder::TanHLayer
* InferenceEngine::Builder::InputLayer
* InferenceEngine::Builder::PoolingLayer
* InferenceEngine::Builder::CropLayer
* InferenceEngine::Builder::GRUSequenceLayer
* InferenceEngine::Builder::NormLayer
* InferenceEngine::Builder::LSTMSequenceLayer
* InferenceEngine::Builder::ClampLayer
* InferenceEngine::Builder::PSROIPoolingLayer
* InferenceEngine::Builder::Layer
* InferenceEngine::Builder::RNNSequenceLayer
* InferenceEngine::Builder::ReorgYoloLayer
* InferenceEngine::Builder::NormalizeLayer
* InferenceEngine::Builder::PriorBoxClusteredLayer
* InferenceEngine::Builder::MVNLayer
* InferenceEngine::Builder::PermuteLayer
* InferenceEngine::Builder::SimplerNMSLayer
* InferenceEngine::Builder::ConstLayer
* InferenceEngine::Builder::DeformableConvolutionLayer
* InferenceEngine::Builder::FullyConnectedLayer
* InferenceEngine::Builder::PriorBoxLayer
* InferenceEngine::Builder::SoftMaxLayer
* InferenceEngine::Builder::OutputLayer
* InferenceEngine::Builder::TileLayer
* InferenceEngine::Builder::SplitLayer
* InferenceEngine::Builder::PReLULayer
* InferenceEngine::Builder::RegionYoloLayer
* InferenceEngine::Builder::ReshapeLayer
* InferenceEngine::Builder::ConvolutionLayer
* InferenceEngine::Builder::DetectionOutputLayer
* InferenceEngine::Builder::ConcatLayer
* InferenceEngine::Builder::ELULayer
* InferenceEngine::Builder::GRNLayer
* InferenceEngine::Builder::LRNLayer
* InferenceEngine::Builder::ArgMaxLayer
* InferenceEngine::Builder::ReLU6Layer
* InferenceEngine::Builder::ScaleShiftLayer
* InferenceEngine::Builder::ProposalLayer
* InferenceEngine::Builder::SigmoidLayer
* InferenceEngine::Builder::ResampleLayer
* InferenceEngine::Builder::CTCGreedyDecoderLayer
* InferenceEngine::Builder::BatchNormalizationLayer
* InferenceEngine::Builder::LayerDecorator
* InferenceEngine::Builder::PowerLayer
* InferenceEngine::Builder::Network
* InferenceEngine::Builder::PortInfo
* InferenceEngine::Builder::Connection
* InferenceEngine::Builder::PortData
* InferenceEngine::Builder::Port
* InferenceEngine::Builder::ILayer
* InferenceEngine::Builder::INetworkIterator
* InferenceEngine::Builder::INetwork

## 2020.2

### New API

**Extensibility API:**

* InferenceEngine::IExtension::getImplTypes(const std::shared_ptr<ngraph::Node>& node) method
* InferenceEngine::IExtension::getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) method

### Deprecated API

**Extensibility API:**

* InferenceEngine::ILayerImplFactory class
* InferenceEngine::IShapeInferImpl class
* InferenceEngine::IShapeInferExtension class
* InferenceEngine::IExtension::getFactoryFor(ILayerImplFactory\*& factory, const CNNLayer\* cnnLayer, ResponseDesc\* resp) noexcept method
* InferenceEngine::IExtension::getPrimitiveTypes(char\*\*& types, unsigned int& size, ResponseDesc\* resp) noexcept method
* InferenceEngine::ShapeInferImpl class
* InferenceEngine::Extension::getFactoryFor(ILayerImplFactory\*& factory, const CNNLayer\* cnnLayer, ResponseDesc\* resp) noexcept method
* InferenceEngine::Extension::getPrimitiveTypes(char\*\*& types, unsigned int& size, ResponseDesc\* resp) noexcept method

**Network API:**

* InferenceEngine::details::CNNNetworkIterator class
* InferenceEngine::CNNNetwork::getPrecision() const method
* InferenceEngine::CNNNetwork::getLayerByName(const char\* layerName) const method
* InferenceEngine::CNNNetwork::size() const method
* InferenceEngine::CNNNetwork::begin() const method
* InferenceEngine::CNNNetwork::end() const method
* InferenceEngine::CNNNetwork::AddExtension(const IShapeInferExtensionPtr& extension) method
* InferenceEngine::ICNNNetwork::getPrecision() const noexcept method
* InferenceEngine::ICNNNetwork::getName(char\* pName, size_t len) const noexcept method
* InferenceEngine::ICNNNetwork::getData(const char\* dname) noexcept method
* InferenceEngine::ICNNNetwork::addLayer(const CNNLayerPtr& layer) noexcept method
* InferenceEngine::ICNNNetwork::getLayerByName(const char\* layerName, CNNLayerPtr& out, ResponseDesc\* resp) const noexcept method
* InferenceEngine::ICNNNetwork::AddExtension(const IShapeInferExtensionPtr& extension, ResponseDesc\* resp) noexcept method
* InferenceEngine::ICNNNetwork::getStats(ICNNNetworkStats\*\* stats, ResponseDesc\* resp) const noexcept method
* InferenceEngine::ICNNNetworkStats class
* InferenceEngine::NetworkNodeStats class
* InferenceEngine::Data::getCreatorLayer() method
* InferenceEngine::Data::getInputTo() method
* InferenceEngine::LayerParams class

**Layer API:**

* InferenceEngine::CNNLayer class
* InferenceEngine::WeightableLayer class
* InferenceEngine::BatchNormalizationLayer class
* InferenceEngine::BatchToSpaceLayer class
* InferenceEngine::BinaryConvolutionLayer class
* InferenceEngine::BroadcastLayer class
* InferenceEngine::BucketizeLayer class
* InferenceEngine::ClampLayer class
* InferenceEngine::ConcatLayer class
* InferenceEngine::ConvolutionLayer class
* InferenceEngine::CropLayer class
* InferenceEngine::DeconvolutionLayer class
* InferenceEngine::DeformableConvolutionLayer class
* InferenceEngine::DepthToSpaceLayer class
* InferenceEngine::EltwiseLayer class
* InferenceEngine::ExperimentalDetectronPriorGridGenerator class
* InferenceEngine::ExperimentalDetectronPriorGridGeneratorLayer class
* InferenceEngine::ExperimentalSparseWeightedReduceLayer class
* InferenceEngine::FillLayer class
* InferenceEngine::FullyConnectedLayer class
* InferenceEngine::GRNLayer class
* InferenceEngine::GRUCell class
* InferenceEngine::GatherLayer class
* InferenceEngine::GemmLayer class
* InferenceEngine::LSTMCell class
* InferenceEngine::MVNLayer class
* InferenceEngine::MathLayer class
* InferenceEngine::NonMaxSuppression class
* InferenceEngine::NormLayer class
* InferenceEngine::OneHotLayer class
* InferenceEngine::PReLULayer class
* InferenceEngine::PadLayer class
* InferenceEngine::PoolingLayer class
* InferenceEngine::PowerLayer class
* InferenceEngine::QuantizeLayer class
* InferenceEngine::RNNCell class
* InferenceEngine::RNNCellBase class
* InferenceEngine::RNNSequenceLayer class
* InferenceEngine::RangeLayer class
* InferenceEngine::ReLU6Layer class
* InferenceEngine::ReLULayer class
* InferenceEngine::ReduceLayer class
* InferenceEngine::ReshapeLayer class
* InferenceEngine::ReverseSequenceLayer class
* InferenceEngine::ScaleShiftLayer class
* InferenceEngine::ScatterLayer class
* InferenceEngine::SelectLayer class
* InferenceEngine::ShuffleChannelsLayer class
* InferenceEngine::SoftMaxLayer class
* InferenceEngine::SpaceToBatchLayer class
* InferenceEngine::SpaceToDepthLayer class
* InferenceEngine::SparseFillEmptyRowsLayer class
* InferenceEngine::SparseSegmentReduceLayer class
* InferenceEngine::SparseToDenseLayer class
* InferenceEngine::SplitLayer class
* InferenceEngine::StridedSliceLayer class
* InferenceEngine::TensorIterator class
* InferenceEngine::TileLayer class
* InferenceEngine::TopKLayer class
* InferenceEngine::UniqueLayer class

## 2020.1

### New API

**Integration with ngraph API:**

* InferenceEngine::CNNNetwork(const std::shared_ptr<ngraph::Function>& network) ctor from ngraph::Function
* InferenceEngine::CNNNetwork::getFunction() const noexcept method
* InferenceEngine::ICNNNetwork::getFunction() const noexcept method
* InferenceEngine::Parameter(const std::shared_ptr<ngraph::Variant>& var) ctor
* InferenceEngine::Parameter::asVariant() const method
* InferenceEngine::Parameter::operator std::shared_ptr<ngraph::Variant>() const operator
* InferenceEngine::Core::ReadNetwork(const std::wstring& modelPath, const std::wstring& binPath) method
* InferenceEngine::Core::ReadNetwork(const std::string& modelPath, const std::string& binPath = "") method
* InferenceEngine::Core::ReadNetwork(const std::string& model, const Blob::CPtr& weights) method
* InferenceEngine::Core::AddExtension(const IExtensionPtr& extension) method
* InferenceEngine::IExtension::getOpSets() method

**Offline compilation: import / export to std::stream** (see the sketch below)**:**

* InferenceEngine::ExecutableNetwork::Export(std::ostream& networkModel) method
* InferenceEngine::Core::ImportNetwork(std::istream& networkModel, const std::string& deviceName = {}, const std::map<std::string, std::string>& config = {}) method
* InferenceEngine::IExecutableNetwork::Export(std::ostream& networkModel, ResponseDesc \*resp) noexcept method
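
A sketch of round-tripping a compiled network through a stream with these methods (the device name is a placeholder):

```cpp
#include <sstream>

#include <ie_core.hpp>

void export_import(InferenceEngine::Core& core,
                   InferenceEngine::ExecutableNetwork& exec_network) {
    std::stringstream model_stream;
    exec_network.Export(model_stream);                        // serialize the compiled network
    auto restored = core.ImportNetwork(model_stream, "CPU");  // restore it for the same device
}
```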

**RemoteBlob accelerator memory sharing API:**

* InferenceEngine::RemoteContext class
* InferenceEngine::RemoteBlob class
* InferenceEngine::Core::CreateContext(const std::string& deviceName, const ParamMap& params) method
* InferenceEngine::Core::GetDefaultContext(const std::string& deviceName) method
* InferenceEngine::Core::LoadNetwork(CNNNetwork network, RemoteContext::Ptr context, const std::map<std::string, std::string>& config = std::map<std::string, std::string>()) method

**GNA firmware model image generation:**

* GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE_GENERATION) config key
* GNA_CONFIG_VALUE(GEN) value
* GNA_CONFIG_VALUE(GEN_EXACT) value
* GNA_CONFIG_VALUE(SSE) value
* GNA_CONFIG_VALUE(SSE_EXACT) value
* GNA_CONFIG_VALUE(AVX1) value
* GNA_CONFIG_VALUE(AVX1_EXACT) value
* GNA_CONFIG_VALUE(AVX2) value
* GNA_CONFIG_VALUE(AVX2_EXACT) value

**MemoryBlob mapping of memory to the user space:**

* InferenceEngine::MemoryBlob::rwmap() noexcept method
* InferenceEngine::MemoryBlob::rmap() noexcept method (see the usage sketch below)
* InferenceEngine::MemoryBlob::wmap() noexcept method
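
For instance, a sketch of read-only mapping (assuming `blob` holds float data):

```cpp
#include <ie_blob.h>

float read_first_element(const InferenceEngine::Blob::Ptr& blob) {
    auto mblob = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
    if (!mblob) return 0.0f;      // not a memory blob
    auto mapped = mblob->rmap();  // memory stays locked for reading while 'mapped' is alive
    return mapped.as<const float*>()[0];
}
```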

**Memory interoperability on acceleration devices. General classes and GPU helper functions**
* InferenceEngine::RemoteBlob class
* InferenceEngine::RemoteContext class
* InferenceEngine::Core::CreateContext(const std::string& deviceName, const ParamMap& params) method
* InferenceEngine::Core::GetDefaultContext(const std::string& deviceName) method
* InferenceEngine::make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx) function
* InferenceEngine::gpu::make_shared_blob_nv12(size_t height, size_t width, RemoteContext::Ptr ctx, VASurfaceID nv12_surf) function
* InferenceEngine::gpu::make_shared_context(Core& core, std::string deviceName, VADisplay device) function
* InferenceEngine::gpu::make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx, VASurfaceID surface, uint32_t plane = 0) function
* InferenceEngine::gpu::make_shared_blob_nv12(RemoteContext::Ptr ctx, cl::Image2D& nv12_image_plane_y, cl::Image2D& nv12_image_plane_uv) function
* InferenceEngine::gpu::make_shared_context(Core& core, std::string deviceName, cl_context ctx) function
* InferenceEngine::gpu::make_shared_blob(const TensorDesc& desc, ClContext::Ptr ctx) function
* InferenceEngine::gpu::make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx, cl::Buffer& buffer) function
* InferenceEngine::gpu::make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx, cl_mem buffer) function
* InferenceEngine::gpu::make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx, cl::Image2D& image) function

### Deprecated API

**Inference Engine NN Builder API:**

* InferenceEngine::Builder::EltwiseLayer
* InferenceEngine::Builder::MemoryLayer
* InferenceEngine::Builder::ROIPoolingLayer
* InferenceEngine::Builder::DeconvolutionLayer
* InferenceEngine::Builder::ReLULayer
* InferenceEngine::Builder::TanHLayer
* InferenceEngine::Builder::InputLayer
* InferenceEngine::Builder::PoolingLayer
* InferenceEngine::Builder::CropLayer
* InferenceEngine::Builder::GRUSequenceLayer
* InferenceEngine::Builder::NormLayer
* InferenceEngine::Builder::LSTMSequenceLayer
* InferenceEngine::Builder::ClampLayer
* InferenceEngine::Builder::PSROIPoolingLayer
* InferenceEngine::Builder::Layer
* InferenceEngine::Builder::RNNSequenceLayer
* InferenceEngine::Builder::ReorgYoloLayer
* InferenceEngine::Builder::NormalizeLayer
* InferenceEngine::Builder::PriorBoxClusteredLayer
* InferenceEngine::Builder::MVNLayer
* InferenceEngine::Builder::PermuteLayer
* InferenceEngine::Builder::SimplerNMSLayer
* InferenceEngine::Builder::ConstLayer
* InferenceEngine::Builder::DeformableConvolutionLayer
* InferenceEngine::Builder::FullyConnectedLayer
* InferenceEngine::Builder::PriorBoxLayer
* InferenceEngine::Builder::SoftMaxLayer
* InferenceEngine::Builder::OutputLayer
* InferenceEngine::Builder::TileLayer
* InferenceEngine::Builder::SplitLayer
* InferenceEngine::Builder::PReLULayer
* InferenceEngine::Builder::RegionYoloLayer
* InferenceEngine::Builder::ReshapeLayer
* InferenceEngine::Builder::ConvolutionLayer
* InferenceEngine::Builder::DetectionOutputLayer
* InferenceEngine::Builder::ConcatLayer
* InferenceEngine::Builder::ELULayer
* InferenceEngine::Builder::GRNLayer
* InferenceEngine::Builder::LRNLayer
* InferenceEngine::Builder::ArgMaxLayer
* InferenceEngine::Builder::ReLU6Layer
* InferenceEngine::Builder::ScaleShiftLayer
* InferenceEngine::Builder::ProposalLayer
* InferenceEngine::Builder::SigmoidLayer
* InferenceEngine::Builder::ResampleLayer
* InferenceEngine::Builder::CTCGreedyDecoderLayer
* InferenceEngine::Builder::BatchNormalizationLayer
* InferenceEngine::Builder::LayerDecorator
* InferenceEngine::Builder::PowerLayer
* InferenceEngine::Builder::Network
* InferenceEngine::Builder::PortInfo
* InferenceEngine::Builder::Connection
* InferenceEngine::Builder::PortData
* InferenceEngine::Builder::Port
* InferenceEngine::Builder::ILayer
* InferenceEngine::Builder::INetworkIterator
* InferenceEngine::Builder::INetwork

**Plugin API:**

* InferenceEngine::InferencePlugin C++ plugin wrapper class
* InferenceEngine::IInferencePlugin plugin interface
* InferenceEngine::PluginDispatcher class
* InferenceEngine::InferenceEnginePluginPtr typedef
* InferenceEngine::ICNNNetReader reader interface
* InferenceEngine::CNNNetReader class

**Blob API:**

* Blob::element_size() const noexcept method
* Blob::buffer() noexcept method
* Blob::cbuffer() noexcept method
* MemoryBlob::buffer() noexcept method
* MemoryBlob::cbuffer() noexcept method

### Removed API

Removed all [Inference Engine API which was deprecated in 2019'R2](https://docs.openvino.ai/2019_R3/_docs_IE_DG_API_Changes.html#deprecated_api)

## 2019 R3

### New API

**New supported layers:**

* InferenceEngine::SparseFillEmptyRowsLayer new class
* InferenceEngine::UniqueLayer new class
* InferenceEngine::NonMaxSuppressionLayer new class
* InferenceEngine::ScatterLayer new class

**FPGA plugin streaming support:**

* DLIA_METRIC_VALUE(INPUT_STREAMING) value to METRIC_KEY(OPTIMIZATION_CAPABILITIES)
* DLIA_CONFIG_KEY(ENABLE_STREAMING) config key

### Removed API

* InferenceEngine::EltwiseLayer::Select from InferenceEngine::EltwiseLayer::eOperation enumeration

## 2019 R2

### New API

**Inference Engine Core API:**

* Introduced the InferenceEngine::Core high-level class to manage devices

**Query API extensions to InferenceEngine::ExecutableNetwork and InferenceEngine::IExecutableNetwork:**

* InferenceEngine::ExecutableNetwork::SetConfig method
* InferenceEngine::ExecutableNetwork::GetConfig method
* InferenceEngine::ExecutableNetwork::GetMetric method
* InferenceEngine::IExecutableNetwork::SetConfig method
* InferenceEngine::IExecutableNetwork::GetConfig method
* InferenceEngine::IExecutableNetwork::GetMetric method

**Metrics and values for Query API:**

* METRIC_KEY(AVAILABLE_DEVICES)
* METRIC_KEY(SUPPORTED_METRICS)
* METRIC_KEY(SUPPORTED_CONFIG_KEYS)
* METRIC_KEY(FULL_DEVICE_NAME)
* METRIC_KEY(OPTIMIZATION_CAPABILITIES)
* METRIC_VALUE(FP32)
* METRIC_VALUE(FP16)
* METRIC_VALUE(INT8)
* METRIC_VALUE(BIN)
* METRIC_VALUE(WINOGRAD)
* DLIA_METRIC_VALUE(FP11)
* METRIC_KEY(RANGE_FOR_STREAMS)
* METRIC_KEY(NUMBER_OF_WAITING_INFER_REQUESTS)
* METRIC_KEY(NUMBER_OF_EXEC_INFER_REQUESTS)
* METRIC_KEY(DEVICE_THERMAL)
* METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)
* EXEC_NETWORK_METRIC_KEY(NETWORK_NAME)
* EXEC_NETWORK_METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)

**Common API:**

* CLDNN_CONFIG_KEY(INT8_ENABLED) config key
* CONFIG_KEY(GPU_THROUGHPUT_AUTO)
* CONFIG_KEY(GPU_THROUGHPUT_STREAMS)
* DLIA_CONFIG_KEY(IO_TRANSFORMATIONS_NATIVE) config key
* DLIA_CONFIG_KEY(DUMP_SUPPORTED_LAYERS_INFORMATION) config key
* GNA_CONFIG_VALUE(SW_FP32) config value for GNA_CONFIG_KEY(DEVICE_MODE) key
* MULTI_CONFIG_KEY(DEVICE_PRIORITIES) config key for the `MULTI` device
* InferenceEngine::CNNNetReader::ReadNetwork(const std::wstring &filepath) new method
* InferenceEngine::CNNNetReader::ReadWeights(const std::wstring &filepath) new method
* InferenceEngine::ExecutableNetwork::ExecutableNetwork(IExecutableNetwork::Ptr actual, InferenceEnginePluginPtr plg) constructor with additional `plg` parameter
* InferenceEngine::InferRequest::InferRequest(IInferRequest::Ptr request, InferenceEnginePluginPtr plg) constructor with additional `plg` parameter
* InferenceEngine::Data::setName method
* InferenceEngine::QueryNetworkResult::supportedLayersMap
* InferenceEngine::Precision::I64 extension to InferenceEngine::Precision::ePrecision enumeration

**New supported primitives:**

* InferenceEngine::Builder::DeformableConvolutionLayer new class
* InferenceEngine::DeformableConvolutionLayer new class
* InferenceEngine::EltwiseLayer::Logical_NOT, InferenceEngine::EltwiseLayer::Mean, InferenceEngine::EltwiseLayer::Select extensions to InferenceEngine::EltwiseLayer::eOperation enumeration
* InferenceEngine::OneHotLayer new class
* InferenceEngine::SelectLayer new class
* InferenceEngine::BroadcastLayer new class
* InferenceEngine::MathLayer new class
* InferenceEngine::ReduceLayer new class
* InferenceEngine::TopKLayer new class

**Extensions to Blob creation API:**

* InferenceEngine::Blob::is method
* InferenceEngine::Blob::is const method
* InferenceEngine::Blob::as method
* InferenceEngine::Blob::as const method
* InferenceEngine::Blob::getAllocator abstract method
* InferenceEngine::Blob::getHandle abstract method
* InferenceEngine::MemoryBlob class
* InferenceEngine::ColorFormat enumeration
* InferenceEngine::PreProcessInfo::setColorFormat method
* InferenceEngine::PreProcessInfo::getColorFormat method
* InferenceEngine::CompoundBlob class to work with blobs consisting of several planes
* InferenceEngine::NV12Blob class representing an NV12 blob with two planes

### Deprecated API

The methods listed below are deprecated and will be removed in the 2019 R4 release:

**Common API:**

* InferenceEngine::InputInfo::getInputPrecision method
* InferenceEngine::InputInfo::setInputPrecision method
* InferenceEngine::InputInfo::getDims method
* InferenceEngine::CNNLayer::GetParamsAsBool method
* InferenceEngine::CNNNetwork::CNNNetwork(ICNNNetwork* actual) constructor
* InferenceEngine::CNNNetwork::setTargetDevice method
* HETERO_CONFIG_KEY(DUMP_DLA_MESSAGES) config key
* InferenceEngine::ILayerImplFactory::getShapes method
* InferenceEngine::IShapeInferImpl::inferShapes(const std::vector<SizeVector>&, const std::map<std::string, std::string>& , const std::map<std::string, Blob::Ptr>&, std::vector<SizeVector>&, ResponseDesc\*) method
* InferenceEngine::Data::setBatchSize method
* InferenceEngine::QueryNetworkResult::supportedLayers field
* InferenceEngine::ICNNNetwork::setBatchSize(const size_t size) method
* InferenceEngine::Blob::Resize method
* InferenceEngine::Blob::Reshape method
* InferenceEngine::TBlob::set method

**InferenceEngine::IInferencePlugin and InferenceEngine::InferencePlugin obsolete methods:**

* InferenceEngine::InferencePlugin::LoadNetwork(ICNNNetwork &network) method
* InferenceEngine::InferencePlugin::Infer method
* InferenceEngine::InferencePlugin::GetPerformanceCounts method
* InferenceEngine::InferencePlugin::QueryNetwork(const ICNNNetwork &network, QueryNetworkResult &res) const method
* InferenceEngine::IInferencePlugin::LoadNetwork(ICNNNetwork &network, ResponseDesc \*resp) method
* InferenceEngine::IInferencePlugin::Infer(const Blob &input, Blob &result, ResponseDesc \*resp) method
* InferenceEngine::IInferencePlugin::Infer(const BlobMap &input, BlobMap &result, ResponseDesc \*resp) method
* InferenceEngine::IInferencePlugin::GetPerformanceCounts method
* InferenceEngine::IInferencePlugin::QueryNetwork(const ICNNNetwork& network, QueryNetworkResult& res) const method

**Fields in InferenceEngine::Data class are replaced with appropriate methods:**

* InferenceEngine::Data::precision field
* InferenceEngine::Data::layout field
* InferenceEngine::Data::dims field
* InferenceEngine::Data::creatorLayer field
* InferenceEngine::Data::name field
* InferenceEngine::Data::inputTo field
* InferenceEngine::Data::userObject field

**Heterogeneous plugin:**

* InferenceEngine::IHeteroDeviceLoader class
* InferenceEngine::IHeteroInferencePlugin class
* InferenceEngine::HeteroPluginPtr class
* InferenceEngine::InferencePlugin::operator HeteroPluginPtr operator

**Blob creation API with dimensions in reverse order:**

* InferenceEngine::Blob::Blob(Precision p) constructor
* InferenceEngine::Blob::Blob(Precision p, Layout l) constructor
|
||||
* InferenceEngine::Blob::Blob(Precision p, const SizeVector &dims) constructor
|
||||
* InferenceEngine::Blob::Blob(Precision p, Layout l, const SizeVector &dims) constructor
|
||||
* InferenceEngine::TBlob::TBlob(Precision p, Layout l) constructor
|
||||
* InferenceEngine::TBlob::TBlob(Precision p, Layout l, const SizeVector& dims) constructor
|
||||
* InferenceEngine::TBlob::TBlob(Precision p, Layout l, const SizeVector& dims, T* ptr, size_t data_size) constructor
|
||||
* InferenceEngine::TBlob::TBlob(Precision p, Layout l, const SizeVector &dims, std::shared_ptr<IAllocator> alloc) constructor
|
||||
* InferenceEngine::Blob::type() method
|
||||
* InferenceEngine::Blob::precision() method
|
||||
* InferenceEngine::Blob::layout() method
|
||||
* InferenceEngine::Blob::dims() method
|
||||
* InferenceEngine::make_shared_blob(Precision p, Layout l, const SizeVector &dims) function
|
||||
* InferenceEngine::make_shared_blob(Precision p, const SizeVector &dims) function
|
||||
* InferenceEngine::make_shared_blob(Precision p, Layout l, const TArg &arg) function
|
||||
* InferenceEngine::make_shared_blob(Precision p, const TArg &arg) function
|
||||
* InferenceEngine::make_shared_blob(TBlob<TypeTo> &&arg) function
|
||||
* InferenceEngine::make_shared_blob(Precision p, Layout l) function
|
||||
* InferenceEngine::make_shared_blob(Precision p, Layout l, SizeVector dims, const std::vector<TypeTo> &arg) function
|
||||
* InferenceEngine::make_shared_blob(Precision p, Layout l, const std::vector<TypeTo> &arg) function
|
||||
* InferenceEngine::make_shared_blob(Precision p, const std::vector<TypeTo> &arg) function
|
||||
* InferenceEngine::make_shared_blob(Precision p, Layout l, const SizeVector &dims, TypeTo * ptr, size_t size) function
|
||||
* InferenceEngine::make_shared_blob(Precision p, const SizeVector &dims, TypeTo * ptr, size_t size) function
|
||||
* InferenceEngine::I_N variable
|
||||
* InferenceEngine::I_C variable
|
||||
* InferenceEngine::I_H variable
|
||||
* InferenceEngine::I_W variable
|
||||
* InferenceEngine::LayoutOffsetCounter class
|
||||
* InferenceEngine::ConvertLayout function
|
||||
|
||||
**API working with device enumeration:**
|
||||
|
||||
* InferenceEngine::TargetDevice enumeration
|
||||
* InferenceEngine::TargetDeviceInfo class
|
||||
* InferenceEngine::getDeviceName function
|
||||
* InferenceEngine::FindPluginRequest class
|
||||
* InferenceEngine::FindPluginResponse class
|
||||
* InferenceEngine::findPlugin(const FindPluginRequest &req, FindPluginResponse &result, ResponseDesc *resp) function
|
||||
* InferenceEngine::ICNNNetwork::setTargetDevice method
|
||||
* InferenceEngine::ICNNNetwork::getTargetDevice method
|
||||
* InferenceEngine::PluginDispatcher::getPluginByDevice method
|
||||
* InferenceEngine::PluginDispatcher::getSuitablePlugin method
|
||||
@@ -1,214 +0,0 @@
# Bfloat16 Inference {#openvino_docs_IE_DG_Bfloat16Inference}

## Bfloat16 Inference Usage (C++)

@sphinxdirective
.. raw:: html

    <div id="switcher-cpp" class="switcher-anchor">C++</div>
@endsphinxdirective

### Disclaimer

Bfloat16 inference on CPU requires a platform with native support for the *avx512_bf16* instruction, and therefore the bfloat16 data format. It is possible to run bfloat16 inference in simulation mode on platforms with Intel® Advanced Vector Extensions 512 (Intel® AVX-512) but without *avx512_bf16*; however, this leads to significant performance degradation in comparison with FP32 or native *avx512_bf16* execution.

### Introduction

Bfloat16 (BF16) is the 16-bit Brain Floating-Point format: a truncated version of the 32-bit IEEE 754 single-precision floating-point format FP32. BF16 keeps the same 8 exponent bits as FP32 but reduces the precision of the sign and mantissa fields from 24 bits to 8 bits.

![bf16_format]

Preserving the exponent bits keeps BF16 in the same range as FP32 (~1e-38 to ~3e38). This simplifies conversion between the two data types: you only need to truncate (or flush to zero) the 16 low-order bits. The truncated mantissa occasionally reduces precision, but according to [investigations](https://cloud.google.com/blog/products/ai-machine-learning/bfloat16-the-secret-to-high-performance-on-cloud-tpus), neural networks are more sensitive to the size of the exponent than to the size of the mantissa. Also, in many models precision is needed close to zero but not so much at the maximum of the range. Another useful feature of BF16 is the possibility of encoding INT8 in BF16 without loss of accuracy, because the INT8 range fits completely within the BF16 mantissa field. This reduces data traffic when converting INT8 input image data directly to BF16 without an intermediate FP32 representation, or when combining [INT8 inference](Int8Inference.md) with BF16 layers.

See the [BFLOAT16 – Hardware Numerics Definition white paper](https://software.intel.com/content/dam/develop/external/us/en/documents/bf16-hardware-numerics-definition-white-paper.pdf) for more bfloat16 format details.

There are two ways to check whether the CPU device supports bfloat16 computations for models:

1. Query the instruction set using one of these system commands:
   * `lscpu | grep avx512_bf16`
   * `cat /proc/cpuinfo | grep avx512_bf16`
2. Use the [Query API](InferenceEngine_QueryAPI.md) with `METRIC_KEY(OPTIMIZATION_CAPABILITIES)`, which should return `BF16` in the list of CPU optimization options:

@snippet snippets/Bfloat16Inference0.cpp part0
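
For reference, a minimal sketch of such a query, assuming the classic `InferenceEngine::Core` API (the exact contents of the referenced snippet may differ):

```cpp
#include <inference_engine.hpp>

#include <algorithm>
#include <string>
#include <vector>

int main() {
    InferenceEngine::Core core;
    // Ask the CPU plugin for its optimization capabilities.
    auto caps = core.GetMetric("CPU", METRIC_KEY(OPTIMIZATION_CAPABILITIES))
                    .as<std::vector<std::string>>();
    // "BF16" in the list means native bfloat16 support is available.
    bool bf16_supported = std::find(caps.begin(), caps.end(), "BF16") != caps.end();
    return bf16_supported ? 0 : 1;
}
```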

The current Inference Engine solution for bfloat16 inference uses the Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN) and supports inference of a significant number of layers in BF16 computation mode.

### Lowering Inference Precision

Lowering precision to increase performance is [widely used](https://software.intel.com/content/www/us/en/develop/articles/lower-numerical-precision-deep-learning-inference-and-training.html) for inference optimization. Using the bfloat16 data type on CPU opens, for the first time, the possibility of a default optimization approach: use the optimization capabilities of the current platform to achieve maximum performance while keeping the accuracy of calculations within an acceptable range.

Using bfloat16 precision provides the following performance benefits:

1. Faster multiplication of two BF16 numbers because of the shorter mantissa of bfloat16 data.
2. No need to support denormals or handle exceptions, which is itself a performance optimization.
3. Fast conversion of float32 to bfloat16 and vice versa.
4. Reduced data size in memory, so larger models fit within the same memory bounds.
5. Reduced amount of data that must be transferred and, as a result, reduced data transfer time.

For default optimization on CPU, the source model is converted from FP32 or FP16 to BF16 and executed internally on platforms with native BF16 support. In this case, `KEY_ENFORCE_BF16` is set to `YES` in the `PluginConfigParams` for `GetConfig()`. The code below demonstrates how to check if the key is set:

@snippet snippets/Bfloat16Inference1.cpp part1
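
A minimal sketch of such a check, assuming the classic C++ API (the model path is a placeholder):

```cpp
#include <inference_engine.hpp>

#include <string>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // hypothetical model path
    auto exec_network = core.LoadNetwork(network, "CPU");
    // Returns "YES" when the plugin enforces BF16 execution by default.
    auto enforce = exec_network.GetConfig(InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16)
                       .as<std::string>();
    return enforce == InferenceEngine::PluginConfigParams::YES ? 0 : 1;
}
```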

To disable BF16 internal transformations in the C++ API, set `KEY_ENFORCE_BF16` to `NO`. In this case, the model infers as is, without modifications, with the precisions that were set on each layer edge.

@snippet snippets/Bfloat16Inference2.cpp part2
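
Continuing the sketch above, disabling the transformations at load time could look like this (an assumption, not the verbatim snippet):

```cpp
// Pass KEY_ENFORCE_BF16 = NO in the load-time configuration map.
auto exec_network_fp32 = core.LoadNetwork(network, "CPU",
    {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16,
      InferenceEngine::PluginConfigParams::NO}});
```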

To disable BF16 in the C API:

```
ie_config_t config = { "ENFORCE_BF16", "NO", NULL};
ie_core_load_network(core, network, device_name, &config, &exe_network);
```

An exception with the message `Platform doesn't support BF16 format` is thrown if `KEY_ENFORCE_BF16` is set to `YES` on a CPU without native BF16 support or BF16 simulation mode.

Low-precision 8-bit integer models cannot be converted to BF16, even if bfloat16 optimization is set by default.

### Bfloat16 Simulation Mode

Bfloat16 simulation mode is available on CPUs with Intel® AVX-512 that do not support the native `avx512_bf16` instruction. The simulator does not guarantee good performance. Note that the CPU must still support the AVX-512 extensions.

To enable the simulation of bfloat16:

* In the [Benchmark App](../../samples/cpp/benchmark_app/README.md), add the `-enforcebf16=true` option
* In the C++ API, set `KEY_ENFORCE_BF16` to `YES`
* In the C API:
```
ie_config_t config = { "ENFORCE_BF16", "YES", NULL};
ie_core_load_network(core, network, device_name, &config, &exe_network);
```

### Performance Counters

Information about layer precision is stored in the performance counters that are available from the Inference Engine API. The layers have the following marks:

* Suffix `BF16` for layers that had bfloat16 data type input and were computed in BF16 precision
* Suffix `FP32` for layers computed in 32-bit precision

For example, the performance counters table for the Inception model can look as follows:

```
pool5   EXECUTED   layerType: Pooling          realTime: 143    cpu: 143    execType: jit_avx512_BF16
fc6     EXECUTED   layerType: FullyConnected   realTime: 47723  cpu: 47723  execType: jit_gemm_BF16
relu6   NOT_RUN    layerType: ReLU             realTime: 0      cpu: 0      execType: undef
fc7     EXECUTED   layerType: FullyConnected   realTime: 7558   cpu: 7558   execType: jit_gemm_BF16
relu7   NOT_RUN    layerType: ReLU             realTime: 0      cpu: 0      execType: undef
fc8     EXECUTED   layerType: FullyConnected   realTime: 2193   cpu: 2193   execType: jit_gemm_BF16
prob    EXECUTED   layerType: SoftMax          realTime: 68     cpu: 68     execType: jit_avx512_FP32
```

The **execType** column of the table includes inference primitives with specific suffixes.

## Bfloat16 Inference Usage (Python)

@sphinxdirective
.. raw:: html

    <div id="switcher-python" class="switcher-anchor">Python</div>
@endsphinxdirective

### Disclaimer

Bfloat16 inference on CPU requires a platform with native support for the *avx512_bf16* instruction, and therefore the bfloat16 data format. It is possible to run bfloat16 inference in simulation mode on platforms with Intel® Advanced Vector Extensions 512 (Intel® AVX-512) but without *avx512_bf16*; however, this leads to significant performance degradation in comparison with FP32 or native *avx512_bf16* execution.

### Introduction

Bfloat16 (BF16) is the 16-bit Brain Floating-Point format: a truncated version of the 32-bit IEEE 754 single-precision floating-point format FP32. BF16 keeps the same 8 exponent bits as FP32 but reduces the precision of the sign and mantissa fields from 24 bits to 8 bits.

![bf16_format]

Preserving the exponent bits keeps BF16 in the same range as FP32 (~1e-38 to ~3e38). This simplifies conversion between the two data types: you only need to truncate (or flush to zero) the 16 low-order bits. The truncated mantissa occasionally reduces precision, but according to [investigations](https://cloud.google.com/blog/products/ai-machine-learning/bfloat16-the-secret-to-high-performance-on-cloud-tpus), neural networks are more sensitive to the size of the exponent than to the size of the mantissa. Also, in many models precision is needed close to zero but not so much at the maximum of the range. Another useful feature of BF16 is the possibility of encoding INT8 in BF16 without loss of accuracy, because the INT8 range fits completely within the BF16 mantissa field. This reduces data traffic when converting INT8 input image data directly to BF16 without an intermediate FP32 representation, or when combining [INT8 inference](Int8Inference.md) with BF16 layers.

See the [BFLOAT16 – Hardware Numerics Definition white paper](https://software.intel.com/content/dam/develop/external/us/en/documents/bf16-hardware-numerics-definition-white-paper.pdf) for more bfloat16 format details.

There are two ways to check whether the CPU device supports bfloat16 computations for models:

1. Query the instruction set using one of these system commands:
   * `lscpu | grep avx512_bf16`
   * `cat /proc/cpuinfo | grep avx512_bf16`
2. Use the Query API with `METRIC_KEY(OPTIMIZATION_CAPABILITIES)`, which should return `BF16` in the list of CPU optimization options:

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(path_to_xml_file)
cpu_caps = ie.get_metric(metric_name="OPTIMIZATION_CAPABILITIES", device_name="CPU")
```

The current Inference Engine solution for bfloat16 inference uses the Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN) and supports inference of a significant number of layers in BF16 computation mode.

### Lowering Inference Precision

Lowering precision to increase performance is widely used for inference optimization. Using the bfloat16 data type on CPU opens, for the first time, the possibility of a default optimization approach: use the optimization capabilities of the current platform to achieve maximum performance while keeping the accuracy of calculations within an acceptable range.

Using bfloat16 precision provides the following performance benefits:

1. Faster multiplication of two BF16 numbers because of the shorter mantissa of bfloat16 data.
2. No need to support denormals or handle exceptions, which is itself a performance optimization.
3. Fast conversion of float32 to bfloat16 and vice versa.
4. Reduced data size in memory, so larger models fit within the same memory bounds.
5. Reduced amount of data that must be transferred and, as a result, reduced data transfer time.

For default optimization on CPU, the source model is converted from FP32 or FP16 to BF16 and executed internally on platforms with native BF16 support. In this case, `"ENFORCE_BF16"` is set to `"YES"`. The code below demonstrates how to check if the key is set:

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(path_to_xml_file)
exec_net = ie.load_network(network=net, device_name="CPU")
exec_net.get_config("ENFORCE_BF16")
```

To enable BF16 internal transformations, set the key `"ENFORCE_BF16"` to `"YES"` in the ExecutableNetwork configuration.

```python
bf16_config = {"ENFORCE_BF16": "YES"}
exec_net = ie.load_network(network=net, device_name="CPU", config=bf16_config)
```

To disable BF16 internal transformations, set the key `"ENFORCE_BF16"` to `"NO"`. In this case, the model infers as is, without modifications, with the precisions that were set on each layer edge.

An exception with the message `Platform doesn't support BF16 format` is thrown if `"ENFORCE_BF16"` is set to `"YES"` on a CPU without native BF16 support or BF16 simulation mode.

Low-precision 8-bit integer models cannot be converted to BF16, even if bfloat16 optimization is set by default.

### Bfloat16 Simulation Mode

Bfloat16 simulation mode is available on CPUs with Intel® AVX-512 that do not support the native `avx512_bf16` instruction. The simulator does not guarantee good performance. Note that the CPU must still support the AVX-512 extensions.

#### To Enable the Simulation of Bfloat16

* In the Benchmark App, add the `-enforcebf16=true` option
* In Python, use the following code as an example:

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(path_to_xml_file)
bf16_config = {"ENFORCE_BF16": "YES"}
exec_net = ie.load_network(network=net, device_name="CPU", config=bf16_config)
```

### Performance Counters

Information about layer precision is stored in the performance counters that are available from the Inference Engine API. The layers have the following marks:

* Suffix *BF16* for layers that had bfloat16 data type input and were computed in BF16 precision
* Suffix *FP32* for layers computed in 32-bit precision

For example, the performance counters table for the Inception model can look as follows:

```
pool5   EXECUTED   layerType: Pooling          realTime: 143    cpu: 143    execType: jit_avx512_BF16
fc6     EXECUTED   layerType: FullyConnected   realTime: 47723  cpu: 47723  execType: jit_gemm_BF16
relu6   NOT_RUN    layerType: ReLU             realTime: 0      cpu: 0      execType: undef
fc7     EXECUTED   layerType: FullyConnected   realTime: 7558   cpu: 7558   execType: jit_gemm_BF16
relu7   NOT_RUN    layerType: ReLU             realTime: 0      cpu: 0      execType: undef
fc8     EXECUTED   layerType: FullyConnected   realTime: 2193   cpu: 2193   execType: jit_gemm_BF16
prob    EXECUTED   layerType: SoftMax          realTime: 68     cpu: 68     execType: jit_avx512_FP32
```

The **execType** column of the table includes inference primitives with specific suffixes.

[bf16_format]: img/bf16_format.png
@@ -1,54 +0,0 @@

# Inference Engine Developer Guide {#openvino_docs_IE_DG_Deep_Learning_Inference_Engine_DevGuide}

@sphinxdirective

.. toctree::
   :maxdepth: 1
   :hidden:

   openvino_2_0_transition_guide
   openvino_docs_IE_DG_Integrate_with_customer_application_new_API
   openvino_docs_deployment_optimization_guide_dldt_optimization_guide
   openvino_docs_IE_DG_Device_Plugins
   Direct ONNX Format Support <openvino_docs_IE_DG_ONNX_Support>
   openvino_docs_IE_DG_Paddle_Support
   openvino_docs_IE_DG_Int8Inference
   openvino_docs_IE_DG_Bfloat16Inference
   openvino_docs_IE_DG_DynamicBatching
   openvino_docs_IE_DG_ShapeInference
   openvino_docs_IE_DG_Model_caching_overview
   openvino_docs_IE_DG_Extensibility_DG_Intro
   openvino_docs_IE_DG_Memory_primitives
   openvino_docs_IE_DG_network_state_intro
   openvino_docs_IE_DG_API_Changes
   openvino_docs_IE_DG_Known_Issues_Limitations
   openvino_docs_IE_DG_Glossary

@endsphinxdirective

## Introduction

Inference Engine is a set of C++ libraries with C and Python bindings providing a common API to deliver inference solutions on the platform of your choice. Use the Inference Engine API to read a model in Intermediate Representation (IR) or ONNX format and execute it on your devices.

Inference Engine uses a plugin architecture. An Inference Engine plugin is a software component that contains a complete implementation for inference on a certain Intel® hardware device: CPU, GPU, VPU, etc. Each plugin implements the unified API and provides additional hardware-specific APIs.

The scheme below illustrates the typical workflow for deploying a trained deep learning model:



\* _nGraph_ is the internal graph representation in the OpenVINO™ toolkit. Use it to [build a model from source code](https://docs.openvino.ai/latest/openvino_docs_nGraph_DG_build_function.html).

## Video

@sphinxdirective

.. list-table::

   * - .. raw:: html

           <iframe allowfullscreen mozallowfullscreen msallowfullscreen oallowfullscreen webkitallowfullscreen height="315" width="100%"
           src="https://www.youtube.com/embed/e6R13V8nbak">
           </iframe>
   * - **Inference Engine Concept**. Duration: 3:43

@endsphinxdirective
@@ -1,106 +0,0 @@

# Using Dynamic Batching {#openvino_docs_IE_DG_DynamicBatching}

## Using Dynamic Batching (C++)

@sphinxdirective
.. raw:: html

    <div id="switcher-cpp" class="switcher-anchor">C++</div>
@endsphinxdirective

The Dynamic Batching feature allows you to dynamically change the batch size for inference calls within a preset batch size limit. This feature might be useful when the batch size is unknown beforehand and using an extra-large batch size is undesirable or impossible due to resource limitations. For example, when applying face detection and then mood labeling to a video, you won't know in advance how many frames will contain a face when you pass inferencing results to a secondary model.

You can activate Dynamic Batching by setting the `KEY_DYN_BATCH_ENABLED` flag to `YES` in a configuration map that is passed to the plugin while loading a network. This configuration creates an `ExecutableNetwork` object that allows setting the batch size dynamically in all of its infer requests using the `SetBatch()` method. The batch size that was set in the passed `CNNNetwork` object is used as the maximum batch size limit.

Here is a code example:

@snippet snippets/DynamicBatching.cpp part0
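
A condensed sketch of the same flow, assuming the classic C++ API (the model path is a placeholder):

```cpp
#include <inference_engine.hpp>

#include <map>
#include <string>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // hypothetical model path
    network.setBatchSize(16);  // used as the maximum batch size limit

    std::map<std::string, std::string> config = {
        {InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED,
         InferenceEngine::PluginConfigParams::YES}};
    auto exec_network = core.LoadNetwork(network, "CPU", config);

    auto request = exec_network.CreateInferRequest();
    request.SetBatch(4);  // process only the first 4 items of the batch in this call
    request.Infer();
    return 0;
}
```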

### Limitations

Currently, certain limitations for the use of Dynamic Batching exist:

* Use Dynamic Batching with CPU and GPU plugins only.
* Use Dynamic Batching on topologies that consist of certain layers only:
    * Convolution
    * Deconvolution
    * Activation
    * LRN
    * Pooling
    * FullyConnected
    * SoftMax
    * Split
    * Concatenation
    * Power
    * Eltwise
    * Crop
    * BatchNormalization
    * Copy

The following types of layers are not supported:

* Layers that might arbitrarily change tensor shape (such as Flatten, Permute, Reshape)
* Layers specific to object detection topologies (ROIPooling, PriorBox, DetectionOutput)
* Custom layers

Topology analysis is performed during the process of loading a network into the plugin, and if the topology is not supported, an exception is generated.

## Using Dynamic Batching (Python)

@sphinxdirective
.. raw:: html

    <div id="switcher-python" class="switcher-anchor">Python</div>
@endsphinxdirective

Dynamic Batching is a feature that allows you to dynamically change the batch size for inference calls within a preset batch size limit. This feature might be useful when the batch size is unknown beforehand, and using an extra-large batch size is not desired or impossible due to resource limitations. For example, face detection with person age, gender, or mood recognition is a typical usage scenario.

You can activate Dynamic Batching by setting the "DYN_BATCH_ENABLED" flag to "YES" in a configuration map that is passed to the plugin while loading a network. This configuration creates an `ExecutableNetwork` object that allows setting the batch size dynamically in all of its infer requests using the [ie_api.batch_size](api/ie_python_api/_autosummary/openvino.inference_engine.IENetwork.html#openvino.inference_engine.IENetwork.batch_size) property. The batch size that was set in the passed CNNNetwork object is used as the maximum batch size limit.

```python
from openvino.inference_engine import IECore

ie = IECore()
dyn_config = {"DYN_BATCH_ENABLED": "YES"}
ie.set_config(config=dyn_config, device_name=device)
# Read a network in IR or ONNX format
net = ie.read_network(path_to_model)
net.batch_size = 32  # set the maximum batch size to 32
exec_net = ie.load_network(network=net, device_name=device)
```

### Limitations

Currently, certain limitations for the use of Dynamic Batching exist:

* Use Dynamic Batching with CPU and GPU plugins only.
* Use Dynamic Batching on topologies that consist of certain layers only:
    * Convolution
    * Deconvolution
    * Activation
    * LRN
    * Pooling
    * FullyConnected
    * SoftMax
    * Split
    * Concatenation
    * Power
    * Eltwise
    * Crop
    * BatchNormalization
    * Copy

The following types of layers are not supported:

* Layers that might arbitrarily change tensor shape (such as Flatten, Permute, Reshape)
* Layers specific to object detection topologies (ROIPooling, PriorBox, DetectionOutput)
* Custom layers

Topology analysis is performed during the process of loading a network into the plugin, and if the topology is not supported, an exception is generated.
@@ -1,82 +0,0 @@

# Custom nGraph Operations {#openvino_docs_IE_DG_Extensibility_DG_AddingNGraphOps}

The Inference Engine Extension API allows you to register operation sets (opsets) with custom nGraph operations to support models with operations that OpenVINO™ does not support out of the box.

Besides creating custom nGraph operations, to [support custom operations](../../HOWTO/Custom_Layers_Guide.md) in your model you must also create a Model Optimizer extension for the custom operations and an Inference Engine device plugin extension for the device you will use for inference.

## Operation Class

To add your custom nGraph operation, create a new class that extends `ngraph::Op`, which is in turn derived from `ngraph::Node`, the base class for all graph operations in nGraph. Follow the steps below to add a custom nGraph operation:

1. Add the `NGRAPH_RTTI_DECLARATION` and `NGRAPH_RTTI_DEFINITION` macros, which define a `NodeTypeInfo` object that identifies the type of the operation to graph users and helps with dynamic type resolution. The type info of an nGraph operation currently consists of a string identifier and a version number, but this may change in the future.

2. Implement constructors that optionally take the operation inputs and attributes as parameters.

3. Override the shape inference method `validate_and_infer_types`. This method is called multiple times during graph manipulations to determine the shapes and element types of the operation's outputs. To access the input shapes and input element types, use the `get_input_partial_shape()` and `get_input_element_type()` methods of `ngraph::Node`. Set the inferred shape and element type of the output using `set_output_type`.

4. Override the `clone_with_new_inputs` method, which enables graph manipulation routines to create copies of this operation and connect it to different nodes during optimization.

5. Override the `visit_attributes` method, which enables serialization and deserialization of operation attributes. An `AttributeVisitor` is passed to the method, and the implementation is expected to walk over all the attributes in the op using the type-aware `on_attribute` helper. Helpers are already implemented for standard C++ types like `int64_t`, `float`, `bool`, `vector`, and for existing nGraph-defined types.

6. Override `evaluate`, an optional method that enables constant folding when the custom operation ends up on a constant branch. If your operation implements `evaluate`, you also need to override the `has_evaluate` method, which reports whether an `evaluate` implementation is available for the operation.

Based on that, a declaration of an operation class can look as follows:

@snippet template_extension/old/op.hpp op:header
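
As a rough illustration, such a header typically looks like the sketch below. The names follow the `Operation` example used throughout this document; treat the details as assumptions rather than the exact snippet contents:

```cpp
#include <ngraph/ngraph.hpp>

namespace TemplateExtension {

class Operation : public ngraph::op::Op {
public:
    NGRAPH_RTTI_DECLARATION;

    Operation() = default;
    Operation(const ngraph::Output<ngraph::Node>& arg, int64_t add);

    // Step 3: shape and element type inference.
    void validate_and_infer_types() override;
    // Step 4: copying support for graph transformations.
    std::shared_ptr<ngraph::Node> clone_with_new_inputs(
        const ngraph::OutputVector& new_args) const override;
    // Step 5: attribute (de)serialization.
    bool visit_attributes(ngraph::AttributeVisitor& visitor) override;
    // Step 6: optional constant folding support.
    bool evaluate(const ngraph::HostTensorVector& outputs,
                  const ngraph::HostTensorVector& inputs) const override;
    bool has_evaluate() const override;

    int64_t getAddAttr() const { return add; }

private:
    int64_t add = 0;  // the custom attribute described under Class Fields
};

}  // namespace TemplateExtension
```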

### Class Fields

The provided implementation has several fields:

* `add` of type `int64_t` is an attribute of the custom operation
* `type_info` of type `ngraph::NodeTypeInfo` defines the type and version of the operation

### Operation Constructors

An nGraph operation contains two constructors:

* A default constructor, which enables you to create an operation without attributes
* A constructor that creates and validates an operation with specified inputs and attributes

@snippet template_extension/old/op.cpp op:ctor

### `validate_and_infer_types()`

The `ngraph::Node::validate_and_infer_types` method validates operation attributes and calculates output shapes using the attributes of the operation.

@snippet template_extension/old/op.cpp op:validate

### `clone_with_new_inputs()`

The `ngraph::Node::clone_with_new_inputs` method creates a copy of the nGraph operation with new inputs.

@snippet template_extension/old/op.cpp op:copy

### `visit_attributes()`

The `ngraph::Node::visit_attributes` method enables you to visit all operation attributes.

@snippet template_extension/old/op.cpp op:visit_attributes

### `evaluate()` and `has_evaluate()`

The `ngraph::Node::evaluate` method enables you to apply constant folding to an operation.

@snippet template_extension/old/op.cpp op:evaluate

## Register Custom Operations in Extension Class

To add custom operations to the [Extension](Extension.md) class, create an operation set with custom operations and implement the `InferenceEngine::IExtension::getOpSets` method:

@snippet template_extension/old/extension.cpp extension:getOpSets
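
A minimal sketch of such a `getOpSets` implementation, assuming the `Operation` class from above and an opset named `custom_opset` (both names are illustrative):

```cpp
#include <ie_iextension.h>
#include <ngraph/opsets/opset.hpp>

#include <map>
#include <string>

std::map<std::string, ngraph::OpSet> Extension::getOpSets() {
    std::map<std::string, ngraph::OpSet> opsets;
    ngraph::OpSet opset;
    // Register the custom operation class in the opset.
    opset.insert<TemplateExtension::Operation>();
    opsets["custom_opset"] = opset;
    return opsets;
}
```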

This method returns a map of opsets that exist in the [extension library](Extension.md). nGraph provides an opset mechanism to group operations into clusters. Different opsets distinguish between different versions of one operation.

When specifying opset names, follow the rules below:

* Use unique opset names.
* Do not use the following built-in opset names: `extension`, `experimental`, `opset1`, `opset2`, `opset3`, ..., `opsetN`.
* [Make sure that the Model Optimizer](../../HOWTO/Custom_Layers_Guide.md) and your extension use the same opset names.
* IR v10 operations have the mandatory `version` attribute specifying the opset.

Operations from the default opset cannot be redefined.

Use a custom opset to create a new operation or to extend the functionality of an existing operation from another opset.
@@ -1,19 +0,0 @@

# Build Extension Library Using CMake* {#openvino_docs_IE_DG_Extensibility_DG_Building}

The Inference Engine build infrastructure provides the Inference Engine Package for application development.

To configure the build of your extension library, use the following CMake script:

@snippet template_extension/old/CMakeLists.txt cmake:extension

This CMake script finds the Inference Engine and nGraph using the `find_package` CMake command.

To build the extension library, run the commands below:

```sh
$ cd template_extension/old
$ mkdir build
$ cd build
$ cmake -DOpenVINO_DIR=[OpenVINO_DIR] ../
$ cmake --build .
```
@@ -1,71 +0,0 @@

# CPU Kernel Custom Operations {#openvino_docs_IE_DG_Extensibility_DG_CPU_Kernel}

To enable operations not supported by OpenVINO™ out of the box, you need a custom extension for the Model Optimizer, a custom nGraph operation set, and a custom kernel for the device you will target. This page describes custom kernel support for the CPU device.

The primary means of performance for the CPU codepath in the Inference Engine is the Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN), and new CPU kernels extend the Inference Engine plugin for Intel MKL-DNN. Implementing the InferenceEngine::ILayerExecImpl API defines a general CPU-side extension; there are no Intel MKL-DNN specifics in the way you need to implement a kernel.

## Implementation Class

All custom kernels for the CPU plugin should be inherited from the InferenceEngine::ILayerExecImpl interface. Based on that, a declaration of a kernel implementation class can look as follows:

@snippet template_extension/old/cpu_kernel.hpp cpu_implementation:header
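
As a rough sketch, such a declaration typically mirrors the interface methods discussed below; treat the names and exact signatures as assumptions drawn from the template extension, not the verbatim snippet:

```cpp
#include <ie_iextension.h>
#include <ngraph/ngraph.hpp>

#include <memory>
#include <string>
#include <vector>

class OpImplementation : public InferenceEngine::ILayerExecImpl {
public:
    explicit OpImplementation(const std::shared_ptr<ngraph::Node>& node);

    // Reports supported input/output tensor layouts.
    InferenceEngine::StatusCode getSupportedConfigurations(
        std::vector<InferenceEngine::LayerConfig>& conf,
        InferenceEngine::ResponseDesc* resp) noexcept override;
    // Accepts the runtime-selected configuration.
    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config,
                                     InferenceEngine::ResponseDesc* resp) noexcept override;
    // Computes outputs from inputs.
    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs,
                                        std::vector<InferenceEngine::Blob::Ptr>& outputs,
                                        InferenceEngine::ResponseDesc* resp) noexcept override;

private:
    int64_t add = 0;                 // custom operation attribute
    ngraph::Shape inShape, outShape; // input and output shapes
    std::string error;               // error captured in the constructor
};
```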

### Class Fields

The provided implementation has several fields:

* `add` of the type `int64_t` is an attribute of the custom operation.
* `inShape` of the type `ngraph::Shape` is the input shape.
* `outShape` of the type `ngraph::Shape` is the output shape.
* `error` of the type `std::string` is a field for handling errors from the constructor.

### Constructor of Implementation

The implementation constructor checks the parameters of the nGraph operation, stores the required attributes, and stores an error message in case of an error.

@snippet template_extension/old/cpu_kernel.cpp cpu_implementation:ctor

### `getSupportedConfigurations`

The InferenceEngine::ILayerExecImpl::getSupportedConfigurations method returns all supported configuration formats (input/output tensor layouts) for your implementation. To specify formats of data, use InferenceEngine::TensorDesc. Refer to the [Memory Primitives](../Memory_primitives.md) section for instructions.

@snippet template_extension/old/cpu_kernel.cpp cpu_implementation:getSupportedConfigurations

### `init`

The InferenceEngine::ILayerExecImpl::init method gets a runtime-selected configuration from the vector populated by `getSupportedConfigurations` and checks its parameters:

@snippet template_extension/old/cpu_kernel.cpp cpu_implementation:init

### `execute`

The InferenceEngine::ILayerExecImpl::execute method accepts and processes the actual tensors as input/output blobs:

@snippet template_extension/old/cpu_kernel.cpp cpu_implementation:execute

## Register Implementation in `Extension` Class

To register a custom kernel implementation in the [Extension](Extension.md) class, implement the following methods:

* <a href="#getImpTypes">getImplTypes</a>
* <a href="#getImplementation">getImplementation</a>

### <a name="getImpTypes"><code>getImplTypes</code></a>

InferenceEngine::IExtension::getImplTypes returns a vector of implementation types for an operation.

@snippet template_extension/old/extension.cpp extension:getImplTypes

### <a name="getImplementation"><code>getImplementation</code></a>

InferenceEngine::IExtension::getImplementation returns the kernel implementation with the specified type for an operation.

@snippet template_extension/old/extension.cpp extension:getImplementation

## Load Extension with Executable Kernels to Plugin

Use the `AddExtension` method of the general plugin interface to load your primitives:

@snippet snippets/CPU_Kernel.cpp part0
@@ -1,78 +0,0 @@

# Custom ONNX* Operators {#openvino_docs_IE_DG_Extensibility_DG_Custom_ONNX_Ops}

The ONNX\* importer provides a mechanism to register custom ONNX operators based on predefined or custom nGraph operations. The function responsible for registering a new operator is called `ngraph::onnx_import::register_operator` and is defined in the `onnx_import/onnx_utils.hpp` file.

## Register Custom ONNX Operator Based on Predefined nGraph Operations

The steps below explain how to register a custom ONNX operator, for example, CustomRelu, in a domain called `com.example`. CustomRelu is defined as follows:

```
x >= 0 => f(x) = x * alpha
x <  0 => f(x) = x * beta
```

where `alpha` and `beta` are float constants.

1. Include headers:

@snippet onnx_custom_op/onnx_custom_op.cpp onnx_custom_op:headers

2. Register the CustomRelu operator in the ONNX importer:

@snippet onnx_custom_op/onnx_custom_op.cpp onnx_custom_op:register_operator
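
A hedged sketch of such a registration, assuming predefined nGraph operations from `opset5` (the attribute defaults are illustrative):

```cpp
#include <ngraph/opsets/opset5.hpp>
#include <onnx_import/onnx_utils.hpp>

void register_custom_relu() {
    ngraph::onnx_import::register_operator(
        "CustomRelu", 1, "com.example",
        [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
            namespace opset = ngraph::opset5;
            ngraph::OutputVector inputs{node.get_ng_inputs()};
            const auto& x = inputs.at(0);
            const float alpha = node.get_attribute_value<float>("alpha", 1.0f);
            const float beta = node.get_attribute_value<float>("beta", 0.0f);

            const auto a = opset::Constant::create(x.get_element_type(), {}, {alpha});
            const auto b = opset::Constant::create(x.get_element_type(), {}, {beta});
            const auto zero = opset::Constant::create(x.get_element_type(), {}, {0.0f});

            // x >= 0 ? x * alpha : x * beta
            const auto mask = std::make_shared<opset::GreaterEqual>(x, zero);
            const auto pos = std::make_shared<opset::Multiply>(x, a);
            const auto neg = std::make_shared<opset::Multiply>(x, b);
            return {std::make_shared<opset::Select>(mask, pos, neg)};
        });
}
```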

The `register_operator` function takes four arguments: op_type, opset version, domain, and a function object. The function object is a user-defined function that takes `ngraph::onnx_import::Node` as an input and, based on that, returns a graph with nGraph operations. The `ngraph::onnx_import::Node` class represents a node in an ONNX model. It provides functions to fetch input nodes using `get_ng_inputs`, fetch attribute values using `get_attribute_value`, and many more. See the `onnx_import/core/node.hpp` file for the full class declaration.

New operator registration must happen before an ONNX model is read. For example, if a model uses the `CustomRelu` operator, call `register_operator("CustomRelu", ...)` before InferenceEngine::Core::ReadNetwork. Re-registering ONNX operators within the same process is supported; if you register an existing operator, you get a warning.

The example below demonstrates an exemplary model that requires the previously created `CustomRelu` operator:
```
@include onnx_custom_op/custom_relu_model.prototxt
```

This model is in text format, so before it can be passed to the Inference Engine, it has to be converted to binary using:
```py
from google.protobuf import text_format
import onnx

with open("custom_relu_model.prototxt") as in_file:
    proto = onnx.ModelProto()
    text_format.Parse(in_file.read(), proto, allow_field_number=True)
    s = onnx._serialize(proto)
    onnx._save_bytes(s, "custom_relu_model.onnx")
```

To create a graph with nGraph operations, visit [Custom nGraph Operations](AddingNGraphOps.md). For a complete list of predefined nGraph operators, visit [Available Operations Sets](../../ops/opset.md).

If you do not need an operator anymore, unregister it by calling `unregister_operator`. The function takes three arguments: `op_type`, `version`, and `domain`.

@snippet onnx_custom_op/onnx_custom_op.cpp onnx_custom_op:unregister_operator

## Register Custom ONNX Operator Based on Custom nGraph Operations

The same principles apply when registering a custom ONNX operator based on custom nGraph operations. This example shows how to register a custom ONNX operator based on `Operation` presented in [this tutorial](AddingNGraphOps.md), which is used in [TemplateExtension](Extension.md):

@snippet template_extension/old/extension.cpp extension:ctor

Here, the `register_operator` function is called in the constructor of Extension. The constructor makes sure that the function is called before InferenceEngine::Core::ReadNetwork, because InferenceEngine::Core::AddExtension must be called before a model with a custom operator is read.

The example below demonstrates how to unregister an operator from the destructor of Extension:

@snippet template_extension/old/extension.cpp extension:dtor

> **REQUIRED**: It is mandatory to unregister a custom ONNX operator if it is defined in a dynamic shared library.

## Requirements for Building with CMake

A program that uses the `register_operator` functionality requires the `openvino::core` and `openvino::frontend::onnx` libraries in addition to the OpenVINO Inference Runtime. The `ov_onnx_frontend` is a component of the `OpenVINO` package, so `find_package(OpenVINO REQUIRED COMPONENTS ONNX)` can find both. These libraries need to be passed to the `target_link_libraries` command in the CMakeLists.txt file.

See the CMakeLists.txt below for reference:

@snippet onnx_custom_op/CMakeLists.txt cmake:onnx_custom_op
@@ -1,29 +0,0 @@

# Extension Library {#openvino_docs_IE_DG_Extensibility_DG_Extension}

The Inference Engine provides the InferenceEngine::IExtension interface, which defines the interface for Inference Engine Extension libraries. Inherit all extension libraries from this interface. The example below contains an implementation of two operations: `Template`, used as an example in this document, and `FFT`, used as a more complex example from the [Custom Operations Guide](../../HOWTO/Custom_Layers_Guide.md).

> **NOTE**: The `FFT` operation is implemented using the OpenCV library functions `cv::dft` and `cv::idft`.

Based on that, the declaration of an extension class can look as follows:

@snippet template_extension/old/extension.hpp extension:header
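
For orientation, a hedged sketch of such a declaration; the method set follows the IExtension methods described on this page, and the details are assumptions rather than the verbatim snippet:

```cpp
#include <ie_iextension.h>
#include <ngraph/ngraph.hpp>

#include <map>
#include <memory>
#include <string>
#include <vector>

class Extension : public InferenceEngine::IExtension {
public:
    // Version information about the library.
    void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override;
    void Unload() noexcept override {}

    // Custom operation sets exposed by the library.
    std::map<std::string, ngraph::OpSet> getOpSets() override;
    // Implementation types and kernel factories for custom operations.
    std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override;
    InferenceEngine::ILayerImpl::Ptr getImplementation(
        const std::shared_ptr<ngraph::Node>& node, const std::string& implType) override;
};
```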

The extension library should use the `IE_DEFINE_EXTENSION_CREATE_FUNCTION` macro to export a function that creates an `Extension` class:

@snippet template_extension/old/extension.cpp extension:CreateExtension

Also, an `Extension` object should implement the following methods:

* InferenceEngine::IExtension::Release deletes an extension object.
* InferenceEngine::IExtension::GetVersion returns information about the version of the library.

@snippet template_extension/old/extension.cpp extension:GetVersion

Implement the InferenceEngine::IExtension::getOpSets method if the extension contains custom layers. Read [Custom nGraph Operation](AddingNGraphOps.md) for more information.

To integrate execution kernels into the extension library, read [How to Implement Custom CPU Operations](CPU_Kernel.md). To register a custom ONNX\* operator in the extension library, read [Custom ONNX Operators](Custom_ONNX_Ops.md).
@@ -1,60 +0,0 @@

# Inference Engine Extensibility Mechanism {#openvino_docs_IE_DG_Extensibility_DG_Intro}

@sphinxdirective

.. toctree::
   :maxdepth: 1
   :hidden:

   openvino_docs_IE_DG_Extensibility_DG_AddingNGraphOps
   openvino_docs_IE_DG_Extensibility_DG_Custom_ONNX_Ops
   CPU Kernels Extensibility <openvino_docs_IE_DG_Extensibility_DG_CPU_Kernel>
   GPU Kernels Extensibility <openvino_docs_IE_DG_Extensibility_DG_GPU_Kernel>
   VPU Kernels Extensibility <openvino_docs_IE_DG_Extensibility_DG_VPU_Kernel>
   openvino_docs_IE_DG_Extensibility_DG_Extension
   openvino_docs_IE_DG_Extensibility_DG_Building

@endsphinxdirective

If your model contains operations not normally supported by OpenVINO™, the Inference Engine Extensibility API lets you add support for those custom operations in a library containing custom nGraph operation sets, corresponding extensions to the Model Optimizer, and a device plugin extension. See the overview in the [Custom Operations Guide](../../HOWTO/Custom_Layers_Guide.md) to learn how these work together.

To load the Extensibility library to the `InferenceEngine::Core` object, use the `InferenceEngine::Core::AddExtension` method.

## Inference Engine Extension Library

An Inference Engine Extension dynamic library contains the following components:

* [Extension Library](Extension.md):
   - Contains custom operation sets
   - Provides CPU implementations for custom operations
* [Custom nGraph Operation](AddingNGraphOps.md):
   - Enables the use of `InferenceEngine::Core::ReadNetwork` to read Intermediate Representation (IR) with unsupported operations
   - Enables the creation of `ngraph::Function` with unsupported operations
   - Provides a shape inference mechanism for custom operations

> **NOTE**: This documentation is written based on the [Template extension](https://github.com/openvinotoolkit/openvino/tree/master/docs/template_extension), which demonstrates extension development details. You can review the complete code, which is fully compilable and up-to-date, to see how it works.

## Execution Kernels

The Inference Engine workflow involves the creation of custom kernels and either custom or existing operations.

An _operation_ is a network building block implemented in the training framework, for example, `Convolution` in Caffe*. A _kernel_ is defined as the corresponding implementation in the Inference Engine.

Refer to [Model Optimizer Extensibility](../../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for details on how the mapping between framework operations and Inference Engine kernels is registered.

In short, you can plug your own kernel implementations into the Inference Engine and map them to the operations in the original framework.

The following pages describe how to integrate custom _kernels_ into the Inference Engine:

* [Introduction to development of custom CPU kernels](CPU_Kernel.md)
* [Introduction to development of custom GPU kernels](GPU_Kernel.md)
* [Introduction to development of custom VPU kernels](VPU_Kernel.md)

## See Also

* [Build an extension library using CMake*](Building.md)
* [Using Inference Engine Samples](../Samples_Overview.md)
* [Hello Shape Infer SSD sample](../../../samples/cpp/hello_reshape_ssd/README.md)
@@ -1,85 +0,0 @@

# Glossary {#openvino_docs_IE_DG_Glossary}

## Acronyms and Abbreviations

| Abbreviation | Description |
| :--- | :--- |
| API | Application Programming Interface |
| AVX | Advanced Vector Extensions |
| clDNN | Compute Library for Deep Neural Networks |
| CLI | Command Line Interface |
| CNN | Convolutional Neural Network |
| CPU | Central Processing Unit |
| CV | Computer Vision |
| DL | Deep Learning |
| DLDT | Intel(R) Deep Learning Deployment Toolkit |
| DLL | Dynamic Link Library |
| DNN | Deep Neural Networks |
| ELU | Exponential Linear rectification Unit |
| FCN | Fully Convolutional Network |
| FP | Floating Point |
| GCC | GNU Compiler Collection |
| GPU | Graphics Processing Unit |
| HD | High Definition |
| IE | Inference Engine |
| IR | Intermediate Representation |
| JIT | Just In Time |
| JTAG | Joint Test Action Group |
| LPR | License-Plate Recognition |
| LRN | Local Response Normalization |
| mAP | Mean Average Precision |
| Intel(R) MKL-DNN | Intel(R) Math Kernel Library Deep Neural Networks |
| MO | Model Optimizer |
| MVN | Mean Variance Normalization |
| NCDHW | Number of images, Channels, Depth, Height, Width |
| NCHW | Number of images, Channels, Height, Width |
| NHWC | Number of images, Height, Width, Channels |
| NMS | Non-Maximum Suppression |
| NN | Neural Network |
| NST | Neural Style Transfer |
| OD | Object Detection |
| OS | Operating System |
| PCI | Peripheral Component Interconnect |
| PReLU | Parametric Rectified Linear Unit |
| PSROI | Position Sensitive Region Of Interest |
| RCNN, R-CNN | Region-based Convolutional Neural Network |
| ReLU | Rectified Linear Unit |
| ROI | Region Of Interest |
| SDK | Software Development Kit |
| SSD | Single Shot multibox Detector |
| SSE | Streaming SIMD Extensions |
| USB | Universal Serial Bus |
| VGG | Visual Geometry Group |
| VOC | Visual Object Classes |
| WINAPI | Windows Application Programming Interface |

## Terms

Glossary of terms used in the Inference Engine:

| Term | Description |
| :--- | :--- |
| Batch | Number of images to analyze during one call of infer. Maximum batch size is a property of the network and is set before loading the network to the plugin. In the NHWC, NCHW and NCDHW image data layout representations, the N refers to the number of images in the batch |
| Blob | Memory container used for storing inputs and outputs of the network, and weights and biases of the layers |
| Device (Affinity) | A preferred Intel(R) hardware device to run inference (CPU, GPU, etc.) |
| Extensibility mechanism, Custom layers | The mechanism that provides you with capabilities to extend the Inference Engine and Model Optimizer so that they can work with topologies containing layers that are not yet supported |
| <code>CNNNetwork</code> | A class of the Convolutional Neural Network that Inference Engine reads from IR. Consists of topology, weights and biases |
| <code>ExecutableNetwork</code> | An instance of the loaded network which allows the Inference Engine to request (several) infer requests and perform inference synchronously or asynchronously |
| <code>InferRequest</code> | A class that represents the end point of inference on the model loaded to the plugin and represented by an executable network. Inputs are set here, and outputs should be requested from this interface as well |
| <code>InferenceEngineProfileInfo</code> | Represents basic inference profiling information per layer |
| Inference Engine | A C++ library with a set of classes that you can use in your application to infer input data (images) and get the result |
| Inference Engine API | The basic default API for all supported devices, which allows you to load a model from Intermediate Representation, set input and output formats, and execute the model on various devices |
| Inference Engine <code>Core</code> | Inference Engine Core is a software component that manages inference on certain Intel(R) hardware devices: CPU, GPU, MYRIAD, GNA, etc. |
| Layer catalog or Operations specification | A list of supported layers or operations and their parameters. Sets of supported layers are different for different plugins; check the plugin documentation to verify whether the Inference Engine supports a certain layer on the dedicated hardware |
| <code>Layout</code> | Image data layout refers to the representation of an image batch. Layout shows a sequence of 4D or 5D tensor data in memory. A typical NCHW format represents pixels by horizontal direction, rows by vertical dimension, planes by channel, and images by batch |
| <code>OutputsDataMap</code> | Structure which contains information about output precisions and layouts |
| Precision | Represents data precision. For example, FP32 is 32-bit floating point, FP16 is 16-bit floating point. Precision can be changed before loading the network to the plugin |
| <code>PreProcessInfo</code> | Class that represents input data for the network. It contains information about input precision, its layout, and pre-processing |
| <code>ResponseDesc</code> | Represents debug information for an error |

## See Also

* [Deep Learning Model Optimizer IR Operations Catalog](../ops/opset.md)
* [Inference Engine Memory primitives](Memory_primitives.md)
* [Terminology](supported_plugins/Supported_Devices.md)
@@ -1,235 +0,0 @@
# Introduction to Inference Engine Device Query API {#openvino_docs_IE_DG_InferenceEngine_QueryAPI}

## Inference Engine Query API (C++)

@sphinxdirective
.. raw:: html

    <div id="switcher-cpp" class="switcher-anchor">C++</div>
@endsphinxdirective

The OpenVINO™ toolkit supports inferencing with several types of devices (processors or accelerators).
This section provides a high-level description of the process of querying different device properties and configuration values at runtime. Refer to the [Hello Query Device C++ Sample](../../samples/cpp/hello_query_device/README.md) sources and the [Multi-Device Plugin documentation](supported_plugins/MULTI.md) for examples of using the Inference Engine Query API in user applications.

### Using the Inference Engine Query API in Your Code

The `InferenceEngine::Core` class provides the following API to query device information and set or get different device configuration properties:

* `InferenceEngine::Core::GetAvailableDevices` - Provides a list of available devices. If there is more than one instance of a specific device, the devices are enumerated with `.suffix`, where `suffix` is a unique string identifier. The device name can be passed to all methods of the `InferenceEngine::Core` class that work with devices, for example `InferenceEngine::Core::LoadNetwork`.
* `InferenceEngine::Core::GetMetric` - Provides information about a specific device.
* `InferenceEngine::Core::GetConfig` - Gets the current value of a specific configuration key.
* `InferenceEngine::Core::SetConfig` - Sets a new value for the configuration key.

The `InferenceEngine::ExecutableNetwork` class is also extended to support the Query API:

* `InferenceEngine::ExecutableNetwork::GetMetric`
* `InferenceEngine::ExecutableNetwork::GetConfig`
* `InferenceEngine::ExecutableNetwork::SetConfig`
### Query API in the Core Class

#### GetAvailableDevices

@snippet snippets/InferenceEngine_QueryAPI0.cpp part0

The function returns a list of available devices, for example:

```
MYRIAD.1.2-ma2480
MYRIAD.1.4-ma2480
CPU
GPU.0
GPU.1
```

Each device name can then be passed to:

* `InferenceEngine::Core::LoadNetwork` to load the network to a specific device.
* `InferenceEngine::Core::GetMetric` to get common or device-specific metrics.
* All other methods of the `InferenceEngine::Core` class that accept `deviceName`.
#### GetConfig()

The code below demonstrates how to check whether the `HETERO` device dumps GraphViz `.dot` files with split graphs during the split stage:

@snippet snippets/InferenceEngine_QueryAPI1.cpp part1

For documentation about common configuration keys, refer to `ie_plugin_config.hpp`. Device-specific configuration keys can be found in the corresponding plugin folders.

#### GetMetric()

To extract device properties such as available devices, the device name, and supported configuration keys, use the `InferenceEngine::Core::GetMetric` method:

@snippet snippets/InferenceEngine_QueryAPI2.cpp part2

A returned value appears as follows: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz`.

> **NOTE**: All metrics have a type, which is specified during metric instantiation. The list of common device-agnostic metrics can be found in `ie_plugin_config.hpp`. Device-specific metrics (for example, for HDDL or MYRIAD devices) can be found in the corresponding plugin folders.
### Query API in the ExecutableNetwork Class

#### GetMetric()

The method is used to get an executable network specific metric such as `METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)`:

@snippet snippets/InferenceEngine_QueryAPI3.cpp part3

Or the current temperature of the `MYRIAD` device:

@snippet snippets/InferenceEngine_QueryAPI4.cpp part4

#### GetConfig()

The method is used to get information about configuration values the executable network has been created with:

@snippet snippets/InferenceEngine_QueryAPI5.cpp part5

#### SetConfig()

The only device that supports this method is [Multi-Device](supported_plugins/MULTI.md).
## Inference Engine Query API (Python)

@sphinxdirective
.. raw:: html

    <div id="switcher-python" class="switcher-anchor">Python</div>
@endsphinxdirective

This section provides a high-level description of the process of querying different device properties and configuration values. Refer to the [Hello Query Device Python Sample](../../samples/python/hello_query_device/README.md) sources and the [Multi-Device Plugin documentation](supported_plugins/MULTI.md) for examples of using the Inference Engine Query API in user applications.

### Using the Inference Engine Query API in Your Code

The Inference Engine [Core](api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino-inference-engine-iecore) class provides the following API to query device information and set or get different device configuration properties:

* [ie_api.IECore.available_devices](api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino.inference_engine.IECore.available_devices) - Provides a list of available devices. If there is more than one instance of a specific device, the devices are enumerated with `.suffix`, where `suffix` is a unique string identifier. The device name can be passed to all methods of the IECore class that work with devices, for example [ie_api.IECore.load_network](api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino.inference_engine.IECore.load_network).
* [ie_api.IECore.get_metric](api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino.inference_engine.IECore.get_metric) - Provides information about a specific device.
* [ie_api.IECore.get_config](api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino.inference_engine.IECore.get_config) - Gets the current value of a specific configuration key.
* [ie_api.IECore.set_config](api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino.inference_engine.IECore.set_config) - Sets a new value for the configuration key.

The [ie_api.ExecutableNetwork](api/ie_python_api/_autosummary/openvino.inference_engine.ExecutableNetwork.html) class is also extended to support the Query API:

* [ie_api.ExecutableNetwork.get_metric](api/ie_python_api/_autosummary/openvino.inference_engine.ExecutableNetwork.html#openvino.inference_engine.ExecutableNetwork.get_metric)
* [ie_api.ExecutableNetwork.get_config](api/ie_python_api/_autosummary/openvino.inference_engine.ExecutableNetwork.html#openvino.inference_engine.ExecutableNetwork.get_config)
* There is no `set_config` method, but the equivalent action is described in the Set Configuration section below.
### Query API in the IECore Class

#### Get Available Devices

```python
from openvino.inference_engine import IECore

ie = IECore()
print(ie.available_devices)
```

This code prints a list of available devices, for example:

```
MYRIAD.1.2-ma2480
MYRIAD.1.4-ma2480
FPGA.0
FPGA.1
CPU
GPU.0
GPU.1
```

Each device name can then be passed to:

* `IECore.load_network` to load the network to a specific device.
* `IECore.get_metric` to get common or device-specific metrics.
* All other methods of the `IECore` class that accept a device name.
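
For example, a minimal sketch of loading a network to a specific enumerated device (the `model.xml` path and the `GPU.1` device name are illustrative placeholders):

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="model.xml")  # hypothetical IR path
# Use an enumerated device name exactly as reported by ie.available_devices
exec_net = ie.load_network(network=net, device_name="GPU.1")
```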

#### Get Metric

To extract device properties such as available devices, the device name, and supported configuration keys, use the [IECore.get_metric](api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino.inference_engine.IECore.get_metric) method:

```python
from openvino.inference_engine import IECore

ie = IECore()
ie.get_metric(device_name="CPU", metric_name="FULL_DEVICE_NAME")
```

A returned value appears as follows: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz`.

To list all supported metrics for a device:

```python
from openvino.inference_engine import IECore

ie = IECore()
ie.get_metric(device_name="GPU", metric_name="SUPPORTED_METRICS")
```

#### Get Configuration

The code below uses the [IECore.get_config](api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino.inference_engine.IECore.get_config) method and demonstrates how to check whether the HETERO device dumps `.dot` files with split graphs during the split stage:

```python
from openvino.inference_engine import IECore

ie = IECore()
ie.get_config(device_name="HETERO", config_name="HETERO_DUMP_GRAPH_DOT")
```

To list all supported configuration keys for a device:

```python
from openvino.inference_engine import IECore

ie = IECore()
ie.get_metric(device_name="CPU", metric_name="SUPPORTED_CONFIG_KEYS")
```

For documentation about common configuration keys, refer to `ie_plugin_config.hpp`. Device-specific configuration keys can be found in the corresponding plugin folders.
### Query API in the ExecutableNetwork Class

#### Get Metric

To get the name of the loaded network:

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model=path_to_xml_file)
exec_net = ie.load_network(network=net, device_name=device)
exec_net.get_metric("NETWORK_NAME")
```

Use `exec_net.get_metric("SUPPORTED_METRICS")` to list all supported metrics for an ExecutableNetwork instance.

#### Get Configuration

The [ie_api.ExecutableNetwork.get_config](api/ie_python_api/_autosummary/openvino.inference_engine.ExecutableNetwork.html#openvino.inference_engine.ExecutableNetwork.get_config) method is used to get information about configuration values the executable network has been created with:

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model=path_to_xml_file)
exec_net = ie.load_network(network=net, device_name="CPU")
exec_net.get_config("CPU_THREADS_NUM")
```

Or the current temperature of the MYRIAD device:

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model=path_to_xml_file)
exec_net = ie.load_network(network=net, device_name="MYRIAD")
exec_net.get_config("DEVICE_THERMAL")
```

Use `exec_net.get_metric("SUPPORTED_CONFIG_KEYS")` to list all supported configuration keys.

#### Set Configuration

The only device that supports this method in the ExecutableNetwork class is [Multi-Device](supported_plugins/MULTI.md), where you can change the priorities of the devices for the Multi plugin in real time: `exec_net.set_config({"MULTI_DEVICE_PRIORITIES": "GPU,CPU"})`. See the Multi-Device documentation for more details.
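
A minimal end-to-end sketch of this priority switch (assuming a machine that exposes both GPU and CPU devices; the `model.xml` path is an illustrative placeholder):

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="model.xml")  # hypothetical IR path
exec_net = ie.load_network(network=net, device_name="MULTI:GPU,CPU")
# Prefer CPU over GPU from now on; requests already in flight are not interrupted
exec_net.set_config({"MULTI_DEVICE_PRIORITIES": "CPU,GPU"})
```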
@@ -1,494 +0,0 @@
# Integrate Inference Engine {#openvino_docs_IE_DG_Integrate_with_customer_application_new_API}

## Integrate Inference Engine with Your C++ Application

@sphinxdirective
.. raw:: html

    <div id="switcher-cpp" class="switcher-anchor">C++</div>
@endsphinxdirective

The following diagram illustrates the typical Inference Engine C++ API workflow:

![ie_api_flow_cpp]

Read the sections below to learn about each item.

> **NOTE**: Before you start using the Inference Engine, make sure you set all environment variables during the installation. If you did not, follow the instructions from the _Set the Environment Variables_ section in the installation guides:
> * [For Windows* 10](../install_guides/installing-openvino-windows.md)
> * [For Linux*](../install_guides/installing-openvino-linux.md)
> * [For macOS*](../install_guides/installing-openvino-macos.md)
> * To build an open source version, use the [Inference Engine Build Instructions](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode).

### Link with Inference Library
1. **Create a structure** for the project:
``` sh
project/
    ├── CMakeLists.txt  - CMake file to build
    ├── ...             - Additional folders like includes/
    └── src/            - source folder
        └── main.cpp
build/                  - build directory
    ...
```

2. **Include Inference Engine, nGraph and OpenCV libraries** in `project/CMakeLists.txt`.
[OpenCV](https://docs.opencv.org/master/db/df5/tutorial_linux_gcc_cmake.html) integration is needed mostly for pre-processing input data, and nGraph for more complex applications using the [nGraph API](../nGraph_DG/nGraph_dg.md).
``` cmake
cmake_minimum_required(VERSION 3.0.0)
project(project_name)
find_package(ngraph REQUIRED)
find_package(InferenceEngine REQUIRED)
find_package(OpenCV REQUIRED)
add_executable(${PROJECT_NAME} src/main.cpp)
target_link_libraries(${PROJECT_NAME} PRIVATE ${InferenceEngine_LIBRARIES} ${OpenCV_LIBS} ${NGRAPH_LIBRARIES})
```
### Use Inference Engine API to Implement Inference Pipeline

This section provides step-by-step instructions to implement a typical inference pipeline with the Inference Engine C++ API:

![ie_api_use_cpp]

#### Step 1. Create Inference Engine Core

Use the following code to create Inference Engine Core to manage available devices and read network objects:

@snippet snippets/Integrate_with_customer_application_new_API.cpp part0
#### Step 2 (Optional). Configure Input and Output of the Model

@sphinxdirective
.. raw:: html

    <div class="collapsible-section">
@endsphinxdirective

Optionally, configure input and output of the model using the steps below:

1. Load a model to a Core object:
@sphinxdirective

.. tab:: IR

    .. code-block:: c

        auto network = core.ReadNetwork("model.xml");

.. tab:: ONNX

    .. code-block:: c

        auto network = core.ReadNetwork("model.onnx");

    You can find more information about ONNX format support in `ONNX format support in the OpenVINO™ <https://docs.openvino.ai/latest/openvino_docs_IE_DG_ONNX_Support.html>`_.

.. tab:: nGraph

    .. code-block:: c

        std::shared_ptr<Function> createNetwork() {
            // To construct a network, please follow
            // https://docs.openvino.ai/latest/openvino_docs_nGraph_DG_build_function.html
        }
        auto network = CNNNetwork(createNetwork());

@endsphinxdirective

2. Request input and output information using the `InferenceEngine::CNNNetwork::getInputsInfo()` and `InferenceEngine::CNNNetwork::getOutputsInfo()` methods:
```cpp
/** Take information about all topology inputs **/
InferenceEngine::InputsDataMap input_info = network.getInputsInfo();
/** Iterate over all input info **/
for (auto &item : input_info) {
    auto input_data = item.second;
    // Add your input configuration steps here
}

/** Take information about all topology outputs **/
InferenceEngine::OutputsDataMap output_info = network.getOutputsInfo();
/** Iterate over all output info **/
for (auto &item : output_info) {
    auto output_data = item.second;
    // Add your output configuration steps here
}
```
Configuring options:

1. **Set precision** (number format): FP16, FP32, INT8, etc. Refer to the Supported Configurations section on the [Supported Devices](supported_plugins/Supported_Devices.md) page to choose the relevant configuration.<br>
For input (*iterate over all input info*):
```cpp
input_data->setPrecision(InferenceEngine::Precision::U8);
```
For output (*iterate over all output info*):
```cpp
output_data->setPrecision(InferenceEngine::Precision::FP32);
```
**By default**, the input and output precision is set to `Precision::FP32`.

2. **Set layout** (for example, NCHW).<br>
For input (*iterate over all input info*):
```cpp
input_data->setLayout(InferenceEngine::Layout::NCHW);
```
**By default**, the input layout is set to `Layout::NCHW`.<br>
For output (*iterate over all output info*):
```cpp
output_data->setLayout(InferenceEngine::Layout::NC);
```
**By default**, the output layout depends on the number of its dimensions:<br>

|Number of dimensions | 5     | 4    | 3   | 2  | 1  |
|:--------------------|-------|------|-----|----|----|
|Layout               | NCDHW | NCHW | CHW | NC | C  |

3. **Set resize algorithm for inputs** (Bilinear). You can allow input of any size. To do this, mark each input as resizable by setting a desired resize algorithm (e.g. `BILINEAR`) inside of the appropriate input info (*iterate over all input info*):
```cpp
input_data->getPreProcess().setResizeAlgorithm(InferenceEngine::RESIZE_BILINEAR);
```
**By default**, no resize algorithm is set for inputs.

4. **Set color format** (BGR, RGB, NV12). Basic color format conversions are supported as well. **By default**, the Inference Engine assumes that the input color format is BGR and color format conversions are disabled. Set the `ColorFormat::RAW` input color format if the input does not need color conversions. The Inference Engine supports the following color format conversions:
* RGB->BGR
* RGBX->BGR
* BGRX->BGR
* NV12->BGR

where X is a channel that is ignored during inference. To enable the conversions, set a desired color format (for example, RGB) for each input inside of the appropriate input info (*iterate over all input info*):
```cpp
input_data->getPreProcess().setColorFormat(InferenceEngine::ColorFormat::RGB);
```
> **NOTE**: NV12 input color format pre-processing differs from other color conversions. In the case of NV12, the Inference Engine expects two separate image planes (Y and UV). You must use a specific `InferenceEngine::NV12Blob` object instead of the default blob object and set this blob to the Inference Engine Infer Request using `InferenceEngine::InferRequest::SetBlob()`. Refer to [Hello NV12 Input Classification C++ Sample](../../samples/cpp/hello_nv12_input_classification/README.md) for more details.

5. **Run on multiple images** by setting a batch. If you want to run inference for multiple images at once, you can use the built-in batch pre-processing functionality.

> **NOTE**: Batch pre-processing is not supported if the input color format is set to `ColorFormat::NV12`.
@sphinxdirective
.. raw:: html

    </div>
@endsphinxdirective

#### Step 3. Load the Model to the Device

Load the model to the device using `InferenceEngine::Core::LoadNetwork()`:

@sphinxdirective

.. tab:: IR

    .. code-block:: c

        executable_network = core.LoadNetwork("model.xml", "CPU");

.. tab:: ONNX

    .. code-block:: c

        executable_network = core.LoadNetwork("model.onnx", "CPU");

.. tab:: nGraph

    .. code-block:: c

        std::shared_ptr<Function> createNetwork() {
            // To construct a network, please follow
            // https://docs.openvino.ai/latest/openvino_docs_nGraph_DG_build_function.html
        }
        auto network = CNNNetwork(createNetwork());
        executable_network = core.LoadNetwork(network, "CPU");

.. tab:: Model From Step 2

    Follow this step only if you went through the optional "Step 2 (Optional). Configure Input and Output of the Model"; otherwise, use another tab for your model type: IR (OpenVINO Intermediate Representation), ONNX, or nGraph.

    .. code-block:: c

        executable_network = core.LoadNetwork(network, "CPU");

@endsphinxdirective

This creates an executable network from a network object. The executable network is associated with a single hardware device.
It is possible to create as many networks as needed and to use them simultaneously (up to the limitation of the hardware resources).

The third parameter is a configuration for the plugin. It is a map of pairs: (parameter name, parameter value). See the [Supported Devices](supported_plugins/Supported_Devices.md) page for details about supported configuration parameters.

@snippet snippets/Integrate_with_customer_application_new_API.cpp part6
#### Step 4. Create an Inference Request

Create an infer request using the following code:

@snippet snippets/Integrate_with_customer_application_new_API.cpp part7

#### Step 5. Prepare Input

You can use one of the following options to prepare input:

* **Optimal way for a single network.** Get blobs allocated by an infer request using `InferenceEngine::InferRequest::GetBlob()` and feed an image and the input data to the blobs. In this case, input data must be aligned (resized manually) with a given blob size and have a correct color format.

@snippet snippets/Integrate_with_customer_application_new_API.cpp part8

* **Optimal way for a cascade of networks (output of one network is input for another).** Get the output blob from the first request using `InferenceEngine::InferRequest::GetBlob()` and set it as input for the second request using `InferenceEngine::InferRequest::SetBlob()`.

@snippet snippets/Integrate_with_customer_application_new_API.cpp part9

* **Optimal way to handle ROI (an ROI object located inside the input of one network is input for another).** It is possible to re-use a shared input across several networks. You do not need to allocate a separate input blob for a network if it processes an ROI object located inside an already allocated input of a previous network. For instance, the first network may detect objects on a video frame (stored as an input blob), and the second network may accept the detected bounding boxes (ROIs inside the frame) as input. In this case, the second network can re-use the pre-allocated input blob (used by the first network) and just crop the ROI, without allocating new memory, using `InferenceEngine::make_shared_blob()` with `InferenceEngine::Blob::Ptr` and `InferenceEngine::ROI` passed as parameters.

@snippet snippets/Integrate_with_customer_application_new_API.cpp part10

Make sure that the shared input is kept valid during the execution of each network. Otherwise, the ROI blob may be corrupted if the original input blob (that the ROI is cropped from) has already been rewritten.

* Allocate input blobs of the appropriate types and sizes, feed an image and the input data to the blobs, and call `InferenceEngine::InferRequest::SetBlob()` to set these blobs for an infer request:

@snippet snippets/Integrate_with_customer_application_new_API.cpp part11

A blob can be filled before and after `SetBlob()`.

> **NOTE**:
>
> * The `SetBlob()` method compares the precision and layout of an input blob with the ones defined in step 2 and
>   throws an exception if they do not match. It also compares the size of the input blob with the input
>   size of the read network. But if the input was configured as resizable, you can set an input blob of
>   any size (for example, any ROI blob). Input resize is invoked automatically using the resize
>   algorithm configured in step 2. Similarly to resize, color format conversions allow the color
>   format of an input blob to differ from the color format of the read network. Color format
>   conversion is invoked automatically using the color format configured in step 2.
>
> * `GetBlob()` logic is the same for pre-processable and non-pre-processable input. Even if it is
>   called with input configured as resizable or as having a specific color format, a blob allocated by
>   an infer request is returned. Its size and color format are already consistent with the
>   corresponding values of the read network. No pre-processing will happen for this blob. If you
>   call `GetBlob()` after `SetBlob()`, you will get the blob you set in `SetBlob()`.
#### Step 6. Start Inference

Start inference in asynchronous or synchronous mode. Async API usage can improve the overall frame rate of the application: rather than waiting for inference to complete, the app can keep working on the host while the accelerator is busy.

* For a synchronous inference request:
```cpp
infer_request.Infer();
```

* For an asynchronous inference request:
```cpp
infer_request.StartAsync();
infer_request.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY);
```
`StartAsync` returns immediately and starts inference without blocking the main thread, while `Infer` blocks the main thread and returns when inference is completed. Call `Wait` to wait for the result of an asynchronous request to become available.

There are three ways to call `Wait`:
* Specify a maximum duration in milliseconds to block for. The method blocks until the specified timeout has elapsed or the result becomes available, whichever comes first.
* `InferenceEngine::InferRequest::WaitMode::RESULT_READY` - waits until the inference result becomes available.
* `InferenceEngine::InferRequest::WaitMode::STATUS_ONLY` - immediately returns the request status. It does not block or interrupt the current thread.

Both request types are thread-safe: they can be called from different threads without fear of corruption or failures.

Multiple requests for a single `ExecutableNetwork` are executed sequentially, one by one, in FIFO order.

While a request is ongoing, all its methods except `InferenceEngine::InferRequest::Wait` throw an exception.

#### Step 7. Process the Inference Results

Go over the output blobs and process the inference results. Note that casting `Blob` to `TBlob` via `std::dynamic_pointer_cast` is not the recommended way. It is better to access data via the `buffer()` and `as()` methods as follows:

@snippet snippets/Integrate_with_customer_application_new_API.cpp part14
### Build Your Application

For details about building your application, refer to the CMake files for the sample applications.
All samples source code is located in the `<INSTALL_DIR>/samples` directory, where `INSTALL_DIR` is the OpenVINO™ installation directory.

To build your project using CMake with the default build tools currently available on your machine, execute the following commands:

> **NOTE**: Make sure you set environment variables first by running `<INSTALL_DIR>/setupvars.sh` (or `setupvars.bat` for Windows). Otherwise the `InferenceEngine_DIR` and `OpenCV_DIR` variables won't be configured properly to pass `find_package` calls.

```sh
cd build/
cmake ../project
cmake --build .
```

You can specify additional build options (for example, to build a CMake project on Windows with specific build tools). Refer to the [CMake page](https://cmake.org/cmake/help/latest/manual/cmake.1.html#manual:cmake(1)) for details.

### Run Your Application

> **NOTE**: Before running, make sure you completed the **Set the Environment Variables** section in the [OpenVINO Installation](../../samples/cpp/hello_nv12_input_classification/README.md) document so that the application can find the libraries.

To run compiled applications on Microsoft* Windows* OS, make sure that the Microsoft* Visual C++ 2017
Redistributable and Intel® C++ Compiler 2017 Redistributable packages are installed and that the
`<INSTALL_DIR>/bin/intel64/Release/*.dll` files are placed in the
application folder or are accessible via the `%PATH%` environment variable.
## Integrate Inference Engine with Your Python Application

@sphinxdirective
.. raw:: html

    <div id="switcher-python" class="switcher-anchor">Python</div>
@endsphinxdirective

This document explains how to integrate and use the Inference Engine API with your Python application.

The following diagram illustrates the typical Inference Engine Python API workflow:

![ie_api_flow_python]

Read the sections below to learn about each item.

### Import Inference Module

To make use of the Inference Engine functionality, import IECore to your application:

```py
from openvino.inference_engine import IECore
```

### Use Inference Engine API

This section provides step-by-step instructions to implement a typical inference pipeline with the Inference Engine API:

![ie_api_use_python]

#### Step 1. Create Inference Engine Core

Use the following code to create Inference Engine Core to manage available devices and read network objects:

```py
ie = IECore()
```
#### Step 2 (Optional). Read the Model and Configure Its Input and Output

@sphinxdirective
.. raw:: html

    <div class="collapsible-section">
@endsphinxdirective

Optionally, configure input and output of the model using the steps below:

1. Read the model:
@sphinxdirective

.. tab:: IR

    .. code-block:: python

        net = ie.read_network(model="model.xml")

.. tab:: ONNX

    .. code-block:: python

        net = ie.read_network(model="model.onnx")

.. tab:: nGraph

    .. code-block:: python

        # Basic example of nGraph model creation
        # (assumed import paths for the 2021.x nGraph Python API)
        import ngraph as ng
        from ngraph.impl import Function, Shape, Type
        from ngraph.impl.op import Parameter
        from openvino.inference_engine import IENetwork

        param = Parameter(Type.f32, Shape([1, 3, 22, 22]))
        relu = ng.relu(param)
        func = Function([relu], [param], 'test')
        caps = Function.to_capsule(func)
        net = IENetwork(caps)

@endsphinxdirective

2. Request input and output information using `input_info` and `outputs`:
```py
inputs = net.input_info
input_name = next(iter(net.input_info))

outputs = net.outputs
output_name = next(iter(net.outputs))
```
Information about an input layer is stored in `input_info`. The next snippet prints the input layout, precision and shape:
```py
print("Inputs:")
for name, info in net.input_info.items():
    print("\tname: {}".format(name))
    print("\tshape: {}".format(info.tensor_desc.dims))
    print("\tlayout: {}".format(info.layout))
    print("\tprecision: {}\n".format(info.precision))
```
For a typical image classification model, this output tells us that the model expects inputs with a shape of [1, 3, 224, 224] in NCHW layout. This means that the model expects input data with a batch size (N) of 1, 3 channels (C), and images of a height (H) and width (W) of 224. The input data is expected to be of FP32 (floating point) precision.

Getting the output layout, precision and shape is similar to getting the input layout, precision and shape:
```py
print("Outputs:")
for name, info in net.outputs.items():
    print("\tname: {}".format(name))
    print("\tshape: {}".format(info.shape))
    print("\tlayout: {}".format(info.layout))
    print("\tprecision: {}\n".format(info.precision))
```
For the same model, this output shows that the model returns outputs with a shape of [1, 1001], where 1 is the batch size (N) and 1001 is the number of classes (C). The output is returned as 32-bit floating point.

@sphinxdirective
.. raw:: html

    </div>
@endsphinxdirective
#### Step 3. Load the Model to the Device

Load the model to the device using `load_network()`:

@sphinxdirective

.. tab:: IR

    .. code-block:: python

        exec_net = ie.load_network(network="model.xml", device_name="CPU")

.. tab:: ONNX

    .. code-block:: python

        exec_net = ie.load_network(network="model.onnx", device_name="CPU")

.. tab:: Model from step 2

    .. code-block:: python

        exec_net = ie.load_network(network=net, device_name="CPU")

@endsphinxdirective

This example is designed for the CPU device. Refer to the [Supported Devices](../IE_DG/supported_plugins/Supported_Devices.md) page to read about more devices.
#### Step 4. Prepare Input

```py
import cv2
import numpy as np

image = cv2.imread("image.png")

# Resize the image with OpenCV if needed, to match the net input shape
# N, C, H, W = net.input_info[input_name].tensor_desc.dims
# image = cv2.resize(src=image, dsize=(W, H))

# Convert the image to NCHW format with FP32 type
input_data = np.expand_dims(np.transpose(image, (2, 0, 1)), 0).astype(np.float32)
```

#### Step 5. Start Inference

```py
result = exec_net.infer({input_name: input_data})
```
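
Inference can also be started asynchronously, mirroring the C++ flow above. A minimal sketch using the first infer request of the executable network (the request and blob attribute names follow the `ie_api` Python API as the author understands it, so treat them as assumptions):

```py
# Asynchronous variant: start the request, then wait for the result
request = exec_net.requests[0]
request.async_infer({input_name: input_data})
request.wait()
result = {output_name: request.output_blobs[output_name].buffer}
```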

#### Step 6. Process the Inference Results

```py
output = result[output_name]
```
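
For a classification model, a minimal post-processing sketch (assuming the output is a [1, num_classes] array of class scores) might look like this:

```py
import numpy as np

# Assuming a classification output of shape [1, num_classes]
probs = output[0]
top_class = int(np.argmax(probs))
print("Predicted class id: {}, score: {:.4f}".format(top_class, probs[top_class]))
```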

### Run Your Application

Congratulations, you have created your first Python application with the OpenVINO™ toolkit. Now you can run it.

[ie_api_flow_cpp]: img/BASIC_IE_API_workflow_Cpp.svg
[ie_api_use_cpp]: img/IMPLEMENT_PIPELINE_with_API_C.svg
[ie_api_flow_python]: img/BASIC_IE_API_workflow_Python.svg
[ie_api_use_python]: img/IMPLEMENT_PIPELINE_with_API_Python.svg
@@ -1,58 +0,0 @@
# Known Issues and Limitations {#openvino_docs_IE_DG_Known_Issues_Limitations}

## Multiple OpenMP Loadings

If the application uses the Inference Engine with third-party components that depend on Intel OpenMP, multiple loadings of the libiomp library may occur and cause OpenMP runtime initialization conflicts. This may happen, for example, if the application uses the Intel® Math Kernel Library (Intel® MKL) through the "Single Dynamic Library" (<code>libmkl_rt.so</code>) mechanism and calls Intel MKL after loading the Inference Engine plugin.

The error log looks like this:

```sh
OMP: Error #15: Initializing libiomp5.so, but found libiomp5.so already initialized.
OMP: Hint: This means that multiple copies of the OpenMP runtime have been linked into the program. That is dangerous, since it can degrade performance or cause incorrect results. The best thing to do is to ensure that only a single OpenMP runtime is linked into the process, e.g. by avoiding static linking of the OpenMP runtime in any library. As an unsafe, unsupported, undocumented workaround you can set the environment variable KMP_DUPLICATE_LIB_OK=TRUE to allow the program to continue to execute, but that may cause crashes or silently produce incorrect results. For more information, please see http://www.intel.com/software/products/support/.
```

Possible workarounds:

* Preload the OpenMP runtime using the <code>LD_PRELOAD</code> variable:
```sh
LD_PRELOAD=<path_to_libiomp5.so> <path_to_your_executable>
```
This eliminates multiple loadings of libiomp and makes all the components use this specific version of OpenMP.

* Alternatively, you can set <code>KMP_DUPLICATE_LIB_OK=TRUE</code>. However, performance degradation or incorrect results may occur in this case.
## Old proto compiler breaks protobuf library

With the Python protobuf library version 3.5.1, the following incompatibility can happen.
The known case is CentOS 7.4.

The error log looks like this:

```sh
File "../lib64/python3.5/site-packages/google/protobuf/descriptor.py", line 829, in __new__
return _message.default_pool.AddSerializedFile(serialized_pb)
TypeError: expected bytes, str found
```

A possible workaround is to upgrade the default protobuf compiler (libprotoc 2.5.0) to a newer version, for example libprotoc 2.6.1. See the related [protobuf issue](https://github.com/google/protobuf/issues/4272) for details.
## Dynamic batching

Refer to the **Limitations** section of the [Dynamic batching page](DynamicBatching.md).

## Static Shape Infer

Refer to the **Limitations** section of the [Static Shape Infer page](ShapeInference.md).

## Image Pre-Processing Performance Optimization Issue

As described in the [documentation for the new API](Integrate_with_customer_application_new_API.md), you can set an image blob of any size to an
infer request using resizable input. Resize is executed during inference using the configured resize algorithm.

But currently, resize algorithms are not completely optimized. So expect performance degradation if resizable input is
specified and an input blob (to be resized) is set using `SetBlob()`. The best performance is achieved with the
[CPU](supported_plugins/CPU.md) plugin only (because enabled OpenMP* provides parallelism).

Another limitation is that, currently, resize algorithms support the NCHW layout only. So if you set the NHWC layout for an input
blob, NHWC is converted to NCHW before resize and back to NHWC after resize.
@@ -1,60 +0,0 @@
# Inference Engine Memory Primitives {#openvino_docs_IE_DG_Memory_primitives}

## Inference Engine Memory Primitives (C++)

@sphinxdirective
.. raw:: html

    <div id="switcher-cpp" class="switcher-anchor">C++</div>
@endsphinxdirective

## Blobs

<code>InferenceEngine::Blob</code> is the main class intended for working with memory.
Using this class you can read and write memory, get information about the memory structure, etc.

The right way to create <code>Blob</code> objects with a specific layout is to use constructors with <code>InferenceEngine::TensorDesc</code>:
<pre class="brush:cpp">
InferenceEngine::TensorDesc tdesc(InferenceEngine::Precision::FP32, {1, 3, 227, 227}, InferenceEngine::Layout::NCHW);
InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<float>(tdesc);
</pre>

## Layouts

<code>InferenceEngine::TensorDesc</code> is a special class that provides layout format description.

This class allows you to create planar layouts using the standard formats (such as <code>InferenceEngine::Layout::NCDHW</code>, <code>InferenceEngine::Layout::NCHW</code>, <code>InferenceEngine::Layout::NC</code>, <code>InferenceEngine::Layout::C</code>, etc.) and also non-planar layouts using <code>InferenceEngine::BlockingDesc</code>.

In order to create a complex layout, you should use <code>InferenceEngine::BlockingDesc</code>, which allows you to define the blocked memory with offsets and strides.
## Examples

1. You can define a blob with dimensions {N: 1, C: 25, H: 20, W: 20} and format NHWC using the following parameters:<br/>
<pre class="brush:cpp">
InferenceEngine::BlockingDesc({1, 20, 20, 25}, {0, 2, 3, 1}); // or
InferenceEngine::BlockingDesc({1, 20, 20, 25}, InferenceEngine::Layout::NHWC);
</pre>
2. If you have memory with real dimensions {N: 1, C: 25, H: 20, W: 20} but with channels that are blocked by 8, you can define it using the following parameters:<br/>
<pre class="brush:cpp">
InferenceEngine::BlockingDesc({1, 4, 20, 20, 8}, {0, 1, 2, 3, 1})
</pre>
3. You can also set strides and offsets if the layout requires them.
4. If you have a complex blob layout and you don't want to calculate the real offset to data, you can use the <code>InferenceEngine::TensorDesc::offset(size_t l)</code> or <code>InferenceEngine::TensorDesc::offset(SizeVector v)</code> methods.<br/>
For example:
<pre class="brush:cpp">
InferenceEngine::BlockingDesc blk({1, 4, 20, 20, 8}, {0, 1, 2, 3, 1});
InferenceEngine::TensorDesc tdesc(InferenceEngine::Precision::FP32, {1, 25, 20, 20}, blk);
tdesc.offset(0); // = 0
tdesc.offset(1); // = 8
tdesc.offset({0, 0, 0, 2}); // = 16
tdesc.offset({0, 1, 0, 2}); // = 17
</pre>
5. If you would like to create a TensorDesc with a planar format for N dimensions (N can be 1, 2, 4, etc.), you can use the <code>InferenceEngine::TensorDesc::getLayoutByDims</code> method:
<pre class="brush:cpp">
InferenceEngine::TensorDesc::getLayoutByDims({1}); // InferenceEngine::Layout::C
InferenceEngine::TensorDesc::getLayoutByDims({1, 2}); // InferenceEngine::Layout::NC
InferenceEngine::TensorDesc::getLayoutByDims({1, 2, 3, 4}); // InferenceEngine::Layout::NCHW
InferenceEngine::TensorDesc::getLayoutByDims({1, 2, 3}); // InferenceEngine::Layout::BLOCKED
InferenceEngine::TensorDesc::getLayoutByDims({1, 2, 3, 4, 5}); // InferenceEngine::Layout::NCDHW
InferenceEngine::TensorDesc::getLayoutByDims({1, 2, 3, 4, 5, ...}); // InferenceEngine::Layout::BLOCKED
</pre>
@@ -1,136 +0,0 @@
# Model Caching Overview {#openvino_docs_IE_DG_Model_caching_overview}

## Introduction (C++)

@sphinxdirective
.. raw:: html

    <div id="switcher-cpp" class="switcher-anchor">C++</div>
@endsphinxdirective

As described in the [Inference Engine Developer Guide](Deep_Learning_Inference_Engine_DevGuide.md), a common application flow consists of the following steps:

1. **Create an Inference Engine Core object**: First step to manage available devices and read network objects
2. **Read the Intermediate Representation**: Read an Intermediate Representation file into an object of the `InferenceEngine::CNNNetwork`
3. **Prepare inputs and outputs**: If needed, manipulate precision, memory layout, size or color format
4. **Set configuration**: Pass device-specific loading configurations to the device
5. **Compile and Load Network to device**: Use the `InferenceEngine::Core::LoadNetwork()` method with a specific device
6. **Set input data**: Specify input blob
7. **Execute**: Carry out inference and process results

Step 5 can potentially perform several time-consuming device-specific optimizations and network compilations,
and such delays can lead to a bad user experience on application startup. To avoid this, some devices offer
import/export network capability, and it is possible to either use the [Compile tool](../../tools/compile_tool/README.md)
or enable model caching to export the compiled network automatically. Reusing cached networks can significantly reduce load network time.
### Set "CACHE_DIR" config option to enable model caching
|
||||
|
||||
To enable model caching, the application must specify a folder to store cached blobs, which is done like this:
|
||||
|
||||
@snippet snippets/InferenceEngine_Caching0.cpp part0
|
||||
|
||||
With this code, if the device specified by `LoadNetwork` supports import/export network capability, a cached blob is automatically created inside the `myCacheFolder` folder.
|
||||
CACHE_DIR config is set to the Core object. If the device does not support import/export capability, cache is not created and no error is thrown.
|
||||
|
||||
Depending on your device, total time for loading network on application startup can be significantly reduced.
|
||||
Also note that the very first LoadNetwork (when cache is not yet created) takes slightly longer time to "export" the compiled blob into a cache file:
|
||||
|
||||
![caching_enabled]
|
||||
|
||||
### Even faster: use LoadNetwork(modelPath)
|
||||
|
||||
In some cases, applications do not need to customize inputs and outputs every time. Such an application always
|
||||
call `cnnNet = ie.ReadNetwork(...)`, then `ie.LoadNetwork(cnnNet, ..)` and it can be further optimized.
|
||||
For these cases, the 2021.4 release introduces a more convenient API to load the network in a single call, skipping the export step:
|
||||
|
||||
@snippet snippets/InferenceEngine_Caching1.cpp part1
|
||||
|
||||
With model caching enabled, total load time is even smaller, if ReadNetwork is optimized as well.
|
||||
|
||||
@snippet snippets/InferenceEngine_Caching2.cpp part2
|
||||
|
||||
![caching_times]
|
||||
|
||||
### Advanced Examples
|
||||
|
||||
Not every device supports network import/export capability. For those that don't, enabling caching has no effect.
|
||||
To check in advance if a particular device supports model caching, your application can use the following code:
|
||||
|
||||
@snippet snippets/InferenceEngine_Caching3.cpp part3
|
||||
|
||||
## Introduction (Python)

@sphinxdirective
.. raw:: html

    <div id="switcher-python" class="switcher-anchor">Python</div>
@endsphinxdirective

As described in the Inference Engine Developer Guide, a common application flow consists of the following steps:

1. **Create an Inference Engine Core Object**
2. **Read the Intermediate Representation** - Read an Intermediate Representation file into an object of the [ie_api.IENetwork](api/ie_python_api/_autosummary/openvino.inference_engine.IENetwork.html)
3. **Prepare inputs and outputs**
4. **Set configuration** - Pass device-specific loading configurations to the device
5. **Compile and Load Network to device** - Use the `IECore.load_network()` method and specify the target device
6. **Set input data**
7. **Execute the model** - Run inference

Step 5 can potentially perform several time-consuming device-specific optimizations and network compilations, and such delays can lead to a bad user experience on application startup. To avoid this, some devices offer Import/Export network capability, and it is possible to either use the [Compile tool](../../tools/compile_tool/README.md) or enable model caching to export the compiled network automatically. Reusing cached networks can significantly reduce load network time.

### Set the "CACHE_DIR" config option to enable model caching

To enable model caching, the application must specify the folder where to store cached blobs. It can be done using [IECore.set_config](api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino.inference_engine.IECore.set_config).

``` python
from openvino.inference_engine import IECore

ie = IECore()
ie.set_config(config={"CACHE_DIR": path_to_cache}, device_name=device)
net = ie.read_network(model=path_to_xml_file)
exec_net = ie.load_network(network=net, device_name=device)
```

With this code, if the device supports the Import/Export network capability, a cached blob is automatically created inside the `path_to_cache` directory. The `CACHE_DIR` config is set on the Core object. If the device does not support the Import/Export capability, the cache is simply not created and no error is thrown.

Depending on your device, the total time for loading a network on application startup can be significantly reduced. Also note that the very first [IECore.load_network](api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino.inference_engine.IECore.load_network) (when the cache is not yet created) takes slightly longer, to export the compiled blob into a cache file.
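
To observe the effect, a minimal sketch that times the load call (the `model_cache` folder and `model.xml` path are illustrative placeholders):

``` python
import time
from openvino.inference_engine import IECore

ie = IECore()
ie.set_config(config={"CACHE_DIR": "model_cache"}, device_name="CPU")

start = time.perf_counter()
exec_net = ie.load_network(network="model.xml", device_name="CPU")
print("Load time: {:.2f} s".format(time.perf_counter() - start))
# The first run exports the compiled blob; repeated runs load it from model_cache
```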

![caching_enabled]

### Even Faster: Use IECore.load_network(path_to_xml_file)

In some cases, applications do not need to customize inputs and outputs every time. These applications always call [IECore.read_network](api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino.inference_engine.IECore.read_network), then `IECore.load_network(network=path_to_xml_file)`, and may be further optimized. For such cases, it's more convenient to load the network in a single call to `ie.load_network()`.
A model can be loaded directly to the device, with model caching enabled:

``` python
from openvino.inference_engine import IECore

ie = IECore()
ie.set_config(config={"CACHE_DIR": path_to_cache}, device_name=device)
ie.load_network(network=path_to_xml_file, device_name=device)
```

![caching_times]

### Advanced Examples

Not every device supports the network import/export capability; enabling caching for such devices has no effect. To check in advance if a particular device supports model caching, your application can use the following code:

```python
from openvino.inference_engine import IECore

ie = IECore()
all_metrics = ie.get_metric(device_name=device, metric_name="SUPPORTED_METRICS")
# Find the 'IMPORT_EXPORT_SUPPORT' metric in supported metrics
allows_caching = "IMPORT_EXPORT_SUPPORT" in all_metrics
```
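
A small extension of this check (a sketch) reports every visible device that supports caching:

```python
from openvino.inference_engine import IECore

ie = IECore()
for device in ie.available_devices:
    metrics = ie.get_metric(device_name=device, metric_name="SUPPORTED_METRICS")
    if "IMPORT_EXPORT_SUPPORT" in metrics:
        print("{} supports model caching".format(device))
```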

> **NOTE**: The GPU plugin does not have the IMPORT_EXPORT_SUPPORT capability, and does not support model caching yet. However, the GPU plugin supports caching kernels (see the [GPU plugin documentation](supported_plugins/GPU.md)). Kernel caching for the GPU plugin can be accessed the same way as model caching: by setting the `CACHE_DIR` configuration key to a folder where the cache should be stored.

[caching_enabled]: ../img/caching_enabled.png
[caching_times]: ../img/caching_times.png
@@ -1,91 +0,0 @@
# ONNX Format Support {#openvino_docs_IE_DG_ONNX_Support}

## Introduction (C++)

@sphinxdirective
.. raw:: html

    <div id="switcher-cpp" class="switcher-anchor">C++</div>
@endsphinxdirective

Starting with the 2020.4 release, OpenVINO™ supports reading native ONNX models. The `Core::ReadNetwork()` method provides a uniform way to read models from IR or ONNX format; it is the recommended approach to reading models. Example:

```cpp
InferenceEngine::Core core;
auto network = core.ReadNetwork("model.onnx");
```

### Reshape Feature

OpenVINO™ does not provide a mechanism to specify pre-processing (like mean values subtraction or reverse input channels) for the ONNX format. If an ONNX model contains dynamic shapes for input, use the `CNNNetwork::reshape` method to reshape the model.

### Weights Saved in External Files

OpenVINO™ supports ONNX models that store weights in external files. It is especially useful for models larger than 2GB because of protobuf limitations. To read such models, use the `ReadNetwork` overload which takes `modelPath` as an input parameter (both `std::string` and `std::wstring`). Note that the `binPath` argument of `ReadNetwork` should be empty in this case, because paths to external weights are saved directly in an ONNX model.
Otherwise, a runtime exception is thrown. Reading models with external weights is not supported by the `ReadNetwork(const std::string& model, const Blob::CPtr& weights)` overload.

Paths to external weight files are saved in an ONNX model; these paths are relative to the model's directory path.
It means that if a model is located at `/home/user/workspace/models/model.onnx` and a file that contains external weights is in `/home/user/workspace/models/data/weights.bin`, then the path saved in the model should be:
`data/weights.bin`

> **NOTE**: A single model can use many external weights files.

> **NOTE**: Data of many tensors can be stored in a single external weights file (it is processed using offset and length values, which can also be saved in a model).

The described mechanism is the only way to read weights from external files. The following input parameters of the `ReadNetwork` function overloads are NOT supported for ONNX models and should be passed as empty:
* `const std::wstring& binPath`
* `const std::string& binPath`
* `const Blob::CPtr& weights`

You can find more details about the external data mechanism in the [ONNX documentation](https://github.com/onnx/onnx/blob/master/docs/ExternalData.md).
To convert a model to use the external data feature, you can use [ONNX helper functions](https://github.com/onnx/onnx/blob/master/onnx/external_data_helper.py).

Unsupported types of tensors:
* string
* complex64
* complex128
## Introduction (Python)

@sphinxdirective
.. raw:: html

    <div id="switcher-python" class="switcher-anchor">Python</div>
@endsphinxdirective

Starting with the 2020.4 release, OpenVINO™ supports reading native ONNX models. The `IECore.read_network()` method provides a uniform way to read models from IR or ONNX format; it is the recommended approach to reading models. Example:

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model=path_to_onnx_file)
```

### Reshape Feature

OpenVINO™ does not provide a mechanism to specify pre-processing (like mean values subtraction or reverse input channels) for the ONNX format. If an ONNX model contains dynamic shapes for input, use the [IENetwork.reshape](api/ie_python_api/_autosummary/openvino.inference_engine.IENetwork.html#openvino.inference_engine.IENetwork.reshape) method to reshape the model.

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model=path_to_onnx_file)
input_layer = next(iter(net.input_info))
net.reshape({input_layer: new_shape})
```

### Weights Saved in External Files

OpenVINO™ supports ONNX models that store weights in external files. It is especially useful for models larger than 2GB because of protobuf limitations. To read such models, use the `model` parameter in the `IECore.read_network(model=path_to_onnx_file)` method. Note that the `weights` parameter (the path to the binary weights file) should be empty in this case, because paths to external weights are saved directly in an ONNX model. Otherwise, a runtime exception is thrown. Reading models with external weights is **NOT** supported by the `read_network(weights=path_to_bin_file)` parameter.

Paths to external weight files are saved in an ONNX model; these paths are relative to the model's directory path. It means that if a model is located at `$HOME/workspace/models/model.onnx` and a file that contains external weights is in `$HOME/workspace/models/data/weights.bin`, the path saved in the model should be `data/weights.bin`.

> **NOTE**:
> * A single model can use many external weights files.
> * Data of many tensors can be stored in a single external weights file (it is processed using offset and length values, which can also be saved in a model).

The described mechanism is the only way to read weights from external files. The `weights` input parameter of the [IECore.read_network](api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino.inference_engine.IECore.read_network) function is NOT supported for ONNX models and should not be passed, or should be set to `None`.
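
For example, a minimal sketch of reading such a model (the path is an illustrative placeholder; the external weights file is located automatically via the relative path stored in the model):

```python
from openvino.inference_engine import IECore

ie = IECore()
# The model references data/weights.bin relative to its own directory;
# do not pass the weights parameter for ONNX models with external weights
net = ie.read_network(model="/home/user/workspace/models/model.onnx")
```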

Unsupported types of tensors:
* string
* complex64
* complex128
@@ -1,197 +0,0 @@

# Operations Specifications {#openvino_docs_operations_specifications}

@sphinxdirective

.. toctree::
   :maxdepth: 1

   openvino_docs_ops_arithmetic_Abs_1
   openvino_docs_ops_arithmetic_Acos_1
   openvino_docs_ops_arithmetic_Acosh_3
   openvino_docs_ops_pooling_AdaptiveAvgPool_8
   openvino_docs_ops_pooling_AdaptiveMaxPool_8
   openvino_docs_ops_arithmetic_Add_1
   openvino_docs_ops_arithmetic_Asin_1
   openvino_docs_ops_arithmetic_Asinh_3
   openvino_docs_ops_infrastructure_Assign_3
   openvino_docs_ops_arithmetic_Atan_1
   openvino_docs_ops_arithmetic_Atanh_3
   openvino_docs_ops_pooling_AvgPool_1
   openvino_docs_ops_normalization_BatchNormInference_1
   openvino_docs_ops_normalization_BatchNormInference_5
   openvino_docs_ops_movement_BatchToSpace_2
   openvino_docs_ops_convolution_BinaryConvolution_1
   openvino_docs_ops_movement_Broadcast_1
   openvino_docs_ops_movement_Broadcast_3
   openvino_docs_ops_condition_Bucketize_3
   openvino_docs_ops_sequence_CTCGreedyDecoder_1
   openvino_docs_ops_sequence_CTCGreedyDecoderSeqLen_6
   openvino_docs_ops_arithmetic_Ceiling_1
   openvino_docs_ops_activation_Clamp_1
   openvino_docs_ops_movement_Concat_1
   openvino_docs_ops_infrastructure_Constant_1
   openvino_docs_ops_type_ConvertLike_1
   openvino_docs_ops_type_Convert_1
   openvino_docs_ops_convolution_ConvolutionBackpropData_1
   openvino_docs_ops_convolution_Convolution_1
   openvino_docs_ops_arithmetic_Cos_1
   openvino_docs_ops_arithmetic_Cosh_1
   openvino_docs_ops_sequence_CTCLoss_4
   openvino_docs_ops_arithmetic_CumSum_3
   openvino_docs_ops_convolution_DeformableConvolution_1
   openvino_docs_ops_convolution_DeformableConvolution_8
   openvino_docs_ops_detection_DeformablePSROIPooling_1
   openvino_docs_ops_movement_DepthToSpace_1
   openvino_docs_ops_detection_DetectionOutput_1
   openvino_docs_ops_detection_DetectionOutput_8
   openvino_docs_ops_signals_DFT_7
   openvino_docs_ops_arithmetic_Divide_1
   openvino_docs_ops_matrix_Einsum_7
   openvino_docs_ops_activation_Elu_1
   openvino_docs_ops_sparse_EmbeddingBagOffsetsSum_3
   openvino_docs_ops_sparse_EmbeddingBagPackedSum_3
   openvino_docs_ops_sparse_EmbeddingSegmentsSum_3
   openvino_docs_ops_comparison_Equal_1
   openvino_docs_ops_arithmetic_Erf_1
   openvino_docs_ops_activation_Exp_1
   openvino_docs_ops_detection_ExperimentalDetectronDetectionOutput_6
   openvino_docs_ops_detection_ExperimentalDetectronGenerateProposalsSingleImage_6
   openvino_docs_ops_detection_ExperimentalDetectronPriorGridGenerator_6
   openvino_docs_ops_detection_ExperimentalDetectronROIFeatureExtractor_6
   openvino_docs_ops_sort_ExperimentalDetectronTopKROIs_6
   openvino_docs_ops_movement_ExtractImagePatches_3
   openvino_docs_ops_quantization_FakeQuantize_1
   openvino_docs_ops_arithmetic_FloorMod_1
   openvino_docs_ops_arithmetic_Floor_1
   openvino_docs_ops_normalization_GRN_1
   openvino_docs_ops_sequence_GRUCell_3
   openvino_docs_ops_sequence_GRUSequence_5
   openvino_docs_ops_movement_GatherTree_1
   openvino_docs_ops_movement_Gather_1
   openvino_docs_ops_movement_Gather_7
   openvino_docs_ops_movement_Gather_8
   openvino_docs_ops_movement_GatherElements_6
   openvino_docs_ops_movement_GatherND_5
   openvino_docs_ops_movement_GatherND_8
   openvino_docs_ops_activation_GELU_2
   openvino_docs_ops_activation_GELU_7
   openvino_docs_ops_comparison_GreaterEqual_1
   openvino_docs_ops_comparison_Greater_1
   openvino_docs_ops_convolution_GroupConvolutionBackpropData_1
   openvino_docs_ops_convolution_GroupConvolution_1
   openvino_docs_ops_activation_HardSigmoid_1
   openvino_docs_ops_activation_HSigmoid_5
   openvino_docs_ops_activation_HSwish_4
   openvino_docs_ops_image_I420toBGR_8
   openvino_docs_ops_image_I420toRGB_8
   openvino_docs_ops_signals_IDFT_7
   openvino_docs_ops_infrastructure_If_8
   openvino_docs_ops_image_Interpolate_1
   openvino_docs_ops_image_Interpolate_4
   openvino_docs_ops_normalization_LRN_1
   openvino_docs_ops_sequence_LSTMCell_1
   openvino_docs_ops_sequence_LSTMSequence_1
   openvino_docs_ops_comparison_LessEqual_1
   openvino_docs_ops_comparison_Less_1
   openvino_docs_ops_arithmetic_Log_1
   openvino_docs_ops_logical_LogicalAnd_1
   openvino_docs_ops_logical_LogicalNot_1
   openvino_docs_ops_logical_LogicalOr_1
   openvino_docs_ops_logical_LogicalXor_1
   openvino_docs_ops_activation_LogSoftmax_5
   openvino_docs_ops_infrastructure_Loop_5
   openvino_docs_ops_normalization_MVN_1
   openvino_docs_ops_normalization_MVN_6
   openvino_docs_ops_matrix_MatMul_1
   openvino_docs_ops_sort_MatrixNms_8
   openvino_docs_ops_pooling_MaxPool_1
   openvino_docs_ops_pooling_MaxPool_8
   openvino_docs_ops_arithmetic_Maximum_1
   openvino_docs_ops_arithmetic_Minimum_1
   openvino_docs_ops_activation_Mish_4
   openvino_docs_ops_arithmetic_Mod_1
   openvino_docs_ops_sort_MulticlassNonMaxSuppression_8
   openvino_docs_ops_arithmetic_Multiply_1
   openvino_docs_ops_arithmetic_Negative_1
   openvino_docs_ops_sort_NonMaxSuppression_1
   openvino_docs_ops_sort_NonMaxSuppression_3
   openvino_docs_ops_sort_NonMaxSuppression_4
   openvino_docs_ops_sort_NonMaxSuppression_5
   openvino_docs_ops_condition_NonZero_3
   openvino_docs_ops_normalization_NormalizeL2_1
   openvino_docs_ops_comparison_NotEqual_1
   openvino_docs_ops_image_NV12toBGR_8
   openvino_docs_ops_image_NV12toRGB_8
   openvino_docs_ops_sequence_OneHot_1
   openvino_docs_ops_activation_PReLU_1
   openvino_docs_ops_detection_PSROIPooling_1
   openvino_docs_ops_movement_Pad_1
   openvino_docs_ops_infrastructure_Parameter_1
   openvino_docs_ops_arithmetic_Power_1
   openvino_docs_ops_detection_PriorBoxClustered_1
   openvino_docs_ops_detection_PriorBox_1
   openvino_docs_ops_detection_PriorBox_8
   openvino_docs_ops_detection_Proposal_1
   openvino_docs_ops_detection_Proposal_4
   openvino_docs_ops_generation_RandomUniform_8
   openvino_docs_ops_generation_Range_1
   openvino_docs_ops_generation_Range_4
   openvino_docs_ops_infrastructure_ReadValue_3
   openvino_docs_ops_activation_ReLU_1
   openvino_docs_ops_reduction_ReduceL1_4
   openvino_docs_ops_reduction_ReduceL2_4
   openvino_docs_ops_reduction_ReduceLogicalAnd_1
   openvino_docs_ops_reduction_ReduceLogicalOr_1
   openvino_docs_ops_reduction_ReduceMax_1
   openvino_docs_ops_reduction_ReduceMean_1
   openvino_docs_ops_reduction_ReduceMin_1
   openvino_docs_ops_reduction_ReduceProd_1
   openvino_docs_ops_reduction_ReduceSum_1
   openvino_docs_ops_detection_RegionYolo_1
   openvino_docs_ops_detection_ReorgYolo_1
   openvino_docs_ops_shape_Reshape_1
   openvino_docs_ops_infrastructure_Result_1
   openvino_docs_ops_movement_Reverse_1
   openvino_docs_ops_movement_ReverseSequence_1
   openvino_docs_ops_sequence_RNNCell_3
   openvino_docs_ops_sequence_RNNSequence_5
   openvino_docs_ops_detection_ROIAlign_3
   openvino_docs_ops_detection_ROIPooling_1
   openvino_docs_ops_movement_Roll_7
   openvino_docs_ops_arithmetic_Round_5
   openvino_docs_ops_movement_ScatterElementsUpdate_3
   openvino_docs_ops_movement_ScatterNDUpdate_3
   openvino_docs_ops_movement_ScatterUpdate_3
   openvino_docs_ops_condition_Select_1
   openvino_docs_ops_activation_Selu_1
   openvino_docs_ops_shape_ShapeOf_1
   openvino_docs_ops_shape_ShapeOf_3
   openvino_docs_ops_movement_ShuffleChannels_1
   openvino_docs_ops_activation_Sigmoid_1
   openvino_docs_ops_arithmetic_Sign_1
   openvino_docs_ops_arithmetic_Sin_1
   openvino_docs_ops_arithmetic_Sinh_1
   openvino_docs_ops_movement_Slice_8
   openvino_docs_ops_activation_SoftMax_1
   openvino_docs_ops_activation_SoftMax_8
   openvino_docs_ops_activation_SoftPlus_4
   openvino_docs_ops_movement_SpaceToBatch_2
   openvino_docs_ops_movement_SpaceToDepth_1
   openvino_docs_ops_movement_Split_1
   openvino_docs_ops_arithmetic_Sqrt_1
   openvino_docs_ops_arithmetic_SquaredDifference_1
   openvino_docs_ops_shape_Squeeze_1
   openvino_docs_ops_movement_StridedSlice_1
   openvino_docs_ops_arithmetic_Subtract_1
   openvino_docs_ops_activation_Swish_4
   openvino_docs_ops_arithmetic_Tan_1
   openvino_docs_ops_arithmetic_Tanh_1
   openvino_docs_ops_infrastructure_TensorIterator_1
   openvino_docs_ops_movement_Tile_1
   openvino_docs_ops_sort_TopK_1
   openvino_docs_ops_sort_TopK_3
   openvino_docs_ops_movement_Transpose_1
   openvino_docs_ops_shape_Unsqueeze_1
   openvino_docs_ops_movement_VariadicSplit_1

@endsphinxdirective
@@ -1,52 +0,0 @@

# Paddle Support in OpenVINO™ {#openvino_docs_IE_DG_Paddle_Support}

Starting from the 2022.1 release, OpenVINO™ supports reading native Paddle models.
The `Core::ReadNetwork()` method provides a uniform way to read models in either the Paddle format or the IR format; it is the recommended approach.

## Read Paddle Models from IR

A Paddle model can be read after it is [converted](../MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md) to the [Intermediate Representation (IR)](../MO_DG/IR_and_opsets.md).

**C++ Example:**

```cpp
InferenceEngine::Core core;
auto network = core.ReadNetwork("model.xml");
```

**Python Example:**

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network("model.xml")
```

## Read Paddle Models from the Paddle Format (Paddle `inference model` type)

**C++ Example:**

```cpp
InferenceEngine::Core core;
auto network = core.ReadNetwork("model.pdmodel");
```

**Python Example:**

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network("model.pdmodel")
```

**The Reshape feature:**

OpenVINO™ does not provide a mechanism to specify pre-processing, such as mean value subtraction or reversing input channels, for the Paddle format.
If a Paddle model contains dynamic input shapes, use the `CNNNetwork::reshape` method for shape specialization.
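
As a minimal Python sketch (assuming the model has a single input and `new_shape` holds the desired static shape), the reshape looks the same as for other formats:

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network("model.pdmodel")

# Specialize the dynamic input shape before loading the model to a device.
input_layer = next(iter(net.input_info))
net.reshape({input_layer: new_shape})
```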
## NOTES

* The Paddle [`inference model`](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/doc/doc_en/inference_en.md) mainly consists of two kinds of files, `model.pdmodel` (the model file) and `model.pdiparams` (the parameters file), which are used for inference.
* The list of supported Paddle models and a description of how to export them can be found in [Convert a Paddle Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md). The following Paddle models are supported on Intel® CPU only: `Fast-SCNN`, `Yolo v3`, `ppyolo`, `MobileNetv3-SSD`, `BERT`.
* For `Normalize` Paddle models, the input data should be in FP32 format.
* When reading Paddle models from the Paddle format, make sure that `model.pdmodel` and `model.pdiparams` are in the same directory.
@@ -1,14 +0,0 @@

# OpenVINO™ Python* Package

The OpenVINO™ Python\* package includes tools to measure model accuracy and performance and to calibrate models to low precision.

The OpenVINO™ Python\* package is available in the `<INSTALL_DIR>/python/python3.X` directory.

The OpenVINO™ Python\* package includes the following sub-packages:

- [openvino.inference_engine](../../src/bindings/python/docs/api_overview.md) - Python\* wrapper for the OpenVINO™ Inference Engine.
- `openvino.tools.accuracy_checker` - Measure accuracy.
- `openvino.tools.benchmark` - Measure latency and throughput.
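
A quick sketch to verify that the package is importable (the printed device list depends on your machine):

```python
from openvino.inference_engine import IECore, get_version

ie = IECore()
print(get_version())         # Inference Engine version string
print(ie.available_devices)  # e.g. ['CPU', 'GPU']
```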
## See Also
* [Integrate with Customer Application New API](Integrate_with_customer_application_new_API.md)
@@ -1,226 +0,0 @@

# Using the Reshape Inference Feature {#openvino_docs_IE_DG_ShapeInference}

## Introduction (C++)

@sphinxdirective
.. raw:: html

   <div id="switcher-cpp" class="switcher-anchor">C++</div>
@endsphinxdirective

OpenVINO™ provides two methods for runtime model reshaping: setting a new input shape and setting a new batch dimension value.

### Set a new input shape with the reshape() method

The `InferenceEngine::CNNNetwork::reshape` method updates input shapes and propagates them down to the outputs of the model through all intermediate layers.

> **NOTES**:
> - Starting with the 2021.1 release, the Model Optimizer converts topologies keeping shape-calculating sub-graphs by default, which enables correct shape propagation during reshaping in most cases.
> - Older versions of IRs are not guaranteed to reshape successfully. Please regenerate them with the Model Optimizer of the latest version of OpenVINO™.
> - If an ONNX model does not have a fully defined input shape and the model was imported with the ONNX importer, reshape the model before loading it to the plugin.

### Set a new batch dimension value with the setBatchSize() method

The meaning of a model batch may vary depending on the model design.
This method does not deduce batch placement for inputs from the model architecture.
It assumes that the batch is placed at the zero index in the shape for all inputs and uses the `InferenceEngine::CNNNetwork::reshape` method to propagate updated shapes through the model.

The method transforms the model before a new shape propagation to relax a hard-coded batch dimension in the model, if any.

Use `InferenceEngine::CNNNetwork::reshape` instead of `InferenceEngine::CNNNetwork::setBatchSize` to set new input shapes for the model if the model has one of the following:

* Multiple inputs with different zero-index dimension meanings
* An input without a batch dimension
* A 0D, 1D, or 3D shape

The `InferenceEngine::CNNNetwork::setBatchSize` method is a high-level API method that wraps the `InferenceEngine::CNNNetwork::reshape` method call and works for trivial models from the batch placement standpoint.
Use `InferenceEngine::CNNNetwork::reshape` for other models.

Using the `InferenceEngine::CNNNetwork::setBatchSize` method for models with a non-zero-index batch placement or for models with inputs that do not have a batch dimension may lead to undefined behaviour.

You can change input shapes multiple times using the `InferenceEngine::CNNNetwork::reshape` and `InferenceEngine::CNNNetwork::setBatchSize` methods in any order.
If a model has a hard-coded batch dimension, use `InferenceEngine::CNNNetwork::setBatchSize` first to change the batch, then call `InferenceEngine::CNNNetwork::reshape` to update other dimensions, if needed.

The Inference Engine takes three kinds of model description as an input, which are converted into an `InferenceEngine::CNNNetwork` object:
1. [Intermediate Representation (IR)](../MO_DG/IR_and_opsets.md) through `InferenceEngine::Core::ReadNetwork`
2. [ONNX model](../IE_DG/ONNX_Support.md) through `InferenceEngine::Core::ReadNetwork`
3. [nGraph function](../nGraph_DG/nGraph_dg.md) through the constructor of `InferenceEngine::CNNNetwork`

`InferenceEngine::CNNNetwork` keeps an `ngraph::Function` object with the model description internally.
The object should have fully defined input shapes to be successfully loaded to Inference Engine plugins.
To resolve undefined input dimensions of a model, call the `CNNNetwork::reshape` method to provide new input shapes before loading to the Inference Engine plugin.

Run the following code right after `InferenceEngine::CNNNetwork` creation to explicitly check for model input names and shapes:

```cpp
CNNNetwork network = ... // read an IR / ONNX model or create one from an ngraph::Function explicitly
const auto parameters = network.getFunction()->get_parameters();
for (const auto & parameter : parameters) {
    std::cout << "name: " << parameter->get_friendly_name() << " shape: " << parameter->get_partial_shape() << std::endl;
    if (parameter->get_partial_shape().is_dynamic())
        std::cout << "ATTENTION: Input shape is not fully defined. Use the CNNNetwork::reshape method to resolve it." << std::endl;
}
```

To feed input data of a shape that is different from the model input shape, reshape the model first.

Once the input shape of `InferenceEngine::CNNNetwork` is set, call the `InferenceEngine::Core::LoadNetwork` method to get an `InferenceEngine::ExecutableNetwork` object for inference with updated shapes.

There are other approaches to reshape the model during the stage of <a href="_docs_MO_DG_prepare_model_convert_model_Converting_Model.html#when_to_specify_input_shapes">IR generation</a> or [nGraph::Function creation](../nGraph_DG/build_function.md).

Practically, some models are not ready to be reshaped. In this case, a new input shape cannot be set with the Model Optimizer or the `InferenceEngine::CNNNetwork::reshape` method.

### Usage of Reshape Method <a name="usage_of_reshape_method"></a>

The primary method of the feature is `InferenceEngine::CNNNetwork::reshape`. It takes new input shapes and propagates them from input to output through all intermediate layers of the given network.
The method takes `InferenceEngine::ICNNNetwork::InputShapes` - a map of pairs: the name of input data and its dimensions.

The algorithm for resizing a network is the following:

1) **Collect the map of input names and shapes from the Intermediate Representation (IR)** using the helper method `InferenceEngine::CNNNetwork::getInputShapes`

2) **Set new input shapes**

3) **Call reshape**

Here is a code example:

@snippet snippets/ShapeInference.cpp part0

The Shape Inference feature is used in the [Smart Classroom Demo](@ref omz_demos_smart_classroom_demo_cpp).
### Troubleshooting Reshape Errors

Operation semantics may impose restrictions on the input shapes of an operation.
A shape collision during shape propagation may be a sign that a new shape does not satisfy the restrictions.
Changing the model input shape may result in shape collisions in intermediate operations.

Examples of such operations:
* The [Reshape](../ops/shape/Reshape_1.md) operation with a hard-coded output shape value
* The [MatMul](../ops/matrix/MatMul_1.md) operation with a `Const` second input, which cannot be resized by spatial dimensions due to operation semantics

Model structure and logic should not change significantly after model reshaping.
- The Global Pooling operation is commonly used to reduce the output feature map of classification models.
  Given an input of shape [N, C, H, W], Global Pooling returns an output of shape [N, C, 1, 1].
  Model architects usually express Global Pooling with the help of the `Pooling` operation with a fixed kernel size [H, W].
  During a spatial reshape, given an input of shape [N, C, H1, W1], Pooling with the fixed kernel size [H, W] returns an output of shape [N, C, H2, W2], where H2 and W2 are commonly not equal to `1`.
  This breaks the classification model structure.
  For example, the [publicly available Inception family models from TensorFlow*](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models) have this issue.

- Changing the model input shape may significantly affect its accuracy.
  For example, Object Detection models from TensorFlow have resizing restrictions by design.
  To keep the model valid after the reshape, choose a new input shape that satisfies the conditions listed in the `pipeline.config` file.
  For details, refer to the <a href="_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_Object_Detection_API_Models.html#tf_od_custom_input_shape">TensorFlow Object Detection API models resizing techniques</a>.

### Extensibility

The Inference Engine provides a special mechanism that allows adding support of shape inference for custom operations. This mechanism is described in the [Extensibility documentation](Extensibility_DG/Intro.md).
## Introduction (Python)

@sphinxdirective
.. raw:: html

   <div id="switcher-python" class="switcher-anchor">Python</div>
@endsphinxdirective

OpenVINO™ provides the following methods for runtime model reshaping:

* Set a new input shape with the [IENetwork.reshape](api/ie_python_api/_autosummary/openvino.inference_engine.IENetwork.html#openvino.inference_engine.IENetwork.reshape) method.

  The [IENetwork.reshape](api/ie_python_api/_autosummary/openvino.inference_engine.IENetwork.html#openvino.inference_engine.IENetwork.reshape) method updates input shapes and propagates them down to the outputs of the model through all intermediate layers.

  **NOTES**:
  * The Model Optimizer converts topologies keeping shape-calculating sub-graphs by default, which enables correct shape propagation during reshaping in most cases.
  * Older versions of IRs are not guaranteed to reshape successfully. Please regenerate them with the Model Optimizer of the latest version of OpenVINO™.
  * If an ONNX model does not have a fully defined input shape and the model was imported with the ONNX importer, reshape the model before loading it to the plugin.

* Set a new batch dimension value with the [IENetwork.batch_size](api/ie_python_api/_autosummary/openvino.inference_engine.IENetwork.html#openvino.inference_engine.IENetwork.batch_size) method.

  The meaning of a model batch may vary depending on the model design. This method does not deduce batch placement for inputs from the model architecture. It assumes that the batch is placed at the zero index in the shape for all inputs and uses the [IENetwork.reshape](api/ie_python_api/_autosummary/openvino.inference_engine.IENetwork.html#openvino.inference_engine.IENetwork.reshape) method to propagate updated shapes through the model.

  The method transforms the model before a new shape propagation to relax a hard-coded batch dimension in the model, if any.

Use [IENetwork.reshape](api/ie_python_api/_autosummary/openvino.inference_engine.IENetwork.html#openvino.inference_engine.IENetwork.reshape) rather than [IENetwork.batch_size](api/ie_python_api/_autosummary/openvino.inference_engine.IENetwork.html#openvino.inference_engine.IENetwork.batch_size) to set new input shapes for the model if the model has:

* Multiple inputs with different zero-index dimension meanings
* An input without a batch dimension
* A 0D, 1D, or 3D shape

The [IENetwork.batch_size](api/ie_python_api/_autosummary/openvino.inference_engine.IENetwork.html#openvino.inference_engine.IENetwork.batch_size) method is a high-level API method that wraps the [IENetwork.reshape](api/ie_python_api/_autosummary/openvino.inference_engine.IENetwork.html#openvino.inference_engine.IENetwork.reshape) method call and works for trivial models from the batch placement standpoint. Use [IENetwork.reshape](api/ie_python_api/_autosummary/openvino.inference_engine.IENetwork.html#openvino.inference_engine.IENetwork.reshape) for other models.

Using the [IENetwork.batch_size](api/ie_python_api/_autosummary/openvino.inference_engine.IENetwork.html#openvino.inference_engine.IENetwork.batch_size) method for models with a non-zero-index batch placement or for models with inputs that do not have a batch dimension may lead to undefined behaviour.

You can change input shapes multiple times using the `IENetwork.reshape` and `IENetwork.batch_size` methods in any order. If a model has a hard-coded batch dimension, use `IENetwork.batch_size` first to change the batch, then call `IENetwork.reshape` to update other dimensions, if needed; a minimal sketch of this workflow follows.
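
A sketch of the combined workflow, assuming a model whose inputs all have the batch at the zero index (`path_to_xml_file` and the target shape are placeholders):

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model=path_to_xml_file)

# Change the hard-coded batch dimension first...
net.batch_size = 8

# ...then update the remaining dimensions, if needed.
input_layer = next(iter(net.input_info))
net.reshape({input_layer: [8, 3, 320, 320]})
```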
The Inference Engine takes three kinds of model description as an input, which are converted into an IENetwork object:

1. Intermediate Representation (IR) through `IECore.read_network`
2. ONNX model through `IECore.read_network`
3. nGraph function through the constructor of IENetwork

IENetwork keeps an `ngraph::Function` object with the model description internally. The object should have fully defined input shapes to be successfully loaded to the Inference Engine plugins. To resolve undefined input dimensions of a model, call the `IENetwork.reshape` method providing new input shapes before loading to the Inference Engine plugin.

Run the following code right after IENetwork creation to explicitly check for model input names and shapes:
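
The following sketch mirrors the C++ check above (`path_to_xml_file` is a placeholder):

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model=path_to_xml_file)

# Print every input name and its current shape; undefined dimensions
# must be resolved with IENetwork.reshape before loading to a plugin.
for name, info in net.input_info.items():
    print(f"name: {name} shape: {info.tensor_desc.dims}")
```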

To feed input data of a shape that is different from the model input shape, reshape the model first.

Once the input shape of IENetwork is set, call the `IECore.load_network` method to get an ExecutableNetwork object for inference with updated shapes.

There are other approaches to reshape the model during the stage of IR generation or [nGraph function](https://docs.openvino.ai/latest/openvino_docs_nGraph_DG_PythonAPI.html#create_an_ngraph_function_from_a_graph) creation.

Practically, some models are not ready to be reshaped. In this case, a new input shape cannot be set with the Model Optimizer or the `IENetwork.reshape` method.

### Troubleshooting Reshape Errors

Operation semantics may impose restrictions on the input shapes of an operation. A shape collision during shape propagation may be a sign that a new shape does not satisfy the restrictions. Changing the model input shape may result in shape collisions in intermediate operations.

Examples of such operations:

* The Reshape operation with a hard-coded output shape value
* The MatMul operation with a Const second input, which cannot be resized by spatial dimensions due to operation semantics

A model's structure and logic should not change significantly after model reshaping.

* The Global Pooling operation is commonly used to reduce the output feature map of classification models. Given an input of shape [N, C, H, W], Global Pooling returns an output of shape [N, C, 1, 1]. Model architects usually express Global Pooling with the help of the Pooling operation with a fixed kernel size [H, W]. During a spatial reshape, given an input of shape [N, C, H1, W1], Pooling with the fixed kernel size [H, W] returns an output of shape [N, C, H2, W2], where H2 and W2 are commonly not equal to 1. This breaks the classification model structure. For example, publicly available Inception family models from TensorFlow* have this issue.

* Changing the model input shape may significantly affect its accuracy. For example, Object Detection models from TensorFlow have resizing restrictions by design. To keep the model valid after the reshape, choose a new input shape that satisfies the conditions listed in the pipeline.config file. For details, refer to the TensorFlow Object Detection API models resizing techniques.

### Usage of the Reshape Method

The primary method of the feature is `IENetwork.reshape`. It takes new input shapes and propagates them from input to output through all intermediate layers of the given network. Use `IENetwork.input_info` to get the names of the input layers and `.tensor_desc.dims` to get the current network input shape.

The following code example shows how to reshape a model to the size of an input image.

```python
import cv2
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()

# Read an input image and transpose it to NCHW layout
image = cv2.imread(path_to_image_file)
input_image = image.transpose((2, 0, 1))
input_image = np.expand_dims(input_image, axis=0)

# Load the model and get input info
# Note that this model must support arbitrary input shapes
net = ie.read_network(model=path_to_xml_file)
input_layer = next(iter(net.input_info))
print(f"Input shape: {net.input_info[input_layer].tensor_desc.dims}")

# Call reshape
net.reshape({input_layer: input_image.shape})
print(f"New input shape: {net.input_info[input_layer].tensor_desc.dims}")

# Load the model to the device and proceed with inference
exec_net = ie.load_network(network=net, device_name="CPU")
```

### Extensibility

The Inference Engine provides a special mechanism that allows adding support of shape inference for custom operations. This mechanism is described in the [Extensibility documentation](Extensibility_DG/Intro.md).

### See Also:

[Hello Reshape Python Sample](../../inference_engine/ie_bridges/python/sample/hello_reshape_ssd/README.html)
@@ -1,332 +0,0 @@

# Auto-Device Plugin {#openvino_docs_IE_DG_supported_plugins_AUTO}

## Auto-Device Plugin Execution (C++)

@sphinxdirective
.. raw:: html

   <div id="switcher-cpp" class="switcher-anchor">C++</div>
@endsphinxdirective

The AUTO device is a new, special "virtual" or "proxy" device in the OpenVINO™ toolkit.

Use "AUTO" as the device name to delegate the selection of an actual accelerator to OpenVINO. The Auto-device plugin internally recognizes and selects devices from among CPU, integrated GPU, and discrete Intel GPUs (when available) depending on the device capabilities and the characteristics of CNN models (for example, precision). The Auto-device then assigns inference requests to the selected device.

From the application's point of view, this is just another device that handles all accelerators in the full system.

With the 2021.4 release, Auto-device setup is done in three major steps:
1. Configure each device as usual (for example, via the conventional `SetConfig()` method).
2. Load a network to the Auto-device plugin. This is the only change needed in your application.
3. As with any other executable network resulting from `LoadNetwork()`, create as many requests as needed to saturate the devices.

These steps are covered below in detail.

### Defining and Configuring the Auto-Device Plugin

Following the OpenVINO convention for device names, the Auto-device uses the label "AUTO". The only configuration option for the Auto-device is a limited device list:

| Parameter name | Parameter values | Default | Description |
| :--- | :--- | :--- | :--- |
| "MULTI_DEVICE_PRIORITIES" | comma-separated device names <span style="color:red">with no spaces</span> | N/A | Device candidate list to be selected |

You can use the configuration name directly as a string or use `InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES` from `multi-device/multi_device_config.hpp`, which defines the same string.

There are two ways to use the Auto-device:
1. Directly indicate the device by "AUTO" or an empty string:
@snippet snippets/AUTO0.cpp part0

2. Use the Auto-device configuration:
@snippet snippets/AUTO1.cpp part1

Both methods allow limiting the list of device candidates for the AUTO plugin.

> **NOTE**: The Inference Engine lets you use "GPU" as an alias for "GPU.0" in function calls.

The Auto-device plugin supports querying device optimization capabilities as a metric.

| Parameter name | Parameter values |
| :--- | :--- |
| "OPTIMIZATION_CAPABILITIES" | Auto-Device capabilities |

### Enumerating Devices and Selection Logic

The Inference Engine now features a dedicated API to enumerate devices and their capabilities.
See the [Hello Query Device C++ Sample](../../../samples/cpp/hello_query_device/README.md).
This is example output from the sample (truncated to device names only):

```sh
./hello_query_device
Available devices:
    Device: CPU
...
    Device: GPU.0
...
    Device: GPU.1
```

### Default Auto-Device Selection Logic

With the 2021.4 release, the Auto-Device selects the most suitable device using the following default logic:

1. Check if dGPU (discrete), iGPU (integrated), and CPU devices are available.
2. Get the precision of the input model, such as FP32.
3. According to the priority of dGPU, iGPU, and CPU (in this order), if the device supports the precision of the input network, select it as the most suitable device.

For example, CPU, dGPU and iGPU can support the following precision and optimization capabilities:

| Device | OPTIMIZATION_CAPABILITIES |
| :--- | :--- |
| CPU | WINOGRAD FP32 FP16 INT8 BIN |
| dGPU | FP32 BIN BATCHED_BLOB FP16 INT8 |
| iGPU | FP32 BIN BATCHED_BLOB FP16 INT8 |

* When the application uses the Auto-device to run FP16 IR on a system with CPU, dGPU and iGPU, the Auto-device will offload this workload to dGPU.
* When the application uses the Auto-device to run FP16 IR on a system with CPU and iGPU, the Auto-device will offload this workload to iGPU.
* When the application uses the Auto-device to run WINOGRAD-enabled IR on a system with CPU, dGPU and iGPU, the Auto-device will offload this workload to CPU.

In cases when loading the network to dGPU or iGPU fails, CPU is the fall-back choice.

According to the Auto-device selection logic from the previous section, tell the Inference Engine to use the most suitable device from the available devices as follows:

@snippet snippets/AUTO2.cpp part2

You can also use the Auto-device plugin to choose a device from a limited choice of devices, in this example CPU and GPU:

@snippet snippets/AUTO3.cpp part3

### Configuring the Individual Devices and Creating the Auto-Device on Top

It is possible to configure each individual device as usual and create the "AUTO" device on top:

@snippet snippets/AUTO4.cpp part4

Alternatively, you can combine all the individual device settings into a single config and load it, allowing the Auto-device plugin to parse and apply the settings to the right devices. See the code example here:

@snippet snippets/AUTO5.cpp part5

### Using the Auto-Device with OpenVINO Samples and Benchmark App

Note that every OpenVINO sample or application that supports the "-d" (which stands for "device") command-line option transparently accepts the Auto-device. The Benchmark Application is the best example of the optimal usage of the Auto-device. You do not need to set the number of requests and CPU threads, as the application provides optimal out-of-the-box performance. Below is an example command line for evaluating AUTO performance with the Benchmark Application:

@sphinxdirective
.. tab:: Package, Docker, open-source installation

   .. code-block:: sh

      ./benchmark_app.py -d AUTO -m <model>

.. tab:: pip installation

   .. code-block:: sh

      benchmark_app -d AUTO -m <model>

@endsphinxdirective

You can also use the Auto-device with a limited device choice:

@sphinxdirective
.. tab:: Package, Docker, open-source installation

   .. code-block:: sh

      ./benchmark_app.py -d AUTO:CPU,GPU -m <model>

.. tab:: pip installation

   .. code-block:: sh

      benchmark_app -d AUTO:CPU,GPU -m <model>

@endsphinxdirective

**NOTES:**
* The default number of CPU streams is 1 if using `-d AUTO`.
* You can use the FP16 IR to work with the Auto-device.
* No demos are yet fully optimized for the Auto-device, that is, to select the most suitable device, use GPU streams/throttling, and so on.
## Auto-Device Plugin Execution (Python)

@sphinxdirective
.. raw:: html

   <div id="switcher-python" class="switcher-anchor">Python</div>
@endsphinxdirective

The AUTO device is a new, special "virtual" or "proxy" device in the OpenVINO™ toolkit.

Use "AUTO" as the device name to delegate the selection of an actual accelerator to OpenVINO. The Auto-device plugin internally recognizes and selects devices from among CPU, integrated GPU, and discrete Intel GPUs (when available) depending on the device capabilities and the characteristics of CNN models (for example, precision). The Auto-device then assigns inference requests to the selected device.

From the application's point of view, this is just another device that handles all accelerators in the full system.

With the 2021.4 release, Auto-device setup is done in three major steps:

1. Configure each device as usual (for example, via the conventional [IECore.set_config](https://docs.openvino.ai/latest/ie_python_api/classie__api_1_1IECore.html#a2c738cee90fca27146e629825c039a05) method).
2. Load a network to the Auto-device plugin. This is the only change needed in your application.
3. As with any other executable network resulting from [IECore.load_network](https://docs.openvino.ai/latest/ie_python_api/classie__api_1_1IECore.html#ac9a2e043d14ccfa9c6bbf626cfd69fcc), create as many requests as needed to saturate the devices.

These steps are covered below in detail.

### Defining and Configuring the Auto-Device Plugin

Following the OpenVINO convention for device names, the Auto-device uses the label "AUTO". The only configuration option for the Auto-device is a limited device list:

| Parameter name | Parameter values | Default | Description |
| -------------- | ---------------- | ------- | ----------- |
| "AUTO_DEVICE_LIST" | comma-separated device names with no spaces | N/A | Device candidate list to be selected |

There are two ways to use the Auto-device plugin:

1. Directly indicate the device by "AUTO" or an empty string.
2. Use the Auto-device configuration.

Both methods allow limiting the list of device candidates for the AUTO plugin.

```python
from openvino.inference_engine import IECore

ie = IECore()
# Read a network in IR or ONNX format
net = ie.read_network(model=path_to_model)

# Load a network on the "AUTO" device
exec_net = ie.load_network(network=net, device_name="AUTO")

# Optionally specify the list of device candidates for the AUTO plugin
# The following two lines are equivalent
exec_net = ie.load_network(network=net, device_name="AUTO:CPU,GPU")
exec_net = ie.load_network(network=net, device_name="AUTO",
                           config={"AUTO_DEVICE_LIST": "CPU,GPU"})
```

The Auto-device plugin supports querying device optimization capabilities as a metric.

| Parameter name | Parameter values |
| --- | --- |
| "OPTIMIZATION_CAPABILITIES" | Auto-Device capabilities |

### Enumerating Devices and Selection Logic

The Inference Engine now features a dedicated API to enumerate devices and their capabilities. See the [Hello Query Device Python Sample](../../../inference_engine/ie_bridges/python/sample_hello_query_device_README.html) for code.

This is example output from the sample (truncated to device names only):

```sh
./hello_query_device

Available devices:
    Device: CPU
...
    Device: GPU.0
...
    Device: GPU.1
```
### Default Auto-Device Selection Logic

With the 2021.4 release, the Auto-Device selects the most suitable device using the following default logic:

1. Check if dGPU (discrete), iGPU (integrated), and CPU devices are available.
2. Get the precision of the input model, such as FP32.
3. According to the priority of dGPU, iGPU, and CPU (in this order), if the device supports the precision of the input network, select it as the most suitable device.

For example, CPU, dGPU and iGPU can support the following precision and optimization capabilities:

| Device | OPTIMIZATION_CAPABILITIES |
| --- | --- |
| CPU | WINOGRAD FP32 FP16 INT8 BIN |
| dGPU | FP32 BIN BATCHED_BLOB FP16 INT8 |
| iGPU | FP32 BIN BATCHED_BLOB FP16 INT8 |

* When the application uses the Auto-device to run FP16 IR on a system with CPU, dGPU and iGPU, the Auto-device will offload this workload to dGPU.
* When the application uses the Auto-device to run FP16 IR on a system with CPU and iGPU, the Auto-device will offload this workload to iGPU.
* When the application uses the Auto-device to run WINOGRAD-enabled IR on a system with CPU, dGPU and iGPU, the Auto-device will offload this workload to CPU.

In cases when loading the network to dGPU or iGPU fails, CPU is the fall-back choice.

To show the capabilities for a specific device, query the OPTIMIZATION_CAPABILITIES metric:

```python
from openvino.inference_engine import IECore

ie = IECore()
device = "CPU"  # or, for example, "GPU.0", "GPU.1", "AUTO"
ie.get_metric(device_name=device,
              metric_name="OPTIMIZATION_CAPABILITIES")
```
### Configuring the Individual Devices and Creating the Auto-Device on Top

It is possible to configure each individual device as usual and create the "AUTO" device on top:

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model=path_to_model)

cpu_config = {}
gpu_config = {}

ie.set_config(config=cpu_config, device_name="CPU")
ie.set_config(config=gpu_config, device_name="GPU")

# Load the network to the AUTO device
exec_net = ie.load_network(network=net, device_name="AUTO")
```

Alternatively, you can combine all the individual device settings into a single config and load it, allowing the Auto-device plugin to parse and apply the settings to the right devices. See the code example here:

```python
from openvino.inference_engine import IECore

# Init the Inference Engine Core
ie = IECore()

# Read a network in IR or ONNX format
net = ie.read_network(model=path_to_model)

full_config = {}

# Load the network to the AUTO device
exec_net = ie.load_network(network=net, device_name="AUTO", config=full_config)
```
### Using the Auto-Device with OpenVINO Samples and Benchmark App

Note that every OpenVINO sample or application that supports the "-d" (which stands for "device") command-line option transparently accepts the Auto-device. The Benchmark Application is the best example of the optimal usage of the Auto-device. You do not need to set the number of requests and CPU threads, as the application provides optimal out-of-the-box performance. Below is an example command line for evaluating AUTO performance with the Benchmark Application:

@sphinxdirective
.. tab:: Package, Docker, open-source installation

   .. code-block:: sh

      ./benchmark_app.py -d AUTO -m <model>

.. tab:: pip installation

   .. code-block:: sh

      benchmark_app -d AUTO -m <model>

@endsphinxdirective

You can also use the Auto-device with a limited device choice:

@sphinxdirective
.. tab:: Package, Docker, open-source installation

   .. code-block:: sh

      ./benchmark_app.py -d AUTO:CPU,GPU -m <model>

.. tab:: pip installation

   .. code-block:: sh

      benchmark_app -d AUTO:CPU,GPU -m <model>

@endsphinxdirective

> **NOTE**: If you installed OpenVINO with pip, use `benchmark_app -d AUTO:CPU,GPU -m <model>`.
@@ -1,139 +0,0 @@

# CPU Plugin {#openvino_docs_IE_DG_supported_plugins_CPU}

## Introducing the CPU Plugin

The CPU plugin was developed to achieve high performance of neural networks on CPU, using the Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN).

Currently, the CPU plugin uses Intel® Threading Building Blocks (Intel® TBB) to parallelize calculations. Please refer to the [Optimization Guide](../../optimization_guide/dldt_optimization_guide.md) for associated performance considerations.

The set of supported layers can be expanded with [the Extensibility mechanism](../Extensibility_DG/Intro.md).

## Supported Platforms

The OpenVINO™ toolkit, including the CPU plugin, is officially supported and validated on the following platforms:

| Host | OS (64-bit) |
| :--- | :--- |
| Development | Ubuntu* 18.04 or 20.04, CentOS* 7.6, MS Windows* 10, macOS* 10.15 |
| Target | Ubuntu* 18.04 or 20.04, CentOS* 7.6, MS Windows* 10, macOS* 10.15 |

The CPU plugin supports inference on Intel® Xeon® processors with Intel® Advanced Vector Extensions 2 (Intel® AVX2), Intel® Advanced Vector Extensions 512 (Intel® AVX-512), and AVX512_BF16, Intel® Core™ processors with Intel® AVX2, and Intel Atom® processors with Intel® Streaming SIMD Extensions (Intel® SSE).

You can use the `-pc` flag for samples to find out which configuration is used by a layer.
This flag shows execution statistics that you can use to get information about the layer name, layer type, execution status, execution time, and the type of the execution primitive.

## Internal CPU Plugin Optimizations

The CPU plugin supports several graph optimization algorithms, such as fusing or removing layers.
Refer to the sections below for details.

> **NOTE**: For layer descriptions, see the [IR Notation Reference](../../ops/opset.md).

### Lowering Inference Precision

The CPU plugin follows a default optimization approach: inference is performed at a lower precision if doing so reaches better performance on a given platform within an acceptable range of accuracy.

> **NOTE**: For details, see [Using Bfloat16 Inference](../Bfloat16Inference.md).

### Fusing Convolution and Simple Layers

A convolution layer is merged with any of the simple layers listed below:
- Activation: ReLU, ELU, Sigmoid, Clamp
- Depthwise: ScaleShift, PReLU
- FakeQuantize

> **NOTE**: You can have any number and order of simple layers.

A combination of a convolution layer and simple layers results in a single fused layer called *Convolution*:

![conv_simple_01]

### Fusing Pooling and FakeQuantize Layers

A combination of Pooling and FakeQuantize layers results in a single fused layer called *Pooling*:

![pooling_fakequant_01]

### Fusing FullyConnected and Activation Layers

A combination of FullyConnected and Activation layers results in a single fused layer called *FullyConnected*:

![fullyconnected_activation_01]

### Fusing Convolution and Depthwise Convolution Layers Grouped with Simple Layers

> **NOTE**: This pattern is possible only on CPUs with support of Streaming SIMD Extensions 4.2 (SSE 4.2) and the Intel AVX2 Instruction Set Architecture (ISA).

A combination of a group of a Convolution (or Binary Convolution) layer and simple layers and a group of a Depthwise Convolution layer and simple layers results in a single layer called *Convolution* (or *Binary Convolution*):

> **NOTE**: Depthwise convolution layers should have the same values for the `group`, input channels, and output channels parameters.

![conv_depth_01]

### Fusing Convolution and Sum Layers

A combination of convolution, simple, and Eltwise layers with the sum operation results in a single layer called *Convolution*:

![conv_sum_relu_01]

### Fusing a Group of Convolutions

If a topology contains the following pipeline, the CPU plugin merges the split, convolution, and concatenation layers into a single convolution layer with the group parameter:

![group_convolutions_01]

> **NOTE**: Parameters of the convolution layers must coincide.

### Removing a Power Layer

The CPU plugin removes a Power layer from a topology if it has the following parameters:
- <b>power</b> = 1
- <b>scale</b> = 1
- <b>offset</b> = 0

With these parameters, the layer computes `(scale * x + offset) ^ power = x`, that is, an identity, so it can be removed safely.

## Supported Configuration Parameters

The plugin supports the configuration parameters listed below.
All parameters must be set with the `InferenceEngine::Core::LoadNetwork()` method.
When specifying key values as raw strings (that is, when using the Python API), omit the `KEY_` prefix.
Refer to the OpenVINO samples for usage examples: [Benchmark App](../../../samples/cpp/benchmark_app/README.md).

These are general options, also supported by other plugins:

| Parameter name | Parameter values | Default | Description |
| :--- | :--- | :--- | :--- |
| KEY_EXCLUSIVE_ASYNC_REQUESTS | YES/NO | NO | Forces async requests (also from different executable networks) to execute serially. This prevents potential oversubscription |
| KEY_PERF_COUNT | YES/NO | NO | Enables gathering performance counters |

CPU-specific settings:

| Parameter name | Parameter values | Default | Description |
| :--- | :--- | :--- | :--- |
| KEY_CPU_THREADS_NUM | positive integer values | 0 | Specifies the number of threads that the CPU plugin should use for inference. Zero (default) means using all (logical) cores |
| KEY_CPU_BIND_THREAD | YES/NUMA/NO | YES | Binds inference threads to CPU cores. 'YES' (default) maps threads to cores, which works best for static/synthetic scenarios like benchmarks. The 'NUMA' binding is more relaxed, binding inference threads only to NUMA nodes and leaving further scheduling to specific cores to the OS. This option might perform better in real-life/contended scenarios. Note that for latency-oriented cases (when the number of streams is less than or equal to the number of NUMA nodes, see below), both the YES and NUMA options limit the number of inference threads to the number of hardware cores (ignoring hyper-threading) on multi-socket machines. |
| KEY_CPU_THROUGHPUT_STREAMS | KEY_CPU_THROUGHPUT_NUMA, KEY_CPU_THROUGHPUT_AUTO, or positive integer values | 1 | Specifies the number of CPU "execution" streams for the throughput mode. This is an upper bound for the number of inference requests that can be executed simultaneously. All available CPU cores are evenly distributed between the streams. The default value is 1, which implies latency-oriented behavior for a single NUMA-node machine, with all available cores processing requests one by one. On a multi-socket (multiple NUMA nodes) machine, the best latency numbers are usually achieved with a number of streams matching the number of NUMA nodes. <br>KEY_CPU_THROUGHPUT_NUMA creates as many streams as needed to accommodate NUMA and avoid associated penalties.<br>KEY_CPU_THROUGHPUT_AUTO creates the bare minimum of streams needed to improve performance; this is the most portable option if you do not know how many cores your target machine has (and what the optimal number of streams would be). Note that your application should provide enough parallel slack (for example, run many inference requests) to leverage the throughput mode. <br>A non-negative integer value creates the requested number of streams. If the number of streams is 0, no internal streams are created and user threads are interpreted as stream master threads. |
| KEY_ENFORCE_BF16 | YES/NO | YES | Executes in bfloat16 precision whenever possible. This option lets the plugin downscale the precision where it sees performance benefits from bfloat16 execution. It does not guarantee the accuracy of the network; you need to verify the accuracy in this mode separately, based on performance and accuracy results. It is your decision whether to use this option or not. |

> **NOTE**: To disable all internal threading, use the following set of configuration parameters: `KEY_CPU_THROUGHPUT_STREAMS=0`, `KEY_CPU_THREADS_NUM=1`, `KEY_CPU_BIND_THREAD=NO`. A minimal Python sketch of this configuration is shown below.
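
A minimal Python sketch of that configuration (raw string keys drop the `KEY_` prefix, as noted above; `path_to_xml_file` is a placeholder):

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model=path_to_xml_file)

# Disable all internal threading in the CPU plugin:
# no internal streams, a single inference thread, no core binding.
config = {
    "CPU_THROUGHPUT_STREAMS": "0",
    "CPU_THREADS_NUM": "1",
    "CPU_BIND_THREAD": "NO",
}
exec_net = ie.load_network(network=net, device_name="CPU", config=config)
```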
## See Also
* [Supported Devices](Supported_Devices.md)

[mkldnn_group_conv]: ../img/mkldnn_group_conv.png
[mkldnn_conv_sum]: ../img/mkldnn_conv_sum.png
[mkldnn_conv_sum_result]: ../img/mkldnn_conv_sum_result.png
[conv_simple_01]: ../img/conv_simple_01.png
[pooling_fakequant_01]: ../img/pooling_fakequant_01.png
[fullyconnected_activation_01]: ../img/fullyconnected_activation_01.png
[conv_depth_01]: ../img/conv_depth_01.png
[group_convolutions_01]: ../img/group_convolutions_01.png
[conv_sum_relu_01]: ../img/conv_sum_relu_01.png

@@ -1,35 +0,0 @@

# Device Plugin Support {#openvino_docs_IE_DG_Device_Plugins}

@sphinxdirective

.. toctree::
   :maxdepth: 1
   :hidden:

   openvino_docs_IE_DG_InferenceEngine_QueryAPI
   openvino_docs_IE_DG_supported_plugins_CPU
   openvino_docs_IE_DG_supported_plugins_GPU
   openvino_docs_IE_DG_supported_plugins_VPU
   openvino_docs_IE_DG_supported_plugins_GNA
   openvino_docs_IE_DG_supported_plugins_AUTO
   openvino_docs_IE_DG_supported_plugins_HETERO
   openvino_docs_IE_DG_supported_plugins_MULTI

@endsphinxdirective

Inference Engine uses a plugin architecture. An Inference Engine plugin is a software component that contains a complete implementation for inference on a certain Intel® hardware device: CPU, GPU, VPU, GNA, and so on. Each plugin implements the unified API and provides additional hardware-specific APIs.

The Inference Engine provides capabilities to infer deep learning models on the following device types with corresponding plugins:

| Plugin | Device types |
|------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------|
|[GPU plugin](GPU.md) |Intel® Processor Graphics, including Intel® HD Graphics and Intel® Iris® Graphics |
|[CPU plugin](CPU.md) |Intel® Xeon® with Intel® Advanced Vector Extensions 2 (Intel® AVX2), Intel® Advanced Vector Extensions 512 (Intel® AVX-512), and AVX512_BF16, Intel® Core™ Processors with Intel® AVX2, Intel® Atom® Processors with Intel® Streaming SIMD Extensions (Intel® SSE) |
|[VPU plugins](VPU.md) (available in the Intel® Distribution of OpenVINO™ toolkit) |Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X, Intel® Vision Accelerator Design with Intel® Movidius™ VPUs |
|[GNA plugin](GNA.md) (available in the Intel® Distribution of OpenVINO™ toolkit) |Intel® Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel® Pentium® Silver J5005 Processor, Intel® Pentium® Silver N5000 Processor, Intel® Celeron® J4005 Processor, Intel® Celeron® J4105 Processor, Intel® Celeron® Processor N4100, Intel® Celeron® Processor N4000, Intel® Core™ i3-8121U Processor, Intel® Core™ i7-1065G7 Processor, Intel® Core™ i7-1060G7 Processor, Intel® Core™ i5-1035G4 Processor, Intel® Core™ i5-1035G7 Processor, Intel® Core™ i5-1035G1 Processor, Intel® Core™ i5-1030G7 Processor, Intel® Core™ i5-1030G4 Processor, Intel® Core™ i3-1005G1 Processor, Intel® Core™ i3-1000G1 Processor, Intel® Core™ i3-1000G4 Processor|
|[Multi-Device plugin](MULTI.md) |Multi-Device plugin enables simultaneous inference of the same network on several Intel® devices in parallel |
|[Auto-Device plugin](AUTO.md) |Auto-Device plugin enables selecting an Intel® device for inference automatically |
|[Heterogeneous plugin](HETERO.md) |Heterogeneous plugin enables automatic inference splitting between several Intel® devices (for example, if a device doesn't [support certain layers](#supported-layers)). |
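
As an aside, a minimal C++ sketch (an assumption, not part of the original page) showing how the device name passed to `LoadNetwork` selects a plugin; the model path is a placeholder:

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // hypothetical model path
    // The device name selects the plugin; composite names such as
    // "HETERO:GNA,CPU" or "MULTI:GPU,CPU" address the Heterogeneous
    // and Multi-Device plugins, respectively.
    auto onCpu = core.LoadNetwork(network, "CPU");
    auto onGpu = core.LoadNetwork(network, "GPU");
    return 0;
}
```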

Devices similar to the ones we have used for benchmarking can be accessed using [Intel® DevCloud for the Edge](https://devcloud.intel.com/edge/), a remote development environment with access to Intel® hardware and the latest versions of the Intel® Distribution of the OpenVINO™ Toolkit. [Learn more](https://devcloud.intel.com/edge/get_started/devcloud/) or [Register here](https://inteliot.force.com/DevcloudForEdge/s/).

@@ -1,496 +0,0 @@

# GNA Plugin {#openvino_docs_IE_DG_supported_plugins_GNA}
## Introducing the GNA Plugin

The Intel® Gaussian & Neural Accelerator is a low-power neural coprocessor for continuous inference at the edge.

Intel® GNA is not intended to replace typical inference devices such as the
CPU, graphics processing unit (GPU), or vision processing unit (VPU). It is designed for offloading
continuous inference workloads including but not limited to noise reduction or speech recognition
to save power and free CPU resources.

The GNA plugin provides a way to run inference on Intel® GNA, as well as in the software execution mode on CPU.

## Devices with Intel® GNA

Devices with Intel® GNA support:

* [Intel® Speech Enabling Developer Kit](https://www.intel.com/content/www/us/en/support/articles/000026156/boards-and-kits/smart-home.html)

* [Amazon Alexa\* Premium Far-Field Developer Kit](https://developer.amazon.com/en-US/alexa/alexa-voice-service/dev-kits/amazon-premium-voice)

* [Intel® Pentium® Silver Processors N5xxx, J5xxx and Intel® Celeron® Processors N4xxx, J4xxx (formerly codenamed Gemini Lake)](https://ark.intel.com/content/www/us/en/ark/products/codename/83915/gemini-lake.html):
  - Intel® Pentium® Silver J5005 Processor
  - Intel® Pentium® Silver N5000 Processor
  - Intel® Celeron® J4005 Processor
  - Intel® Celeron® J4105 Processor
  - Intel® Celeron® J4125 Processor
  - Intel® Celeron® Processor N4100
  - Intel® Celeron® Processor N4000

* [Intel® Pentium® Processors N6xxx, J6xxx, Intel® Celeron® Processors N6xxx, J6xxx and Intel Atom® x6xxxxx (formerly codenamed Elkhart Lake)](https://ark.intel.com/content/www/us/en/ark/products/codename/128825/products-formerly-elkhart-lake.html)

* [Intel® Core™ Processors (formerly codenamed Cannon Lake)](https://ark.intel.com/content/www/us/en/ark/products/136863/intel-core-i3-8121u-processor-4m-cache-up-to-3-20-ghz.html)

* [10th Generation Intel® Core™ Processors (formerly codenamed Ice Lake)](https://ark.intel.com/content/www/us/en/ark/products/codename/74979/ice-lake.html)

* [11th Generation Intel® Core™ Processors (formerly codenamed Tiger Lake)](https://ark.intel.com/content/www/us/en/ark/products/codename/88759/tiger-lake.html)

* [12th Generation Intel® Core™ Processors (formerly codenamed Alder Lake)](https://ark.intel.com/content/www/us/en/ark/products/codename/147470/products-formerly-alder-lake.html)

> **NOTE**: On platforms where Intel® GNA is not enabled in the BIOS, the driver cannot be installed, so the GNA plugin uses the software emulation mode only.

## Intel® GNA Generational Differences

The first and second versions of Intel® GNA found in 10th and 11th generation Intel® Core™ Processors may be considered functionally equivalent. Intel® GNA 2.0 provided a performance improvement with respect to Intel® GNA 1.0. Starting with 12th Generation Intel® Core™ Processors (formerly codenamed Alder Lake), support for Intel® GNA 3.0 features is being added.

In the rest of this documentation, "GNA 2.0" refers to Intel® GNA hardware delivered on 10th and 11th generation Intel® Core™ processors, and "GNA 3.0" refers to GNA hardware delivered on 12th generation Intel® Core™ processors.

Initially, a limited subset of Intel® GNA 3.0 features is added to the previous feature set, including the following:

* **2D VALID Convolution With Small 2D Kernels:** Two-dimensional convolutions with the following kernel dimensions [H,W] are supported: [1,1], [2,2], [3,3], [2,1], [3,1], [4,1], [5,1], [6,1], [7,1], [1,2], or [1,3]. Input tensor dimensions are limited to [1,8,16,16] <= [N,C,H,W] <= [1,120,384,240]. Up to 384 channels C may be used with a subset of kernel sizes (see the tables below). Up to 256 kernels (output channels) are supported. Pooling is limited to pool shapes of [1,1], [2,2], or [3,3]. Not all combinations of kernel shape and input tensor shape are supported (see the tables below for exact limitations).

The tables below show that the exact limitation on the input tensor width W depends on the number of input channels C (indicated as Ci below) and the kernel shape. There is much more freedom to choose the input tensor height and number of output channels.

## Initially Supported Subset of Intel® GNA 2D Convolutions

The following tables provide a more explicit representation of the Intel® GNA 3.0 2D convolution operations initially supported. The limits depend strongly on the number of input tensor channels (Ci) and the input tensor width (W). Other factors are kernel height (KH), kernel width (KW), pool height (PH), pool width (PW), horizontal pool step (SH), and vertical pool step (SW). For example, the first table shows that for a 3x3 kernel with max pooling, only square pools are supported, and W is limited to 87 when there are 64 input channels.

**Table of Maximum Input Tensor Widths (W) vs. Rest of Parameters** (Input and Kernel Precision: 2 bytes)

|KH|KW|PH|PW|SH|SW|H|W<br>Ci=8<br>Co=256|W<br>Ci=16<br>Co=256|W<br>Ci=32<br>Co=256|W<br>Ci=64<br>Co=256|W<br>Ci=128<br>Co=256|W<br>Ci=256<br>Co=256|W<br>Ci=384<br>Co=256|
|:--|:--|:--|:--|:--|:--|:--|:--|:--|:--|:--|:--|:--|:--|
|1|1|1|1|1|1|128|240|240|240|240|240|240|170|
|1|1|1|1|1|1|256|240|240|240|240|240|128|85|
|1|1|1|1|1|1|384|240|240|240|240|170|85|56|
|1|2|1|1|1|1|128|240|240|240|240| | | |
|1|2|1|1|1|1|256|240|240|240|240| | | |
|1|2|1|1|1|1|384|240|240|240|240| | | |
|1|3|1|1|1|1|128|240|240|240|240| | | |
|1|3|1|1|1|1|256|240|240|240|240| | | |
|1|3|1|1|1|1|384|240|240|240|240| | | |
|2|1|1|1|1|1|128|192|192|192|192|192|192|128|
|2|1|1|1|1|1|256|192|192|192|192|192|128|85|
|2|1|1|1|1|1|384|192|192|192|192|170|85|56|
|2|2|1|1|1|1|128|193|193|193|193| | | |
|2|2|1|1|1|1|256|193|193|193|193| | | |
|2|2|1|1|1|1|384|193|193|193|193| | | |
|2|2|2|2|1|1|128|193|193|192|179| | | |
|2|2|2|2|1|1|256|193|193|192|179| | | |
|2|2|2|2|1|1|384|193|193|192|179| | | |
|2|2|2|2|1|2|128|193|193|192|179| | | |
|2|2|2|2|1|2|256|193|193|192|179| | | |
|2|2|2|2|1|2|384|193|193|192|179| | | |
|2|2|2|2|2|1|128|193|193|192|179| | | |
|2|2|2|2|2|1|256|193|193|192|179| | | |
|2|2|2|2|2|1|384|193|193|192|179| | | |
|2|2|2|2|2|2|128|193|193|192|179| | | |
|2|2|2|2|2|2|256|193|193|192|179| | | |
|2|2|2|2|2|2|384|193|193|192|179| | | |
|3|1|1|1|1|1|128|128|128|128|128|128|85|42|
|3|1|1|1|1|1|256|128|128|128|128|128|85|42|
|3|1|1|1|1|1|384|128|128|128|128|128|85|42|
|3|3|1|1|1|1|128|130|130|130|87| | | |
|3|3|1|1|1|1|256|130|130|130|87| | | |
|3|3|1|1|1|1|384|130|130|130|87| | | |
|3|3|2|2|1|1|128|130|130|126|87| | | |
|3|3|2|2|1|1|256|130|130|126|87| | | |
|3|3|2|2|1|1|384|130|130|126|87| | | |
|3|3|2|2|1|2|128|130|130|126|87| | | |
|3|3|2|2|1|2|256|130|130|126|87| | | |
|3|3|2|2|1|2|384|130|130|126|87| | | |
|3|3|2|2|2|1|128|130|130|126|87| | | |
|3|3|2|2|2|1|256|130|130|126|87| | | |
|3|3|2|2|2|1|384|130|130|126|87| | | |
|3|3|2|2|2|2|128|130|130|126|87| | | |
|3|3|2|2|2|2|256|130|130|126|87| | | |
|3|3|2|2|2|2|384|130|130|126|87| | | |
|3|3|3|3|1|1|128|130|128|118|87| | | |
|3|3|3|3|1|1|256|130|128|118|87| | | |
|3|3|3|3|1|1|384|130|128|118|87| | | |
|3|3|3|3|1|2|128|130|128|118|87| | | |
|3|3|3|3|1|2|256|130|128|118|87| | | |
|3|3|3|3|1|2|384|130|128|118|87| | | |
|3|3|3|3|1|3|128|130|128|118|87| | | |
|3|3|3|3|1|3|256|130|128|118|87| | | |
|3|3|3|3|1|3|384|130|128|118|87| | | |
|3|3|3|3|2|1|128|130|128|118|87| | | |
|3|3|3|3|2|1|256|130|128|118|87| | | |
|3|3|3|3|2|1|384|130|128|118|87| | | |
|3|3|3|3|2|2|128|130|128|118|87| | | |
|3|3|3|3|2|2|256|130|128|118|87| | | |
|3|3|3|3|2|2|384|130|128|118|87| | | |
|3|3|3|3|2|3|128|130|128|118|87| | | |
|3|3|3|3|2|3|256|130|128|118|87| | | |
|3|3|3|3|2|3|384|130|128|118|87| | | |
|3|3|3|3|3|1|128|130|128|118|87| | | |
|3|3|3|3|3|1|256|130|128|118|87| | | |
|3|3|3|3|3|1|384|130|128|118|87| | | |
|3|3|3|3|3|2|128|130|128|118|87| | | |
|3|3|3|3|3|2|256|130|128|118|87| | | |
|3|3|3|3|3|2|384|130|128|118|87| | | |
|3|3|3|3|3|3|128|130|128|118|87| | | |
|3|3|3|3|3|3|256|130|128|118|87| | | |
|3|3|3|3|3|3|384|130|128|118|87| | | |
|4|1|1|1|1|1|128|96|96|96|96|96|64|32|
|4|1|1|1|1|1|256|96|96|96|96|96|64|32|
|4|1|1|1|1|1|384|96|96|96|96|96|64|32|
|5|1|1|1|1|1|128|76|76|76|76|51|25| |
|5|1|1|1|1|1|256|76|76|76|76|51|25| |
|5|1|1|1|1|1|384|76|76|76|76|51|25| |
|6|1|1|1|1|1|128|64|64|64|64|42|21| |
|6|1|1|1|1|1|256|64|64|64|64|42|21| |
|6|1|1|1|1|1|384|64|64|64|64|42|21| |
|7|1|1|1|1|1|128|54|54|54|54|36| | |
|7|1|1|1|1|1|256|54|54|54|54|36| | |
|7|1|1|1|1|1|384|54|54|54|54|36| | |

**Table of Maximum Input Tensor Widths (W) vs. Rest of Parameters** (Input and Kernel Precision: 1 byte)

|KH|KW|PH|PW|SH|SW|H|W<br>Ci=8<br>Co=256|W<br>Ci=16<br>Co=256|W<br>Ci=32<br>Co=256|W<br>Ci=64<br>Co=256|W<br>Ci=128<br>Co=256|W<br>Ci=256<br>Co=256|W<br>Ci=384<br>Co=256|
|:--|:--|:--|:--|:--|:--|:--|:--|:--|:--|:--|:--|:--|:--|
|1|1|1|1|1|1|128|240|240|240|240|240|240|240|
|1|1|1|1|1|1|256|240|240|240|240|240|240|170|
|1|1|1|1|1|1|384|240|240|240|240|240|170|113|
|1|2|1|1|1|1|128|240|240|240|240|240|240|240|
|1|2|1|1|1|1|256|240|240|240|240|240|240|170|
|1|2|1|1|1|1|384|240|240|240|240|240|170|113|
|1|3|1|1|1|1|128|240|240|240|240|240| | |
|1|3|1|1|1|1|256|240|240|240|240|240| | |
|1|3|1|1|1|1|384|240|240|240|240|240| | |
|2|1|1|1|1|1|128|192|192|192|192|192|192|192|
|2|1|1|1|1|1|256|192|192|192|192|192|192|170|
|2|1|1|1|1|1|384|192|192|192|192|192|170|113|
|2|2|1|1|1|1|128|193|193|193|193|193|193|129|
|2|2|1|1|1|1|256|193|193|193|193|193|193|129|
|2|2|1|1|1|1|384|193|193|193|193|193|170|113|
|3|1|1|1|1|1|128|128|128|128|128|128|128|85|
|3|1|1|1|1|1|256|128|128|128|128|128|128|85|
|3|1|1|1|1|1|384|128|128|128|128|128|128|85|
|3|3|1|1|1|1|128|130|130|130|130|87| | |
|3|3|1|1|1|1|256|130|130|130|130|87| | |
|3|3|1|1|1|1|384|130|130|130|130|87| | |
|4|1|1|1|1|1|128|96|96|96|96|96|96|64|
|4|1|1|1|1|1|256|96|96|96|96|96|96|64|
|4|1|1|1|1|1|384|96|96|96|96|96|96|64|
|5|1|1|1|1|1|128|76|76|76|76|76|51|51|
|5|1|1|1|1|1|256|76|76|76|76|76|51|51|
|5|1|1|1|1|1|384|76|76|76|76|76|51|51|
|6|1|1|1|1|1|128|64|64|64|64|64|42|21|
|6|1|1|1|1|1|256|64|64|64|64|64|42|21|
|6|1|1|1|1|1|384|64|64|64|64|64|42|21|
|7|1|1|1|1|1|128|54|54|54|54|54|36|18|
|7|1|1|1|1|1|256|54|54|54|54|54|36|18|
|7|1|1|1|1|1|384|54|54|54|54|54|36|18|

> **NOTE**: The above limitations only apply to the new hardware 2D convolution operation. When possible, the Intel® GNA plugin graph compiler flattens 2D convolutions so that the second generation Intel® GNA 1D convolution operations (without these limitations) may be used. The plugin also flattens 2D convolutions regardless of their sizes if the GNA 2.0 compilation target is selected (see below).

## Intel® GNA Forward and Backward Compatibility

In the general case, there is no guarantee that a model compiled for GNA 2.0 will run on GNA 3.0, or vice versa.

However, in most cases, networks compiled for GNA 2.0 will run as expected on GNA 3.0, although the performance may be worse compared to the case when a network is compiled specifically for the latter. The exception is networks with convolutions with a number of filters greater than 8192 (see the <a href="#models-and-layers-limitations">Models and Layers Limitations</a> section).

Networks compiled for GNA 3.0 should run on GNA 2.0 with incompatible layers emulated on CPU.

You can use the `KEY_GNA_EXEC_TARGET` and `KEY_GNA_COMPILE_TARGET` options to check interoperability (see the <a href="#supported-configuration-parameters">Supported Configuration Parameters</a> section below):

@sphinxdirective
.. tab:: C++

   ``KEY_GNA_EXEC_TARGET``, ``KEY_GNA_COMPILE_TARGET``

.. tab:: Python

   ``GNA_EXEC_TARGET``, ``GNA_COMPILE_TARGET``

@endsphinxdirective
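
For orientation, a minimal C++ sketch (an assumption, not from the original page) that compiles a model for one generation while executing on the other; the key and value strings follow the parameter tables later on this page, and the model path is a placeholder:

```cpp
#include <map>
#include <string>
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // hypothetical model path
    // Execute on GNA 3.0 (or its emulation) while compiling the model
    // as if targeting GNA 2.0, to check interoperability.
    std::map<std::string, std::string> config = {
        {"GNA_EXEC_TARGET", "TARGET_3_0"},
        {"GNA_COMPILE_TARGET", "TARGET_2_0"}};
    auto executable = core.LoadNetwork(network, "GNA", config);
    return 0;
}
```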

## Drivers and Dependencies

Intel® GNA hardware requires a driver to be installed on the system.

* Linux\* OS:
  [Download Intel® GNA driver for Ubuntu Linux 18.04.3 LTS (with HWE Kernel version 5.4+)](https://storage.openvinotoolkit.org/drivers/gna/)

* Windows\* OS:
  Intel® GNA driver for Windows is available through Windows Update\*

## <a name="models-and-layers-limitations">Models and Layers Limitations</a>

Because of the specifics of the hardware architecture, Intel® GNA supports a limited set of layers, layer kinds, and their combinations.
For example, you should not expect the GNA Plugin to be able to run computer vision models, except those specifically adapted for the GNA Plugin, because the plugin does not fully support 2D convolutions.

For the list of supported layers, see the **GNA** column of the **Supported Layers** section in [Supported Devices](Supported_Devices.md).

Limitations include:

- Only 1D convolutions are natively supported.
- The number of output channels for convolutions must be a multiple of 4.
- The maximum number of filters is 65532 for GNA 2.0 and 8192 for GNA 3.0.
- Permute layer support is limited to the cases where no data reordering is needed or when reordering is happening for two dimensions, at least one of which is not greater than 8.
- Splits and concatenations are supported for continuous portions of memory (e.g., a split of 1,2,3,4 to 1,1,3,4 and 1,1,3,4 or a concat of 1,2,3,4 and 1,2,3,4 to 2,2,3,4).
- For Multiply, Add, and Subtract layers, auto broadcasting is only supported for constant inputs.

### Support for 2D Convolutions in Previous Generations of GNA Hardware

The Intel® GNA 1.0 and 2.0 hardware natively supports only 1D convolutions.

However, 2D convolutions can be mapped to 1D when a convolution kernel moves in a single direction. GNA Plugin performs such a transformation for Kaldi `nnet1` convolution. From this perspective, the Intel® GNA hardware convolution operation accepts an `NHWC` input and produces an `NHWC` output. Because OpenVINO™ only supports the `NCHW` layout, you may need to insert `Permute` layers before or after convolutions.

For example, the Kaldi model optimizer inserts such a permute after convolution for the [rm_cnn4a network](https://storage.openvinotoolkit.org/models_contrib/speech/2021.2/rm_cnn4a_smbr/). This `Permute` layer is automatically removed by the GNA Plugin, because the Intel® GNA hardware convolution layer already produces the required `NHWC` result.

## Operation Precision

Intel® GNA essentially operates in the low-precision mode, which represents a mix of 8-bit (`I8`), 16-bit (`I16`), and 32-bit (`I32`) integer computations. Outputs calculated using a reduced integer precision are different from the scores calculated using the floating point format, for example, `FP32` outputs calculated on CPU using the Inference Engine [CPU Plugin](CPU.md).

Unlike other plugins supporting low-precision execution, the GNA plugin can calculate quantization factors at the model loading time, so you can run a model without calibration through the [Post-Training Optimization Tool](@ref pot_README).
However, this mode may not provide satisfactory accuracy because the internal quantization algorithm is based on heuristics which may or may not be efficient, depending on the model and the dynamic range of input data.

Starting with the 2021.4 release of OpenVINO, GNA plugin users are encouraged to use the [POT API Usage sample for GNA](@ref pot_sample_speech_README) to get a model with quantization hints based on statistics for the provided dataset.

## <a name="execution-modes">Execution Modes</a>

@sphinxdirective
.. tab:: C++

   ============================ ==============================================================================================================================================
   Mode                         Description
   ============================ ==============================================================================================================================================
   ``KEY_GNA_AUTO``             Uses Intel® GNA if available, otherwise uses software execution mode on CPU.
   ``KEY_GNA_HW``               Uses Intel® GNA if available, otherwise raises an error.
   ``KEY_GNA_SW``               *Deprecated*. Executes the GNA-compiled graph on CPU performing calculations in the same precision as the Intel® GNA, but not in the bit-exact mode.
   ``KEY_GNA_SW_EXACT``         Executes the GNA-compiled graph on CPU performing calculations in the same precision as the Intel® GNA in the bit-exact mode.
   ``KEY_GNA_HW_WITH_SW_FBACK`` Uses Intel® GNA if available, otherwise raises an error. If the hardware queue is not empty, automatically falls back to CPU in the bit-exact mode.
   ``KEY_GNA_SW_FP32``          Executes the GNA-compiled graph on CPU but substitutes parameters and calculations from low precision to floating point (``FP32``).
   ============================ ==============================================================================================================================================

.. tab:: Python

   ======================== ==============================================================================================================================================
   Mode                     Description
   ======================== ==============================================================================================================================================
   ``GNA_AUTO``             Uses Intel® GNA if available, otherwise uses software execution mode on CPU.
   ``GNA_HW``               Uses Intel® GNA if available, otherwise raises an error.
   ``GNA_SW``               *Deprecated*. Executes the GNA-compiled graph on CPU performing calculations in the same precision as the Intel® GNA, but not in the bit-exact mode.
   ``GNA_SW_EXACT``         Executes the GNA-compiled graph on CPU performing calculations in the same precision as the Intel® GNA in the bit-exact mode.
   ``GNA_HW_WITH_SW_FBACK`` Uses Intel® GNA if available, otherwise raises an error. If the hardware queue is not empty, automatically falls back to CPU in the bit-exact mode.
   ``GNA_SW_FP32``          Executes the GNA-compiled graph on CPU but substitutes parameters and calculations from low precision to floating point (``FP32``).
   ======================== ==============================================================================================================================================

@endsphinxdirective

## <a name="supported-configuration-parameters">Supported Configuration Parameters</a>

The plugin supports the configuration parameters listed below. The parameter names correspond to their usage through API keys, such as ``GNAConfigParams::KEY_GNA_DEVICE_MODE`` or ``PluginConfigParams::KEY_PERF_COUNT`` in C++ and ``GNA_DEVICE_MODE`` or ``PERF_COUNT`` in Python.

@sphinxdirective
.. tab:: C++

   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | Parameter Name                   | Values                  | Default Value | Description                                                     |
   +==================================+=========================+===============+=================================================================+
   | ``KEY_GNA_EXEC_TARGET``          | ``TARGET_2_0``,         | *see below*   | Defines the execution target.                                   |
   |                                  | ``TARGET_3_0``          |               |                                                                 |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | ``KEY_GNA_COMPILE_TARGET``       | ``TARGET_2_0``,         | *see below*   | Defines the compilation target.                                 |
   |                                  | ``TARGET_3_0``          |               |                                                                 |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | ``KEY_GNA_COMPACT_MODE``         | ``YES``, ``NO``         | ``NO``        | Enables I/O buffers reuse to save space.                        |
   |                                  |                         |               | Makes debugging harder.                                         |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | ``KEY_GNA_SCALE_FACTOR``         | FP32 number             | 1.0           | Sets the scale factor to use for input quantization.            |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | ``KEY_GNA_DEVICE_MODE``          | ``GNA_AUTO``,           | ``GNA_AUTO``  | One of the modes described                                      |
   |                                  | ``GNA_HW``,             |               | in `Execution Modes <#execution-modes>`_.                       |
   |                                  | ``GNA_HW_WITH_SW_FBACK``|               |                                                                 |
   |                                  | ``GNA_SW_EXACT``,       |               |                                                                 |
   |                                  | ``GNA_SW_FP32``         |               |                                                                 |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | ``KEY_GNA_FIRMWARE_MODEL_IMAGE`` | ``std::string``         | ``""``        | Sets the name for the embedded model binary dump file.          |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | ``KEY_GNA_PRECISION``            | ``I16``, ``I8``         | ``I16``       | Sets the preferred integer weight resolution for quantization   |
   |                                  |                         |               | (ignored for models produced using POT).                        |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | ``KEY_PERF_COUNT``               | ``YES``, ``NO``         | ``NO``        | Turns on performance counters reporting.                        |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+

   The parameters are passed as ``std::map<std::string, std::string>`` on ``InferenceEngine::Core::LoadNetwork`` or ``InferenceEngine::Core::SetConfig``.

   Normally, you do not need to select the execution target (``KEY_GNA_EXEC_TARGET``) and compilation target (``KEY_GNA_COMPILE_TARGET``). The default value for the execution target corresponds to available hardware, or the latest hardware version supported by the plugin (i.e., GNA 3.0) if there is no GNA HW in the system. The compilation target is the same as the execution target by default. However, you may want to change the targets, for example, if you want to check how a model compiled for one generation would behave on the other generation (using the software emulation mode), or if you want to export a model for a specific version of GNA HW.

   You can change the ``KEY_GNA_DEVICE_MODE`` parameter at run time using ``InferenceEngine::ExecutableNetwork::SetConfig``, which works for any value excluding ``GNA_SW_FP32``. This enables you to switch the execution between software emulation mode and hardware execution mode after the model is loaded.

.. tab:: Python

   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | Parameter Name                   | Values                  | Default Value | Description                                                     |
   +==================================+=========================+===============+=================================================================+
   | ``GNA_EXEC_TARGET``              | ``TARGET_2_0``,         | *see below*   | Defines the execution target.                                   |
   |                                  | ``TARGET_3_0``          |               |                                                                 |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | ``GNA_COMPILE_TARGET``           | ``TARGET_2_0``,         | *see below*   | Defines the compilation target.                                 |
   |                                  | ``TARGET_3_0``          |               |                                                                 |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | ``GNA_COMPACT_MODE``             | ``YES``, ``NO``         | ``NO``        | Enables I/O buffers reuse to save space.                        |
   |                                  |                         |               | Makes debugging harder.                                         |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | ``GNA_SCALE_FACTOR``             | FP32 number             | 1.0           | Sets the scale factor to use for input quantization.            |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | ``GNA_DEVICE_MODE``              | ``GNA_AUTO``,           | ``GNA_AUTO``  | One of the modes described                                      |
   |                                  | ``GNA_HW``,             |               | in `Execution Modes <#execution-modes>`_.                       |
   |                                  | ``GNA_HW_WITH_SW_FBACK``|               |                                                                 |
   |                                  | ``GNA_SW_EXACT``,       |               |                                                                 |
   |                                  | ``GNA_SW_FP32``         |               |                                                                 |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | ``GNA_FIRMWARE_MODEL_IMAGE``     | ``string``              | ``""``        | Sets the name for the embedded model binary dump file.          |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | ``GNA_PRECISION``                | ``I16``, ``I8``         | ``I16``       | Sets the preferred integer weight resolution for quantization   |
   |                                  |                         |               | (ignored for models produced using POT).                        |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+
   | ``PERF_COUNT``                   | ``YES``, ``NO``         | ``NO``        | Turns on performance counters reporting.                        |
   +----------------------------------+-------------------------+---------------+-----------------------------------------------------------------+

   The parameters are passed as strings to `IECore.load_network <api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino.inference_engine.IECore.load_network>`_.

   Normally, you do not need to select the execution target (``GNA_EXEC_TARGET``) and compilation target (``GNA_COMPILE_TARGET``). The default value for the execution target corresponds to available hardware, or the latest hardware version supported by the plugin (i.e., GNA 3.0) if there is no GNA HW in the system. The compilation target is the same as the execution target by default. However, you may want to change the targets, for example, if you want to check how a model compiled for one generation would behave on the other generation (using the software emulation mode), or if you want to export a model for a specific version of GNA HW.

   You can change the ``GNA_DEVICE_MODE`` parameter at run time by sending a configuration dict to the `IECore.load_network <api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino.inference_engine.IECore.load_network>`_ call, which works for any value excluding ``GNA_SW_FP32``. This enables you to switch the execution between software emulation mode and hardware execution mode after the model is loaded.

@endsphinxdirective
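
As an illustration, a minimal C++ sketch (an assumption, not from the original page) passing two of the parameters above at load time; the model path and the scale factor value are placeholders:

```cpp
#include <map>
#include <string>
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // hypothetical model path
    // Use hardware if present (fall back to software execution otherwise)
    // and set an input scale factor; key/value strings per the tables above.
    std::map<std::string, std::string> config = {
        {"GNA_DEVICE_MODE", "GNA_AUTO"},
        {"GNA_SCALE_FACTOR", "2048.0"}};  // hypothetical scale factor
    auto executable = core.LoadNetwork(network, "GNA", config);
    return 0;
}
```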

## How to Interpret Performance Counters

With the following methods, you can collect performance counters that provide various performance data about execution on GNA:

@sphinxdirective
.. tab:: C++

   ``InferenceEngine::InferRequest::GetPerformanceCounts``

   The returned map stores a counter description as a key, and a counter value in the ``realTime_uSec`` field of the ``InferenceEngineProfileInfo`` structure.

.. tab:: Python

   ``openvino.inference_engine.InferRequest.get_perf_counts``

   The returned map stores a counter description as a key, and a counter value in the ``real_time`` field.

@endsphinxdirective

The current GNA implementation calculates counters for the whole utterance scoring and does not provide per-layer information. The API enables you to retrieve counter units in cycles; you can convert cycles to seconds as follows:

```
seconds = cycles / frequency
```

Refer to the table below to learn about the frequency of Intel® GNA inside a particular processor:

Processor | Frequency of Intel® GNA
---|---
Intel® Core™ processors | 400 MHz
Intel® processors formerly codenamed Elkhart Lake | 200 MHz
Intel® processors formerly codenamed Gemini Lake | 200 MHz
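
For example (with hypothetical numbers): a scoring request that reports 2,000,000 cycles on an Intel® Core™ processor, where GNA runs at 400 MHz, took `2000000 / 400000000 = 0.005` seconds, that is, 5 ms.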

Performance counters provided for the time being:

* Scoring request performance results
  * Number of total cycles spent on scoring in hardware including compute and memory stall cycles
  * Number of stall cycles spent in hardware
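
As an illustration, a minimal C++ sketch (an assumption, not from the original page) that prints the counters after an inference; `request` is an `InferRequest` on which `Infer()` has already been called:

```cpp
#include <iostream>
#include <inference_engine.hpp>

void printGnaCounters(InferenceEngine::InferRequest& request) {
    // The counter description is the map key; for GNA, the cycle count is
    // reported in the realTime_uSec field, as noted above.
    auto counters = request.GetPerformanceCounts();
    for (const auto& entry : counters) {
        std::cout << entry.first << ": " << entry.second.realTime_uSec << "\n";
    }
}
```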

## Network Batch Size

Intel® GNA plugin supports the processing of context-windowed speech frames in batches of 1-8 frames in one
input blob using the following methods:

@sphinxdirective
.. tab:: C++

   ``InferenceEngine::ICNNNetwork::setBatchSize``

.. tab:: Python

   `IENetwork.batch_size <api/ie_python_api/_autosummary/openvino.inference_engine.IENetwork.html#openvino.inference_engine.IENetwork.batch_size>`_

@endsphinxdirective

Increasing batch size only improves efficiency of `Fully Connected` layers.

> **NOTE**: For networks with `Convolutional`, `LSTM`, or `Memory` layers, the only supported batch size is 1.
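
A minimal C++ sketch (an assumption, not from the original page) of batching frames before loading; the model path is a placeholder:

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // hypothetical model path
    // Process up to 8 context-windowed speech frames per input blob.
    network.setBatchSize(8);
    auto executable = core.LoadNetwork(network, "GNA");
    return 0;
}
```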

## Compatibility with Heterogeneous Plugin

The Heterogeneous plugin was tested with Intel® GNA as a primary device and CPU as a secondary device. To run inference of networks with layers unsupported by the GNA plugin, such as Softmax, use the Heterogeneous plugin with the `HETERO:GNA,CPU` configuration.

> **NOTE**: Due to a limitation of the Intel® GNA backend library, heterogeneous support is limited to cases where, in the resulting sliced graph, only one subgraph is scheduled to run on GNA\_HW or GNA\_SW devices.
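
For instance, a minimal C++ sketch (an assumption, not from the original page) of the configuration above; the model path is a placeholder:

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // hypothetical model path
    // Layers unsupported by GNA (e.g., Softmax) fall back to the CPU.
    auto executable = core.LoadNetwork(network, "HETERO:GNA,CPU");
    return 0;
}
```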

## Recovery from Interruption by High-Priority Windows Audio Processes\*

GNA is designed for real-time workloads such as noise reduction.
For such workloads, processing should be time constrained, otherwise extra delays may cause undesired effects such as
*audio glitches*. To make sure that processing can satisfy real-time requirements, the GNA driver provides a Quality of Service
(QoS) mechanism, which interrupts requests that might cause high-priority Windows audio processes to miss
the schedule, thereby causing long-running GNA tasks to terminate early.

Applications should be prepared for this situation.

If an inference in the `GNA_HW` mode cannot be executed because of such an interruption, then the `wait` method returns the following status code:

@sphinxdirective
.. tab:: C++

   ``InferRequest::Wait()`` returns status code ``StatusCode::INFER_NOT_STARTED``.

.. tab:: Python

   `InferRequest.wait <api/ie_python_api/_autosummary/openvino.inference_engine.InferRequest.html#openvino.inference_engine.InferRequest.wait>`_ returns status code `INFER_NOT_STARTED`.

@endsphinxdirective

In future releases, it will be changed to a more meaningful status code.

Any application working with GNA must properly react to this code.
One of the strategies to adapt an application:

1. Immediately switch to the GNA_SW_EXACT emulation mode:
@sphinxdirective
.. tab:: C++

   .. code-block:: cpp

      std::map<std::string, Parameter> newConfig;
      newConfig[GNAConfigParams::KEY_GNA_DEVICE_MODE] = Parameter("GNA_SW_EXACT");
      executableNet.SetConfig(newConfig);

.. tab:: Python

   .. code-block:: python

      from openvino.inference_engine import IECore

      ie = IECore()
      new_cfg = {'GNA_DEVICE_MODE' : 'GNA_SW_EXACT'}
      net = ie.read_network(model=path_to_model)
      exec_net = ie.load_network(network=net, device_name="GNA", config=new_cfg)

@endsphinxdirective

2. Resubmit and switch back to GNA_HW expecting that the competing application has finished.

> **NOTE**: This method is deprecated since a new automatic QoS mode was introduced in the 2021.4.1 release of OpenVINO™ (see below).

## GNA3 Automatic QoS Feature on Windows*

Starting with the 2021.4.1 release of OpenVINO and version 03.00.00.1363 of the Windows* GNA driver, a new execution mode `GNA_HW_WITH_SW_FBACK` is introduced
to assure that workloads satisfy real-time execution. In this mode, the GNA driver automatically falls back on CPU for a particular infer request
if the HW queue is not empty, so there is no need to switch between GNA and CPU explicitly.

> **NOTE**: Due to the "first come, first served" nature of the GNA driver and the QoS feature, this mode may lead to increased CPU consumption
if there are several clients using GNA simultaneously.
Even a lightweight competing infer request, which has not been cleared at the time when the user's GNA client process makes its request,
can cause the user's request to be executed on CPU, thereby unnecessarily increasing CPU utilization and power.

## See Also

* [Supported Devices](Supported_Devices.md)
* [Converting Model](../../MO_DG/prepare_model/convert_model/Converting_Model.md)
* [Convert model from Kaldi](../../MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md)

@@ -1,157 +0,0 @@

# GPU Plugin {#openvino_docs_IE_DG_supported_plugins_GPU}

@sphinxdirective

.. toctree::
   :maxdepth: 1
   :hidden:

   openvino_docs_IE_DG_supported_plugins_GPU_RemoteBlob_API

@endsphinxdirective

The GPU plugin uses the Intel® Compute Library for Deep Neural Networks (clDNN) to infer deep neural networks.
clDNN is an open source performance library for Deep Learning (DL) applications intended for acceleration of Deep Learning Inference on Intel® Processor Graphics including Intel® HD Graphics, Intel® Iris® Graphics, Intel® Iris® Xe Graphics, and Intel® Iris® Xe MAX graphics.
For an in-depth description of clDNN, see [Inference Engine source files](https://github.com/openvinotoolkit/openvino/tree/master/src/plugins/intel_gpu/) and [Accelerate Deep Learning Inference with Intel® Processor Graphics](https://software.intel.com/en-us/articles/accelerating-deep-learning-inference-with-intel-processor-graphics).

## Device Naming Convention
* Devices are enumerated as "GPU.X", where `X={0, 1, 2,...}`. Only Intel® GPU devices are considered.
* If the system has an integrated GPU, it always has `id=0` ("GPU.0").
* Other GPUs have an undefined order that depends on the GPU driver.
* "GPU" is an alias for "GPU.0".
* If the system does not have an integrated GPU, devices are enumerated starting from 0.

For demonstration purposes, see the [Hello Query Device C++ Sample](../../../samples/cpp/hello_query_device/README.md) that can print out the list of available devices with associated indices. Below is an example output (truncated to the device names only):

```sh
./hello_query_device
Available devices:
    Device: CPU
...
    Device: GPU.0
...
    Device: GPU.1
...
    Device: HDDL
```

## Optimizations

The plugin supports algorithms that fuse several operations into one optimized operation. Refer to the sections below for details.

> **NOTE**: For operation descriptions, see the [IR Notation Reference](../../ops/opset.md).

### Fusing Convolution and Simple Layers

Merge of a Convolution layer and any of the simple layers listed below:
- Activation: ReLU, ELU, Sigmoid, Clamp, and others
- Depthwise: ScaleShift, PReLU
- FakeQuantize

> **NOTE**: You can have any number and order of simple layers.

A combination of a Convolution layer and simple layers results in a single fused layer called
*Convolution*:
![conv_simple_01]

### Fusing Pooling and FakeQuantize Layers

A combination of Pooling and FakeQuantize layers results in a single fused layer called *Pooling*:
![pooling_fakequant_01]

### Fusing Activation Layers

Given the linear pattern, an Activation layer can be fused into other layers:

![fullyconnected_activation_01]

### Fusing Convolution and Sum Layers

A combination of Convolution, Simple, and Eltwise layers with the sum operation results in a single layer called *Convolution*:
![conv_sum_relu_01]

### Fusing a Group of Convolutions

If a topology contains the following pipeline, a GPU plugin merges Split, Convolution, and Concatenation layers into a single Convolution layer with the group parameter:
> **NOTE**: Parameters of the Convolution layers must coincide.

![group_convolutions_01]

### Optimizing Layers Out

The following layers are optimized out under certain conditions:
* Crop
* Concatenate
* Reshape
* Flatten
* Split
* Copy

### Load-Time Execution

Some layers are executed during the load time, not during the inference. One such layer is PriorBox.

## CPU Executed Layers

The following layers are not accelerated on the GPU and are executed on the host CPU instead:
* Proposal
* NonMaxSuppression
* PriorBox
* DetectionOutput

## Supported Configuration Parameters

The plugin supports the configuration parameters listed below.
All parameters must be set before calling `InferenceEngine::Core::LoadNetwork()` in order to take effect.
When specifying key values as raw strings (that is, when using the Python API), omit the `KEY_` prefix.

| Parameter Name | Parameter Values | Default | Description |
|---------------------|-----------------------------|-----------------|-----------------------------------------------------------|
| `KEY_CACHE_DIR` | `"<cache_dir>"` | `""` | Specifies a directory where compiled OCL binaries can be cached. First model loading generates the cache, and all subsequent LoadNetwork calls use precompiled kernels, which significantly improves load time. If empty, caching is disabled. |
| `KEY_PERF_COUNT` | `YES` / `NO` | `NO` | Collect performance counters during inference. |
| `KEY_CONFIG_FILE` | `"<file1> [<file2> ...]"` | `""` | Load custom layer configuration files. |
| `KEY_GPU_MODEL_`<br>`PRIORITY` | `GPU_MODEL_PRIORITY_<HIGH\|LOW>` <br/> `GPU_QUEUE_PRIORITY_<LOW\|HIGH\|MED\|DEFAULT>` <br/> `GPU_HOST_TASK_PRIORITY_<HIGH\|LOW\|ANY>` | `GPU_QUEUE_PRIORITY_DEFAULT` <br/> `\|GPU_HOST_TASK_PRIORITY_ANY` | Specifies two types of priority: host task priority and OpenCL queue priority.<br/><br/>Host task priority is specified by `GPU_HOST_TASK_PRIORITY_[level]` and there are three task levels: `HIGH`, `LOW`, and `ANY`. Note that `HIGH` and `LOW` are effective only when TBB is used for multithreading the LoadNetwork workload and the host processor is a hybrid type. For hybrid processors, if the task priority type is set as `HIGH`, the task will have higher priority for core type selection, and vice versa. If the host processor is not hybrid or multithreading is not using TBB, it is set as `ANY`, which is the default type.<br/><br/>OpenCL queue priority is specified by `GPU_QUEUE_PRIORITY_[level]` and there are four levels: `HIGH`, `MED`, `LOW`, and `DEFAULT`, where the default value is `DEFAULT`. Before usage, make sure your OpenCL driver supports the appropriate extension.<br/><br/>Basically, `GPU_MODEL_PRIORITY` can be set as a combination of the two priority types, such as<br/>-`GPU_QUEUE_PRIORITY_HIGH\|GPU_HOST_TASK_PRIORITY_HIGH` or<br/>-`GPU_QUEUE_PRIORITY_LOW\|GPU_HOST_TASK_PRIORITY_HIGH`.<br/><br/>It can also be set as a more abstract priority level `GPU_MODEL_PRIORITY_[level]`, which represents a combination of the two priorities as follows:<br/>-`GPU_MODEL_PRIORITY_HIGH` : `GPU_QUEUE_PRIORITY_HIGH\|GPU_HOST_TASK_PRIORITY_HIGH`<br/>-`GPU_MODEL_PRIORITY_LOW` : `GPU_QUEUE_PRIORITY_LOW\|GPU_HOST_TASK_PRIORITY_LOW`<br/><br/>The default of `KEY_GPU_MODEL_PRIORITY` is `GPU_QUEUE_PRIORITY_DEFAULT\|GPU_HOST_TASK_PRIORITY_ANY`.<br> |
| `KEY_GPU_PLUGIN_`<br>`PRIORITY` | `<0-3>` | `0` | OpenCL queue priority (before usage, make sure your OpenCL driver supports the appropriate extension).<br> A higher value means a higher priority for the OpenCL queue. 0 disables the setting. **Deprecated**. Please use KEY_GPU_MODEL_PRIORITY. |
| `KEY_GPU_PLUGIN_`<br>`THROTTLE` | `<0-3>` | `0` | OpenCL queue throttling (before usage, make sure your OpenCL driver supports the appropriate extension).<br> A lower value means a lower driver thread priority and a longer sleep time for it. 0 disables the setting. |
| `KEY_CLDNN_ENABLE_`<br>`FP16_FOR_QUANTIZED_`<br>`MODELS` | `YES` / `NO` | `YES` | Allows using the FP16+INT8 mixed precision mode, so non-quantized parts of a model will be executed in FP16 precision for FP16 IR. Does not affect quantized FP32 IRs. |
| `KEY_GPU_NV12_`<br>`TWO_INPUTS` | `YES` / `NO` | `NO` | Controls preprocessing logic for NV12 input. If set to YES, the device graph will expect that the user will set a biplanar NV12 blob as input, which will be directly passed to the device execution graph. Otherwise, preprocessing via GAPI is used to convert NV12->BGR, and thus the GPU graph has to expect a single input. |
| `KEY_GPU_THROUGHPUT_`<br>`STREAMS` | `KEY_GPU_THROUGHPUT_AUTO`, or positive integer| 1 | Specifies a number of GPU "execution" streams for the throughput mode (upper bound for the number of inference requests that can be executed simultaneously).<br>This option can be used to decrease GPU stall time by providing a more effective load from several streams. Increasing the number of streams is usually more effective for smaller topologies or smaller input sizes. Note that your application should provide enough parallel slack (e.g., running many inference requests) to leverage the full GPU bandwidth. Additional streams consume several times more GPU memory, so make sure the system has enough memory available to suit parallel stream execution. Multiple streams might also put additional load on the CPU. If CPU load increases, it can be regulated by setting an appropriate `KEY_GPU_PLUGIN_THROTTLE` option value (see above). If your target system has a relatively weak CPU, keep throttling low. <br>The default value is 1, which implies latency-oriented behavior.<br>`KEY_GPU_THROUGHPUT_AUTO` creates a bare minimum of streams to improve the performance; this is the most portable option if you are not sure how many resources your target machine has (and what would be the optimal number of streams). <br> A positive integer value creates the requested number of streams. |
| `KEY_EXCLUSIVE_ASYNC_`<br>`REQUESTS` | `YES` / `NO` | `NO` | Forces async requests (also from different executable networks) to execute serially. |
| `KEY_GPU_MAX_NUM_`<br>`THREADS` | `integer value` | `maximum # of HW threads available in host environment` | Specifies the number of CPU threads that can be used for the GPU engine, e.g., JIT compilation of GPU kernels or CPU kernel processing within the GPU plugin. The default value is set as the number of maximum available threads in the host environment to minimize the time for LoadNetwork, where the GPU kernel build time occupies a large portion. Note that if the specified value is larger than the maximum available number of threads or less than zero, it is set as the maximum available number of threads. It can be specified with a smaller number than the available HW threads according to the usage scenario, e.g., when the user wants to assign more CPU threads while the GPU plugin is running. Note that setting this value to a lower number will affect not only the network loading time but also the CPU layers of GPU networks that are optimized with multi-threading. |
| `KEY_GPU_ENABLE_`<br>`LOOP_UNROLLING` | `YES` / `NO` | `YES` | Enables recurrent layers such as TensorIterator or Loop with a fixed iteration count to be unrolled. It is turned on by default. Turning this key on will achieve better inference performance for loops with not too many iterations (less than 16, as a rule of thumb). Turning this key off will achieve better performance for both graph loading time and inference time with many iteration counts (greater than 16). Note that turning this key on will increase the graph loading time in proportion to the iteration counts. Thus, this key should be turned off if graph loading time is considered the most important target to optimize. |
| `KEY_CLDNN_PLUGIN_`<br>`PRIORITY` | `<0-3>` | `0` | OpenCL queue priority (before usage, make sure your OpenCL driver supports the appropriate extension).<br> A higher value means a higher priority for the OpenCL queue. 0 disables the setting. **Deprecated**. Please use KEY_GPU_MODEL_PRIORITY. |
| `KEY_CLDNN_PLUGIN_`<br>`THROTTLE` | `<0-3>` | `0` | OpenCL queue throttling (before usage, make sure your OpenCL driver supports the appropriate extension).<br> A lower value means a lower driver thread priority and a longer sleep time for it. 0 disables the setting. **Deprecated**. Please use KEY_GPU_PLUGIN_THROTTLE. |
| `KEY_CLDNN_GRAPH_`<br>`DUMPS_DIR` | `"<dump_dir>"` | `""` | clDNN graph optimizer stages dump output directory (in GraphViz format). **Deprecated**. Will be removed in the next release. |
| `KEY_CLDNN_SOURCES_`<br>`DUMPS_DIR` | `"<dump_dir>"` | `""` | Final optimized clDNN OpenCL sources dump output directory. **Deprecated**. Will be removed in the next release. |
| `KEY_DUMP_KERNELS` | `YES` / `NO` | `NO` | Dump the final kernels used for custom layers. **Deprecated**. Will be removed in the next release. |
| `KEY_TUNING_MODE` | `TUNING_DISABLED` <br /> `TUNING_CREATE` <br /> `TUNING_USE_EXISTING` | `TUNING_DISABLED` | Disable inference kernel tuning <br /> Create tuning file (expect much longer runtime) <br /> Use an existing tuning file. **Deprecated**. Will be removed in the next release. |
| `KEY_TUNING_FILE` | `"<filename>"` | `""` | Tuning file to create / use. **Deprecated**. Will be removed in the next release. |
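
For illustration, a minimal C++ sketch (an assumption, not from the original page) enabling the kernel cache described above; the model path and cache directory are placeholders:

```cpp
#include <map>
#include <string>
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // hypothetical model path
    // Cache compiled OpenCL binaries so that subsequent LoadNetwork calls
    // reuse precompiled kernels and load significantly faster.
    std::map<std::string, std::string> config = {
        {CONFIG_KEY(CACHE_DIR), "gpu_cache"}};  // hypothetical directory
    auto executable = core.LoadNetwork(network, "GPU", config);
    return 0;
}
```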

## Querying GPU-Specific Metric Keys
* MEMORY_STATISTICS : Returns overall memory statistics of the `GPU` device allocated by the engine, with allocation types. If the network has a `TensorIterator` or `Loop` operation which is not unrolled, there will be an additional allocation at the first inference phase. In such a case, querying for `MEMORY_STATISTICS` should be done after the first inference for a more accurate result. The code below demonstrates how to query overall memory statistics of the `GPU` device:

@snippet snippets/GPU_Metric0.cpp part0

* MAX_BATCH_SIZE : Returns the maximum batch size for a given network which is not only executable but also does not lose performance due to the memory swap impact. Note that the returned value may not be aligned to a power of 2. Also, MODEL_PTR is the required option for this metric since the available max batch size depends on the model size. If MODEL_PTR is not given, it will return 1. The example code to set the required and optional configs for this metric is available in the following snippet:

@snippet snippets/GPU_Metric1.cpp part1

* OPTIMAL_BATCH_SIZE : Returns the _optimal_ batch size for a given network on the given GPU device. The returned value is aligned to a power of 2. Also, MODEL_PTR is the required option for this metric since the optimal batch size highly depends on the model. If MODEL_PTR is not given, the value of 1 is returned. The example code to set the required and optional configs for this metric is available in the following snippet:

@snippet snippets/GPU_Metric1.cpp part2

## GPU Context and Video Memory Sharing RemoteBlob API

See [RemoteBlob API of GPU Plugin](GPU_RemoteBlob_API.md)

## See Also
* [Supported Devices](Supported_Devices.md)

[conv_simple_01]: ../img/conv_simple_01.png
[pooling_fakequant_01]: ../img/pooling_fakequant_01.png
[fullyconnected_activation_01]: ../img/fullyconnected_activation_01.png
[group_convolutions_01]: ../img/group_convolutions_01.png
[conv_sum_relu_01]: ../img/conv_sum_relu_01.png

@@ -1,141 +0,0 @@

Remote Blob API of GPU Plugin {#openvino_docs_IE_DG_supported_plugins_GPU_RemoteBlob_API}
================================

The GPU plugin implementation of the `RemoteContext` and `RemoteBlob` interfaces supports GPU
pipeline developers who need video memory sharing and interoperability with existing native APIs
such as OpenCL\*, Microsoft DirectX\*, or VAAPI\*.
Using these interfaces allows you to avoid any memory copy overhead when plugging OpenVINO™ inference
into an existing GPU pipeline. It also enables OpenCL kernels participating in the pipeline to become
native buffer consumers or producers of the OpenVINO™ inference.
Since the GPU plugin works on top of the clDNN library, the functionality above is also implemented
using OpenCL and its sharing extensions provided by Intel®.

There are two interoperability scenarios supported by the Remote Blob API:

* GPU plugin context and memory objects can be constructed from low-level device, display, or memory
handles and used to create the OpenVINO™ `ExecutableNetwork` or `Blob` class.
* OpenCL context or buffer handles can be obtained from existing GPU plugin objects, and used in OpenCL processing.

Class and function declarations for the API are defined in the following files:
* Windows\*: `gpu/gpu_context_api_ocl.hpp` and `gpu/gpu_context_api_dx.hpp`
* Linux\*: `gpu/gpu_context_api_ocl.hpp` and `gpu/gpu_context_api_va.hpp`

The most common way to enable the interaction of your application with the Remote Blob API is to use user-side utility classes
and functions that consume or produce native handles directly.

## Execution Context User-Side Wrappers

GPU plugin classes that implement the `RemoteContext` interface are responsible for context sharing.
Obtaining a pointer to a context object is the first step of sharing pipeline objects.
The context object of the GPU plugin directly wraps the OpenCL context, setting a scope for sharing
`ExecutableNetwork` and `RemoteBlob` objects.
To create such objects within a user context, explicitly provide the context to the plugin using the
`make_shared_context()` overloaded function. Depending on the platform, the function accepts the
`cl_context` handle, the pointer to the `ID3D11Device` interface, or the `VADisplay` handle, and
returns a smart pointer to the `RemoteContext` plugin object.

If you do not provide any user context, the plugin uses its default internal context.
The plugin attempts to use the same internal context object as long as plugin options are kept the same.
Therefore, all `ExecutableNetwork` objects created during this time share the same context.
Once the plugin options are changed, the internal context is replaced by a new one.

To request the current default context of the plugin, call the `GetDefaultContext()` method of the core engine.
To request the internal context of the given `ExecutableNetwork`, use the `GetContext()` method.
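
For orientation, a minimal sketch (an assumption, not from the original page) of wrapping an existing OpenCL context and compiling a network within it; `user_cl_context` stands for a handle your pipeline already owns:

```cpp
#include <gpu/gpu_context_api_ocl.hpp>
#include <inference_engine.hpp>

InferenceEngine::ExecutableNetwork loadInUserContext(
        InferenceEngine::Core& core,
        InferenceEngine::CNNNetwork& network,
        cl_context user_cl_context) {
    // Wrap the existing OpenCL context so the plugin shares it.
    auto remote_context = InferenceEngine::gpu::make_shared_context(
        core, "GPU", user_cl_context);
    // Compile the network inside the shared context, using the LoadNetwork
    // flavor that accepts a context (described below).
    return core.LoadNetwork(network, remote_context);
}
```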

## Shared Blob User-Side Wrappers

The classes that implement the `RemoteBlob` interface are both wrappers for native API
memory handles (which can be obtained from them at any time) and act just like regular OpenVINO™
`Blob` objects.

Once you obtain the context, you can use it to compile a new `ExecutableNetwork` or create `RemoteBlob`
objects.
For network compilation, use a dedicated flavor of `LoadNetwork()`, which accepts the context as an
additional parameter.

To create a shared blob from a native memory handle, use `make_shared_blob()` overloaded functions
that can accept the `cl::Buffer`, `cl::Image2D`, `cl_mem` handles, and either `ID3D11Buffer`,
`ID3D11Texture2D` pointers or the `VASurfaceID` handle.
All `make_shared_blob()` flavors return a smart pointer to the `Blob` object, which can be directly
passed to the `SetBlob()` method of an inference request object.

## Direct NV12 video surface input

To support the direct consumption of a hardware video decoder output, the plugin accepts two-plane video
surfaces as arguments for the `make_shared_blob_nv12()` function, which creates an `NV12Blob` object
and returns a smart pointer to it, which is cast to `Blob::Ptr`.

To ensure that the plugin generates the correct execution graph for the NV12 dual-plane input, set
the `CLDNNConfigParams::KEY_CLDNN_NV12_TWO_INPUTS` plugin configuration flag to `PluginConfigParams::YES`.
## Context & queue sharing

The GPU plugin supports creation of a shared context from a `cl_command_queue` handle. In that case,
the OpenCL context handle is extracted from the given queue via the OpenCL™ API, and the queue itself is used inside
the plugin for further execution of inference primitives. Sharing the queue changes the behavior of the `StartAsync()`
method to guarantee that submission of inference primitives into the given queue is finished before
control returns to the calling thread.

This sharing mechanism allows you to perform pipeline synchronization on the application side and avoid blocking the host thread
while waiting for the completion of inference. Pseudocode may look as follows:

@snippet snippets/GPU_RemoteBlob_API3.cpp part0

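For orientation, a condensed sketch of the same flow, assuming a `make_shared_context()` overload that accepts a `cl_command_queue` in this release and an application-owned in-order queue:

```cpp
// `queue` is a cl_command_queue created and owned by the application.
auto shared_ctx = InferenceEngine::gpu::make_shared_context(core, "GPU", queue);
auto exec_net = core.LoadNetwork(network, shared_ctx);
auto req = exec_net.CreateInferRequest();

// Enqueue user pre-processing kernels into `queue`, then start inference.
// With a shared queue, StartAsync() returns once the inference primitives
// have been submitted, not once they have completed.
req.StartAsync();

// Enqueue user post-processing kernels that consume the shared output,
// then synchronize the whole pipeline once on the application side.
clFinish(queue);
```
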
### Limitations

- Some primitives in the GPU plugin may block the host thread while waiting for previous primitives before adding their kernels
to the command queue. In such cases the `StartAsync()` call takes much more time to return control to the calling thread,
as internally it waits for partial or full network completion.
Examples of such operations: Loop, TensorIterator, DetectionOutput, NonMaxSuppression
- Synchronization of pre/post-processing jobs and the inference pipeline inside a shared queue is the user's responsibility
- Throughput mode is not available when queue sharing is used, i.e. only a single stream can be used for each executable network.

## Low-Level Methods and Their Parameter Description

The high-level wrappers above bring a direct dependency on native APIs to the user program.
If you want to avoid the dependency, you can still use the `CreateContext()`,
`CreateBlob()`, and `getParams()` methods directly.
On this level, native handles are re-interpreted as void pointers and all arguments are passed
using `std::map` containers that are filled with `std::string, InferenceEngine::Parameter` pairs.
Two types of map entries are possible: descriptor and container. The first map entry is a
descriptor, which sets the expected structure and possible parameter values of the map.

**Parameter Map Entries**

| Key Name | Description and Possible Parameter Values |
|----------------|---------------------------------------------------------------------|
| `CONTEXT_TYPE` | Describes the type of the shared context in a map. Can be `OCL` (for a pure OpenCL context) or `VA_SHARED` (for a context shared with a video decoding device). |
| `OCL_CONTEXT` | Contains the OpenCL context handle. |
| `OCL_QUEUE` | Contains the OpenCL queue handle if queue sharing is needed. |
| `VA_DEVICE` | Contains the native video decoding device handle. Can be `VADisplay` or `ID3D11Device` (a pointer). |
| `SHARED_MEM_TYPE` | Describes the type of the shared memory buffer in a map. Can be `OCL_BUFFER` (clBuffer), `OCL_IMAGE2D` (clImage2D), `VA_SURFACE`, or `DX_BUFFER`. |
| `MEM_HANDLE` | Contains the OpenCL memory handle. |
| `DEV_OBJECT_HANDLE` | Contains the native video decoder surface handle. |
| `VA_PLANE` | Contains the NV12 video decoder surface plane index. Can be `0` or `1`. |

> **NOTE**: To initialize the entry key and value, use the `GPU_PARAM_KEY()` or `GPU_PARAM_VALUE()` macro.

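As a sketch of this low-level path (assuming a valid `cl_context` handle named `user_cl_context`), a context parameter map may be filled as follows:

```cpp
#include <gpu/gpu_params.hpp>

InferenceEngine::ParamMap context_params = {
    {GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(OCL)},
    // Native handles travel through the map as void pointers.
    {GPU_PARAM_KEY(OCL_CONTEXT),
     static_cast<InferenceEngine::gpu_handle_param>(user_cl_context)}};

auto remote_context = core.CreateContext("GPU", context_params);
```
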
## Examples

Refer to the sections below to see pseudocode of usage examples.

> **NOTE**: For low-level parameter usage examples, see the source code of user-side wrappers from the include files mentioned above.

### OpenCL Kernel Execution on a Shared Buffer

This example uses the OpenCL context obtained from an executable network object.

@snippet snippets/GPU_RemoteBlob_API0.cpp part0

### Running GPU Plugin Inference within User-Supplied Shared Context

@snippet snippets/GPU_RemoteBlob_API1.cpp part1

### Direct Consumption of the NV12 VAAPI Video Decoder Surface on Linux

@snippet snippets/GPU_RemoteBlob_API2.cpp part2

## See Also

* InferenceEngine::Core
* InferenceEngine::RemoteBlob

@@ -1,256 +0,0 @@

# Heterogeneous Plugin {#openvino_docs_IE_DG_supported_plugins_HETERO}

## Introducing the Heterogeneous Plugin (C++)

@sphinxdirective
.. raw:: html

   <div id="switcher-cpp" class="switcher-anchor">C++</div>

@endsphinxdirective

The heterogeneous plugin enables computing the inference of one network on several devices. The purposes of executing networks in heterogeneous mode are to:

* Utilize the power of accelerators to process the heaviest parts of the network and to execute unsupported layers on fallback devices like the CPU
* Utilize all available hardware more efficiently during one inference

The execution through the heterogeneous plugin can be divided into two independent steps:

1. Setting hardware affinity for layers
2. Loading the network to the Heterogeneous plugin, splitting the network into parts, and executing them through the plugin

These steps are decoupled. The setting of affinity can be done automatically using the fallback policy or in manual mode.

The automatic fallback policy causes "greedy" behavior and assigns all layers that can be executed on a certain device according to the priorities you specify (for example, HETERO:GPU,CPU).
The automatic policy does not take into account plugin peculiarities such as the inability to infer some layers without other special layers placed before or after them. It is the plugin's responsibility to solve such cases. If the device plugin does not support the subgraph topology constructed by the HETERO plugin, then you should set affinity manually.

### Details of Splitting Network and Execution

During the loading of the network to the Heterogeneous plugin, the network is divided into separate parts and loaded to dedicated plugins.
Intermediate blobs between these subgraphs are allocated automatically in the most efficient way.

### Sample Usage

Inference Engine sample programs can use the Heterogeneous plugin with the `-d` option:

```sh
./hello_classification <path_to_model>/squeezenet1.1.xml <path_to_pictures>/picture.jpg HETERO:GPU,CPU
```

where:
- `HETERO` stands for the Heterogeneous plugin
- `GPU,CPU` points to the fallback policy with priority on GPU and fallback to CPU

You can specify more than two devices: `-d HETERO:MYRIAD,GPU,CPU`

### Annotation of Layers per Device and Default Fallback Policy

The default fallback policy decides which layer goes to which device automatically according to the support in dedicated plugins (GPU, CPU, MYRIAD).

Another way to annotate a network is to set affinity manually using `ngraph::Node::get_rt_info` with the key `affinity`:

@snippet snippets/HETERO0.cpp part0

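For reference, a hypothetical sketch of such manual annotation, assuming the variant API of this release where `rt_info` values are wrapped in `ngraph::VariantWrapper`:

```cpp
#include <ngraph/variant.hpp>

// Pin every operation of `function` (an ngraph::Function) to the CPU.
for (auto&& node : function->get_ordered_ops()) {
    auto& rt_info = node->get_rt_info();
    rt_info["affinity"] = std::make_shared<ngraph::VariantWrapper<std::string>>("CPU");
}
```
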
The fallback policy does not work if even one layer has an initialized affinity. The recommended sequence is to apply the automatic affinity settings first and then fix them manually.

> **NOTE**: If you set affinity manually, be careful because currently Inference Engine plugins don't support constant (`Constant`->`Result`) and empty (`Parameter`->`Result`) networks. Please avoid such subgraphs when you set affinity manually.

@snippet snippets/HETERO1.cpp part1

If you rely on the default affinity distribution, you can avoid calling `InferenceEngine::Core::QueryNetwork` and just call `InferenceEngine::Core::LoadNetwork` instead:

@snippet snippets/HETERO2.cpp part2

> **NOTE**: `InferenceEngine::Core::QueryNetwork` does not depend on affinities set by a user. Instead, it queries for layer support based on device capabilities.

### Handling Difficult Topologies

Some topologies are not friendly to heterogeneous execution on some devices, or cannot be executed at all with this plugin.
Examples are networks having activation layers that are not supported on the primary device.
If transmitting data from one part of a network to another part in heterogeneous mode takes more time than in normal mode, it may not make sense to execute them in heterogeneous mode.
In this case, you can define the heaviest part manually and set the affinity to avoid sending data back and forth many times during one inference.

### Execution Precision

Precision for inference in the heterogeneous plugin is defined by:
* Precision of the IR
* Ability of final plugins to execute in the precision defined in the IR

For example, if you want to execute GPU with CPU fallback using FP16 on the GPU, use only FP16 IR.

### Analyzing Performance of Heterogeneous Execution

After enabling the `KEY_HETERO_DUMP_GRAPH_DOT` config key (shown in the code snippet below), you can dump GraphViz* `.dot` files with annotations of devices per layer.

The Heterogeneous plugin can generate two files:

* `hetero_affinity_<network name>.dot` - annotation of affinities per layer. This file is written to the disk only if the default fallback policy was executed
* `hetero_subgraphs_<network name>.dot` - annotation of affinities per graph. This file is written to the disk during execution of `ICNNNetwork::LoadNetwork()` for the Heterogeneous plugin

@snippet snippets/HETERO3.cpp part3

You can use the GraphViz* utility or a file converter to view the images. On the Ubuntu* operating system, you can use xdot:

* `sudo apt-get install xdot`
* `xdot hetero_subgraphs.dot`

You can use performance data (in sample applications, it is the `-pc` option) to get the performance data on each subgraph.

Here is an example of the output for Googlenet v1 running on HDDL with fallback to CPU:

```
subgraph1: 1. input preprocessing (mean data/HDDL):EXECUTED layerType: realTime: 129 cpu: 129 execType:
subgraph1: 2. input transfer to DDR:EXECUTED layerType: realTime: 201 cpu: 0 execType:
subgraph1: 3. HDDL execute time:EXECUTED layerType: realTime: 3808 cpu: 0 execType:
subgraph1: 4. output transfer from DDR:EXECUTED layerType: realTime: 55 cpu: 0 execType:
subgraph1: 5. HDDL output postprocessing:EXECUTED layerType: realTime: 7 cpu: 7 execType:
subgraph1: 6. copy to IE blob:EXECUTED layerType: realTime: 2 cpu: 2 execType:
subgraph2: out_prob: NOT_RUN layerType: Output realTime: 0 cpu: 0 execType: unknown
subgraph2: prob: EXECUTED layerType: SoftMax realTime: 10 cpu: 10 execType: ref
Total time: 4212 microseconds
```

### See Also

[Supported Devices](Supported_Devices.md)

## Introducing the Heterogeneous Plugin (Python)

@sphinxdirective
.. raw:: html

   <div id="switcher-python" class="switcher-anchor">Python</div>

@endsphinxdirective

The heterogeneous plugin enables computing the inference of one network on several devices. The purposes of executing networks in heterogeneous mode are to:

* Utilize the power of accelerators to process the heaviest parts of the network and to execute unsupported layers on fallback devices like the CPU
* Utilize all available hardware more efficiently during one inference

The execution through the heterogeneous plugin can be divided into two independent steps:

1. Setting hardware affinity for layers
2. Loading the network to the Heterogeneous plugin, splitting the network into parts, and executing them through the plugin

These steps are decoupled. The setting of affinity can be done automatically using the fallback policy or in manual mode.

The automatic fallback policy causes "greedy" behavior and assigns all layers that can be executed on a certain device according to the priorities you specify (for example, HETERO:GPU,CPU).
The automatic policy does not take into account plugin peculiarities such as the inability to infer some layers without other special layers placed before or after them. It is the plugin's responsibility to solve such cases. If the device plugin does not support the subgraph topology constructed by the HETERO plugin, then you should set affinity manually.

Some topologies are not well-supported for heterogeneous execution on some devices, or cannot be executed in this mode at all. Examples of such networks are those having activation layers which are not supported on the primary device. If transmitting data from one part of a network to another part in heterogeneous mode takes more time than in normal mode, it may not make sense to execute them in heterogeneous mode. In this case, you can define the most compute-intense part manually and set the affinity to avoid sending data back and forth many times during one inference.

### Use Default Layer Affinities

To use the default affinities, call `load_network` with the "HETERO" device, with an optional list of devices to consider.

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model=path_to_model)
exec_net = ie.load_network(network=net, device_name='HETERO:GPU,CPU')
```

### Annotation of Layers per Device and Default Fallback Policy

The default fallback policy decides which layer goes to which device automatically according to the support in dedicated plugins (GPU, CPU, MYRIAD).

Another way to annotate a network is to set affinity manually using code.

### Set Affinity of All Layers to CPU

```python
import ngraph as ng
from openvino.inference_engine import IECore

ie = IECore()
# Read a network in IR or ONNX format
net = ie.read_network(path_to_model)
# Create an nGraph (graph) function from the network
ng_func = ng.function_from_cnn(net)
for node in ng_func.get_ordered_ops():
    rt_info = node.get_rt_info()
    rt_info["affinity"] = "CPU"
```

The fallback policy does not work if even one layer has an initialized affinity. The recommended sequence is to apply the default affinity settings first and then set the remaining layers manually.

> **NOTE**: If you set affinity manually, be aware that currently Inference Engine plugins do not support constant (*Constant -> Result*) and empty (*Parameter -> Result*) networks. Please avoid these subgraphs when you set affinity manually.

### Example - Manually Setting Layer Affinities

```python
import ngraph as ng
from openvino.inference_engine import IECore

ie = IECore()
# Read a network in IR or ONNX format
net = ie.read_network(path_to_model)
ng_func = ng.function_from_cnn(net)

for node in ng_func.get_ordered_ops():
    rt_info = node.get_rt_info()
    rt_info["affinity"] = "CPU"

# Load the network on the target device
exec_net = ie.load_network(network=net, device_name='HETERO:FPGA,CPU')
```

> **NOTE**: `ie.query_network` does not depend on affinities set by a user, but queries for layer support based on device capabilities.

### Details of Splitting Network and Execution

During the loading of the network to the heterogeneous plugin, the network is divided into separate parts and loaded to dedicated plugins. Intermediate blobs between these subgraphs are allocated automatically in the most efficient way.

### Execution Precision

The precision for inference in the heterogeneous plugin is defined by:
* Precision of the IR
* Ability of final plugins to execute in the precision defined in the IR

For example, if you want to execute GPU with CPU fallback using FP16 on the GPU, use only FP16 IR.

OpenVINO samples can be used with the following command:

```sh
./hello_classification <path_to_model>/squeezenet1.1.xml <path_to_pictures>/picture.jpg HETERO:GPU,CPU
```

where `HETERO` stands for the heterogeneous plugin.

You can specify more than two devices, for example: `-d HETERO:MYRIAD,GPU,CPU`

### Analyzing Heterogeneous Execution

After enabling the `KEY_HETERO_DUMP_GRAPH_DOT` config key, you can dump GraphViz* `.dot` files with annotations of devices per layer.

The heterogeneous plugin can generate two files:

* `hetero_affinity_<network name>.dot` - annotation of affinities per layer. This file is written to the disk only if the default fallback policy was executed
* `hetero_subgraphs_<network name>.dot` - annotation of affinities per graph. This file is written to the disk during execution of `ICNNNetwork::LoadNetwork()` for the heterogeneous plugin

#### To Generate the .dot Files

```python
ie = IECore()
ie.set_config(config={'HETERO_DUMP_GRAPH_DOT': 'YES'}, device_name='HETERO')
```

You can use the GraphViz* utility or a file converter to view the images. On the Ubuntu* operating system, you can use xdot:

* `sudo apt-get install xdot`
* `xdot hetero_subgraphs.dot`

You can use performance data (in sample applications, it is the `-pc` option) to get the performance data on each subgraph.

Here is an example of the output for Googlenet v1 running on HDDL with fallback to CPU:

```
subgraph1: 1. input preprocessing (mean data/HDDL):EXECUTED layerType: realTime: 129 cpu: 129 execType:
subgraph1: 2. input transfer to DDR:EXECUTED layerType: realTime: 201 cpu: 0 execType:
subgraph1: 3. HDDL execute time:EXECUTED layerType: realTime: 3808 cpu: 0 execType:
subgraph1: 4. output transfer from DDR:EXECUTED layerType: realTime: 55 cpu: 0 execType:
subgraph1: 5. HDDL output postprocessing:EXECUTED layerType: realTime: 7 cpu: 7 execType:
subgraph1: 6. copy to IE blob:EXECUTED layerType: realTime: 2 cpu: 2 execType:
subgraph2: out_prob: NOT_RUN layerType: Output realTime: 0 cpu: 0 execType: unknown
subgraph2: prob: EXECUTED layerType: SoftMax realTime: 10 cpu: 10 execType: ref
Total time: 4212 microseconds
```

### See Also

[Supported Devices](Supported_Devices.md)

@@ -1,292 +0,0 @@

# Multi-Device Plugin {#openvino_docs_IE_DG_supported_plugins_MULTI}

## Introducing the Multi-Device Plugin (C++)

@sphinxdirective
.. raw:: html

   <div id="switcher-cpp" class="switcher-anchor">C++</div>

@endsphinxdirective

The Multi-Device plugin automatically assigns inference requests to available computational devices to execute the requests in parallel. By contrast, the Heterogeneous plugin can run different layers on different devices but not in parallel. The potential gains with the Multi-Device plugin are:

* Improved throughput from using multiple devices (compared to single-device execution)
* More consistent performance, since the devices share the inference burden (if one device is too busy, another can take more of the load)

Note that with Multi-Device the application logic is left unchanged, so you don't need to explicitly load the network to every device, or create and balance the inference requests yourself. From the application point of view, this is just another device that handles the actual machinery. The only thing required to leverage performance is to provide the multi-device (and hence the underlying devices) with enough inference requests to process. For example, if you were processing 4 cameras on the CPU (with 4 inference requests), it might be desirable to process more cameras (with more requests in flight) to keep the CPU and GPU busy via Multi-Device.

The setup of Multi-Device can be described in three major steps:

1. Configure each device as usual.
2. Load the network to the Multi-Device plugin created on top of a (prioritized) list of the configured devices. This is the only change needed in the application.
3. As with any other ExecutableNetwork call (resulting from `InferenceEngine::Core::LoadNetwork`), you create as many requests as needed to saturate the devices.

These steps are covered below in detail.

### Defining and Configuring the Multi-Device Plugin

Following the OpenVINO™ convention of labeling devices, the Multi-Device plugin uses the name "MULTI". The only configuration option for the Multi-Device plugin is a prioritized list of devices to use:

| Parameter name | Parameter values | Default | Description |
| -------------- | ---------------- | --- | --- |
| "MULTI_DEVICE_PRIORITIES" | comma-separated device names with no spaces | N/A | Prioritized list of devices |

You can set the configuration directly as a string, or use the metric key `MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES` from the `multi/multi_device_config.hpp` file, which defines the same string.

Basically, there are three ways to specify the devices to be used by the "MULTI":

@snippet snippets/MULTI0.cpp part0

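For instance, one of these ways may look like the following sketch (the model path is hypothetical):

```cpp
#include <ie_core.hpp>

// Pass the prioritized device list directly in the device name.
InferenceEngine::Core ie;
auto network = ie.ReadNetwork("model.xml");
auto exec_net = ie.LoadNetwork(network, "MULTI:HDDL,GPU");
```
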
Notice that the priorities of the devices can be changed in real time for the executable network:

@snippet snippets/MULTI1.cpp part1

Finally, there is a way to specify the number of requests that the Multi-Device will internally keep for each device. Suppose your original app was running 4 cameras with 4 inference requests. You would probably want to share these 4 requests between the 2 devices used in MULTI. The easiest way is to specify the number of requests for each device using parentheses: "MULTI:CPU(2),GPU(2)" and use the same 4 requests in your app. However, such an explicit configuration is not performance-portable and hence not recommended. Instead, the better way is to configure the individual devices and query the resulting number of requests to be used at the application level (see [Configuring the Individual Devices and Creating the Multi-Device On Top](#configuring-the-individual-devices-and-creating-the-multi-device-on-top)).

### Enumerating Available Devices

The Inference Engine features a dedicated API to enumerate devices and their capabilities. See the [Hello Query Device C++ Sample](../../../samples/cpp/hello_query_device/README.md). This is example output from the sample (truncated to device names only):

```sh
./hello_query_device
Available devices:
Device: CPU
...
Device: GPU.0
...
Device: GPU.1
...
Device: HDDL
```

A simple programmatic way to enumerate the devices and use them with the multi-device is as follows:

@snippet snippets/MULTI2.cpp part2

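A sketch of such enumeration (assuming `ie` is an `InferenceEngine::Core` and `network` was already read with `ReadNetwork()`):

```cpp
// Build a MULTI device string from every device the runtime reports.
std::string all_devices = "MULTI:";
std::vector<std::string> devices = ie.GetAvailableDevices();
for (size_t i = 0; i < devices.size(); ++i) {
    all_devices += devices[i];
    if (i + 1 < devices.size())
        all_devices += ",";
}
auto exec_net = ie.LoadNetwork(network, all_devices);
```
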
Beyond the trivial "CPU", "GPU", "HDDL" and so on, when multiple instances of a device are available, the names are more qualified. For example, this is how two Intel® Movidius™ Myriad™ X sticks are listed with the hello_query_sample:

```
...
Device: MYRIAD.1.2-ma2480
...
Device: MYRIAD.1.4-ma2480
```

So the explicit configuration to use both would be "MULTI:MYRIAD.1.2-ma2480,MYRIAD.1.4-ma2480". Accordingly, the code that loops over all available devices of the "MYRIAD" type only is below:

@snippet snippets/MULTI3.cpp part3

### Configuring the Individual Devices and Creating the Multi-Device On Top

As discussed in the first section, configure each individual device as usual and then just create the "MULTI" device on top:

@snippet snippets/MULTI4.cpp part4

An alternative is to combine all the individual device settings into a single config file and load that, allowing the Multi-Device plugin to parse and apply the settings to the right devices. See the code example in the next section.

Note that while the performance of accelerators combines really well with Multi-Device, the CPU+GPU execution poses some performance caveats, as these devices share power, bandwidth and other resources. For example, it is recommended to enable the GPU throttling hint (which saves another CPU thread for the CPU inference).
See the [Using the Multi-Device with OpenVINO samples and benchmarking the performance](#using-the-multi-device-with-openvino-samples-and-benchmarking-the-performance) section below.

### Querying the Optimal Number of Inference Requests

You can use the GetMetric API to query the optimal number of requests. Similarly, when using the Multi-Device, you don't need to sum over the included devices yourself; you can query the metric directly:

@snippet snippets/MULTI5.cpp part5

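In short, the query may look like this sketch (assuming `exec_net` is the MULTI executable network):

```cpp
// The MULTI plugin aggregates the optimal request count over its devices.
unsigned int nireq =
    exec_net.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
```
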
### Using the Multi-Device with OpenVINO Samples and Benchmarking the Performance

Every OpenVINO sample that supports the `-d` (which stands for "device") command-line option transparently accepts Multi-Device. The [Benchmark Application](../../../samples/cpp/benchmark_app/README.md) is the best reference for the optimal usage of Multi-Device. As discussed earlier, you do not need to set up the number of requests, CPU streams or threads because the application provides optimal performance out of the box. Below is an example command to evaluate HDDL+GPU performance with that:

```sh
./benchmark_app -d MULTI:HDDL,GPU -m <model> -i <input> -niter 1000
```

The Multi-Device plugin supports FP16 IR files. The CPU plugin automatically upconverts them to FP32, and the other devices support them natively. Note that no demos are (yet) fully optimized for Multi-Device, by means of supporting the OPTIMAL_NUMBER_OF_INFER_REQUESTS metric, using the GPU streams/throttling, and so on.

### Video: MULTI Plugin

@sphinxdirective
.. raw:: html

   <iframe allowfullscreen mozallowfullscreen msallowfullscreen oallowfullscreen webkitallowfullscreen width="560" height="315" src="https://www.youtube.com/embed/xbORYFEmrqU" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>

@endsphinxdirective

### See Also

[Supported Devices](Supported_Devices.md)

## Introducing the Multi-Device Plugin (Python)

@sphinxdirective
.. raw:: html

   <div id="switcher-python" class="switcher-anchor">Python</div>

@endsphinxdirective

The Multi-Device plugin automatically assigns inference requests to available computational devices to execute the requests in parallel. By contrast, the Heterogeneous plugin can run different layers on different devices but not in parallel. The potential gains with the Multi-Device plugin are:

* Improved throughput from using multiple devices (compared to single-device execution)
* More consistent performance, since the devices share the inference burden (if one device is too busy, another can take more of the load)

Note that with Multi-Device the application logic is left unchanged, so you don't need to explicitly load the network to every device, or create and balance the inference requests yourself. From the application point of view, this is just another device that handles the actual machinery. The only thing required to leverage performance is to provide the multi-device (and hence the underlying devices) with enough inference requests to process. For example, if you were processing 4 cameras on the CPU (with 4 inference requests), it might be desirable to process more cameras (with more requests in flight) to keep the CPU and GPU busy via Multi-Device.

The setup of Multi-Device can be described in three major steps:

1. Configure each device as usual (using the conventional [ie_api.IECore.set_config](api/ie_python_api/_autosummary/openvino.inference_engine.IECore.html#openvino.inference_engine.IECore.set_config) method).
2. Load the network to the Multi-Device plugin created on top of a (prioritized) list of the configured devices. This is the only change needed in the application.
3. As with any other ExecutableNetwork call (resulting from `load_network`), you create as many requests as needed to saturate the devices.

These steps are covered below in detail.

### Defining and Configuring the Multi-Device Plugin

Following the OpenVINO™ convention of labeling devices, the Multi-Device plugin uses the name "MULTI". The only configuration option for the Multi-Device plugin is a prioritized list of devices to use:

| Parameter name | Parameter values | Default | Description |
| -------------- | ---------------- | --- | --- |
| "MULTI_DEVICE_PRIORITIES" | comma-separated device names with no spaces | N/A | Prioritized list of devices |

You can set the configuration directly as a string, or use the metric key `MULTI_DEVICE_PRIORITIES` from the `multi/multi_device_config.hpp` file, which defines the same string.

#### The Three Ways to Specify Device Targets for the MULTI plugin

* Option 1 - Pass a Prioritized List as a Parameter in ie.load_network()

```python
from openvino.inference_engine import IECore

ie = IECore()
# Read a network in IR or ONNX format
net = ie.read_network(model=path_to_model)
exec_net = ie.load_network(network=net, device_name="MULTI:CPU,GPU")
```

* Option 2 - Pass a List as a Parameter, and Dynamically Change Priorities during Execution

Notice that the priorities of the devices can be changed in real time for the executable network:

```python
from openvino.inference_engine import IECore

# Init the Inference Engine Core
ie = IECore()

# Read a network in IR or ONNX format
net = ie.read_network(model=path_to_model)

ie.set_config(config={"MULTI_DEVICE_PRIORITIES": "HDDL,GPU"}, device_name="MULTI")

# Change priorities
ie.set_config(config={"MULTI_DEVICE_PRIORITIES": "GPU,HDDL"}, device_name="MULTI")
ie.set_config(config={"MULTI_DEVICE_PRIORITIES": "GPU"}, device_name="MULTI")
ie.set_config(config={"MULTI_DEVICE_PRIORITIES": "HDDL,GPU"}, device_name="MULTI")
ie.set_config(config={"MULTI_DEVICE_PRIORITIES": "CPU,HDDL,GPU"}, device_name="MULTI")
```

* Option 3 - Use Explicit Hints for Controlling Request Numbers Executed by Devices

There is a way to specify the number of requests that Multi-Device will internally keep for each device. If the original app was running 4 cameras with 4 inference requests, it might be best to share these 4 requests between the 2 devices used in the MULTI. The easiest way is to specify the number of requests for each device using parentheses: "MULTI:CPU(2),GPU(2)" and use the same 4 requests in the app. However, such an explicit configuration is not performance-portable and is not recommended. The better way is to configure the individual devices and query the resulting number of requests to be used at the application level. See [Configuring the Individual Devices and Creating the Multi-Device On Top](#configuring-the-individual-devices-and-creating-the-multi-device-on-top).

### Enumerating Available Devices

The Inference Engine features a dedicated API to enumerate devices and their capabilities. See the [Hello Query Device Python Sample](../../../samples/python/hello_query_device/README.md). This is example output from the sample (truncated to device names only):

```sh
./hello_query_device
Available devices:
Device: CPU
...
Device: GPU.0
...
Device: GPU.1
...
Device: HDDL
```

A simple programmatic way to enumerate the devices and use them with the multi-device is as follows:

```python
from openvino.inference_engine import IECore

all_devices = "MULTI:"
ie = IECore()
net = ie.read_network(model=path_to_model)
all_devices += ",".join(ie.available_devices)
exec_net = ie.load_network(network=net, device_name=all_devices)
```

Beyond the trivial "CPU", "GPU", "HDDL" and so on, when multiple instances of a device are available, the names are more qualified. For example, this is how two Intel® Movidius™ Myriad™ X sticks are listed with the hello_query_sample:

```bash
...
Device: MYRIAD.1.2-ma2480
...
Device: MYRIAD.1.4-ma2480
```

So the explicit configuration to use both would be "MULTI:MYRIAD.1.2-ma2480,MYRIAD.1.4-ma2480". Accordingly, the code that loops over all available devices of the "MYRIAD" type only is below:

```python
from openvino.inference_engine import IECore

ie = IECore()
match_list = []
all_devices = "MULTI:"
dev_match_str = "MYRIAD"
net = ie.read_network(model=path_to_model)

for d in ie.available_devices:
    if dev_match_str in d:
        match_list.append(d)

all_devices += ",".join(match_list)
exec_net = ie.load_network(network=net, device_name=all_devices)
```

### Configuring the Individual Devices and Creating the Multi-Device On Top

It is possible to configure each individual device as usual and then create the "MULTI" device on top:

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model=path_to_model)

cpu_config = {}
gpu_config = {}

ie.set_config(config=cpu_config, device_name="CPU")
ie.set_config(config=gpu_config, device_name="GPU")

# Load the network to the multi-device, specifying the priorities
exec_net = ie.load_network(
    network=net, device_name="MULTI", config={"MULTI_DEVICE_PRIORITIES": "CPU,GPU"}
)
# Query the optimal number of requests
nireq = exec_net.get_metric("OPTIMAL_NUMBER_OF_INFER_REQUESTS")
```

An alternative is to combine all the individual device settings into a single config file and load that, allowing the Multi-Device plugin to parse and apply the settings to the right devices. See the code example in the next section.

Note that while the performance of accelerators works well with Multi-Device, the CPU+GPU execution poses some performance caveats, as these devices share power, bandwidth and other resources. For example, it is recommended to enable the GPU throttling hint (which saves another CPU thread for CPU inferencing). See the section below titled Using the Multi-Device with OpenVINO Samples and Benchmarking the Performance.

### Using the Multi-Device with OpenVINO Samples and Benchmarking the Performance

Every OpenVINO sample that supports the `-d` (which stands for "device") command-line option transparently accepts Multi-Device. The [Benchmark application](../../../tools/benchmark_tool/README.md) is the best reference for the optimal usage of Multi-Device. As discussed earlier, you do not need to set up the number of requests, CPU streams or threads because the application provides optimal performance out of the box. Below is an example command to evaluate CPU+GPU performance with the Benchmark application:

```sh
./benchmark_app.py -d MULTI:CPU,GPU -m <model>
```

> **NOTE**: If you installed OpenVINO with pip, use `benchmark_app -d MULTI:CPU,GPU -m <model>`

The Multi-Device plugin supports FP16 IR files. The CPU plugin automatically upconverts them to FP32, and the other devices support them natively. Note that no demos are (yet) fully optimized for Multi-Device, by means of supporting the OPTIMAL_NUMBER_OF_INFER_REQUESTS metric, using the GPU streams/throttling, and so on.

### Video: MULTI Plugin

> **NOTE**: This video is currently available only for C++, but many of the same concepts apply to Python.

@sphinxdirective
.. raw:: html

   <iframe allowfullscreen mozallowfullscreen msallowfullscreen oallowfullscreen webkitallowfullscreen width="560" height="315" src="https://www.youtube.com/embed/xbORYFEmrqU" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>

@endsphinxdirective

### See Also

[Supported Devices](Supported_Devices.md)

@@ -1,7 +1,7 @@

# Asynchronous Inference Request {#openvino_docs_ie_plugin_dg_async_infer_request}

Asynchronous Inference Request runs an inference pipeline asynchronously in one or several task executors depending on a device pipeline structure.
-Inference Engine Plugin API provides the base InferenceEngine::AsyncInferRequestThreadSafeDefault class:
+OpenVINO Runtime Plugin API provides the base InferenceEngine::AsyncInferRequestThreadSafeDefault class:

- The class has the `_pipeline` field of `std::vector<std::pair<ITaskExecutor::Ptr, Task> >`, which contains pairs of an executor and executed task.
- All executors are passed as arguments to a class constructor and they are in the running state and ready to run tasks.

@@ -10,7 +10,7 @@ Inference Engine Plugin API provides the base InferenceEngine::AsyncInferRequest

`AsyncInferRequest` Class
------------------------

-Inference Engine Plugin API provides the base InferenceEngine::AsyncInferRequestThreadSafeDefault class for a custom asynchronous inference request implementation:
+OpenVINO Runtime Plugin API provides the base InferenceEngine::AsyncInferRequestThreadSafeDefault class for a custom asynchronous inference request implementation:

@snippet src/template_async_infer_request.hpp async_infer_request:header

@@ -21,7 +21,7 @@ Once the commands above are executed, the Inference Engine Developer Package is

* `IE::ngraph` - shared nGraph library
* `IE::inference_engine` - shared Inference Engine library
* `IE::inference_engine_transformations` - shared library with Inference Engine ngraph-based Transformations
-* `IE::inference_engine_preproc` - shared library with Inference Engine preprocessing plugin
+* `IE::openvino_gapi_preproc` - shared library with Inference Engine preprocessing plugin
* `IE::inference_engine_plugin_api` - interface library with Inference Engine Plugin API headers
* `IE::inference_engine_lp_transformations` - shared library with low-precision transformations
* `IE::pugixml` - static Pugixml library

@@ -675,7 +675,7 @@ SHOW_NAMESPACES = YES

# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from
# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command command input-file, where command is the value of the
+# popen()) the command input-file, where command is the value of the
# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
# by doxygen. Whatever the program writes to standard output is used as the file
# version. For an example see the documentation.

@@ -37,7 +37,7 @@ The implementation `CompileNetwork` is fully device-specific.

The function accepts a const shared pointer to `ngraph::Function` object and performs the following steps:

-1. Applies ngraph passes using `TransformNetwork` function, which defines plugin-specific conversion pipeline. To support low precision inference, the pipeline can include Low Precision Transformations. These transformations are usually hardware specific. You can find how to use and configure Low Precisions Transformations in [Low Precision Transformations](@ref openvino_docs_IE_DG_lpt) guide.
+1. Applies ngraph passes using `TransformNetwork` function, which defines plugin-specific conversion pipeline. To support low precision inference, the pipeline can include Low Precision Transformations. These transformations are usually hardware specific. You can find how to use and configure Low Precisions Transformations in [Low Precision Transformations](@ref openvino_docs_OV_UG_lpt) guide.
2. Maps the transformed graph to a backend specific graph representation (for example, to MKLDNN graph for Intel CPU).
3. Allocates and fills memory for graph weights, backend specific memory handles and so on.

@@ -54,7 +54,7 @@ Decrements a number of created inference requests:

#### 1. `inferPreprocess`

-Below is the code of the the `inferPreprocess` method to demonstrate Inference Engine common preprocessing step handling:
+Below is the code of the `inferPreprocess` method to demonstrate Inference Engine common preprocessing step handling:

@snippet src/template_infer_request.cpp infer_request:infer_preprocess

@@ -9,11 +9,12 @@

Implement Plugin Functionality <openvino_docs_ie_plugin_dg_plugin>
Implement Executable Network Functionality <openvino_docs_ie_plugin_dg_executable_network>
openvino_docs_ie_plugin_dg_quantized_networks
Implement Synchronous Inference Request <openvino_docs_ie_plugin_dg_infer_request>
Implement Asynchronous Inference Request <openvino_docs_ie_plugin_dg_async_infer_request>
openvino_docs_ie_plugin_dg_plugin_build
openvino_docs_ie_plugin_dg_plugin_testing
openvino_docs_ie_plugin_detailed_guides
openvino_docs_ie_plugin_api_references

@endsphinxdirective

@@ -55,11 +56,11 @@ Detailed guides

* [Build](@ref openvino_docs_ie_plugin_dg_plugin_build) a plugin library using CMake\*
* Plugin and its components [testing](@ref openvino_docs_ie_plugin_dg_plugin_testing)
* [Quantized networks](@ref openvino_docs_ie_plugin_dg_quantized_networks)
-* [Low precision transformations](@ref openvino_docs_IE_DG_lpt) guide
-* [Writing nGraph transformations](@ref ngraph_transformation) guide
+* [Low precision transformations](@ref openvino_docs_OV_UG_lpt) guide
+* [Writing OpenVINO™ transformations](@ref openvino_docs_transformations) guide

API References
-----------------------

-* [Inference Engine Plugin API](groupie_dev_api.html)
-* [Inference Engine Transformation API](groupie_transformation_api.html)
+* [Inference Engine Plugin API](@ref ie_dev_api)
+* [Inference Engine Transformation API](@ref ie_transformation_api)

@@ -30,7 +30,7 @@ Based on that, declaration of a plugin class can look as follows:

The provided plugin class also has several fields:

-* `_backend` - a backend engine that is used to perform actual computations for network inference. For `Template` plugin `ngraph::runtime::Backend` is used which performs computations using ngraph reference implementations.
+* `_backend` - a backend engine that is used to perform actual computations for network inference. For `Template` plugin `ngraph::runtime::Backend` is used which performs computations using OpenVINO™ reference implementations.
* `_waitExecutor` - a task executor that waits for a response from a device about device tasks completion.
* `_cfg` of type `Configuration`:

@@ -67,7 +67,7 @@ which holds a backend-dependent compiled graph in an internal representation:

Before a creation of an `ExecutableNetwork` instance via a constructor, a plugin may check if a provided
InferenceEngine::ICNNNetwork object is supported by a device. In the example above, the plugin checks precision information.

-The very important part before creation of `ExecutableNetwork` instance is to call `TransformNetwork` method which applies ngraph transformation passes.
+The very important part before creation of `ExecutableNetwork` instance is to call `TransformNetwork` method which applies OpenVINO™ transformation passes.

Actual graph compilation is done in the `ExecutableNetwork` constructor. Refer to the [ExecutableNetwork Implementation Guide](@ref openvino_docs_ie_plugin_dg_executable_network) for details.

@@ -77,27 +77,27 @@ Actual graph compilation is done in the `ExecutableNetwork` constructor. Refer t

### `TransformNetwork()`

-The function accepts a const shared pointer to `ngraph::Function` object and performs the following steps:
+The function accepts a const shared pointer to `ov::Model` object and performs the following steps:

1. Deep copies a const object to a local object, which can later be modified.
-2. Applies common and plugin-specific transformations on a copied graph to make the graph more friendly to hardware operations. For details how to write custom plugin-specific transformation, please, refer to [Writing ngraph transformations](@ref ngraph_transformation) guide. See detailed topics about network representation:
+2. Applies common and plugin-specific transformations on a copied graph to make the graph more friendly to hardware operations. For details how to write custom plugin-specific transformation, please, refer to [Writing OpenVINO™ transformations](@ref openvino_docs_transformations) guide. See detailed topics about network representation:
    * [Intermediate Representation and Operation Sets](../_docs_MO_DG_IR_and_opsets.html)
    * [Quantized networks](@ref openvino_docs_ie_plugin_dg_quantized_networks).

@snippet template_plugin/src/template_plugin.cpp plugin:transform_network

-> **NOTE**: After all these transformations, a `ngraph::Function` object contains operations which can be perfectly mapped to backend kernels. E.g. if backend has kernel computing `A + B` operations at once, the `TransformNetwork` function should contain a pass which fuses operations `A` and `B` into a single custom operation `A + B` which fits backend kernels set.
+> **NOTE**: After all these transformations, a `ov::Model` object contains operations which can be perfectly mapped to backend kernels. E.g. if backend has kernel computing `A + B` operations at once, the `TransformNetwork` function should contain a pass which fuses operations `A` and `B` into a single custom operation `A + B` which fits backend kernels set.

### `QueryNetwork()`

Use the method with the `HETERO` mode, which allows to distribute network execution between different
-devices based on the `ngraph::Node::get_rt_info()` map, which can contain the `"affinity"` key.
+devices based on the `ov::Node::get_rt_info()` map, which can contain the `"affinity"` key.
The `QueryNetwork` method analyzes operations of provided `network` and returns a list of supported
-operations via the InferenceEngine::QueryNetworkResult structure. The `QueryNetwork` firstly applies `TransformNetwork` passes to input `ngraph::Function` argument. After this, the transformed network in ideal case contains only operations are 1:1 mapped to kernels in computational backend. In this case, it's very easy to analyze which operations is supposed (`_backend` has a kernel for such operation or extensions for the operation is provided) and not supported (kernel is missed in `_backend`):
+operations via the InferenceEngine::QueryNetworkResult structure. The `QueryNetwork` firstly applies `TransformNetwork` passes to input `ov::Model` argument. After this, the transformed network in ideal case contains only operations are 1:1 mapped to kernels in computational backend. In this case, it's very easy to analyze which operations is supposed (`_backend` has a kernel for such operation or extensions for the operation is provided) and not supported (kernel is missed in `_backend`):

-1. Store original names of all operations in input `ngraph::Function`
+1. Store original names of all operations in input `ov::Model`
2. Apply `TransformNetwork` passes. Note, the names of operations in a transformed network can be different and we need to restore the mapping in the steps below.
-3. Construct `supported` and `unsupported` maps which contains names of original operations. Note, that since the inference is performed using ngraph reference backend, the decision whether the operation is supported or not depends on whether the latest OpenVINO opset contains such operation.
+3. Construct `supported` and `unsupported` maps which contains names of original operations. Note, that since the inference is performed using OpenVINO™ reference backend, the decision whether the operation is supported or not depends on whether the latest OpenVINO opset contains such operation.
4. `QueryNetworkResult.supportedLayersMap` contains only operations which are fully supported by `_backend`.

@snippet template_plugin/src/template_plugin.cpp plugin:query_network

@@ -26,7 +26,7 @@ Engine concepts: plugin creation, multiple executable networks support, multiple

@snippet single_layer_tests/convolution.cpp test_convolution:instantiate

3. **Sub-graph tests** (`subgraph_tests` sub-folder). This group of tests is designed to tests small patterns or combination of layers. E.g. when a particular topology is being enabled in a plugin e.g. TF ResNet-50, there is no need to add the whole topology to test tests. In opposite way, a particular repetitive subgraph or pattern can be extracted from `ResNet-50` and added to the tests. The instantiation of the sub-graph tests is done in the same way as for single layer tests.
-> **Note**, such sub-graphs or patterns for sub-graph tests should be added to `IE::ngraphFunctions` library first (this library is a pre-defined set of small `ngraph::Function`) and re-used in sub-graph tests after.
+> **Note**, such sub-graphs or patterns for sub-graph tests should be added to `IE::ngraphFunctions` library first (this library is a pre-defined set of small `ov::Model`) and re-used in sub-graph tests after.

4. **HETERO tests** (`subgraph_tests` sub-folder) contains tests for `HETERO` scenario (manual or automatic affinities settings, tests for `QueryNetwork`).

@@ -41,18 +41,14 @@ To use these tests for your own plugin development, link the `IE::funcSharedTest

To build test binaries together with other build artifacts, use the `make all` command. For details, see
[Build Plugin Using CMake*](@ref openvino_docs_ie_plugin_dg_plugin_build).

-### Tests for plugin-specific ngraph transformations
-
-Please, refer to [Transformation testing](@ref ngraph_transformation) guide.
-
### How to Extend Inference Engine Plugin Tests

Inference Engine Plugin tests are open for contribution.
Add common test case definitions applicable for all plugins to the `IE::funcSharedTests` target within the DLDT repository. Then, any other plugin supporting corresponding functionality can instantiate the new test.

-All Inference Engine per-layer tests check test layers functionality. They are developed using nGraph functions
+All Inference Engine per-layer tests check test layers functionality. They are developed using ov::Model
as input graphs used by tests. In this case, to test a new layer with layer tests, extend
-the `IE::ngraphFunctions` library, which is also included in the Inference Engine Developer package, with a new nGraph function
+the `IE::ngraphFunctions` library, which is also included in the Inference Engine Developer package, with a new model
including the corresponding operation.

> **NOTE**: When implementing a new subgraph test, add new single-layer tests for each operation of the subgraph if such test does not exist.

@@ -9,7 +9,7 @@ For more details about low-precision model representation please refer to this [

During the model load each plugin can interpret quantization rules expressed in *FakeQuantize* operations:
- Independently based on the definition of *FakeQuantize* operation.
- Using a special library of low-precision transformations (LPT) which applies common rules for generic operations,
-such as Convolution, Fully-Connected, Eltwise, etc., and translates "fake-quantized" models into the models with low-precision operations. For more information about low-precision flow please refer to the following [document](@ref openvino_docs_IE_DG_Int8Inference).
+such as Convolution, Fully-Connected, Eltwise, etc., and translates "fake-quantized" models into models with low-precision operations.

Here we provide only a high-level overview of the interpretation rules of FakeQuantize.
At runtime each FakeQuantize can be split into two independent operations: **Quantize** and **Dequantize**.

docs/IE_PLUGIN_DG/detailed_guides.md (Normal file, 18 lines)
@@ -0,0 +1,18 @@

# Advanced Topics {#openvino_docs_ie_plugin_detailed_guides}

@sphinxdirective

.. toctree::
   :maxdepth: 1
   :hidden:

   openvino_docs_ie_plugin_dg_quantized_networks
   openvino_docs_OV_UG_lpt

@endsphinxdirective

The guides below provide extra information about specific features of OpenVINO needed for understanding during OpenVINO plugin development:

* [Quantized networks](@ref openvino_docs_ie_plugin_dg_quantized_networks)
* [Low precision transformations](@ref openvino_docs_OV_UG_lpt) guide
* [Writing OpenVINO™ transformations](@ref openvino_docs_transformations) guide

docs/IE_PLUGIN_DG/dev_api_references.md (Normal file, 17 lines)
@@ -0,0 +1,17 @@

# Plugin API Reference {#openvino_docs_ie_plugin_api_references}

@sphinxdirective

.. toctree::
   :maxdepth: 1
   :hidden:

   ../groupie_dev_api
   ../groupie_transformation_api

@endsphinxdirective

The guides below provide extra API references needed for OpenVINO plugin development:

* [OpenVINO Plugin API](@ref ie_dev_api)
* [OpenVINO Transformation API](@ref ie_transformation_api)

@@ -5,74 +5,74 @@
 <tab type="usergroup" url="index.html" title="Developer Guide for Inference Engine Plugin Library">
 <tab type="user" url="@ref plugin" visibile="yes" title="Implement Plugin Functionality"/>
 <tab type="user" url="@ref executable_network" visibile="yes" title="Implement Executable Network Functionality">
-<tab type="usergroup" title="Low Precision Transformations" url="@ref openvino_docs_IE_DG_lpt">
-<tab type="user" title="Attributes" url="@ref openvino_docs_IE_DG_lpt_attributes">
-<tab type="user" title="AvgPoolPrecisionPreserved" url="@ref openvino_docs_IE_DG_lpt_AvgPoolPrecisionPreserved"/>
-<tab type="user" title="IntervalsAlignment" url="@ref openvino_docs_IE_DG_lpt_IntervalsAlignment"/>
-<tab type="user" title="PerTensorQuantization" url="@ref openvino_docs_IE_DG_lpt_PerTensorQuantization"/>
-<tab type="user" title="PrecisionPreserved" url="@ref openvino_docs_IE_DG_lpt_PrecisionPreserved"/>
-<tab type="user" title="Precisions" url="@ref openvino_docs_IE_DG_lpt_Precisions"/>
-<tab type="user" title="QuantizationAlignment" url="@ref openvino_docs_IE_DG_lpt_QuantizationAlignment"/>
+<tab type="usergroup" title="Low Precision Transformations" url="@ref openvino_docs_OV_UG_lpt">
+<tab type="user" title="Attributes" url="@ref openvino_docs_OV_UG_lpt_attributes">
+<tab type="user" title="AvgPoolPrecisionPreserved" url="@ref openvino_docs_OV_UG_lpt_AvgPoolPrecisionPreserved"/>
+<tab type="user" title="IntervalsAlignment" url="@ref openvino_docs_OV_UG_lpt_IntervalsAlignment"/>
+<tab type="user" title="PerTensorQuantization" url="@ref openvino_docs_OV_UG_lpt_PerTensorQuantization"/>
+<tab type="user" title="PrecisionPreserved" url="@ref openvino_docs_OV_UG_lpt_PrecisionPreserved"/>
+<tab type="user" title="Precisions" url="@ref openvino_docs_OV_UG_lpt_Precisions"/>
+<tab type="user" title="QuantizationAlignment" url="@ref openvino_docs_OV_UG_lpt_QuantizationAlignment"/>
 </tab>
-<tab type="user" title="Step 1. Prerequisites transformations" url="@ref openvino_docs_IE_DG_lpt_step1_prerequisites">
-<tab type="user" title="LinOpSequenceFusion" url="@ref openvino_docs_IE_DG_lpt_LinOpSequenceFusion"/>
-<tab type="user" title="PullReshapeThroughDequantization" url="@ref openvino_docs_IE_DG_lpt_PullReshapeThroughDequantization"/>
-<tab type="user" title="PullTransposeThroughDequantization" url="@ref openvino_docs_IE_DG_lpt_PullTransposeThroughDequantization"/>
+<tab type="user" title="Step 1. Prerequisites transformations" url="@ref openvino_docs_OV_UG_lpt_step1_prerequisites">
+<tab type="user" title="LinOpSequenceFusion" url="@ref openvino_docs_OV_UG_lpt_LinOpSequenceFusion"/>
+<tab type="user" title="PullReshapeThroughDequantization" url="@ref openvino_docs_OV_UG_lpt_PullReshapeThroughDequantization"/>
+<tab type="user" title="PullTransposeThroughDequantization" url="@ref openvino_docs_OV_UG_lpt_PullTransposeThroughDequantization"/>
 </tab>
-<tab type="user" title="Step 2. Markup transformations" url="@ref openvino_docs_IE_DG_lpt_step2_markup">
-<tab type="user" title="AlignQuantizationIntervals" url="@ref openvino_docs_IE_DG_lpt_AlignQuantizationIntervals"/>
-<tab type="user" title="AlignQuantizationParameters" url="@ref openvino_docs_IE_DG_lpt_AlignQuantizationParameters"/>
-<tab type="user" title="CreateAttribute" url="@ref openvino_docs_IE_DG_lpt_CreateAttribute"/>
-<tab type="user" title="CreatePrecisionsDependentAttribute" url="@ref openvino_docs_IE_DG_lpt_CreatePrecisionsDependentAttribute"/>
-<tab type="user" title="MarkupAvgPoolPrecisionPreserved" url="@ref openvino_docs_IE_DG_lpt_MarkupAvgPoolPrecisionPreserved"/>
-<tab type="user" title="MarkupCanBeQuantized" url="@ref openvino_docs_IE_DG_lpt_MarkupCanBeQuantized"/>
-<tab type="user" title="MarkupPerTensorQuantization" url="@ref openvino_docs_IE_DG_lpt_MarkupPerTensorQuantization"/>
-<tab type="user" title="MarkupPrecisions" url="@ref openvino_docs_IE_DG_lpt_MarkupPrecisions"/>
-<tab type="user" title="PropagatePrecisions" url="@ref openvino_docs_IE_DG_lpt_PropagatePrecisions"/>
-<tab type="user" title="PropagateThroughPrecisionPreserved" url="@ref openvino_docs_IE_DG_lpt_PropagateThroughPrecisionPreserved"/>
-<tab type="user" title="PropagateToInput" url="@ref openvino_docs_IE_DG_lpt_PropagateToInput"/>
-<tab type="user" title="UpdateSharedPrecisionPreserved" url="@ref openvino_docs_IE_DG_lpt_UpdateSharedPrecisionPreserved"/>
+<tab type="user" title="Step 2. Markup transformations" url="@ref openvino_docs_OV_UG_lpt_step2_markup">
+<tab type="user" title="AlignQuantizationIntervals" url="@ref openvino_docs_OV_UG_lpt_AlignQuantizationIntervals"/>
+<tab type="user" title="AlignQuantizationParameters" url="@ref openvino_docs_OV_UG_lpt_AlignQuantizationParameters"/>
+<tab type="user" title="CreateAttribute" url="@ref openvino_docs_OV_UG_lpt_CreateAttribute"/>
+<tab type="user" title="CreatePrecisionsDependentAttribute" url="@ref openvino_docs_OV_UG_lpt_CreatePrecisionsDependentAttribute"/>
+<tab type="user" title="MarkupAvgPoolPrecisionPreserved" url="@ref openvino_docs_OV_UG_lpt_MarkupAvgPoolPrecisionPreserved"/>
+<tab type="user" title="MarkupCanBeQuantized" url="@ref openvino_docs_OV_UG_lpt_MarkupCanBeQuantized"/>
+<tab type="user" title="MarkupPerTensorQuantization" url="@ref openvino_docs_OV_UG_lpt_MarkupPerTensorQuantization"/>
+<tab type="user" title="MarkupPrecisions" url="@ref openvino_docs_OV_UG_lpt_MarkupPrecisions"/>
+<tab type="user" title="PropagatePrecisions" url="@ref openvino_docs_OV_UG_lpt_PropagatePrecisions"/>
+<tab type="user" title="PropagateThroughPrecisionPreserved" url="@ref openvino_docs_OV_UG_lpt_PropagateThroughPrecisionPreserved"/>
+<tab type="user" title="PropagateToInput" url="@ref openvino_docs_OV_UG_lpt_PropagateToInput"/>
+<tab type="user" title="UpdateSharedPrecisionPreserved" url="@ref openvino_docs_OV_UG_lpt_UpdateSharedPrecisionPreserved"/>
 </tab>
-<tab type="user" title="Step 3. Main transformations" url="@ref openvino_docs_IE_DG_lpt_step3_main">
-<tab type="user" title="AddTransformation" url="@ref openvino_docs_IE_DG_lpt_AddTransformation"/>
-<tab type="user" title="AvgPoolTransformation" url="@ref openvino_docs_IE_DG_lpt_AvgPoolTransformation"/>
-<tab type="user" title="ClampTransformation" url="@ref openvino_docs_IE_DG_lpt_ClampTransformation"/>
-<tab type="user" title="ConcatTransformation" url="@ref openvino_docs_IE_DG_lpt_ConcatTransformation"/>
-<tab type="user" title="ConvolutionTransformation" url="@ref openvino_docs_IE_DG_lpt_ConvolutionTransformation"/>
-<tab type="user" title="ConvolutionBackpropDataTransformation" url="@ref openvino_docs_IE_DG_lpt_ConvolutionBackpropDataTransformation"/>
-<tab type="user" title="DepthToSpaceTransformation" url="@ref openvino_docs_IE_DG_lpt_DepthToSpaceTransformation"/>
-<tab type="user" title="FakeQuantizeDecompositionTransformation" url="@ref openvino_docs_IE_DG_lpt_FakeQuantizeDecompositionTransformation"/>
-<tab type="user" title="FakeQuantizeTransformation" url="@ref openvino_docs_IE_DG_lpt_FakeQuantizeTransformation"/>
-<tab type="user" title="InterpolateTransformation" url="@ref openvino_docs_IE_DG_lpt_InterpolateTransformation"/>
-<tab type="user" title="GroupConvolutionTransformation" url="@ref openvino_docs_IE_DG_lpt_GroupConvolutionTransformation"/>
-<tab type="user" title="MatMulTransformation" url="@ref openvino_docs_IE_DG_lpt_MatMulTransformation"/>
-<tab type="user" title="MaxPoolTransformation" url="@ref openvino_docs_IE_DG_lpt_MaxPoolTransformation"/>
-<tab type="user" title="MultiplyTransformation" url="@ref openvino_docs_IE_DG_lpt_MultiplyTransformation"/>
-<tab type="user" title="MVNTransformation" url="@ref openvino_docs_IE_DG_lpt_MVNTransformation"/>
-<tab type="user" title="NormalizeL2Transformation" url="@ref openvino_docs_IE_DG_lpt_NormalizeL2Transformation"/>
-<tab type="user" title="PadTransformation" url="@ref openvino_docs_IE_DG_lpt_PadTransformation"/>
-<tab type="user" title="PReluTransformation" url="@ref openvino_docs_IE_DG_lpt_PReluTransformation"/>
-<tab type="user" title="ReduceMaxTransformation" url="@ref openvino_docs_IE_DG_lpt_ReduceMaxTransformation"/>
-<tab type="user" title="ReduceMeanTransformation" url="@ref openvino_docs_IE_DG_lpt_ReduceMeanTransformation"/>
-<tab type="user" title="ReduceMinTransformation" url="@ref openvino_docs_IE_DG_lpt_ReduceMinTransformation"/>
-<tab type="user" title="ReduceSumTransformation" url="@ref openvino_docs_IE_DG_lpt_ReduceSumTransformation"/>
-<tab type="user" title="ReluTransformation" url="@ref openvino_docs_IE_DG_lpt_ReluTransformation"/>
-<tab type="user" title="ReshapeTransformation" url="@ref openvino_docs_IE_DG_lpt_ReshapeTransformation"/>
-<tab type="user" title="SqueezeTransformation" url="@ref openvino_docs_IE_DG_lpt_SqueezeTransformation"/>
-<tab type="user" title="ShuffleChannelsTransformation" url="@ref openvino_docs_IE_DG_lpt_ShuffleChannelsTransformation"/>
-<tab type="user" title="SplitTransformation" url="@ref openvino_docs_IE_DG_lpt_SplitTransformation"/>
-<tab type="user" title="StridedSliceTransformation" url="@ref openvino_docs_IE_DG_lpt_StridedSliceTransformation"/>
-<tab type="user" title="TransposeTransformation" url="@ref openvino_docs_IE_DG_lpt_TransposeTransformation"/>
-<tab type="user" title="UnsqueezeTransformation" url="@ref openvino_docs_IE_DG_lpt_UnsqueezeTransformation"/>
-<tab type="user" title="VariadicSplitTransformation" url="@ref openvino_docs_IE_DG_lpt_VariadicSplitTransformation"/>
+<tab type="user" title="Step 3. Main transformations" url="@ref openvino_docs_OV_UG_lpt_step3_main">
+<tab type="user" title="AddTransformation" url="@ref openvino_docs_OV_UG_lpt_AddTransformation"/>
+<tab type="user" title="AvgPoolTransformation" url="@ref openvino_docs_OV_UG_lpt_AvgPoolTransformation"/>
+<tab type="user" title="ClampTransformation" url="@ref openvino_docs_OV_UG_lpt_ClampTransformation"/>
+<tab type="user" title="ConcatTransformation" url="@ref openvino_docs_OV_UG_lpt_ConcatTransformation"/>
+<tab type="user" title="ConvolutionTransformation" url="@ref openvino_docs_OV_UG_lpt_ConvolutionTransformation"/>
+<tab type="user" title="ConvolutionBackpropDataTransformation" url="@ref openvino_docs_OV_UG_lpt_ConvolutionBackpropDataTransformation"/>
+<tab type="user" title="DepthToSpaceTransformation" url="@ref openvino_docs_OV_UG_lpt_DepthToSpaceTransformation"/>
+<tab type="user" title="FakeQuantizeDecompositionTransformation" url="@ref openvino_docs_OV_UG_lpt_FakeQuantizeDecompositionTransformation"/>
+<tab type="user" title="FakeQuantizeTransformation" url="@ref openvino_docs_OV_UG_lpt_FakeQuantizeTransformation"/>
+<tab type="user" title="InterpolateTransformation" url="@ref openvino_docs_OV_UG_lpt_InterpolateTransformation"/>
+<tab type="user" title="GroupConvolutionTransformation" url="@ref openvino_docs_OV_UG_lpt_GroupConvolutionTransformation"/>
+<tab type="user" title="MatMulTransformation" url="@ref openvino_docs_OV_UG_lpt_MatMulTransformation"/>
+<tab type="user" title="MaxPoolTransformation" url="@ref openvino_docs_OV_UG_lpt_MaxPoolTransformation"/>
+<tab type="user" title="MultiplyTransformation" url="@ref openvino_docs_OV_UG_lpt_MultiplyTransformation"/>
+<tab type="user" title="MVNTransformation" url="@ref openvino_docs_OV_UG_lpt_MVNTransformation"/>
+<tab type="user" title="NormalizeL2Transformation" url="@ref openvino_docs_OV_UG_lpt_NormalizeL2Transformation"/>
+<tab type="user" title="PadTransformation" url="@ref openvino_docs_OV_UG_lpt_PadTransformation"/>
+<tab type="user" title="PReluTransformation" url="@ref openvino_docs_OV_UG_lpt_PReluTransformation"/>
+<tab type="user" title="ReduceMaxTransformation" url="@ref openvino_docs_OV_UG_lpt_ReduceMaxTransformation"/>
+<tab type="user" title="ReduceMeanTransformation" url="@ref openvino_docs_OV_UG_lpt_ReduceMeanTransformation"/>
+<tab type="user" title="ReduceMinTransformation" url="@ref openvino_docs_OV_UG_lpt_ReduceMinTransformation"/>
+<tab type="user" title="ReduceSumTransformation" url="@ref openvino_docs_OV_UG_lpt_ReduceSumTransformation"/>
+<tab type="user" title="ReluTransformation" url="@ref openvino_docs_OV_UG_lpt_ReluTransformation"/>
+<tab type="user" title="ReshapeTransformation" url="@ref openvino_docs_OV_UG_lpt_ReshapeTransformation"/>
+<tab type="user" title="SqueezeTransformation" url="@ref openvino_docs_OV_UG_lpt_SqueezeTransformation"/>
+<tab type="user" title="ShuffleChannelsTransformation" url="@ref openvino_docs_OV_UG_lpt_ShuffleChannelsTransformation"/>
+<tab type="user" title="SplitTransformation" url="@ref openvino_docs_OV_UG_lpt_SplitTransformation"/>
+<tab type="user" title="StridedSliceTransformation" url="@ref openvino_docs_OV_UG_lpt_StridedSliceTransformation"/>
+<tab type="user" title="TransposeTransformation" url="@ref openvino_docs_OV_UG_lpt_TransposeTransformation"/>
+<tab type="user" title="UnsqueezeTransformation" url="@ref openvino_docs_OV_UG_lpt_UnsqueezeTransformation"/>
+<tab type="user" title="VariadicSplitTransformation" url="@ref openvino_docs_OV_UG_lpt_VariadicSplitTransformation"/>
 </tab>
-<tab type="user" title="Step 4. Cleanup transformations" url="@ref openvino_docs_IE_DG_lpt_step4_cleanup">
-<tab type="user" title="FoldConvertTransformation" url="@ref openvino_docs_IE_DG_lpt_FoldConvertTransformation"/>
-<tab type="user" title="FoldFakeQuantizeTransformation" url="@ref openvino_docs_IE_DG_lpt_FoldFakeQuantizeTransformation"/>
-<tab type="user" title="FuseConvertTransformation" url="@ref openvino_docs_IE_DG_lpt_FuseConvertTransformation"/>
-<tab type="user" title="FuseMultiplyToFakeQuantizeTransformation" url="@ref openvino_docs_IE_DG_lpt_FuseMultiplyToFakeQuantizeTransformation"/>
-<tab type="user" title="FuseSubtractToFakeQuantizeTransformation" url="@ref openvino_docs_IE_DG_lpt_FuseSubtractToFakeQuantizeTransformation"/>
-<tab type="user" title="MultiplyToGroupConvolutionTransformation" url="@ref openvino_docs_IE_DG_lpt_MultiplyToGroupConvolutionTransformation"/>
+<tab type="user" title="Step 4. Cleanup transformations" url="@ref openvino_docs_OV_UG_lpt_step4_cleanup">
+<tab type="user" title="FoldConvertTransformation" url="@ref openvino_docs_OV_UG_lpt_FoldConvertTransformation"/>
+<tab type="user" title="FoldFakeQuantizeTransformation" url="@ref openvino_docs_OV_UG_lpt_FoldFakeQuantizeTransformation"/>
+<tab type="user" title="FuseConvertTransformation" url="@ref openvino_docs_OV_UG_lpt_FuseConvertTransformation"/>
+<tab type="user" title="FuseMultiplyToFakeQuantizeTransformation" url="@ref openvino_docs_OV_UG_lpt_FuseMultiplyToFakeQuantizeTransformation"/>
+<tab type="user" title="FuseSubtractToFakeQuantizeTransformation" url="@ref openvino_docs_OV_UG_lpt_FuseSubtractToFakeQuantizeTransformation"/>
+<tab type="user" title="MultiplyToGroupConvolutionTransformation" url="@ref openvino_docs_OV_UG_lpt_MultiplyToGroupConvolutionTransformation"/>
 </tab>
 </tab>
 </tab>
Some files were not shown because too many files have changed in this diff.