diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml
index 2b9dda46708..40f07b58922 100644
--- a/.ci/azure/linux.yml
+++ b/.ci/azure/linux.yml
@@ -117,7 +117,6 @@ jobs:
-DPYTHON_EXECUTABLE=/usr/bin/python3.8
-DENABLE_WHEEL=ON
-DENABLE_TESTS=ON
- -DNGRAPH_ONNX_IMPORT_ENABLE=ON
-DNGRAPH_ONNX_FRONTEND_ENABLE=ON
-DENABLE_FASTER_BUILD=ON
-DENABLE_STRICT_DEPENDENCIES=OFF
diff --git a/.ci/azure/linux_onnxruntime.yml b/.ci/azure/linux_onnxruntime.yml
index 0229c37c0b0..932fb76d5cc 100644
--- a/.ci/azure/linux_onnxruntime.yml
+++ b/.ci/azure/linux_onnxruntime.yml
@@ -94,7 +94,6 @@ jobs:
-DENABLE_PROFILING_ITT=OFF
-DENABLE_SAMPLES=OFF
-DENABLE_SPEECH_DEMO=OFF
- -DNGRAPH_ONNX_IMPORT_ENABLE=ON
-DNGRAPH_ONNX_FRONTEND_ENABLE=ON
-DNGRAPH_DEBUG_ENABLE=OFF
$(REPO_DIR)
diff --git a/.ci/openvino-onnx/Dockerfile b/.ci/openvino-onnx/Dockerfile
index 31559822562..ca2cbd8afbe 100644
--- a/.ci/openvino-onnx/Dockerfile
+++ b/.ci/openvino-onnx/Dockerfile
@@ -68,7 +68,6 @@ RUN cmake .. \
-DENABLE_SPEECH_DEMO=OFF \
-DENABLE_PYTHON=ON \
-DPYTHON_EXECUTABLE=/usr/bin/python3 \
- -DNGRAPH_ONNX_IMPORT_ENABLE=ON \
-DNGRAPH_ONNX_FRONTEND_ENABLE=ON \
-DNGRAPH_DEBUG_ENABLE=OFF \
-DCMAKE_INSTALL_PREFIX=/openvino/dist \
diff --git a/.github/workflows/code_style.yml b/.github/workflows/code_style.yml
index 29ec8446c48..8f30c986361 100644
--- a/.github/workflows/code_style.yml
+++ b/.github/workflows/code_style.yml
@@ -34,7 +34,7 @@ jobs:
- name: Create code style diff
if: failure()
run: |
- cmake --build . --target clang_format_fix_all
+ cmake --build . --target clang_format_fix_all -j8
git diff > code_style_diff.diff
working-directory: build
@@ -42,7 +42,7 @@ jobs:
if: failure()
with:
name: code_style_diff
- path: code_style_diff.diff
+ path: build/code_style_diff.diff
ShellCheck:
runs-on: ubuntu-18.04
diff --git a/.gitmodules b/.gitmodules
index 0b76a4b239e..095f3968264 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -18,12 +18,12 @@
path = thirdparty/ade
url = https://github.com/opencv/ade.git
ignore = dirty
-[submodule "thirdparty/gflags"]
- path = thirdparty/gflags
+[submodule "thirdparty/gflags/gflags"]
+ path = thirdparty/gflags/gflags
url = https://github.com/gflags/gflags.git
ignore = dirty
-[submodule "thirdparty/gtest"]
- path = thirdparty/gtest
+[submodule "thirdparty/gtest/gtest"]
+ path = thirdparty/gtest/gtest
url = https://github.com/openvinotoolkit/googletest.git
ignore = dirty
[submodule "thirdparty/ocl/icd_loader"]
diff --git a/cmake/coverage.cmake b/cmake/coverage.cmake
index 4d8976e0a80..137acbb8993 100644
--- a/cmake/coverage.cmake
+++ b/cmake/coverage.cmake
@@ -90,17 +90,10 @@ ie_coverage_extract(INPUT "openvino" OUTPUT "ngraph"
ie_coverage_genhtml(INFO_FILE "ngraph"
PREFIX "${OV_COVERAGE_BASE_DIRECTORY}")
-if(NGRAPH_ONNX_IMPORT_ENABLE)
- ie_coverage_extract(INPUT "openvino" OUTPUT "onnx_importer"
- PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx/onnx_common*"
- "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx/onnx_import*")
- ie_coverage_genhtml(INFO_FILE "onnx_importer"
- PREFIX "${OV_COVERAGE_BASE_DIRECTORY}")
-endif()
-
if(NGRAPH_ONNX_FRONTEND_ENABLE)
- ie_coverage_extract(INPUT "openvino" OUTPUT "onnx_ngraph_frontend"
- PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx/frontend*")
- ie_coverage_genhtml(INFO_FILE "onnx_ngraph_frontend"
+ ie_coverage_extract(INPUT "openvino" OUTPUT "onnx"
+                        PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx/*")
+ ie_coverage_genhtml(INFO_FILE "onnx"
PREFIX "${OV_COVERAGE_BASE_DIRECTORY}")
endif()
diff --git a/cmake/features.cmake b/cmake/features.cmake
index 26bf48f3824..3df1af5ef91 100644
--- a/cmake/features.cmake
+++ b/cmake/features.cmake
@@ -122,13 +122,12 @@ else()
set(protoc_available ON)
endif()
-ie_dependent_option(NGRAPH_ONNX_IMPORT_ENABLE "Enable ONNX importer" ON "protoc_available" OFF)
-ie_dependent_option(NGRAPH_ONNX_FRONTEND_ENABLE "Enable ONNX FrontEnd" OFF "NGRAPH_ONNX_IMPORT_ENABLE" OFF)
+ie_dependent_option(NGRAPH_ONNX_FRONTEND_ENABLE "Enable ONNX FrontEnd" ON "protoc_available" OFF)
ie_dependent_option(NGRAPH_PDPD_FRONTEND_ENABLE "Enable PaddlePaddle FrontEnd" ON "protoc_available" OFF)
ie_dependent_option(NGRAPH_USE_PROTOBUF_LITE "Compiles and links with protobuf-lite" ON
- "NGRAPH_ONNX_IMPORT_ENABLE" OFF)
+ "NGRAPH_ONNX_FRONTEND_ENABLE" OFF)
ie_dependent_option(NGRAPH_USE_SYSTEM_PROTOBUF "Use system protobuf" OFF
- "NGRAPH_ONNX_IMPORT_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE" OFF)
+ "NGRAPH_ONNX_FRONTEND_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE" OFF)
ie_dependent_option(NGRAPH_UNIT_TEST_ENABLE "Enables ngraph unit tests" ON "ENABLE_TESTS;NOT ANDROID" OFF)
ie_dependent_option(NGRAPH_UNIT_TEST_BACKENDS_ENABLE "Control the building of unit tests using backends" ON
"NGRAPH_UNIT_TEST_ENABLE" OFF)
diff --git a/cmake/templates/ngraphConfig.cmake.in b/cmake/templates/ngraphConfig.cmake.in
index e9c943f37d1..b945d0148aa 100644
--- a/cmake/templates/ngraphConfig.cmake.in
+++ b/cmake/templates/ngraphConfig.cmake.in
@@ -28,9 +28,8 @@
#
# ngraph::frontend_manager - nGraph frontend manager
#
-# ngraph_onnx_importer_FOUND - True if the system has onnx_importer library
-# ngraph::onnx_importer - ONNX importer target (optional)
-# ONNX_IMPORTER_LIBRARIES - ONNX importer libraries
+# ngraph_onnx_ngraph_frontend_FOUND - True if the system has onnx_ngraph_frontend library
+# ngraph::onnx_ngraph_frontend - ONNX FrontEnd target (optional)
#
# ngraph_paddlepaddle_frontend_FOUND - True if the system has PDPD frontend
# ngraph::paddlepaddle_ngraph_frontend - nGraph PDPD frontend (optional)
@@ -38,18 +37,30 @@
@PACKAGE_INIT@
+function(set_imported_global target)
+ get_target_property(IS_IMPORTED_GLOBAL ${target} IMPORTED_GLOBAL)
+ if (NOT IS_IMPORTED_GLOBAL)
+ set_target_properties(${target} PROPERTIES IMPORTED_GLOBAL TRUE)
+ endif()
+endfunction()
+
if(NOT TARGET ngraph)
include("${CMAKE_CURRENT_LIST_DIR}/ngraphTargets.cmake")
+ set_imported_global(ngraph::ngraph)
+ set_imported_global(ngraph::frontend_manager)
endif()
set(ngraph_ngraph_FOUND ON)
set(NGRAPH_LIBRARIES ngraph::ngraph)
-set(ngraph_onnx_importer_FOUND @NGRAPH_ONNX_IMPORT_ENABLE@)
-if(ngraph_onnx_importer_FOUND)
- set(ONNX_IMPORTER_LIBRARIES ngraph::onnx_importer)
+set(ngraph_onnx_ngraph_frontend_FOUND @NGRAPH_ONNX_FRONTEND_ENABLE@)
+if (ngraph_onnx_ngraph_frontend_FOUND AND NOT TARGET onnx_ngraph_frontend AND NOT TARGET ngraph::onnx_importer)
+ set_imported_global(ngraph::onnx_ngraph_frontend)
+ add_library(ngraph::onnx_importer ALIAS ngraph::onnx_ngraph_frontend)
+endif()
+set(ngraph_paddlepaddle_frontend_FOUND @NGRAPH_PDPD_FRONTEND_ENABLE@)
+if(ngraph_paddlepaddle_frontend_FOUND AND NOT TARGET paddlepaddle_ngraph_frontend)
+ set_imported_global(ngraph::paddlepaddle_ngraph_frontend)
endif()
-set(ngraph_paddlepaddle_frontend_FOUND @NGRAPH_PDPD_FRONTEND_ENABLE@)
-
check_required_components(ngraph)
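For downstream projects, the renamed component is consumed through `find_package`; a minimal consumer sketch (project and source names are hypothetical):

```cmake
cmake_minimum_required(VERSION 3.13)
project(onnx_frontend_consumer)

# onnx_ngraph_frontend replaces the old onnx_importer component; the ALIAS
# created above keeps ngraph::onnx_importer working for older consumers.
find_package(ngraph REQUIRED COMPONENTS onnx_ngraph_frontend)

add_executable(consumer main.cpp)
target_link_libraries(consumer PRIVATE ngraph::ngraph ngraph::onnx_ngraph_frontend)
```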
diff --git a/cmake/test_model_zoo.cmake b/cmake/test_model_zoo.cmake
index c3f158626cd..580cab35ec4 100644
--- a/cmake/test_model_zoo.cmake
+++ b/cmake/test_model_zoo.cmake
@@ -17,7 +17,7 @@ function(ov_model_convert SRC DST OUT)
get_filename_component(name_we "${in_file}" NAME_WE)
set(model_source_dir "${SRC}/${rel_dir}")
- if(NOT NGRAPH_ONNX_IMPORT_ENABLE AND ext MATCHES "^\\.(onnx|prototxt)$")
+ if(NOT NGRAPH_ONNX_FRONTEND_ENABLE AND ext MATCHES "^\\.(onnx|prototxt)$")
# don't copy / process ONNX / prototxt files
continue()
endif()
@@ -78,7 +78,7 @@ ov_model_convert("${OpenVINO_SOURCE_DIR}/${rel_path}"
ie_onnx_import_out_files)
if(ENABLE_TESTS)
- if(NGRAPH_ONNX_IMPORT_ENABLE AND ENABLE_REQUIREMENTS_INSTALL)
+ if(NGRAPH_ONNX_FRONTEND_ENABLE AND ENABLE_REQUIREMENTS_INSTALL)
find_package(PythonInterp 3 REQUIRED)
get_filename_component(PYTHON_EXEC_DIR ${PYTHON_EXECUTABLE} DIRECTORY)
diff --git a/docs/.clang-format b/docs/.clang-format
index c93e6254b5b..ebe747b7838 100644
--- a/docs/.clang-format
+++ b/docs/.clang-format
@@ -1,6 +1,7 @@
BasedOnStyle: Google
IndentWidth: 4
UseTab: Never
+ColumnLimit: 120
Language: Cpp
Standard: Cpp11
@@ -8,18 +9,20 @@ Standard: Cpp11
AccessModifierOffset: -4
AlignConsecutiveMacros: true
AllowAllArgumentsOnNextLine: false
+AllowAllConstructorInitializersOnNextLine: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortFunctionsOnASingleLine: Empty
AllowShortIfStatementsOnASingleLine: Never
AllowShortLambdasOnASingleLine: Empty
AllowShortLoopsOnASingleLine: false
AlwaysBreakBeforeMultilineStrings: false
-ColumnLimit: 160
-# Specialize this comment pragma in order to avoid changes in SEA copyrights
+BinPackArguments: false
+BinPackParameters: false
CommentPragmas: '^#'
DerivePointerAlignment: false
FixNamespaceComments: true
IndentCaseLabels: false
-IndentPPDirectives: BeforeHash
-SpaceBeforeCpp11BracedList: true
-SpaceBeforeCtorInitializerColon: false
\ No newline at end of file
+IndentPPDirectives: AfterHash
+ForEachMacros:
+ - foreach
+ - FOREACH_CHILD
diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt
index c9859464ee1..4d3135903de 100644
--- a/docs/CMakeLists.txt
+++ b/docs/CMakeLists.txt
@@ -25,7 +25,7 @@ if(NOT ENABLE_DOCKER)
set(InferenceEngine_DIR ${CMAKE_BINARY_DIR})
endif()
- if(NGRAPH_ONNX_IMPORT_ENABLE)
+ if(NGRAPH_ONNX_FRONTEND_ENABLE)
add_subdirectory(onnx_custom_op)
endif()
add_subdirectory(template_extension)
@@ -223,7 +223,11 @@ function(build_docs)
"${OpenVINO_SOURCE_DIR}/inference-engine/*.md"
"${OpenVINO_SOURCE_DIR}/inference-engine/*.png"
"${OpenVINO_SOURCE_DIR}/inference-engine/*.gif"
- "${OpenVINO_SOURCE_DIR}/inference-engine/*.jpg")
+ "${OpenVINO_SOURCE_DIR}/inference-engine/*.jpg"
+ "${OpenVINO_SOURCE_DIR}/tools/*.md"
+ "${OpenVINO_SOURCE_DIR}/tools/*.png"
+ "${OpenVINO_SOURCE_DIR}/tools/*.gif"
+ "${OpenVINO_SOURCE_DIR}/tools/*.jpg")
foreach(source_file ${ovino_doc_files})
list(APPEND commands COMMAND ${CMAKE_COMMAND} -E copy
diff --git a/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md b/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md
index 252d67df81f..a9a9841cac4 100644
--- a/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md
+++ b/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md
@@ -50,10 +50,9 @@ The example below demonstrates how to unregister an operator from the destructor
## Requirements for Building with CMake
-A program that uses the `register_operator` functionality requires `ngraph` and `onnx_importer` libraries in addition to the Inference Engine.
-The `onnx_importer` is a component of the `ngraph` package , so `find_package(ngraph REQUIRED COMPONENTS onnx_importer)` can find both.
-The `ngraph` package exposes two variables, `${NGRAPH_LIBRARIES}` and `${ONNX_IMPORTER_LIBRARIES}`, which reference the `ngraph` and `onnx_importer` libraries.
-Those variables need to be passed to the `target_link_libraries` command in the CMakeLists.txt file.
+A program that uses the `register_operator` functionality requires `ngraph::ngraph` and `ngraph::onnx_ngraph_frontend` libraries in addition to the Inference Engine.
+The `onnx_ngraph_frontend` is a component of the `ngraph` package, so `find_package(ngraph REQUIRED COMPONENTS onnx_ngraph_frontend)` can find both.
+Those libraries need to be passed to the `target_link_libraries` command in the CMakeLists.txt file.
See CMakeLists.txt below for reference:
@snippet onnx_custom_op/CMakeLists.txt cmake:onnx_custom_op
diff --git a/docs/IE_DG/Samples_Overview.md b/docs/IE_DG/Samples_Overview.md
index db39cbfc5b4..f9e21cf5e4d 100644
--- a/docs/IE_DG/Samples_Overview.md
+++ b/docs/IE_DG/Samples_Overview.md
@@ -14,7 +14,7 @@ Inference Engine sample applications include the following:
- [Automatic Speech Recognition Python Sample](../../inference-engine/ie_bridges/python/sample/speech_sample/README.md)
- **Benchmark Application** – Estimates deep learning inference performance on supported devices for synchronous and asynchronous modes.
- [Benchmark C++ Tool](../../inference-engine/samples/benchmark_app/README.md)
- - [Benchmark Python Tool](../../inference-engine/tools/benchmark_tool/README.md)
+ - [Benchmark Python Tool](../../tools/benchmark_tool/README.md)
- **Hello Classification Sample** – Inference of image classification networks like AlexNet and GoogLeNet using Synchronous Inference Request API. Input of any size and layout can be set to an infer request which will be pre-processed automatically during inference (the sample supports only images as inputs and supports Unicode paths).
- [Hello Classification C++ Sample](../../inference-engine/samples/hello_classification/README.md)
- [Hello Classification C Sample](../../inference-engine/ie_bridges/c/samples/hello_classification/README.md)
diff --git a/docs/benchmarks/performance_benchmarks_faq.md b/docs/benchmarks/performance_benchmarks_faq.md
index a89d0fc07c3..2ff33612097 100644
--- a/docs/benchmarks/performance_benchmarks_faq.md
+++ b/docs/benchmarks/performance_benchmarks_faq.md
@@ -15,7 +15,7 @@ The models used in the performance benchmarks were chosen based on general adopt
CF means Caffe*, while TF means TensorFlow*.
#### 5. How can I run the benchmark results on my own?
-All of the performance benchmarks were generated using the open-sourced tool within the Intel® Distribution of OpenVINO™ toolkit called `benchmark_app`, which is available in both [C++](../../inference-engine/samples/benchmark_app/README.md) and [Python](../../inference-engine/tools/benchmark_tool/README.md).
+All of the performance benchmarks were generated using the open-sourced tool within the Intel® Distribution of OpenVINO™ toolkit called `benchmark_app`, which is available in both [C++](../../inference-engine/samples/benchmark_app/README.md) and [Python](../../tools/benchmark_tool/README.md).
#### 6. What image sizes are used for the classification network models?
The image size used in the inference depends on the network being benchmarked. The following table shows the list of input sizes for each network model.
diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml
index d1fbbe89b2d..f5ef147751f 100644
--- a/docs/doxygen/ie_docs.xml
+++ b/docs/doxygen/ie_docs.xml
@@ -61,7 +61,7 @@ limitations under the License.
-
+
diff --git a/docs/onnx_custom_op/CMakeLists.txt b/docs/onnx_custom_op/CMakeLists.txt
index f38ead369d8..09d6635ee92 100644
--- a/docs/onnx_custom_op/CMakeLists.txt
+++ b/docs/onnx_custom_op/CMakeLists.txt
@@ -7,11 +7,11 @@ set(CMAKE_CXX_STANDARD 11)
set(TARGET_NAME "onnx_custom_op")
-find_package(ngraph REQUIRED COMPONENTS onnx_importer)
+find_package(ngraph REQUIRED COMPONENTS onnx_ngraph_frontend)
add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp onnx_custom_op.hpp)
-target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES} ${ONNX_IMPORTER_LIBRARIES})
+target_link_libraries(${TARGET_NAME} PUBLIC ngraph::ngraph ngraph::onnx_ngraph_frontend)
# [cmake:onnx_custom_op]
# Enable code style check
diff --git a/docs/ops/activation/Clamp_1.md b/docs/ops/activation/Clamp_1.md
index d168ae8ce57..bc6b7edd3c9 100644
--- a/docs/ops/activation/Clamp_1.md
+++ b/docs/ops/activation/Clamp_1.md
@@ -15,7 +15,7 @@
Let *min_value* and *max_value* be *min* and *max*, respectively. The mathematical formula of *Clamp* is as follows:
\f[
-clamp( x_{i} )=\min\big( \max\left( x_{i}, min\_value \right), max\_value \big)
+clamp( x_{i} )=\min\big( \max\left( x_{i},\ min\_value \right),\ max\_value \big)
\f]
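For instance, with *min_value* = 1 and *max_value* = 5 (values chosen only for illustration):

\f[
clamp( 7 )=\min\big( \max\left( 7,\ 1 \right),\ 5 \big) = 5, \qquad clamp( -2 )=\min\big( \max\left( -2,\ 1 \right),\ 5 \big) = 1
\f]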
**Attributes**:
diff --git a/docs/ops/activation/GELU_2.md b/docs/ops/activation/GELU_2.md
index c61905191a4..3d2adaa14de 100644
--- a/docs/ops/activation/GELU_2.md
+++ b/docs/ops/activation/GELU_2.md
@@ -12,7 +12,7 @@
It performs element-wise activation function on a given input tensor, based on the following mathematical formula:
\f[
- Gelu(x) = x\cdot\Phi(x) = x\cdot\frac{1}{2}\cdot\left[1 + erf\left(x/\sqrt{2}\right)\right]
+ Gelu(x) = x\cdot\Phi(x) = x\cdot\frac{1}{2}\cdot\left[1 + erf\frac{x}{\sqrt{2}}\right]
\f]
where Φ(x) is the Cumulative Distribution Function for Gaussian Distribution.
diff --git a/docs/ops/activation/GELU_7.md b/docs/ops/activation/GELU_7.md
index 44f182a9ab3..f11a4813a07 100644
--- a/docs/ops/activation/GELU_7.md
+++ b/docs/ops/activation/GELU_7.md
@@ -22,13 +22,13 @@ The *Gelu* function may be approximated in two different ways based on *approxim
For `erf` approximation mode, *Gelu* function is represented as:
\f[
- Gelu(x) = x\cdot\Phi(x) = x\cdot\frac{1}{2}\cdot\left[1 + erf\left(x/\sqrt{2}\right)\right]
+ Gelu(x) = x\cdot\Phi(x) = x\cdot\frac{1}{2}\cdot\left[1 + erf\frac{x}{\sqrt{2}}\right]
\f]
For `tanh` approximation mode, *Gelu* function is represented as:
\f[
- Gelu(x) \approx x\cdot\frac{1}{2}\cdot \left(1 + \tanh\left[\sqrt{2/\pi} \cdot (x + 0.044715 \cdot x^3)\right]\right)
+ Gelu(x) \approx x\cdot\frac{1}{2}\cdot \left(1 + \tanh\left[\sqrt{\frac{2}{\pi}} \cdot (x + 0.044715 \cdot x^3)\right]\right)
\f]
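The two modes agree closely; for example, at \f$x = 1\f$ (values rounded, for illustration only):

\f[
Gelu(1) = \frac{1}{2}\cdot\left[1 + erf\frac{1}{\sqrt{2}}\right] \approx 0.841 \textrm{ (erf mode)}, \qquad Gelu(1) \approx \frac{1}{2}\cdot\left(1 + \tanh\left[\sqrt{\frac{2}{\pi}} \cdot 1.044715\right]\right) \approx 0.841 \textrm{ (tanh mode)}
\f]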
**Attributes**
diff --git a/docs/ops/activation/HSigmoid_5.md b/docs/ops/activation/HSigmoid_5.md
index 2470ccb00da..367327a4f85 100644
--- a/docs/ops/activation/HSigmoid_5.md
+++ b/docs/ops/activation/HSigmoid_5.md
@@ -10,7 +10,7 @@
element in the output tensor with the following formula:
\f[
-HSigmoid(x) = \frac{min(max(x + 3, 0), 6)}{6}
+HSigmoid(x) = \frac{min(max(x + 3,\ 0),\ 6)}{6}
\f]
The HSigmoid operation is introduced in the following [article](https://arxiv.org/pdf/1905.02244.pdf).
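A few sample points illustrate the saturation behaviour:

\f[
HSigmoid(-4) = \frac{min(max(-1,\ 0),\ 6)}{6} = 0, \qquad HSigmoid(0) = \frac{3}{6} = 0.5, \qquad HSigmoid(4) = \frac{min(7,\ 6)}{6} = 1
\f]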
diff --git a/docs/ops/activation/HSwish_4.md b/docs/ops/activation/HSwish_4.md
index a9ae8168a1d..3f27517a44b 100644
--- a/docs/ops/activation/HSwish_4.md
+++ b/docs/ops/activation/HSwish_4.md
@@ -10,7 +10,7 @@
element in the output tensor with the following formula:
\f[
-HSwish(x) = x \frac{min(max(x + 3, 0), 6)}{6}
+HSwish(x) = x \cdot \frac{min(max(x + 3,\ 0),\ 6)}{6}
\f]
The HSwish operation is introduced in the following [article](https://arxiv.org/pdf/1905.02244.pdf).
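A few sample points (for illustration) show how the multiplication by \f$x\f$ interacts with the saturation:

\f[
HSwish(-3) = -3 \cdot \frac{0}{6} = 0, \qquad HSwish(3) = 3 \cdot \frac{6}{6} = 3
\f]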
diff --git a/docs/ops/activation/HardSigmoid_1.md b/docs/ops/activation/HardSigmoid_1.md
index 03c5c11606e..8403ca8a1ec 100644
--- a/docs/ops/activation/HardSigmoid_1.md
+++ b/docs/ops/activation/HardSigmoid_1.md
@@ -12,10 +12,13 @@
For each element from the input tensor calculates corresponding
element in the output tensor with the following formula:
+
\f[
- y = max(0, min(1, alpha * x + beta))
+ y = max(0,\ min(1,\ \alpha x + \beta))
\f]
+ where α corresponds to the `alpha` scalar input and β corresponds to the `beta` scalar input.
+
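For example, with the `alpha` input set to 0.2 and the `beta` input set to 0.5 (hypothetical values, not defaults from the specification):

\f[
y(0) = max(0,\ min(1,\ 0.5)) = 0.5, \qquad y(5) = max(0,\ min(1,\ 1.5)) = 1, \qquad y(-5) = max(0,\ min(1,\ -0.5)) = 0
\f]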
**Inputs**
* **1**: A tensor of type *T*. **Required.**
diff --git a/docs/ops/activation/LogSoftmax_5.md b/docs/ops/activation/LogSoftmax_5.md
index 60035120417..d26488fa968 100644
--- a/docs/ops/activation/LogSoftmax_5.md
+++ b/docs/ops/activation/LogSoftmax_5.md
@@ -8,8 +8,8 @@
**Note**: It is recommended not to compute LogSoftmax directly as Log(Softmax(x, axis)); it is more numerically stable to compute it as:
\f[
-t = (x - ReduceMax(x, axis)) \\
-LogSoftmax(x, axis) = t - Log(ReduceSum(Exp(t), axis))
+t = (x - ReduceMax(x,\ axis)) \\
+LogSoftmax(x, axis) = t - Log(ReduceSum(Exp(t),\ axis))
\f]
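For intuition, consider \f$x = (1000,\ 1000)\f$ (a worked example): the naive form requires \f$e^{1000}\f$, which overflows, while the stable form gives

\f[
t = x - 1000 = (0,\ 0), \qquad LogSoftmax(x,\ axis) = t - Log(e^{0} + e^{0}) = -Log(2)
\f]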
**Attributes**
diff --git a/docs/ops/activation/ReLU_1.md b/docs/ops/activation/ReLU_1.md
index b3edf994e01..5b401dbc908 100644
--- a/docs/ops/activation/ReLU_1.md
+++ b/docs/ops/activation/ReLU_1.md
@@ -15,7 +15,7 @@
For each element from the input tensor calculates corresponding
element in the output tensor with the following formula:
\f[
- Y_{i}^{( l )} = max(0, Y_{i}^{( l - 1 )})
+ Y_{i}^{( l )} = max(0,\ Y_{i}^{( l - 1 )})
\f]
**Inputs**:
diff --git a/docs/ops/arithmetic/Abs_1.md b/docs/ops/arithmetic/Abs_1.md
index 426daee3806..1dc73dee933 100644
--- a/docs/ops/arithmetic/Abs_1.md
+++ b/docs/ops/arithmetic/Abs_1.md
@@ -25,7 +25,7 @@
*Abs* does the following with the input tensor *a*:
\f[
-a_{i} = abs(a_{i})
+a_{i} = \vert a_{i} \vert
\f]
**Examples**
diff --git a/docs/ops/arithmetic/Atanh_3.md b/docs/ops/arithmetic/Atanh_3.md
index c6dc4a5a89c..d08486c4205 100644
--- a/docs/ops/arithmetic/Atanh_3.md
+++ b/docs/ops/arithmetic/Atanh_3.md
@@ -4,11 +4,13 @@
**Category**: Arithmetic unary operation
-**Short description**: *Atanh* performs element-wise hyperbolic inverse tangent (arctangenth) operation with given tensor.
+**Short description**: *Atanh* performs element-wise hyperbolic inverse tangent (arctanh) operation on a given tensor.
-**Attributes**:
+**Detailed description**: *Atanh* performs element-wise hyperbolic inverse tangent (arctanh) operation on a given input tensor, based on the following mathematical formula:
- No attributes available.
+\f[ a_{i} = atanh(a_{i}) \f]
+
+**Attributes**: *Atanh* operation has no attributes.
**Inputs**
@@ -16,22 +18,14 @@
**Outputs**
-* **1**: The result of element-wise atanh operation. A tensor of type *T*.
+* **1**: The result of element-wise atanh operation applied to the input tensor. A tensor of type *T* and the same shape as the input tensor.
**Types**
-* *T*: any floating-point type.
-
-*Atanh* does the following with the input tensor *a*:
-
-\f[
-a_{i} = atanh(a_{i})
-\f]
+* *T*: any supported numeric type.
**Examples**
-*Example 1*
-
```xml
diff --git a/docs/ops/arithmetic/Ceiling_1.md b/docs/ops/arithmetic/Ceiling_1.md
index 4d4cfeb9450..e091824c96d 100644
--- a/docs/ops/arithmetic/Ceiling_1.md
+++ b/docs/ops/arithmetic/Ceiling_1.md
@@ -10,7 +10,7 @@
element in the output tensor with the following formula:
\f[
-a_{i} = ceiling(a_{i})
+a_{i} = \lceil a_{i} \rceil
\f]
**Attributes**: *Ceiling* operation has no attributes.
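The behaviour on negative inputs is worth noting (for illustration):

\f[
\lceil 2.3 \rceil = 3, \qquad \lceil -2.3 \rceil = -2
\f]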
diff --git a/docs/ops/arithmetic/Divide_1.md b/docs/ops/arithmetic/Divide_1.md
index b16198a05ad..b69a07454a1 100644
--- a/docs/ops/arithmetic/Divide_1.md
+++ b/docs/ops/arithmetic/Divide_1.md
@@ -11,7 +11,7 @@ Before performing arithmetic operation, input tensors *a* and *b* are broadcaste
After broadcasting *Divide* performs division operation for the input tensors *a* and *b* using the formula below:
\f[
-o_{i} = a_{i} / b_{i}
+o_{i} = \frac{a_{i}}{b_{i}}
\f]
The result of division by zero is undefined.
diff --git a/docs/ops/arithmetic/FloorMod_1.md b/docs/ops/arithmetic/FloorMod_1.md
index 27c77ade3fa..c573dee8304 100644
--- a/docs/ops/arithmetic/FloorMod_1.md
+++ b/docs/ops/arithmetic/FloorMod_1.md
@@ -10,7 +10,7 @@
As a first step input tensors *a* and *b* are broadcasted if their shapes differ. Broadcasting is performed according to `auto_broadcast` attribute specification. As a second step *FloorMod* operation is computed element-wise on the input tensors *a* and *b* according to the formula below:
\f[
-o_{i} = a_{i} % b_{i}
+o_{i} = a_{i} \mod b_{i}
\f]
*FloorMod* operation computes the remainder of a floored division. It is the same behaviour as in the Python programming language: `floor(x / y) * y + floor_mod(x, y) = x`. The sign of the result is equal to the sign of the divisor. The result of division by zero is undefined.
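For example (illustrative values), the result takes the divisor's sign:

\f[
floor\_mod(-7,\ 3) = 2, \qquad floor\_mod(7,\ -3) = -2
\f]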
diff --git a/docs/ops/arithmetic/Floor_1.md b/docs/ops/arithmetic/Floor_1.md
index 910ce43d590..06690f06df8 100644
--- a/docs/ops/arithmetic/Floor_1.md
+++ b/docs/ops/arithmetic/Floor_1.md
@@ -10,7 +10,7 @@
element in the output tensor with the following formula:
\f[
-a_{i} = floor(a_{i})
+a_{i} = \lfloor a_{i} \rfloor
\f]
**Attributes**: *Floor* operation has no attributes.
diff --git a/docs/ops/arithmetic/Maximum_1.md b/docs/ops/arithmetic/Maximum_1.md
index d16db0e0d77..18eb0e757b9 100644
--- a/docs/ops/arithmetic/Maximum_1.md
+++ b/docs/ops/arithmetic/Maximum_1.md
@@ -12,7 +12,7 @@ As a first step input tensors *a* and *b* are broadcasted if their shapes differ
After broadcasting *Maximum* does the following with the input tensors *a* and *b*:
\f[
-o_{i} = max(a_{i}, b_{i})
+o_{i} = max(a_{i},\ b_{i})
\f]
**Attributes**:
diff --git a/docs/ops/arithmetic/Minimum_1.md b/docs/ops/arithmetic/Minimum_1.md
index 69d5e8d85ef..30204e136dc 100644
--- a/docs/ops/arithmetic/Minimum_1.md
+++ b/docs/ops/arithmetic/Minimum_1.md
@@ -10,7 +10,7 @@
As a first step input tensors *a* and *b* are broadcasted if their shapes differ. Broadcasting is performed according to `auto_broadcast` attribute specification. As a second step *Minimum* operation is computed element-wise on the input tensors *a* and *b* according to the formula below:
\f[
-o_{i} = min(a_{i}, b_{i})
+o_{i} = min(a_{i},\ b_{i})
\f]
**Attributes**:
diff --git a/docs/ops/arithmetic/Mod_1.md b/docs/ops/arithmetic/Mod_1.md
index 7daf20d565c..df414c0f4fe 100644
--- a/docs/ops/arithmetic/Mod_1.md
+++ b/docs/ops/arithmetic/Mod_1.md
@@ -10,7 +10,7 @@
As a first step input tensors *a* and *b* are broadcasted if their shapes differ. Broadcasting is performed according to `auto_broadcast` attribute specification. As a second step *Mod* operation is computed element-wise on the input tensors *a* and *b* according to the formula below:
\f[
-o_{i} = a_{i} % b_{i}
+o_{i} = a_{i} \mod b_{i}
\f]
*Mod* operation computes the remainder of a truncated division. It is the same behaviour as in the C programming language: `truncated(x / y) * y + truncated_mod(x, y) = x`. The sign of the result is equal to the sign of the dividend. The result of division by zero is undefined.
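For example (illustrative values), and in contrast with *FloorMod*, the result takes the dividend's sign:

\f[
mod(-7,\ 3) = -1, \qquad mod(7,\ -3) = 1
\f]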
diff --git a/docs/ops/arithmetic/Multiply_1.md b/docs/ops/arithmetic/Multiply_1.md
index 6b8273922f5..a713c9c0eac 100644
--- a/docs/ops/arithmetic/Multiply_1.md
+++ b/docs/ops/arithmetic/Multiply_1.md
@@ -11,7 +11,7 @@ Before performing arithmetic operation, input tensors *a* and *b* are broadcaste
After broadcasting *Multiply* performs multiplication operation for the input tensors *a* and *b* using the formula below:
\f[
-o_{i} = a_{i} * b_{i}
+o_{i} = a_{i} \cdot b_{i}
\f]
**Attributes**:
diff --git a/docs/ops/comparison/Equal_1.md b/docs/ops/comparison/Equal_1.md
index f72d3302665..9bdd3361c26 100644
--- a/docs/ops/comparison/Equal_1.md
+++ b/docs/ops/comparison/Equal_1.md
@@ -4,35 +4,10 @@
**Category**: Comparison binary operation
-**Short description**: *Equal* performs element-wise comparison operation with two given tensors applying multi-directional broadcast rules.
-
-**Attributes**:
-
-* *auto_broadcast*
-
- * **Description**: specifies rules used for auto-broadcasting of input tensors.
- * **Range of values**:
- * *none* - no auto-broadcasting is allowed, all input shapes should match
- * *numpy* - numpy broadcasting rules, aligned with ONNX Broadcasting. Description is available in ONNX docs.
- * **Type**: string
- * **Default value**: "numpy"
- * **Required**: *no*
-
-**Inputs**
-
-* **1**: A tensor of type *T*. **Required.**
-* **2**: A tensor of type *T*. **Required.**
-
-**Outputs**
-
-* **1**: The result of element-wise comparison operation. A tensor of type boolean.
-
-**Types**
-
-* *T*: arbitrary supported type.
+**Short description**: *Equal* performs element-wise comparison operation with two given input tensors applying multi-directional broadcast rules specified in the *auto_broadcast* attribute.
**Detailed description**
-Before performing arithmetic operation, input tensors *a* and *b* are broadcasted if their shapes are different and `auto_broadcast` attributes is not `none`. Broadcasting is performed according to `auto_broadcast` value.
+Before performing the arithmetic operation, input tensors *a* and *b* are broadcasted if their shapes are different and the *auto_broadcast* attribute is not *none*. Broadcasting is performed according to the *auto_broadcast* value.
After broadcasting *Equal* does the following with the input tensors *a* and *b*:
@@ -40,12 +15,40 @@ After broadcasting *Equal* does the following with the input tensors *a* and *b*
o_{i} = a_{i} == b_{i}
\f]
+**Attributes**:
+
+* *auto_broadcast*
+
+ * **Description**: specifies rules used for auto-broadcasting of input tensors.
+ * **Range of values**:
+ * *none* - no auto-broadcasting is allowed, all input shapes should match,
+ * *numpy* - numpy broadcasting rules, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md),
+ * *pdpd* - PaddlePaddle-style implicit broadcasting, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md).
+ * **Type**: string
+ * **Default value**: "numpy"
+ * **Required**: *no*
+
+**Inputs**
+
+* **1**: A tensor of type *T* and arbitrary shape. **Required.**
+* **2**: A tensor of type *T* and arbitrary shape. **Required.**
+
+**Outputs**
+
+* **1**: The result of element-wise **comparison** operation applied to the input tensors. A tensor of type *T_BOOL* with shape equal to the broadcasted shape of the two inputs.
+
+**Types**
+
+* *T*: arbitrary supported type.
+* *T_BOOL*: `boolean`.
+
**Examples**
-*Example 1*
+*Example 1: no broadcast*
```xml
+
256
@@ -65,9 +68,10 @@ o_{i} = a_{i} == b_{i}
```
-*Example 2: broadcast*
+*Example 2: numpy broadcast*
```xml
+
8
diff --git a/docs/ops/comparison/GreaterEqual_1.md b/docs/ops/comparison/GreaterEqual_1.md
index 5acf4cbe6d6..f4a29c667fe 100644
--- a/docs/ops/comparison/GreaterEqual_1.md
+++ b/docs/ops/comparison/GreaterEqual_1.md
@@ -37,7 +37,7 @@ Before performing arithmetic operation, input tensors *a* and *b* are broadcaste
After broadcasting *GreaterEqual* does the following with the input tensors *a* and *b*:
\f[
-o_{i} = a_{i} >= b_{i}
+o_{i} = a_{i} \geq b_{i}
\f]
**Examples**
diff --git a/docs/ops/comparison/LessEqual_1.md b/docs/ops/comparison/LessEqual_1.md
index a8b7c810181..bb7eed13793 100644
--- a/docs/ops/comparison/LessEqual_1.md
+++ b/docs/ops/comparison/LessEqual_1.md
@@ -12,7 +12,7 @@ Before performing arithmetic operation, input tensors *a* and *b* are broadcaste
After broadcasting *LessEqual* does the following with the input tensors *a* and *b*:
\f[
-o_{i} = a_{i} <= b_{i}
+o_{i} = a_{i} \leq b_{i}
\f]
**Attributes**:
diff --git a/docs/ops/comparison/NotEqual_1.md b/docs/ops/comparison/NotEqual_1.md
index 456aeb7a785..448f4bcb66a 100644
--- a/docs/ops/comparison/NotEqual_1.md
+++ b/docs/ops/comparison/NotEqual_1.md
@@ -37,7 +37,7 @@ Before performing arithmetic operation, input tensors *a* and *b* are broadcaste
After broadcasting *NotEqual* does the following with the input tensors *a* and *b*:
\f[
-o_{i} = a_{i} != b_{i}
+o_{i} = a_{i} \neq b_{i}
\f]
**Examples**
diff --git a/docs/ops/convolution/Convolution_1.md b/docs/ops/convolution/Convolution_1.md
index e77967e4130..431575b99c3 100644
--- a/docs/ops/convolution/Convolution_1.md
+++ b/docs/ops/convolution/Convolution_1.md
@@ -16,15 +16,15 @@ n_{out} = \left ( \frac{n_{in} + 2p - k}{s} \right ) + 1
The receptive field in each layer is calculated using the formulas:
* Jump in the output feature map:
\f[
- j_{out} = j_{in} * s
+ j_{out} = j_{in} \cdot s
\f]
* Size of the receptive field of output feature:
\f[
- r_{out} = r_{in} + ( k - 1 ) * j_{in}
+ r_{out} = r_{in} + ( k - 1 ) \cdot j_{in}
\f]
* Center position of the receptive field of the first output feature:
\f[
- start_{out} = start_{in} + ( \frac{k - 1}{2} - p ) * j_{in}
+ start_{out} = start_{in} + ( \frac{k - 1}{2} - p ) \cdot j_{in}
\f]
* Output is calculated using the following formula:
\f[
diff --git a/docs/ops/convolution/DeformableConvolution_1.md b/docs/ops/convolution/DeformableConvolution_1.md
index 77140cb30c7..6c73e202be5 100644
--- a/docs/ops/convolution/DeformableConvolution_1.md
+++ b/docs/ops/convolution/DeformableConvolution_1.md
@@ -12,7 +12,7 @@ Output is calculated using the following formula:
\f[
- y(p) = \sum_{k = 1}^{K}w_{k}x(p + p_{k} + {\Delta}p_{k})
+ y(p) = \displaystyle{\sum_{k = 1}^{K}}w_{k}x(p + p_{k} + {\Delta}p_{k})
\f]
diff --git a/docs/ops/convolution/DeformableConvolution_8.md b/docs/ops/convolution/DeformableConvolution_8.md
index 0474a71193d..fc7c05a235c 100644
--- a/docs/ops/convolution/DeformableConvolution_8.md
+++ b/docs/ops/convolution/DeformableConvolution_8.md
@@ -14,7 +14,7 @@ Output is calculated using the following formula:
\f[
- y(p) = \sum_{k = 1}^{K}w_{k}x(p + p_{k} + {\Delta}p_{k}) * {\Delta}m_{k}
+ y(p) = \displaystyle{\sum_{k = 1}^{K}}w_{k}x(p + p_{k} + {\Delta}p_{k}) \cdot {\Delta}m_{k}
\f]
Where
diff --git a/docs/ops/generation/RandomUniform_8.md b/docs/ops/generation/RandomUniform_8.md
index 4269c82bc6a..4fff2684d6c 100644
--- a/docs/ops/generation/RandomUniform_8.md
+++ b/docs/ops/generation/RandomUniform_8.md
@@ -8,7 +8,7 @@
**Detailed description**:
-*RandomUniform* operation generates random numbers from a uniform distribution in the range `[*minval*, *maxval*)`.
+*RandomUniform* operation generates random numbers from a uniform distribution in the range `[minval, maxval)`.
The generation algorithm is based on an underlying random integer generator that uses the Philox algorithm. Philox
is a counter-based pseudo-random generator that produces uint32 values. A single invocation of the Philox algorithm returns
four random values, depending on the given *key* and *counter* values. *Key* and *counter* are initialized
@@ -42,7 +42,7 @@ R' = mulhi(R, M) {\oplus} k {\oplus} L \\
mulhi(a, b) = floor((a {\times} b) / 2^{32}) \\
mullo(a, b) = (a {\times} b) \mod 2^{32}
\f]
-where `{\oplus}` - bitwise xor, *k* = `R_{key}` for updating counter, *k* = `L_{key}` for updating *n*,
+where \f${\oplus}\f$ denotes bitwise xor, *k* = \f$R_{key}\f$ for updating counter, *k* = \f$L_{key}\f$ for updating *n*,
*M* = `0xD2511F53` for updating *n*, *M* = `0xCD9E8D57` for updating *counter*.
After each round *key* is raised by summing with another pair of const values:
@@ -50,7 +50,7 @@ After each round *key* is raised by summing with another pair of const values:
L += 0x9E3779B9 \\
R += 0xBB67AE85
\f]
-Values *L'_{n}*, *R'_{n}*, *L'_{counter}*, *R'_{counter}* are resulting four random numbers.
+Values \f$L'_{n}, R'_{n}, L'_{counter}, R'_{counter}\f$ are the resulting four random numbers.
Float values between [0..1) are obtained from 32-bit integers by the following rules.
diff --git a/docs/ops/logical/LogicalNot_1.md b/docs/ops/logical/LogicalNot_1.md
index 9dd9132383f..97c41ddb14c 100644
--- a/docs/ops/logical/LogicalNot_1.md
+++ b/docs/ops/logical/LogicalNot_1.md
@@ -25,7 +25,7 @@
*LogicalNot* does the following with the input tensor *a*:
\f[
-a_{i} = not(a_{i})
+a_{i} = \lnot a_{i}
\f]
**Examples**
diff --git a/docs/ops/logical/LogicalXor_1.md b/docs/ops/logical/LogicalXor_1.md
index 61bfa9bc25c..16072f01183 100644
--- a/docs/ops/logical/LogicalXor_1.md
+++ b/docs/ops/logical/LogicalXor_1.md
@@ -37,7 +37,7 @@ Before performing logical operation, input tensors *a* and *b* are broadcasted i
After broadcasting *LogicalXor* does the following with the input tensors *a* and *b*:
\f[
-o_{i} = a_{i} xor b_{i}
+o_{i} = a_{i} \oplus b_{i}
\f]
**Examples**
diff --git a/docs/ops/pooling/AdaptiveAvgPool_8.md b/docs/ops/pooling/AdaptiveAvgPool_8.md
index cff1e91e92c..3c6193045ca 100644
--- a/docs/ops/pooling/AdaptiveAvgPool_8.md
+++ b/docs/ops/pooling/AdaptiveAvgPool_8.md
@@ -11,19 +11,19 @@ The kernel dimensions are calculated using the following formulae for the `NCDHW
\f[
\begin{array}{lcl}
-d_{start} &=& floor(i*D_{in}/D_{out})\\
-d_{end} &=& ceil((i+1)*D_{in}/D_{out})\\
-h_{start} &=& floor(j*H_{in}/H_{out})\\
-h_{end} &=& ceil((j+1)*H_{in}/H_{out})\\
-w_{start} &=& floor(k*W_{in}/W_{out})\\
-w_{end} &=& ceil((k+1)*W_{in}/W_{out})
+d_{start} &=& \lfloor i \cdot \frac{D_{in}}{D_{out}}\rfloor\\
+d_{end} &=& \lceil(i+1) \cdot \frac{D_{in}}{D_{out}}\rceil\\
+h_{start} &=& \lfloor j \cdot \frac{H_{in}}{H_{out}}\rfloor\\
+h_{end} &=& \lceil(j+1) \cdot \frac{H_{in}}{H_{out}}\rceil\\
+w_{start} &=& \lfloor k \cdot \frac{W_{in}}{W_{out}}\rfloor\\
+w_{end} &=& \lceil(k+1) \cdot \frac{W_{in}}{W_{out}}\rceil
\end{array}
\f]
The output is calculated with the following formula:
\f[
-Output(i,j,k) = \frac{Input[d_{start}:d_{end}, h_{start}:h_{end}, w_{start}:w_{end}]}{(d_{end}-d_{start})*(h_{end}-h_{start})*(w_{end}-w_{start})}
+Output(i,j,k) = \frac{Input[d_{start}:d_{end}, h_{start}:h_{end}, w_{start}:w_{end}]}{(d_{end}-d_{start}) \cdot (h_{end}-h_{start}) \cdot (w_{end}-w_{start})}
\f]
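For example, with \f$D_{in} = 5\f$ and \f$D_{out} = 3\f$ (illustrative sizes), the depth-axis windows are \f$[0,\ 2)\f$, \f$[1,\ 4)\f$ and \f$[3,\ 5)\f$; note that adjacent windows may overlap.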
**Inputs**:
diff --git a/docs/ops/pooling/AdaptiveMaxPool_8.md b/docs/ops/pooling/AdaptiveMaxPool_8.md
index a86c3f67ac0..c34629351b8 100644
--- a/docs/ops/pooling/AdaptiveMaxPool_8.md
+++ b/docs/ops/pooling/AdaptiveMaxPool_8.md
@@ -11,12 +11,12 @@ The kernel dimensions are calculated using the following formulae for the `NCDHW
\f[
\begin{array}{lcl}
-d_{start} &=& floor(i*D_{in}/D_{out})\\
-d_{end} &=& ceil((i+1)*D_{in}/D_{out})\\
-h_{start} &=& floor(j*H_{in}/H_{out})\\
-h_{end} &=& ceil((j+1)*H_{in}/H_{out})\\
-w_{start} &=& floor(k*W_{in}/W_{out})\\
-w_{end} &=& ceil((k+1)*W_{in}/W_{out})
+d_{start} &=& \lfloor i \cdot \frac{D_{in}}{D_{out}}\rfloor\\
+d_{end} &=& \lceil(i+1) \cdot \frac{D_{in}}{D_{out}}\rceil\\
+h_{start} &=& \lfloor j \cdot \frac{H_{in}}{H_{out}}\rfloor\\
+h_{end} &=& \lceil(j+1) \cdot \frac{H_{in}}{H_{out}}\rceil\\
+w_{start} &=& \lfloor k \cdot \frac{W_{in}}{W_{out}}\rfloor\\
+w_{end} &=& \lceil(k+1) \cdot \frac{W_{in}}{W_{out}}\rceil
\end{array}
\f]
diff --git a/docs/snippets/CMakeLists.txt b/docs/snippets/CMakeLists.txt
index 48edae1e832..9edc3e4f327 100644
--- a/docs/snippets/CMakeLists.txt
+++ b/docs/snippets/CMakeLists.txt
@@ -44,8 +44,8 @@ if(OpenCV_FOUND)
target_link_libraries(${TARGET_NAME} PRIVATE opencv_core)
endif()
-if(NGRAPH_ONNX_IMPORT_ENABLE)
- target_link_libraries(${TARGET_NAME} PRIVATE onnx_importer)
+if(NGRAPH_ONNX_FRONTEND_ENABLE)
+ target_link_libraries(${TARGET_NAME} PRIVATE onnx_ngraph_frontend)
endif()
if(NOT MSVC)
diff --git a/docs/template_extension/CMakeLists.txt b/docs/template_extension/CMakeLists.txt
index a6e7527e55f..230323768e0 100644
--- a/docs/template_extension/CMakeLists.txt
+++ b/docs/template_extension/CMakeLists.txt
@@ -7,7 +7,7 @@ set(CMAKE_CXX_STANDARD 11)
set(TARGET_NAME "template_extension")
-find_package(ngraph REQUIRED OPTIONAL_COMPONENTS onnx_importer)
+find_package(ngraph REQUIRED OPTIONAL_COMPONENTS onnx_ngraph_frontend)
find_package(InferenceEngine REQUIRED)
find_package(OpenCV QUIET COMPONENTS core)
@@ -28,9 +28,9 @@ target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_EXTENSION_
target_link_libraries(${TARGET_NAME} PRIVATE IE::inference_engine
${NGRAPH_LIBRARIES})
-if (ngraph_onnx_importer_FOUND)
- target_link_libraries(${TARGET_NAME} PRIVATE ${ONNX_IMPORTER_LIBRARIES})
- target_compile_definitions(${TARGET_NAME} PRIVATE NGRAPH_ONNX_IMPORT_ENABLED)
+if (ngraph_onnx_ngraph_frontend_FOUND)
+ target_link_libraries(${TARGET_NAME} PRIVATE ngraph::onnx_ngraph_frontend)
+ target_compile_definitions(${TARGET_NAME} PRIVATE NGRAPH_ONNX_FRONTEND_ENABLED)
endif()
# [cmake:extension]
diff --git a/docs/template_extension/cpu_kernel.cpp b/docs/template_extension/cpu_kernel.cpp
index b1d426b1582..84a57dbe9e9 100644
--- a/docs/template_extension/cpu_kernel.cpp
+++ b/docs/template_extension/cpu_kernel.cpp
@@ -22,7 +22,8 @@ OpImplementation::OpImplementation(const std::shared_ptr& node) {
IE_THROW() << "Cannot create implementation for op with dynamic shapes!";
if (castedNode->get_input_shape(0).size() != 4 || castedNode->get_output_shape(0).size() != 4)
IE_THROW() << "Operation supports only 4d tensors for input and output.";
- if (castedNode->get_input_element_type(0) != ngraph::element::f32 || castedNode->get_output_element_type(0) != ngraph::element::f32)
+ if (castedNode->get_input_element_type(0) != ngraph::element::f32 ||
+ castedNode->get_output_element_type(0) != ngraph::element::f32)
IE_THROW() << "Operation supports only FP32 tensors.";
add = castedNode->getAddAttr();
inShape = castedNode->get_input_shape(0);
@@ -34,9 +35,12 @@ OpImplementation::OpImplementation(const std::shared_ptr& node) {
//! [cpu_implementation:ctor]
//! [cpu_implementation:getSupportedConfigurations]
-InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector& conf,
- InferenceEngine::ResponseDesc* resp) noexcept {
- auto createConfig = [](const InferenceEngine::SizeVector inShape, const InferenceEngine::SizeVector& outShape, bool planar) {
+InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(
+ std::vector& conf,
+ InferenceEngine::ResponseDesc* resp) noexcept {
+ auto createConfig = [](const InferenceEngine::SizeVector inShape,
+ const InferenceEngine::SizeVector& outShape,
+ bool planar) {
InferenceEngine::LayerConfig config;
config.dynBatchSupport = false;
InferenceEngine::DataConfig inData;
@@ -45,9 +49,11 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve
// Allow any offset before data
size_t offset((std::numeric_limits::max)());
if (planar) {
- inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inShape, order, offset});
+ inData.desc =
+ InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inShape, order, offset});
config.inConfs.push_back(inData);
- outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outShape, order, offset});
+ outData.desc =
+ InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outShape, order, offset});
config.outConfs.push_back(outData);
} else {
// Add blocked (nChw8c) format
@@ -64,9 +70,11 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve
InferenceEngine::SizeVector outBlkDims = outShape;
outBlkDims[1] = div_up(outBlkDims[1], 8);
outBlkDims.push_back(8);
- inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inBlkDims, order, offset});
+ inData.desc =
+ InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inBlkDims, order, offset});
config.inConfs.push_back(inData);
- outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outBlkDims, order, offset});
+ outData.desc =
+ InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outBlkDims, order, offset});
config.outConfs.push_back(outData);
}
return config;
@@ -87,7 +95,8 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve
//! [cpu_implementation:getSupportedConfigurations]
//! [cpu_implementation:init]
-InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept {
+InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig& config,
+ InferenceEngine::ResponseDesc* resp) noexcept {
try {
if (config.inConfs.size() != 1 || config.outConfs.size() != 1) {
IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!";
@@ -115,10 +124,13 @@ InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig&
//! [cpu_implementation:init]
//! [cpu_implementation:execute]
-InferenceEngine::StatusCode OpImplementation::execute(std::vector& inputs, std::vector& outputs,
+InferenceEngine::StatusCode OpImplementation::execute(std::vector& inputs,
+ std::vector& outputs,
InferenceEngine::ResponseDesc* resp) noexcept {
- const float* src_data = inputs[0]->cbuffer().as() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
- float* dst_data = outputs[0]->buffer().as() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
+ const float* src_data =
+ inputs[0]->cbuffer().as() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
+ float* dst_data =
+ outputs[0]->buffer().as() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
for (size_t i = 0; i < inputs[0]->size(); i++) {
dst_data[i] = src_data[i] + add;
diff --git a/docs/template_extension/cpu_kernel.hpp b/docs/template_extension/cpu_kernel.hpp
index 901d33093b5..9c71bdb0cef 100644
--- a/docs/template_extension/cpu_kernel.hpp
+++ b/docs/template_extension/cpu_kernel.hpp
@@ -16,8 +16,10 @@ public:
explicit OpImplementation(const std::shared_ptr& node);
InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf,
InferenceEngine::ResponseDesc* resp) noexcept override;
- InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override;
- InferenceEngine::StatusCode execute(std::vector& inputs, std::vector& outputs,
+ InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config,
+ InferenceEngine::ResponseDesc* resp) noexcept override;
+ InferenceEngine::StatusCode execute(std::vector& inputs,
+ std::vector& outputs,
InferenceEngine::ResponseDesc* resp) noexcept override;
private:
diff --git a/docs/template_extension/extension.cpp b/docs/template_extension/extension.cpp
index 7a0874f2bea..d3be82d1120 100644
--- a/docs/template_extension/extension.cpp
+++ b/docs/template_extension/extension.cpp
@@ -7,12 +7,12 @@
#include "cpu_kernel.hpp"
#include "op.hpp"
#ifdef OPENCV_IMPORT_ENABLED
- #include "fft_kernel.hpp"
- #include "fft_op.hpp"
+# include "fft_kernel.hpp"
+# include "fft_op.hpp"
#endif
#include
-#ifdef NGRAPH_ONNX_IMPORT_ENABLED
- #include
+#ifdef NGRAPH_ONNX_FRONTEND_ENABLED
+# include
#endif
#include