From e0c178e431cdb91d49f97809491d51509f990bb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dawid=20Ko=C5=BCykowski?= Date: Fri, 3 Sep 2021 06:28:21 +0200 Subject: [PATCH 01/52] Update SLT classes functions to use parameters passed by reference (#7256) * update single layer tests * add missing update to squeeze_unsqueeze --- .../shared_test_classes/single_layer/adaptive_pooling.hpp | 2 +- .../shared_test_classes/single_layer/binary_convolution.hpp | 2 +- .../include/shared_test_classes/single_layer/bucketize.hpp | 2 +- .../include/shared_test_classes/single_layer/clamp.hpp | 2 +- .../shared_test_classes/single_layer/convolution.hpp | 2 +- .../single_layer/convolution_backprop.hpp | 2 +- .../single_layer/convolution_backprop_data.hpp | 2 +- .../include/shared_test_classes/single_layer/cum_sum.hpp | 2 +- .../single_layer/deformable_convolution.hpp | 2 +- .../single_layer/deformable_psroi_pooling.hpp | 2 +- .../shared_test_classes/single_layer/detection_output.hpp | 2 +- .../include/shared_test_classes/single_layer/dft.hpp | 2 +- .../include/shared_test_classes/single_layer/eltwise.hpp | 2 +- .../single_layer/embedding_bag_offsets_sum.hpp | 2 +- .../single_layer/embedding_bag_packed_sum.hpp | 2 +- .../single_layer/embedding_segments_sum.hpp | 2 +- .../shared_test_classes/single_layer/group_convolution.hpp | 4 ++-- .../single_layer/group_convolution_backprop_data.hpp | 2 +- .../shared_test_classes/single_layer/interpolate.hpp | 2 +- .../shared_test_classes/single_layer/log_softmax.hpp | 2 +- .../include/shared_test_classes/single_layer/logical.hpp | 2 +- .../shared_test_classes/single_layer/low_precision.hpp | 2 +- .../include/shared_test_classes/single_layer/lrn.hpp | 2 +- .../include/shared_test_classes/single_layer/matrix_nms.hpp | 2 +- .../shared_test_classes/single_layer/multiclass_nms.hpp | 2 +- .../single_layer/non_max_suppression.hpp | 2 +- .../include/shared_test_classes/single_layer/nonzero.hpp | 2 +- .../shared_test_classes/single_layer/normalize_l2.hpp | 2 +- 
.../include/shared_test_classes/single_layer/one_hot.hpp | 2 +- .../include/shared_test_classes/single_layer/pad.hpp | 2 +- .../include/shared_test_classes/single_layer/pooling.hpp | 4 ++-- .../include/shared_test_classes/single_layer/proposal.hpp | 2 +- .../shared_test_classes/single_layer/psroi_pooling.hpp | 2 +- .../include/shared_test_classes/single_layer/range.hpp | 6 +++--- .../include/shared_test_classes/single_layer/reduce_ops.hpp | 4 ++-- .../include/shared_test_classes/single_layer/reshape.hpp | 3 +-- .../include/shared_test_classes/single_layer/result.hpp | 2 +- .../include/shared_test_classes/single_layer/roi_align.hpp | 2 +- .../shared_test_classes/single_layer/roi_pooling.hpp | 2 +- .../include/shared_test_classes/single_layer/roll.hpp | 2 +- .../include/shared_test_classes/single_layer/shape_of.hpp | 2 +- .../shared_test_classes/single_layer/shuffle_channels.hpp | 2 +- .../include/shared_test_classes/single_layer/softmax.hpp | 2 +- .../include/shared_test_classes/single_layer/split.hpp | 2 +- .../shared_test_classes/single_layer/squeeze_unsqueeze.hpp | 4 ++-- .../include/shared_test_classes/single_layer/tile.hpp | 2 +- .../include/shared_test_classes/single_layer/topk.hpp | 4 ++-- .../include/shared_test_classes/single_layer/transpose.hpp | 4 ++-- .../shared_test_classes/single_layer/variadic_split.hpp | 2 +- .../src/single_layer/adaptive_pooling.cpp | 2 +- .../src/single_layer/binary_convolution.cpp | 2 +- .../shared_test_classes/src/single_layer/bucketize.cpp | 2 +- .../shared_test_classes/src/single_layer/clamp.cpp | 2 +- .../shared_test_classes/src/single_layer/convolution.cpp | 2 +- .../src/single_layer/convolution_backprop.cpp | 2 +- .../src/single_layer/convolution_backprop_data.cpp | 2 +- .../shared_test_classes/src/single_layer/cum_sum.cpp | 2 +- .../src/single_layer/deformable_convolution.cpp | 4 ++-- .../src/single_layer/deformable_psroi_pooling.cpp | 2 +- .../src/single_layer/detection_output.cpp | 3 +-- 
.../functional/shared_test_classes/src/single_layer/dft.cpp | 2 +- .../shared_test_classes/src/single_layer/eltwise.cpp | 2 +- .../src/single_layer/embedding_bag_offsets_sum.cpp | 2 +- .../src/single_layer/embedding_bag_packed_sum.cpp | 2 +- .../src/single_layer/embedding_segments_sum.cpp | 2 +- .../src/single_layer/group_convolution.cpp | 2 +- .../src/single_layer/group_convolution_backprop_data.cpp | 2 +- .../shared_test_classes/src/single_layer/interpolate.cpp | 2 +- .../shared_test_classes/src/single_layer/log_softmax.cpp | 2 +- .../shared_test_classes/src/single_layer/logical.cpp | 4 ++-- .../shared_test_classes/src/single_layer/low_precision.cpp | 2 +- .../functional/shared_test_classes/src/single_layer/lrn.cpp | 2 +- .../shared_test_classes/src/single_layer/matrix_nms.cpp | 2 +- .../shared_test_classes/src/single_layer/multiclass_nms.cpp | 2 +- .../src/single_layer/non_max_suppression.cpp | 4 ++-- .../shared_test_classes/src/single_layer/nonzero.cpp | 2 +- .../shared_test_classes/src/single_layer/normalize_l2.cpp | 2 +- .../shared_test_classes/src/single_layer/one_hot.cpp | 4 ++-- .../functional/shared_test_classes/src/single_layer/pad.cpp | 4 ++-- .../shared_test_classes/src/single_layer/pooling.cpp | 4 ++-- .../shared_test_classes/src/single_layer/proposal.cpp | 2 +- .../shared_test_classes/src/single_layer/psroi_pooling.cpp | 2 +- .../shared_test_classes/src/single_layer/range.cpp | 6 +++--- .../shared_test_classes/src/single_layer/reduce_ops.cpp | 4 ++-- .../shared_test_classes/src/single_layer/reshape.cpp | 2 +- .../shared_test_classes/src/single_layer/result.cpp | 2 +- .../shared_test_classes/src/single_layer/roi_align.cpp | 2 +- .../shared_test_classes/src/single_layer/roi_pooling.cpp | 2 +- .../shared_test_classes/src/single_layer/roll.cpp | 2 +- .../shared_test_classes/src/single_layer/shape_of.cpp | 4 ++-- .../src/single_layer/shuffle_channels.cpp | 2 +- .../shared_test_classes/src/single_layer/softmax.cpp | 2 +- 
.../shared_test_classes/src/single_layer/split.cpp | 2 +- .../src/single_layer/squeeze_unsqueeze.cpp | 2 +- .../shared_test_classes/src/single_layer/tile.cpp | 2 +- .../shared_test_classes/src/single_layer/topk.cpp | 4 ++-- .../shared_test_classes/src/single_layer/transpose.cpp | 2 +- .../shared_test_classes/src/single_layer/variadic_split.cpp | 2 +- 98 files changed, 117 insertions(+), 119 deletions(-) diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/adaptive_pooling.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/adaptive_pooling.hpp index fcbb8d0ebcf..83ca0a9a9f4 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/adaptive_pooling.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/adaptive_pooling.hpp @@ -17,7 +17,7 @@ using adapoolParams = std::tuple< class AdaPoolLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp index 196c659786e..8c4b87c3081 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp @@ -38,7 +38,7 @@ using binaryConvolutionTestParamsSet = std::tuple< class BinaryConvolutionLayerTest : public testing::WithParamInterface, virtual public 
LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/bucketize.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/bucketize.hpp index 8917ee3681a..d5ffc42b36f 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/bucketize.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/bucketize.hpp @@ -26,7 +26,7 @@ using bucketizeParamsTuple = std::tuple< class BucketizeLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/clamp.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/clamp.hpp index bcf34636513..65e1ee381e3 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/clamp.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/clamp.hpp @@ -23,7 +23,7 @@ using clampParamsTuple = std::tuple< class ClampLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string 
getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp index c8d7b0b8969..ce8badb6da7 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp @@ -39,7 +39,7 @@ typedef std::tuple< class ConvolutionLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp index 794782396da..ef0af87cad5 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp @@ -40,7 +40,7 @@ typedef std::tuple< class ConvolutionBackpropLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp 
b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp index 933f3d6d89b..d73bdf73289 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp @@ -42,7 +42,7 @@ typedef std::tuple< class ConvolutionBackpropDataLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/cum_sum.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/cum_sum.hpp index 6a52af687c2..d59918fecfd 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/cum_sum.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/cum_sum.hpp @@ -22,7 +22,7 @@ typedef std::tuple< class CumSumLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp index 40cf7db1f5a..1b1cff6a0d3 100644 --- 
a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp @@ -44,7 +44,7 @@ typedef std::tuple< class DeformableConvolutionLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp index f870e11aba8..a891d1ca87f 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp @@ -35,7 +35,7 @@ using deformablePSROILayerTestParams = std::tuple< class DeformablePSROIPoolingLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); void GenerateInputs() override; protected: diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp index e7279c83c74..31cfd4b79af 100644 --- 
a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp @@ -59,7 +59,7 @@ using DetectionOutputParams = std::tuple< class DetectionOutputLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); ngraph::op::DetectionOutputAttrs attrs; std::vector inShapes; void GenerateInputs() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/dft.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/dft.hpp index 9f46000fc56..bd44f88e3b2 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/dft.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/dft.hpp @@ -22,7 +22,7 @@ typedef std::tuple< class DFTLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp index b17b8891922..842bcdd0f7e 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp @@ -35,6 +35,6 @@ protected: void SetUp() 
override; public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); }; } // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_bag_offsets_sum.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_bag_offsets_sum.hpp index 1d72b248bdc..b18014a2cb5 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_bag_offsets_sum.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_bag_offsets_sum.hpp @@ -30,7 +30,7 @@ typedef std::tuple< class EmbeddingBagOffsetsSumLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_bag_packed_sum.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_bag_packed_sum.hpp index 711de79841b..03b18af7aa1 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_bag_packed_sum.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_bag_packed_sum.hpp @@ -28,7 +28,7 @@ typedef std::tuple< class EmbeddingBagPackedSumLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void 
SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_segments_sum.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_segments_sum.hpp index d6498a1379f..80390f26c14 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_segments_sum.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_segments_sum.hpp @@ -31,7 +31,7 @@ typedef std::tuple< class EmbeddingSegmentsSumLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp index 37b61b3f958..874573b810e 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp @@ -36,10 +36,10 @@ typedef std::tuple< class GroupConvolutionLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; -} // namespace LayerTestsDefinitions \ No newline at end of file +} // namespace LayerTestsDefinitions diff --git 
a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp index fbeaecb719f..2d61e90059f 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp @@ -38,7 +38,7 @@ using groupConvBackpropDataLayerTestParamsSet = std::tuple< class GroupConvBackpropDataLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/interpolate.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/interpolate.hpp index 541f3c4fa16..37cfacc51ef 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/interpolate.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/interpolate.hpp @@ -45,7 +45,7 @@ typedef std::tuple< class InterpolateLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/log_softmax.hpp 
b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/log_softmax.hpp index 3b32a1f6658..3a086c03d25 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/log_softmax.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/log_softmax.hpp @@ -31,7 +31,7 @@ using logSoftmaxLayerTestParams = std::tuple< class LogSoftmaxLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/logical.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/logical.hpp index d5702f43302..697dc1ffdd1 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/logical.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/logical.hpp @@ -40,7 +40,7 @@ protected: void SetUp() override; public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); static std::vector combineShapes(const std::map, std::vector>>& inputShapes); protected: diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/low_precision.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/low_precision.hpp index d45b6d605d9..46cb931bdf4 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/low_precision.hpp +++ 
b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/low_precision.hpp @@ -24,7 +24,7 @@ typedef std::tuple< class LowPrecisionTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lrn.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lrn.hpp index 5b5c94236f2..f47f883871f 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lrn.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lrn.hpp @@ -33,7 +33,7 @@ class LrnLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp index 9be3b082c3b..f318c6930af 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp @@ -40,7 +40,7 @@ using NmsParams = std::tuple, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); void 
GenerateInputs() override; void Compare(const std::vector>> &expectedOutputs, const std::vector &actualOutputs) diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp index 4add46d8ce1..4dd7777b4b9 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp @@ -42,7 +42,7 @@ using MulticlassNmsParams = std::tuple, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); void GenerateInputs() override; void Compare(const std::vector>>& expectedOutputs, const std::vector& actualOutputs) override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp index 0714f0236d8..92a4e6cde22 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp @@ -43,7 +43,7 @@ using NmsParams = std::tuple, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); void GenerateInputs() override; void Compare(const std::vector>> &expectedOutputs, const std::vector &actualOutputs) diff --git 
a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/nonzero.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/nonzero.hpp index 7953aab58b0..e3fbc5b0c84 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/nonzero.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/nonzero.hpp @@ -27,7 +27,7 @@ using NonZeroLayerTestParamsSet = typename std::tuple< class NonZeroLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp index c0740d48f40..5d047f52def 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp @@ -25,7 +25,7 @@ using NormalizeL2LayerTestParams = std::tuple< class NormalizeL2LayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/one_hot.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/one_hot.hpp index 13d8b533687..62b6b0fdc61 100644 --- 
a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/one_hot.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/one_hot.hpp @@ -27,7 +27,7 @@ typedef std::tuple< class OneHotLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pad.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pad.hpp index 799c0c8ba67..1580ecd45e6 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pad.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pad.hpp @@ -29,7 +29,7 @@ typedef std::tuple< class PadLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp index 72c96115806..12417959d22 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp @@ -51,7 +51,7 @@ typedef std::tuple< class PoolingLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { 
public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; @@ -60,7 +60,7 @@ protected: class GlobalPoolingLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/proposal.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/proposal.hpp index 2514dcd1b51..5d889dec8fa 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/proposal.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/proposal.hpp @@ -55,7 +55,7 @@ class ProposalLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); static std::string SerializeProposalSpecificParams(proposalSpecificParams& params); InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; void Compare(const std::vector>> &expectedOutputs, diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp index 1787cc04784..3e15e38da62 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp +++ 
b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp @@ -30,7 +30,7 @@ using psroiParams = std::tuple, // input shape class PSROIPoolingLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); void GenerateInputs() override; static void fillROITensor(float* buffer, int numROIs, int batchSize, int height, int width, int groupSize, diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/range.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/range.hpp index 573f79eeec2..1901d71dc6d 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/range.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/range.hpp @@ -29,7 +29,7 @@ class RangeLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { float start, stop, step; public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); void Infer() override; protected: @@ -39,7 +39,7 @@ protected: class RangeNumpyLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); void Infer() override; protected: void SetUp() override; @@ -47,4 +47,4 @@ private: float start, stop, step; }; -} // namespace LayerTestsDefinitions \ No newline at end of file +} // namespace LayerTestsDefinitions diff --git 
a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reduce_ops.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reduce_ops.hpp index 98029b6ebef..dc164a0d1e0 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reduce_ops.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reduce_ops.hpp @@ -30,7 +30,7 @@ typedef std::tuple< class ReduceOpsLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: @@ -42,4 +42,4 @@ protected: InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; }; -} // namespace LayerTestsDefinitions \ No newline at end of file +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reshape.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reshape.hpp index ead04a5f4e9..7ca1c3f29be 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reshape.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reshape.hpp @@ -30,8 +30,7 @@ typedef std::tuple, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName( - testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo &obj); protected: void SetUp() override; diff --git 
a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/result.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/result.hpp index f3573eea4c3..8287603463b 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/result.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/result.hpp @@ -26,7 +26,7 @@ using ResultTestParamSet = typename std::tuple< class ResultLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_align.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_align.hpp index b4f40642927..1f7281d57fa 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_align.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_align.hpp @@ -21,7 +21,7 @@ using roialignParams = std::tuple, // feature map shape class ROIAlignLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); static void fillCoordTensor(std::vector& coords, int height, int width, float spatialScale, int pooledRatio, int pooledH, int pooledW); static void fillIdxTensor(std::vector& idx, int batchSize); diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_pooling.hpp 
b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_pooling.hpp index bdd446c0216..798123c1bda 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_pooling.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_pooling.hpp @@ -28,7 +28,7 @@ using roiPoolingParamsTuple = std::tuple< class ROIPoolingLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); void GenerateInputs() override; protected: diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roll.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roll.hpp index 97dfcdb7fbc..b43042f4646 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roll.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roll.hpp @@ -21,7 +21,7 @@ typedef std::tuple< class RollLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shape_of.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shape_of.hpp index 8756f824c1a..135b327cc3b 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shape_of.hpp +++ 
b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shape_of.hpp @@ -23,7 +23,7 @@ typedef std::tuple< class ShapeOfLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shuffle_channels.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shuffle_channels.hpp index 6646b622a82..d0cf2bfc4fc 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shuffle_channels.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shuffle_channels.hpp @@ -32,7 +32,7 @@ typedef std::tuple< class ShuffleChannelsLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/softmax.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/softmax.hpp index 02fd1000356..b997e3651f2 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/softmax.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/softmax.hpp @@ -31,7 +31,7 @@ using softMaxLayerTestParams = std::tuple< class SoftMaxLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static 
std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/split.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/split.hpp index bc45e82a0db..d0fb931919b 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/split.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/split.hpp @@ -30,7 +30,7 @@ typedef std::tuple< class SplitLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/squeeze_unsqueeze.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/squeeze_unsqueeze.hpp index 6dd1d0dad90..2c4edf90c21 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/squeeze_unsqueeze.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/squeeze_unsqueeze.hpp @@ -29,9 +29,9 @@ typedef std::tuple< class SqueezeUnsqueezeLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; -} // namespace LayerTestsDefinitions \ No newline at end of file +} // namespace LayerTestsDefinitions diff --git 
a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tile.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tile.hpp index c4cffefe29a..9b17424e26d 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tile.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tile.hpp @@ -29,7 +29,7 @@ typedef std::tuple< class TileLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp index 26262fe1d5d..f2e3667d78f 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp @@ -27,10 +27,10 @@ typedef std::tuple< class TopKLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; -} // namespace LayerTestsDefinitions \ No newline at end of file +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/transpose.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/transpose.hpp index 22c253dede9..6fc59a9dcc0 100644 --- 
a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/transpose.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/transpose.hpp @@ -28,10 +28,10 @@ typedef std::tuple< class TransposeLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; -} // namespace LayerTestsDefinitions \ No newline at end of file +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/variadic_split.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/variadic_split.hpp index 02f40f99077..2e6b731fd7c 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/variadic_split.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/variadic_split.hpp @@ -29,7 +29,7 @@ typedef std::tuple< class VariadicSplitLayerTest : public testing::WithParamInterface, public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp index 4cf40860130..a7248f61f0c 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp @@ -12,7 +12,7 @@ using namespace 
FuncTestUtils::PrecisionUtils; namespace LayerTestsDefinitions { -std::string AdaPoolLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string AdaPoolLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { std::vector inputShape; std::vector pooledSpatialShape; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp index a5ca59e73fa..42195b5c6d8 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string BinaryConvolutionLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string BinaryConvolutionLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { binConvSpecificParams binConvParams; InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp index c10d47cc699..18b88fe52b5 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { - std::string BucketizeLayerTest::getTestCaseName(testing::TestParamInfo obj) { + std::string BucketizeLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::SizeVector dataShape; InferenceEngine::SizeVector bucketsShape; bool with_right_bound; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/clamp.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/clamp.cpp index 
0fae07ccc74..e95406dde34 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/clamp.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/clamp.cpp @@ -7,7 +7,7 @@ namespace LayerTestsDefinitions { -std::string ClampLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string ClampLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::SizeVector inShape; std::pair interval; InferenceEngine::Precision netPrc; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/convolution.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/convolution.cpp index 744bb9a2b73..e419e08e0d8 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/convolution.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/convolution.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string ConvolutionLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string ConvolutionLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { convSpecificParams convParams; InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp index 55aae5e0a21..124dcc0fb94 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string ConvolutionBackpropLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string ConvolutionBackpropLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { convBackpropSpecificParams convBackpropDataParams; 
InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp index c1e642d6764..83d0587fda3 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp @@ -8,7 +8,7 @@ namespace LayerTestsDefinitions { -std::string ConvolutionBackpropDataLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string ConvolutionBackpropDataLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { convBackpropDataSpecificParams convBackpropDataParams; InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp index 3d0b62963a6..024fdf43c7b 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string CumSumLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string CumSumLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::SizeVector inputShapes; InferenceEngine::Precision inputPrecision; int64_t axis; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp index d8014598cba..e787de0226f 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp +++ 
b/inference-engine/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_layer/deformable_convolution.hpp" namespace LayerTestsDefinitions { -std::string DeformableConvolutionLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string DeformableConvolutionLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { deformableConvSpecificParams convParams; InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; @@ -101,4 +101,4 @@ void DeformableConvolutionLayerTest::SetUp() { ngraph::ResultVector results{std::make_shared(deformable_conv)}; function = std::make_shared(results, parameters, "deformable_convolution"); } -} // namespace LayerTestsDefinitions \ No newline at end of file +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp index 4f8fa43b06a..a48d7faca51 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp @@ -7,7 +7,7 @@ namespace LayerTestsDefinitions { - std::string DeformablePSROIPoolingLayerTest::getTestCaseName(testing::TestParamInfo obj) { + std::string DeformablePSROIPoolingLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { std::vector dataShape; std::vector roisShape; std::vector offsetsShape; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp index 4068abff126..02a1655d56c 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp +++ 
b/inference-engine/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp @@ -7,7 +7,7 @@ namespace LayerTestsDefinitions { -std::string DetectionOutputLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string DetectionOutputLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { DetectionOutputAttributes commonAttrs; ParamsWhichSizeDepends specificAttrs; ngraph::op::DetectionOutputAttrs attrs; @@ -153,4 +153,3 @@ void DetectionOutputLayerTest::SetUp() { function = std::make_shared(results, params, "DetectionOutput"); } } // namespace LayerTestsDefinitions - diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/dft.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/dft.cpp index 28b3b0b11dc..5f31beff78a 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/dft.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/dft.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string DFTLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string DFTLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::SizeVector inputShapes; InferenceEngine::Precision inputPrecision; std::vector axes; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp index 0a84a71aa6e..3c4366db2d0 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp @@ -8,7 +8,7 @@ namespace LayerTestsDefinitions { -std::string EltwiseLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string EltwiseLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { std::vector> inputShapes; InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, 
outPrc; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp index 54991d0b237..6dd70b9183b 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp @@ -7,7 +7,7 @@ namespace LayerTestsDefinitions { -std::string EmbeddingBagOffsetsSumLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string EmbeddingBagOffsetsSumLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { embeddingBagOffsetsSumParams params; InferenceEngine::Precision netPrecision, indPrecision; std::string targetDevice; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp index 32ba24efa30..910f37ce512 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp @@ -7,7 +7,7 @@ namespace LayerTestsDefinitions { -std::string EmbeddingBagPackedSumLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string EmbeddingBagPackedSumLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { embeddingBagPackedSumParams params; InferenceEngine::Precision netPrecision, indPrecision; std::string targetDevice; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp index 3fe0faf5639..3a86daf0e31 100644 --- 
a/inference-engine/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp @@ -8,7 +8,7 @@ namespace LayerTestsDefinitions { -std::string EmbeddingSegmentsSumLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string EmbeddingSegmentsSumLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { embeddingSegmentsSumParams params; InferenceEngine::Precision netPrecision, indPrecision; std::string targetDevice; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp index 0b301b5d273..dcf59efd210 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string GroupConvolutionLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string GroupConvolutionLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { groupConvSpecificParams groupConvParams; InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp index 5f536397de7..caf94578935 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp @@ -8,7 +8,7 @@ namespace LayerTestsDefinitions { // DEPRECATED, remove this old API when KMB (#58495) and ARM (#58496) plugins are migrated to new API 
-std::string GroupConvBackpropDataLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string GroupConvBackpropDataLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { groupConvBackpropDataSpecificParams groupConvBackpropDataParams; InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp index 77d22a1af3a..ce01e26d97f 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp @@ -11,7 +11,7 @@ using ngraph::helpers::operator<<; namespace LayerTestsDefinitions { -std::string InterpolateLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string InterpolateLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InterpolateSpecificParams interpolateParams; InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp index 8b1bf040748..669ea0ec501 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string LogSoftmaxLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string LogSoftmaxLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; InferenceEngine::Layout inLayout, outLayout; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/logical.cpp 
b/inference-engine/tests/functional/shared_test_classes/src/single_layer/logical.cpp index 2172ec87e2e..a6015debc23 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/logical.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/logical.cpp @@ -8,7 +8,7 @@ using namespace LayerTestsDefinitions::LogicalParams; namespace LayerTestsDefinitions { -std::string LogicalLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string LogicalLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InputShapesTuple inputShapes; ngraph::helpers::LogicalTypes comparisonOpType; ngraph::helpers::InputLayerType secondInputType; @@ -79,4 +79,4 @@ void LogicalLayerTest::SetUp() { function = std::make_shared(logicalNode, inputs, "Logical"); } -} // namespace LayerTestsDefinitions \ No newline at end of file +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp index bc4ad3f3f08..d4aee621def 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp @@ -7,7 +7,7 @@ namespace LowPrecisionTestDefinitions { -std::string LowPrecisionTest::getTestCaseName(testing::TestParamInfo obj) { +std::string LowPrecisionTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::Precision netPrecision; std::string targetDevice; std::pair> config; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/lrn.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/lrn.cpp index 248493a3851..71ac393a729 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/lrn.cpp +++ 
b/inference-engine/tests/functional/shared_test_classes/src/single_layer/lrn.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string LrnLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string LrnLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { double alpha, beta, bias; size_t size; std::vector axes; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp index 2b33a25ae1e..de4787cdf22 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp @@ -10,7 +10,7 @@ using namespace ngraph; using namespace InferenceEngine; using namespace FuncTestUtils::PrecisionUtils; -std::string MatrixNmsLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string MatrixNmsLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InputShapeParams inShapeParams; InputPrecisions inPrecisions; op::v8::MatrixNms::SortResultType sortResultType; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/multiclass_nms.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/multiclass_nms.cpp index e8532bad227..dd42206f63f 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/multiclass_nms.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/multiclass_nms.cpp @@ -10,7 +10,7 @@ using namespace ngraph; using namespace InferenceEngine; using namespace FuncTestUtils::PrecisionUtils; -std::string MulticlassNmsLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string MulticlassNmsLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InputShapeParams inShapeParams; InputPrecisions inPrecisions; int32_t nmsTopK, backgroundClass, keepTopK; diff --git 
a/inference-engine/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp index fd8499e8897..3ae555b6d53 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp @@ -11,7 +11,7 @@ using namespace ngraph; using namespace InferenceEngine; using namespace FuncTestUtils::PrecisionUtils; -std::string NmsLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string NmsLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InputShapeParams inShapeParams; InputPrecisions inPrecisions; int32_t maxOutBoxesPerClass; @@ -162,7 +162,7 @@ public: }; /* - * 1: selected_indices - tensor of type T_IND and shape [number of selected boxes, 3] containing information about selected boxes as triplets + * 1: selected_indices - tensor of type T_IND and shape [number of selected boxes, 3] containing information about selected boxes as triplets * [batch_index, class_index, box_index]. * 2: selected_scores - tensor of type T_THRESHOLDS and shape [number of selected boxes, 3] containing information about scores for each selected box as triplets * [batch_index, class_index, box_score]. 
diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp index 1613cabc719..744d850071d 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string NonZeroLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string NonZeroLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { std::vector inputShape; InferenceEngine::Precision inputPrecision; std::string targetDevice; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp index a7816e39571..140175b1532 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string NormalizeL2LayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string NormalizeL2LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { std::vector axes; float eps; ngraph::op::EpsMode epsMode; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp index 324989b93f7..a2713d97e3c 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string OneHotLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string OneHotLayerTest::getTestCaseName(const testing::TestParamInfo& obj) 
{ int64_t axis; ngraph::element::Type depth_type, set_type; int64_t depth_val; @@ -49,4 +49,4 @@ void OneHotLayerTest::SetUp() { ngraph::ResultVector results{std::make_shared(onehot)}; function = std::make_shared(results, params, "OneHot"); } -} // namespace LayerTestsDefinitions \ No newline at end of file +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/pad.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/pad.cpp index 71353007f6f..6f9127b2431 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/pad.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/pad.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string PadLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string PadLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; InferenceEngine::Layout inLayout; @@ -50,4 +50,4 @@ void PadLayerTest::SetUp() { ngraph::ResultVector results{std::make_shared(pad)}; function = std::make_shared(results, params, "pad"); } -} // namespace LayerTestsDefinitions \ No newline at end of file +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/pooling.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/pooling.cpp index 8a68be10a8d..276fc2bfb6f 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/pooling.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/pooling.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string PoolingLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string PoolingLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { poolSpecificParams poolParams; InferenceEngine::Precision netPrecision; 
InferenceEngine::Precision inPrc, outPrc; @@ -48,7 +48,7 @@ std::string PoolingLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string GlobalPoolingLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { poolSpecificParams poolParams; InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/proposal.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/proposal.cpp index 6d228135221..a0e575ffe5c 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/proposal.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/proposal.cpp @@ -51,7 +51,7 @@ std::string ProposalLayerTest::SerializeProposalSpecificParams(proposalSpecificP return result.str(); } -std::string ProposalLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string ProposalLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { proposalSpecificParams proposalParams; std::string targetDevice; std::tie(proposalParams, targetDevice) = obj.param; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp index d2260b3d3dc..d0701be3bd3 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string PSROIPoolingLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string PSROIPoolingLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { std::vector inputShape; std::vector coordsShape; size_t outputDim; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/range.cpp 
b/inference-engine/tests/functional/shared_test_classes/src/single_layer/range.cpp index 1695737c3ef..a008c09991d 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/range.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/range.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string RangeLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string RangeLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; InferenceEngine::Layout inLayout, outLayout; @@ -59,7 +59,7 @@ void RangeLayerTest::SetUp() { function = std::make_shared(results, params, "Range"); } -std::string RangeNumpyLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string RangeNumpyLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::Precision netPrc; InferenceEngine::Precision paramPrc; InferenceEngine::Precision outPrc; @@ -113,4 +113,4 @@ void RangeNumpyLayerTest::SetUp() { const ngraph::ResultVector results{std::make_shared(range)}; function = std::make_shared(results, params, "Range"); } -} // namespace LayerTestsDefinitions \ No newline at end of file +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp index 32dd59edd5a..bafb2f60af8 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string ReduceOpsLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string ReduceOpsLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; 
InferenceEngine::Layout inLayout; @@ -104,4 +104,4 @@ InferenceEngine::Blob::Ptr ReduceOpsLayerWithSpecificInputTest::GenerateInput(co return blob; } -} // namespace LayerTestsDefinitions \ No newline at end of file +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/reshape.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/reshape.cpp index 27fd61ea169..478f2ce0e27 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/reshape.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/reshape.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/reshape.hpp" namespace LayerTestsDefinitions { -std::string ReshapeLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string ReshapeLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; InferenceEngine::Layout inLayout, outLayout; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/result.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/result.cpp index 0a3456e1438..e349a92ec0e 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/result.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/result.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string ResultLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string ResultLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { std::vector inputShape; InferenceEngine::Precision inputPrecision; std::string targetDevice; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp index 57b5393811d..be7318f31c5 100644 --- 
a/inference-engine/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp @@ -12,7 +12,7 @@ using namespace FuncTestUtils::PrecisionUtils; namespace LayerTestsDefinitions { -std::string ROIAlignLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string ROIAlignLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { std::vector inputShape; std::vector coordsShape; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp index c9a6ed3d135..a59a224a52b 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { - std::string ROIPoolingLayerTest::getTestCaseName(testing::TestParamInfo obj) { + std::string ROIPoolingLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { std::vector inputShape; std::vector coordsShape; std::vector poolShape; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/roll.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/roll.cpp index e54abc943d9..a9a385fd29c 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/roll.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/roll.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string RollLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string RollLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::SizeVector inputShapes; InferenceEngine::Precision inputPrecision; std::vector shift; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp 
b/inference-engine/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp index fabec53f8ba..cef18b4b1cf 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { - std::string ShapeOfLayerTest::getTestCaseName(testing::TestParamInfo obj) { + std::string ShapeOfLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::SizeVector inputShapes; InferenceEngine::Precision inputPrecision; std::string targetDevice; @@ -30,4 +30,4 @@ namespace LayerTestsDefinitions { function = std::make_shared(results, param, "shapeOf"); } -} // namespace LayerTestsDefinitions \ No newline at end of file +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp index c6312fac05b..b8bfe7ff4d9 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp @@ -7,7 +7,7 @@ namespace LayerTestsDefinitions { -std::string ShuffleChannelsLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string ShuffleChannelsLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { shuffleChannelsSpecificParams shuffleChannelsParams; InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/softmax.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/softmax.cpp index d492f5e763c..1f88945baa1 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/softmax.cpp +++ 
b/inference-engine/tests/functional/shared_test_classes/src/single_layer/softmax.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string SoftMaxLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string SoftMaxLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; InferenceEngine::Layout inLayout, outLayout; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/split.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/split.cpp index 33a6c385412..446533dd3de 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/split.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/split.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string SplitLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string SplitLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { size_t numSplits; int64_t axis; InferenceEngine::Precision netPrecision; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp index ca78f3805ef..0b30db48aec 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/squeeze_unsqueeze.hpp" namespace LayerTestsDefinitions { -std::string SqueezeUnsqueezeLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string SqueezeUnsqueezeLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; InferenceEngine::Layout inLayout, outLayout; diff --git 
a/inference-engine/tests/functional/shared_test_classes/src/single_layer/tile.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/tile.cpp index 6e22c9769bf..765448b7ed1 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/tile.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/tile.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string TileLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string TileLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { TileSpecificParams tileParams; InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/topk.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/topk.cpp index 3886f7f391a..9f84287cb4d 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/topk.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/topk.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/topk.hpp" namespace LayerTestsDefinitions { - std::string TopKLayerTest::getTestCaseName(testing::TestParamInfo obj) { + std::string TopKLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; InferenceEngine::Layout inLayout; @@ -52,4 +52,4 @@ void TopKLayerTest::SetUp() { } function = std::make_shared(results, params, "TopK"); } -} // namespace LayerTestsDefinitions \ No newline at end of file +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/transpose.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/transpose.cpp index bbcda7b53c4..f78868ccfd2 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/transpose.cpp +++ 
b/inference-engine/tests/functional/shared_test_classes/src/single_layer/transpose.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { -std::string TransposeLayerTest::getTestCaseName(testing::TestParamInfo obj) { +std::string TransposeLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::Precision netPrecision; InferenceEngine::Precision inPrc, outPrc; InferenceEngine::Layout inLayout, outLayout; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp index 7f3e7c4b2c6..b7b948828a4 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp @@ -6,7 +6,7 @@ namespace LayerTestsDefinitions { - std::string VariadicSplitLayerTest::getTestCaseName(testing::TestParamInfo obj) { + std::string VariadicSplitLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { size_t axis; std::vector numSplits; InferenceEngine::Precision netPrecision; From d748f2aa998edf55e26a22ab354d6048a3c67da3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Karzy=C5=84ski?= Date: Fri, 3 Sep 2021 07:32:43 +0200 Subject: [PATCH 02/52] Remove references to prototxt from documentation and docstrings (#7346) --- docs/IE_DG/Integrate_with_customer_application_new_API.md | 2 +- .../python/src/openvino/inference_engine/ie_api.pyx | 2 +- inference-engine/samples/benchmark_app/benchmark_app.hpp | 2 +- inference-engine/src/inference_engine/include/ie/ie_core.hpp | 4 ++-- .../src/inference_engine/include/openvino/runtime/core.hpp | 4 ++-- tests/time_tests/scripts/run_timetest.py | 2 +- tests/time_tests/src/timetests_helper/cli.h | 2 +- tools/benchmark_tool/openvino/tools/benchmark/parameters.py | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git 
a/docs/IE_DG/Integrate_with_customer_application_new_API.md b/docs/IE_DG/Integrate_with_customer_application_new_API.md index 93482a90938..9e5ac71189c 100644 --- a/docs/IE_DG/Integrate_with_customer_application_new_API.md +++ b/docs/IE_DG/Integrate_with_customer_application_new_API.md @@ -35,7 +35,7 @@ Integration process includes the following steps: @snippet snippets/Integrate_with_customer_application_new_API.cpp part1 -**Or read the model from ONNX format** (.onnx and .prototxt are supported formats). You can find more information about the ONNX format support in the document [ONNX format support in the OpenVINO™](./ONNX_Support.md). +**Or read the model from ONNX format**. You can find more information about the ONNX format support in the document [ONNX format support in the OpenVINO™](./ONNX_Support.md). @snippet snippets/Integrate_with_customer_application_new_API.cpp part2 diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx index 109a6a51a02..944557084e9 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx @@ -304,7 +304,7 @@ cdef class IECore: return versions ## Reads a network from Intermediate Representation (IR) or ONNX formats and creates an `IENetwork`. - # @param model: A `.xml`, `.onnx`or `.prototxt` model file or string with IR. + # @param model: A `.xml` or `.onnx` model file or string with IR. # @param weights: A `.bin` file of the IR. Depending on `init_from_buffer` value, can be a string path or # bytes with file content. # @param init_from_buffer: Defines the way of how `model` and `weights` attributes are interpreted. 
diff --git a/inference-engine/samples/benchmark_app/benchmark_app.hpp b/inference-engine/samples/benchmark_app/benchmark_app.hpp index a752152ec22..352baa09b05 100644 --- a/inference-engine/samples/benchmark_app/benchmark_app.hpp +++ b/inference-engine/samples/benchmark_app/benchmark_app.hpp @@ -19,7 +19,7 @@ static const char input_message[] = /// @brief message for model argument static const char model_message[] = - "Required. Path to an .xml/.onnx/.prototxt file with a trained model or to a .blob files with " + "Required. Path to an .xml/.onnx file with a trained model or to a .blob files with " "a trained compiled model."; /// @brief message for execution mode diff --git a/inference-engine/src/inference_engine/include/ie/ie_core.hpp b/inference-engine/src/inference_engine/include/ie/ie_core.hpp index a02232bc394..3f899c72079 100644 --- a/inference-engine/src/inference_engine/include/ie/ie_core.hpp +++ b/inference-engine/src/inference_engine/include/ie/ie_core.hpp @@ -59,7 +59,7 @@ public: * For IR format (*.bin): * * if path is empty, will try to read bin file with the same name as xml and * * if bin file with the same name was not found, will load IR without weights. - * For ONNX format (*.onnx or *.prototxt): + * For ONNX format (*.onnx): * * binPath parameter is not used. * @return CNNNetwork */ @@ -73,7 +73,7 @@ public: * For IR format (*.bin): * * if path is empty, will try to read bin file with the same name as xml and * * if bin file with the same name was not found, will load IR without weights. - * For ONNX format (*.onnx or *.prototxt): + * For ONNX format (*.onnx): * * binPath parameter is not used. 
* @return CNNNetwork */ diff --git a/inference-engine/src/inference_engine/include/openvino/runtime/core.hpp b/inference-engine/src/inference_engine/include/openvino/runtime/core.hpp index 0ee954167e7..fc35257f830 100644 --- a/inference-engine/src/inference_engine/include/openvino/runtime/core.hpp +++ b/inference-engine/src/inference_engine/include/openvino/runtime/core.hpp @@ -69,7 +69,7 @@ public: * For IR format (*.bin): * * if path is empty, will try to read bin file with the same name as xml and * * if bin file with the same name was not found, will load IR without weights. - * For ONNX format (*.onnx or *.prototxt): + * For ONNX format (*.onnx): * * binPath parameter is not used. * @return Function */ @@ -83,7 +83,7 @@ public: * For IR format (*.bin): * * if path is empty, will try to read bin file with the same name as xml and * * if bin file with the same name was not found, will load IR without weights. - * For ONNX format (*.onnx or *.prototxt): + * For ONNX format (*.onnx): * * binPath parameter is not used. * @return Function */ diff --git a/tests/time_tests/scripts/run_timetest.py b/tests/time_tests/scripts/run_timetest.py index 7b14c1e9c72..8f938082624 100644 --- a/tests/time_tests/scripts/run_timetest.py +++ b/tests/time_tests/scripts/run_timetest.py @@ -151,7 +151,7 @@ def cli_parser(): required=True, dest="model", type=Path, - help='path to an .xml/.onnx/.prototxt file with a trained model or' + help='path to an .xml/.onnx file with a trained model or' ' to a .blob files with a trained compiled model') parser.add_argument('-d', required=True, diff --git a/tests/time_tests/src/timetests_helper/cli.h b/tests/time_tests/src/timetests_helper/cli.h index 3370f16eebd..9beaceda373 100644 --- a/tests/time_tests/src/timetests_helper/cli.h +++ b/tests/time_tests/src/timetests_helper/cli.h @@ -14,7 +14,7 @@ static const char help_message[] = "Print a usage message"; /// @brief message for model argument static const char model_message[] = - "Required. 
Path to an .xml/.onnx/.prototxt file with a trained model or to " + "Required. Path to an .xml/.onnx file with a trained model or to " "a .blob files with a trained compiled model."; /// @brief message for target device argument diff --git a/tools/benchmark_tool/openvino/tools/benchmark/parameters.py b/tools/benchmark_tool/openvino/tools/benchmark/parameters.py index 2eb6f45f0ec..212fb9e2414 100644 --- a/tools/benchmark_tool/openvino/tools/benchmark/parameters.py +++ b/tools/benchmark_tool/openvino/tools/benchmark/parameters.py @@ -35,7 +35,7 @@ def parse_args(): help='Optional. ' 'Path to a folder with images and/or binaries or to specific image or binary file.') args.add_argument('-m', '--path_to_model', type=str, required=True, - help='Required. Path to an .xml/.onnx/.prototxt file with a trained model or ' + help='Required. Path to an .xml/.onnx file with a trained model or ' 'to a .blob file with a trained compiled model.') args.add_argument('-d', '--target_device', type=str, required=False, default='CPU', help='Optional. Specify a target device to infer on (the list of available devices is shown below). 
' From f33c03edd6cb89f91b3f76a7e61712850179d41e Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 3 Sep 2021 10:53:16 +0300 Subject: [PATCH 03/52] Move all a ops to ov (#7336) * Moved ngraph::Node to ov namespace * Fixed code style * Fixed VPU * Fixed GNA * Fixed tests * Added aliases for backward compatibility * Fix clDNN * Try to fix build * Fixed comment * Renamed RTTI macros * Moved op utils to ov namespace * Fixed ngraph library build * Fixed unit-tests * Changed src folder * Fixed recurrent_sequence * Changed low latency * Fixed serialize * Fixed ieFuncTests * Try to fix windows * Remove custom operator<< from tests * Fixed build * Moved operations from A to ov namespace --- ngraph/core/include/ngraph/op/abs.hpp | 29 +- ngraph/core/include/ngraph/op/acos.hpp | 27 +- ngraph/core/include/ngraph/op/acosh.hpp | 26 +- .../include/ngraph/op/adaptive_avg_pool.hpp | 25 +- .../include/ngraph/op/adaptive_max_pool.hpp | 37 +- ngraph/core/include/ngraph/op/add.hpp | 33 +- ngraph/core/include/ngraph/op/and.hpp | 29 +- ngraph/core/include/ngraph/op/asin.hpp | 28 +- ngraph/core/include/ngraph/op/asinh.hpp | 26 +- ngraph/core/include/ngraph/op/assign.hpp | 65 +- ngraph/core/include/ngraph/op/atan.hpp | 26 +- ngraph/core/include/ngraph/op/atanh.hpp | 26 +- ngraph/core/include/ngraph/op/avg_pool.hpp | 75 +- ngraph/core/include/ngraph/op/constant.hpp | 686 +---------------- ngraph/core/include/ngraph/op/read_value.hpp | 74 +- ngraph/core/include/ngraph/op/sink.hpp | 13 +- .../include/ngraph/runtime/host_tensor.hpp | 9 +- ngraph/core/include/openvino/op/abs.hpp | 40 + ngraph/core/include/openvino/op/acos.hpp | 38 + ngraph/core/include/openvino/op/acosh.hpp | 40 + .../include/openvino/op/adaptive_avg_pool.hpp | 38 + .../include/openvino/op/adaptive_max_pool.hpp | 50 ++ ngraph/core/include/openvino/op/add.hpp | 47 ++ ngraph/core/include/openvino/op/asin.hpp | 39 + ngraph/core/include/openvino/op/asinh.hpp | 40 + ngraph/core/include/openvino/op/assign.hpp | 72 ++ 
ngraph/core/include/openvino/op/atan.hpp | 40 + ngraph/core/include/openvino/op/atanh.hpp | 40 + ngraph/core/include/openvino/op/avg_pool.hpp | 88 +++ ngraph/core/include/openvino/op/constant.hpp | 711 ++++++++++++++++++ .../core/include/openvino/op/logical_and.hpp | 43 ++ .../core/include/openvino/op/read_value.hpp | 79 ++ ngraph/core/include/openvino/op/sink.hpp | 26 + .../include/openvino/op/util/assign_base.hpp | 22 + .../openvino/op/util/read_value_base.hpp | 24 + ngraph/core/src/op/abs.cpp | 25 +- ngraph/core/src/op/acos.cpp | 25 +- ngraph/core/src/op/acosh.cpp | 23 +- ngraph/core/src/op/adaptive_avg_pool.cpp | 2 +- ngraph/core/src/op/adaptive_max_pool.cpp | 2 +- ngraph/core/src/op/add.cpp | 2 +- ngraph/core/src/op/asin.cpp | 2 +- ngraph/core/src/op/asinh.cpp | 2 +- ngraph/core/src/op/assign.cpp | 5 +- ngraph/core/src/op/atan.cpp | 3 +- ngraph/core/src/op/atanh.cpp | 2 +- ngraph/core/src/op/avg_pool.cpp | 4 +- ngraph/core/src/op/constant.cpp | 126 ++-- .../core/src/op/{and.cpp => logical_and.cpp} | 5 +- ngraph/core/src/op/read_value.cpp | 9 +- ngraph/core/src/op/util/assign_base.cpp | 7 + ngraph/core/src/op/util/read_value_base.cpp | 7 + 52 files changed, 1648 insertions(+), 1314 deletions(-) create mode 100644 ngraph/core/include/openvino/op/abs.hpp create mode 100644 ngraph/core/include/openvino/op/acos.hpp create mode 100644 ngraph/core/include/openvino/op/acosh.hpp create mode 100644 ngraph/core/include/openvino/op/adaptive_avg_pool.hpp create mode 100644 ngraph/core/include/openvino/op/adaptive_max_pool.hpp create mode 100644 ngraph/core/include/openvino/op/add.hpp create mode 100644 ngraph/core/include/openvino/op/asin.hpp create mode 100644 ngraph/core/include/openvino/op/asinh.hpp create mode 100644 ngraph/core/include/openvino/op/assign.hpp create mode 100644 ngraph/core/include/openvino/op/atan.hpp create mode 100644 ngraph/core/include/openvino/op/atanh.hpp create mode 100644 ngraph/core/include/openvino/op/avg_pool.hpp create mode 100644 
ngraph/core/include/openvino/op/constant.hpp create mode 100644 ngraph/core/include/openvino/op/logical_and.hpp create mode 100644 ngraph/core/include/openvino/op/read_value.hpp create mode 100644 ngraph/core/include/openvino/op/sink.hpp create mode 100644 ngraph/core/include/openvino/op/util/assign_base.hpp create mode 100644 ngraph/core/include/openvino/op/util/read_value_base.hpp rename ngraph/core/src/op/{and.cpp => logical_and.cpp} (96%) create mode 100644 ngraph/core/src/op/util/assign_base.cpp create mode 100644 ngraph/core/src/op/util/read_value_base.cpp diff --git a/ngraph/core/include/ngraph/op/abs.hpp b/ngraph/core/include/ngraph/op/abs.hpp index f85805edde6..6661027581a 100644 --- a/ngraph/core/include/ngraph/op/abs.hpp +++ b/ngraph/core/include/ngraph/op/abs.hpp @@ -7,37 +7,12 @@ #include #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/abs.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise absolute value operation. -/// -class NGRAPH_API Abs : public util::UnaryElementwiseArithmetic { -public: - static constexpr NodeTypeInfo type_info{"Abs", 0}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - /// \brief Constructs an absolute value operation. - Abs() = default; - bool visit_attributes(AttributeVisitor&) override { - return true; - } - /// \brief Constructs an absolute value operation. - /// - /// \param arg Output that produces the input tensor.
- /// `[d1, ...]` - /// - /// Output `[d1, ...]` - /// - Abs(const Output& arg); - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Abs; } // namespace v0 using v0::Abs; } // namespace op diff --git a/ngraph/core/include/ngraph/op/acos.hpp b/ngraph/core/include/ngraph/op/acos.hpp index e4dc89f30a6..d43aedd7ea7 100644 --- a/ngraph/core/include/ngraph/op/acos.hpp +++ b/ngraph/core/include/ngraph/op/acos.hpp @@ -7,35 +7,12 @@ #include #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/acos.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise inverse cosine (arccos) operation. -/// -class NGRAPH_API Acos : public util::UnaryElementwiseArithmetic { -public: - static constexpr NodeTypeInfo type_info{"Acos", 0}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - /// \brief Constructs an arccos operation. - Acos() = default; - /// \brief Constructs an arccos operation. - /// - /// \param arg Output that produces the input tensor.
- /// `[d1, ...]` - /// - /// Output `[d1, ...]` - /// - Acos(const Output& arg); - bool visit_attributes(AttributeVisitor&) override { - return true; - } - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Acos; } // namespace v0 using v0::Acos; } // namespace op diff --git a/ngraph/core/include/ngraph/op/acosh.hpp b/ngraph/core/include/ngraph/op/acosh.hpp index 95ad7b297f5..fd1958f0bc0 100644 --- a/ngraph/core/include/ngraph/op/acosh.hpp +++ b/ngraph/core/include/ngraph/op/acosh.hpp @@ -7,34 +7,12 @@ #include #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/acosh.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief Elementwise inverse hyperbolic cos operation. -/// -class NGRAPH_API Acosh : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs an Acosh operation. - Acosh() = default; - /// \brief Constructs an Acosh operation. - /// - /// \param arg Output that produces the input tensor.
- /// `[d1, ...]` - /// - /// Output `[d1, ...]` - /// - Acosh(const Output& arg); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor&) override { - return true; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v3::Acosh; } // namespace v3 using v3::Acosh; } // namespace op diff --git a/ngraph/core/include/ngraph/op/adaptive_avg_pool.hpp b/ngraph/core/include/ngraph/op/adaptive_avg_pool.hpp index 11b7de1699d..eb0a8b82ed4 100644 --- a/ngraph/core/include/ngraph/op/adaptive_avg_pool.hpp +++ b/ngraph/core/include/ngraph/op/adaptive_avg_pool.hpp @@ -6,33 +6,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/adaptive_avg_pool.hpp" namespace ngraph { namespace op { namespace v8 { -/// \brief Adaptive average pooling operation. -/// -class NGRAPH_API AdaptiveAvgPool : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - AdaptiveAvgPool() = default; - - /// - /// \brief Constructs adaptive average pooling operation. - /// - /// \param data Input data - /// - /// \param output_shape 1D tensor describing output shape for spatial - /// dimensions. 
- /// - AdaptiveAvgPool(const Output& data, const Output& output_shape); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v8::AdaptiveAvgPool; } // namespace v8 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/adaptive_max_pool.hpp b/ngraph/core/include/ngraph/op/adaptive_max_pool.hpp index 7554ad272cd..6ba0a20f2e6 100644 --- a/ngraph/core/include/ngraph/op/adaptive_max_pool.hpp +++ b/ngraph/core/include/ngraph/op/adaptive_max_pool.hpp @@ -6,45 +6,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/adaptive_max_pool.hpp" namespace ngraph { namespace op { namespace v8 { -/// \brief Adaptive max pooling operation. -/// -class NGRAPH_API AdaptiveMaxPool : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - AdaptiveMaxPool() = default; - - /// - /// \brief Constructs adaptive max pooling operation. - /// - /// \param data Input data - /// - /// \param output_shape 1D tensor describing output shape for spatial - /// dimensions. 
- /// - /// \param index_element_type Specifies the output tensor type for indices - /// output - /// - AdaptiveMaxPool(const Output& data, - const Output& output_shape, - const ngraph::element::Type& index_element_type = ngraph::element::i64); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - element::Type get_index_element_type() const { - return m_index_element_type; - } - -protected: - ngraph::element::Type m_index_element_type = ngraph::element::i64; -}; +using ov::op::v8::AdaptiveMaxPool; } // namespace v8 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/add.hpp b/ngraph/core/include/ngraph/op/add.hpp index ac06fc51a34..875f7baf56f 100644 --- a/ngraph/core/include/ngraph/op/add.hpp +++ b/ngraph/core/include/ngraph/op/add.hpp @@ -7,41 +7,12 @@ #include #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/add.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise addition operation. -/// -class NGRAPH_API Add : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs an uninitialized addition operation - Add() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs an addition operation. - /// - /// \param arg0 Output that produces the first input tensor.
- /// `[d0, ...]` - /// \param arg1 Output that produces the second input tensor.
- /// `[d0, ...]` - /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style - /// implicit broadcasting. - /// - /// Output `[d0, ...]` - /// - Add(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Add; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/and.hpp b/ngraph/core/include/ngraph/op/and.hpp index ea60b6a7190..1186a86ba99 100644 --- a/ngraph/core/include/ngraph/op/and.hpp +++ b/ngraph/core/include/ngraph/op/and.hpp @@ -7,37 +7,12 @@ #include #include "ngraph/op/util/binary_elementwise_logical.hpp" +#include "openvino/op/logical_and.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise logical-and operation. -/// -class NGRAPH_API LogicalAnd : public util::BinaryElementwiseLogical { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a logical-and operation. - LogicalAnd() = default; - - /// \brief Constructs a logical-and operation. - /// - /// \param arg0 Output that produces the first input tensor.
- /// `[d0, ...]` - /// \param arg1 Output that produces the second input tensor.
- /// `[d0, ...]` - /// \param auto_broadcast Auto broadcast specification - /// - /// Output `[d0, ...]` - /// - LogicalAnd(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor& visitor) override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::LogicalAnd; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/asin.hpp b/ngraph/core/include/ngraph/op/asin.hpp index 2d0f78378b7..d91e863995f 100644 --- a/ngraph/core/include/ngraph/op/asin.hpp +++ b/ngraph/core/include/ngraph/op/asin.hpp @@ -7,36 +7,12 @@ #include #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/asin.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise inverse sine (arcsin) operation. -/// -class NGRAPH_API Asin : public util::UnaryElementwiseArithmetic { -public: - static constexpr NodeTypeInfo type_info{"Asin", 0}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - /// \brief Constructs an arcsin operation. - Asin() = default; - /// \brief Constructs an arcsin operation. - /// - /// \param arg Output that produces the input tensor.
- /// `[d1, ...]` - /// - /// Output `[d1, ...]` - /// - Asin(const Output& arg); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor&) override { - return true; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Asin; } // namespace v0 using v0::Asin; } // namespace op diff --git a/ngraph/core/include/ngraph/op/asinh.hpp b/ngraph/core/include/ngraph/op/asinh.hpp index c3414eee7d4..c7eb418a95b 100644 --- a/ngraph/core/include/ngraph/op/asinh.hpp +++ b/ngraph/core/include/ngraph/op/asinh.hpp @@ -7,34 +7,12 @@ #include #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/asinh.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief Elementwise inverse hyperbolic sin operation. -/// -class NGRAPH_API Asinh : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs an Asinh operation. - Asinh() = default; - /// \brief Constructs an Asinh operation. - /// - /// \param arg Output that produces the input tensor.
- /// `[d1, ...]` - /// - /// Output `[d1, ...]` - /// - Asinh(const Output& arg); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor&) override { - return true; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v3::Asinh; } // namespace v3 using v3::Asinh; } // namespace op diff --git a/ngraph/core/include/ngraph/op/assign.hpp b/ngraph/core/include/ngraph/op/assign.hpp index c99dc8c321e..316a61cbd37 100644 --- a/ngraph/core/include/ngraph/op/assign.hpp +++ b/ngraph/core/include/ngraph/op/assign.hpp @@ -7,74 +7,17 @@ #include "ngraph/op/sink.hpp" #include "ngraph/op/util/variable.hpp" #include "ngraph/op/util/variable_extension.hpp" +#include "openvino/op/assign.hpp" namespace ngraph { namespace op { -class NGRAPH_API AssignBase : public Sink, public VariableExtension { -public: - NGRAPH_RTTI_DECLARATION; - AssignBase() = default; - /// \brief Constructs an AssignBase operation. - explicit AssignBase(const OutputVector& arguments) : Sink(arguments) {} -}; +using ov::op::util::AssignBase; namespace v3 { -/// \brief Assign operation sets an input value to the variable with `variable_id` -class NGRAPH_API Assign : public AssignBase { -public: - NGRAPH_RTTI_DECLARATION; - Assign() = default; - - /// \brief Constructs an Assign operation. - /// - /// \param new_value Node that produces the input tensor. - /// \param variable_id identifier of the variable to be updated. 
- Assign(const Output& new_value, const std::string& variable_id); - - void validate_and_infer_types() override; - std::string get_variable_id() const override { - return m_variable_id; - } - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool visit_attributes(AttributeVisitor& visitor) override; - -private: - std::string m_variable_id; -}; +using ov::op::v3::Assign; } // namespace v3 namespace v6 { -/// \brief Assign operation sets an input value to the variable with `variable_id` -class NGRAPH_API Assign : public AssignBase { -public: - NGRAPH_RTTI_DECLARATION; - Assign() = default; - - /// \brief Constructs an Assign operation. - /// - /// \param new_value Node that produces the input tensor. - /// \param variable Class for storing and synchronizing element types, shapes and - /// identifiers - /// between pairs of Assign/ReadValue nodes. - Assign(const Output& new_value, const std::shared_ptr& variable); - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::string get_variable_id() const override { - NGRAPH_CHECK(m_variable, "Variable is not initialized. 
Variable_id is unavailable"); - return m_variable->get_info().variable_id; - } - bool evaluate(const HostTensorVector& outputs, - const HostTensorVector& inputs, - const EvaluationContext& evaluation_context) const override; - bool has_evaluate() const override; - bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; -}; +using ov::op::v6::Assign; } // namespace v6 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/atan.hpp b/ngraph/core/include/ngraph/op/atan.hpp index d43fd997141..af3a168c9a3 100644 --- a/ngraph/core/include/ngraph/op/atan.hpp +++ b/ngraph/core/include/ngraph/op/atan.hpp @@ -7,34 +7,12 @@ #include #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/atan.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise inverse tangent (arctan) operation. -/// -class NGRAPH_API Atan : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs an arctan operation. - Atan() = default; - - /// \brief Constructs an arctan operation. - /// - /// \param arg Output that produces the input tensor.
- /// `[d1, ...]` - /// - /// Output `[d1, ...]` - /// - Atan(const Output& arg); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor&) override { - return true; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Atan; } // namespace v0 using v0::Atan; } // namespace op diff --git a/ngraph/core/include/ngraph/op/atanh.hpp b/ngraph/core/include/ngraph/op/atanh.hpp index c9de03b05aa..dbc4eebb7e1 100644 --- a/ngraph/core/include/ngraph/op/atanh.hpp +++ b/ngraph/core/include/ngraph/op/atanh.hpp @@ -7,34 +7,12 @@ #include #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/atanh.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief Elementwise inverse hyperbolic tangent operation. -/// -class NGRAPH_API Atanh : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs an Atanh operation. - Atanh() = default; - /// \brief Constructs an Atanh operation. - /// - /// \param arg Output that produces the input tensor.
- /// `[d1, ...]` - /// - /// Output `[d1, ...]` - /// - Atanh(const Output& arg); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor&) override { - return true; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v3::Atanh; } // namespace v3 using v3::Atanh; } // namespace op diff --git a/ngraph/core/include/ngraph/op/avg_pool.hpp b/ngraph/core/include/ngraph/op/avg_pool.hpp index 3d1d1aa52c8..8943d5ac827 100644 --- a/ngraph/core/include/ngraph/op/avg_pool.hpp +++ b/ngraph/core/include/ngraph/op/avg_pool.hpp @@ -6,83 +6,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/avg_pool.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Batched average pooling operation. -/// -class NGRAPH_API AvgPool : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a batched average pooling operation. - AvgPool() = default; - - /// - /// \brief Constructs a batched average pooling operation. - /// - /// \param arg The output producing the input data batch tensor.
- /// `[d1, dn]` - /// \param strides The strides.
`[n]` - /// \param pads_begin The beginning of padding shape.
`[n]` - /// \param pads_end The end of padding shape.
`[n]` - /// \param kernel The kernel shape.
`[n]` - /// \param exclude_pad If false then averages include padding elements, each - /// treated as the number zero. If true, padding - /// elements - /// are entirely ignored when computing averages. - /// \param rounding_type Whether to use ceiling or floor rounding type while - /// computing output shape. - /// \param auto_pad Padding type to use for additional padded dimensions - /// - AvgPool(const Output& arg, - const Strides& strides, - const Shape& pads_begin, - const Shape& pads_end, - const Shape& kernel, - bool exclude_pad, - op::RoundingType rounding_type = op::RoundingType::FLOOR, - const PadType& auto_pad = op::PadType::EXPLICIT); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The kernel shape. - const Shape& get_kernel() const; - void set_kernel(const Shape& kernel); - /// \return The strides. - const Strides& get_strides() const; - void set_strides(const Strides& strides); - /// \return The beginning of padding shape. - const Shape& get_pads_begin() const; - void set_pads_begin(const Shape& pads_begin); - /// \return The end of padding shape. - const Shape& get_pads_end() const; - void set_pads_end(const Shape& pads_end); - bool get_exclude_pad() const; - void set_exclude_pad(bool exclude_pad); - /// \return The pad type for pooling. - const PadType& get_auto_pad() const; - void set_auto_pad(const PadType& auto_pad); - op::RoundingType get_rounding_type() const; - void set_rounding_type(op::RoundingType rounding_type); - /// \return The default value for AvgPool. 
- NGRAPH_SUPPRESS_DEPRECATED_START - virtual std::shared_ptr get_default_value() const override; - NGRAPH_SUPPRESS_DEPRECATED_END - -protected: - Shape m_kernel; - Strides m_strides; - Shape m_pads_begin; - Shape m_pads_end; - bool m_exclude_pad{true}; - PadType m_auto_pad{PadType::EXPLICIT}; - op::RoundingType m_rounding_type{op::RoundingType::FLOOR}; -}; +using ov::op::v1::AvgPool; } // namespace v1 using v1::AvgPool; diff --git a/ngraph/core/include/ngraph/op/constant.hpp b/ngraph/core/include/ngraph/op/constant.hpp index aeca47c5cb8..f13fd70fa5a 100644 --- a/ngraph/core/include/ngraph/op/constant.hpp +++ b/ngraph/core/include/ngraph/op/constant.hpp @@ -15,694 +15,12 @@ #include "ngraph/type/element_type.hpp" #include "ngraph/type/element_type_traits.hpp" #include "ngraph/util.hpp" +#include "openvino/op/constant.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Class for constants. -class NGRAPH_API Constant : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - Constant() = default; - - /// \brief Initialize a constant from tensor - /// \param tensor The tensor with data - Constant(const std::shared_ptr& tensor); - - /// \brief Constructs a tensor constant. - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param values A vector of literals for initializing the tensor constant. The - /// size of values must match the size of the shape. - template - Constant(const element::Type& type, const Shape& shape, const std::vector& values) : Constant(type, shape) { - NODE_VALIDATION_CHECK(this, - values.size() == 1 || values.size() == shape_size(m_shape), - "Did not get the expected number of literals for a constant of shape ", - m_shape, - " (got ", - values.size(), - ", expected ", - (shape_size(m_shape) == 1 ? 
"" : "1 or "), - shape_size(m_shape), - ")."); - - if (values.size() == 1) { - fill_data(type, values.front()); - } else { - write_values(values); - } - m_all_elements_bitwise_identical = are_all_data_elements_bitwise_identical(); - } - - /// \brief Create uninitialized constant - Constant(const element::Type& type, const Shape& shape); - /// \brief Constructs a uniform tensor constant. - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param value A scalar for initializing the uniform tensor constant. The - /// value is broadcast to the specified shape. - template ::value>::type> - Constant(const element::Type& type, const Shape& shape, T value) : Constant(type, shape) { - fill_data(type, value); - m_all_elements_bitwise_identical = true; - } - - template - void fill_data(const element::Type& type, T value) { - using Type_t = element::Type_t; -#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) -# pragma GCC diagnostic push -# pragma GCC diagnostic error "-Wswitch" -# pragma GCC diagnostic error "-Wswitch-enum" -#endif - switch (type) { - case Type_t::boolean: - fill_data(value); - break; - case Type_t::bf16: - fill_data(value); - break; - case Type_t::f16: - fill_data(value); - break; - case Type_t::f32: - fill_data(value); - break; - case Type_t::f64: - fill_data(value); - break; - case Type_t::i4: - fill_data(value); - break; - case Type_t::i8: - fill_data(value); - break; - case Type_t::i16: - fill_data(value); - break; - case Type_t::i32: - fill_data(value); - break; - case Type_t::i64: - fill_data(value); - break; - case Type_t::u1: - fill_data(value); - break; - case Type_t::u4: - fill_data(value); - break; - case Type_t::u8: - fill_data(value); - break; - case Type_t::u16: - fill_data(value); - break; - case Type_t::u32: - fill_data(value); - break; - case Type_t::u64: - fill_data(value); - break; - case Type_t::undefined: - case Type_t::dynamic: - throw 
std::runtime_error("unsupported type"); - } -#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) -# pragma GCC diagnostic pop -#endif - } - - /// \brief Constructs a tensor constant - /// This constructor is mainly to support deserialization of constants. - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param values A list of string values to use as the constant data. - Constant(const element::Type& type, const Shape& shape, const std::vector& values); - - /// \brief Constructs a tensor constant with the supplied data - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param data A void* to constant data. - Constant(const element::Type& type, const Shape& shape, const void* data); - - /// \brief Constructs a tensor constant with the supplied data - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param data A pointer to pre-allocated shared data. 
- template - Constant(const element::Type& type, const Shape& shape, std::shared_ptr> data) - : m_element_type(type), - m_shape(shape) { - m_data = data; - constructor_validate_and_infer_types(); - } - - Constant(const Constant& other); - Constant(const Constant& other, const Shape& new_shape); - Constant& operator=(const Constant&) = delete; - - virtual ~Constant() override; - - void validate_and_infer_types() override { - infer_element_type(); - set_output_type(0, m_element_type, m_shape); - } - - bool visit_attributes(AttributeVisitor& visitor) override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& outputs) const override; - bool evaluate_upper(const HostTensorVector& outputs) const override; - - // Don't constant fold a constant; it would make a copy - bool constant_fold(OutputVector& outputs, const OutputVector& inputs) override { - (void)outputs; - (void)inputs; - return false; - } - - /// \brief Returns the value of the constant node as a Shape object - /// Can only be used on element::i64 nodes and interprets - /// negative values as zeros. - Shape get_shape_val() const; - /// \brief Returns the value of the constant node as a Strides - /// object - /// Can only be used on element::i64 nodes and interprets - /// negative values as zeros. - Strides get_strides_val() const; - /// \brief Returns the value of the constant node as a Coordinate - /// object - /// Can only be used on element::i64 nodes and interprets - /// negative values as zeros. - Coordinate get_coordinate_val() const; - /// \brief Returns the value of the constant node as a - /// CoordinateDiff object - /// Can only be used on element::i64 nodes. 
- CoordinateDiff get_coordinate_diff_val() const; - /// \brief Returns the value of the constant node as an AxisVector - /// object - /// Can only be used on element::i64 nodes and interprets - /// negative values as zeros. - AxisVector get_axis_vector_val() const; - /// \brief Returns the value of the constant node as an AxisSet - /// object - /// Can only be used on element::i64 nodes and interprets - /// negative values as zeros. - /// Repeated values are allowed. - AxisSet get_axis_set_val() const; - - /// \brief Update Constant shape. New shape size must equal to the data elements - /// count - /// - /// \param shape The shape of the tensor constant. - NGRAPH_DEPRECATED("Use Constant c-tor with shape argument instead") - void set_data_shape(const Shape& shape); - - /// \brief Wrapper around constructing a shared_ptr of a Constant - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param values A vector of values to use as the constant data. - template - static std::shared_ptr create(const element::Type& type, - const Shape& shape, - const std::vector& values) { - return std::make_shared(type, shape, values); - } - - /// \brief Wrapper around constructing a shared_ptr of a Constant - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param values An initializer_list of values to use as the constant data. - template - static std::shared_ptr create(const element::Type& type, - const Shape& shape, - std::initializer_list values) { - return std::make_shared(type, shape, std::vector{values}); - } - - /// \brief Wrapper around constructing a shared_ptr of a Constant - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param memory An continues memory chunk which contains the constant data. 
- static std::shared_ptr create(const element::Type& type, const Shape& shape, const void* memory) { - return std::make_shared(type, shape, memory); - } - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The initialization literals for the tensor constant. - std::vector get_value_strings() const; - - template - std::vector get_vector() const { - const T* p = get_data_ptr(); - if (p == nullptr) - throw std::runtime_error("Cannot create vector! Buffer is not allocated."); - return std::vector(p, p + shape_size(m_shape)); - } - - /// \brief Return the Constant's value as a vector cast to type T - /// - /// \tparam T Type to which data vector's entries will be cast. - /// \return Constant's data vector. - template - std::vector cast_vector() const { - auto source_type = get_element_type(); - std::vector rc; - using Type_t = element::Type_t; -#if defined(_MSC_VER) -# pragma warning(push) -# pragma warning(disable : 4244) -#endif - switch (source_type) { - case Type_t::boolean: - cast_vector(rc); - break; - case Type_t::bf16: - cast_vector(rc); - break; - case Type_t::f16: - cast_vector(rc); - break; - case Type_t::f32: - cast_vector(rc); - break; - case Type_t::f64: - cast_vector(rc); - break; - case Type_t::i4: - cast_vector(rc); - break; - case Type_t::i8: - cast_vector(rc); - break; - case Type_t::i16: - cast_vector(rc); - break; - case Type_t::i32: - cast_vector(rc); - break; - case Type_t::i64: - cast_vector(rc); - break; - case Type_t::u1: - cast_vector(rc); - break; - case Type_t::u4: - cast_vector(rc); - break; - case Type_t::u8: - cast_vector(rc); - break; - case Type_t::u16: - cast_vector(rc); - break; - case Type_t::u32: - cast_vector(rc); - break; - case Type_t::u64: - cast_vector(rc); - break; - default: - throw std::runtime_error("unsupported type"); - } -#if defined(_MSC_VER) -# pragma warning(pop) -#endif - return rc; - } - - const void* get_data_ptr() const { - return (m_data ? 
m_data->get_ptr() : nullptr); - } - template - const T* get_data_ptr() const { - if (sizeof(T) > m_element_type.size() && shape_size(m_shape) > 0) { - throw ngraph_error("Buffer over-read"); - } - - return static_cast(get_data_ptr()); - } - - template - const typename element_type_traits::value_type* get_data_ptr() const { - NGRAPH_CHECK(ET == get_element_type(), "get_data_ptr() called for incorrect element type."); - return static_cast::value_type*>(get_data_ptr()); - } - - bool get_all_data_elements_bitwise_identical() const { - return m_all_elements_bitwise_identical; - } - std::string convert_value_to_string(size_t index) const; - - /** - * \brief Allows to avoid buffer allocation on the visit_attributes call - */ - void alloc_buffer_on_visit_attributes(bool val) { - m_alloc_buffer_on_visit_attributes = val; - } - -private: - template , - typename std::enable_if::type = true> - StorageDataType get_element_value(size_t index) const { - return get_data_ptr()[index]; - } - - template , - typename std::enable_if::type = true> - StorageDataType get_element_value(size_t index) const { - return (get_data_ptr()[index / 8] >> (7 - (index % 8))) & 1; - } - - template , - typename std::enable_if::type = true> - StorageDataType get_element_value(size_t index) const { - return (get_data_ptr()[index / 2] >> (index % 2 ? 0 : 4)) & 0x0F; - } - - template , - typename std::enable_if::type = true> - StorageDataType get_element_value(size_t index) const { - const uint8_t i4data = (get_data_ptr()[index / 2] >> (index % 2 ? 0 : 4)) & 0x0F; - const bool is_negative_number = (i4data >> 3) & 0x01; - const int8_t data = is_negative_number ? 
i4data | 0xF0 : i4data; - return data; - } - - template ::type = true> - void cast_vector(std::vector& output_vector) const { - // this function is workaround for waring during windows building - // build complains for vector creation based on iterators - // which point on different type than destination vector::value_type - using IN_T = fundamental_type_for; - auto source_vector = get_vector(); - output_vector.reserve(source_vector.size()); - - std::transform(source_vector.begin(), source_vector.end(), std::back_inserter(output_vector), [](IN_T c) { - return static_cast(c); - }); - } - - template ::type = true> - void cast_vector(std::vector& output) const { - using IN_T = fundamental_type_for; - const auto element_number = shape_size(m_shape); - const auto source_begin = get_data_ptr(); - const auto source_end = std::next(source_begin, (element_number + 7) / 8); - const auto round_element_no = element_number % 8 ? element_number - element_number % 8 + 8 : element_number; - output.reserve(round_element_no); // adds 7 more elements here? - std::for_each(source_begin, source_end, [&](IN_T c) { - for (const auto i : {7, 6, 5, 4, 3, 2, 1, 0}) { - const uint8_t data = (c >> i) & 0x01; - output.push_back(data); - } - }); - output.resize(element_number); - } - - template ::type = true> - void cast_vector(std::vector& output) const { - using IN_T = fundamental_type_for; - const auto element_number = shape_size(m_shape); - const auto source_begin = get_data_ptr(); - const auto source_end = std::next(source_begin, (element_number + 1) / 2); - const auto round_element_no = element_number % 2 ? element_number + 1 : element_number; - output.reserve(round_element_no); // adds 1 more elements here? 
- std::for_each(source_begin, source_end, [&](IN_T c) { - for (const auto i : {4, 0}) { - const uint8_t data = (c >> i) & 0x0F; - output.push_back(data); - } - }); - output.resize(element_number); - } - template ::type = true> - void cast_vector(std::vector& output) const { - using IN_T = fundamental_type_for; - const auto element_number = shape_size(m_shape); - const auto source_begin = get_data_ptr(); - const auto source_end = std::next(source_begin, (element_number + 1) / 2); - const auto round_element_no = element_number % 2 ? element_number + 1 : element_number; - output.reserve(round_element_no); // adds 1 more elements here? - std::for_each(source_begin, source_end, [&](IN_T c) { - for (const auto i : {4, 0}) { - const uint8_t i4data = (c >> i) & 0x0F; - const bool is_negative_number = (i4data >> 3) & 0x01; - const int8_t data = is_negative_number ? i4data | 0xF0 : i4data; - output.push_back(data); - } - }); - output.resize(element_number); - } - - template , - typename std::enable_if::type = true> - void fill_data(const T& value) { - const auto size = shape_size(m_shape); - const auto v = static_cast(value); - std::fill_n(get_data_ptr_nc(), size, v); - } - - template , - typename std::enable_if::type = true> - void fill_data(const T& value) { - const StorageDataType v = value ? 0xFF : 0x00; - std::fill_n(get_data_ptr_nc(), mem_size(), v); - } - - template , - typename std::enable_if::type = true> - void fill_data(const T& value) { - uint8_t v = value_in_range(value); - v &= 0x0F; - v += v << 4; - std::fill_n(get_data_ptr_nc(), mem_size(), v); - } - - void allocate_buffer(); - - void* get_data_ptr_nc() { - return (m_data ? 
m_data->get_ptr() : nullptr); - } - - template - typename element_type_traits::value_type* get_data_ptr_nc() { - NGRAPH_CHECK(ET == get_element_type(), "get_data_ptr_nc() called for incorrect element type."); - return static_cast::value_type*>(get_data_ptr_nc()); - } - - Constant(const OutputVector& args) : Op(args), m_shape({}) {} - - virtual void infer_element_type() {} - template - void write_values(const std::vector& values) { - write_to_buffer(values); - } - - template , - typename std::enable_if::type = true> - void write_buffer(const std::vector& source) { - auto p = get_data_ptr_nc(); - for (size_t i = 0; i < source.size(); i++) { - p[i] = static_cast(source[i]); - } - } - - template , - typename std::enable_if::type = true> - void write_buffer(const std::vector& source) { - auto p = get_data_ptr_nc(); - size_t i = 0; - for (; i < source.size() / 2; i++) { - const auto v1 = value_in_range(source[i * 2]) & 0x0F; - const auto v2 = value_in_range(source[i * 2 + 1]) & 0x0F; - const auto v = (v1 << 4) | v2; - p[i] = static_cast(v); - } - if (source.size() % 2) { - const auto v1 = value_in_range(source[i * 2]) & 0x0F; - const auto v = v1 << 4; - p[i] = static_cast(v); - } - } - - template , - typename std::enable_if::type = true> - void write_buffer(const std::vector& source) { - auto p = get_data_ptr_nc(); - size_t i = 0; - for (; i < source.size() / 8; i++) { - uint8_t v{}; - for (int j = 0; j != 8; j++) { - const uint8_t b = source[i * 8 + j] ? 0x01 << (7 - j) : 0; - v |= b; - } - p[i] = static_cast(v); - } - uint8_t v{}; - for (unsigned j = 0; j != source.size() % 8; j++) { - const uint8_t b = source[i * 8 + j] ? 
0x01 << (7 - j) : 0; - v |= b; - } - p[i] = static_cast(v); - } - - template - void write_to_buffer(const std::vector& source) { - const auto& target_type = m_element_type; - size_t target_element_count = shape_size(m_shape); - if (source.size() != target_element_count) { - throw std::runtime_error("Constant initializer does not match shape"); - } - using Type_t = element::Type_t; -#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) -# pragma GCC diagnostic push -# pragma GCC diagnostic error "-Wswitch" -# pragma GCC diagnostic error "-Wswitch-enum" -#endif - switch (target_type) { - case Type_t::boolean: - write_buffer(source); - break; - case Type_t::bf16: - write_buffer(source); - break; - case Type_t::f16: - write_buffer(source); - break; - case Type_t::f32: - write_buffer(source); - break; - case Type_t::f64: - write_buffer(source); - break; - case Type_t::i4: - write_buffer(source); - break; - case Type_t::i8: - write_buffer(source); - break; - case Type_t::i16: - write_buffer(source); - break; - case Type_t::i32: - write_buffer(source); - break; - case Type_t::i64: - write_buffer(source); - break; - case Type_t::u1: - write_buffer(source); - break; - case Type_t::u4: - write_buffer(source); - break; - case Type_t::u8: - write_buffer(source); - break; - case Type_t::u16: - write_buffer(source); - break; - case Type_t::u32: - write_buffer(source); - break; - case Type_t::u64: - write_buffer(source); - break; - case element::Type_t::undefined: - case element::Type_t::dynamic: - throw std::runtime_error("unsupported type"); - } -#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) -# pragma GCC diagnostic pop -#endif - } - template ::type = true> - static ngraph::fundamental_type_for value_in_range(const ValueT& value) { - const auto result = ngraph::fundamental_type_for(value); - NGRAPH_CHECK(0 <= result && result <= 15, "assigned value out of range u4 values"); - return result; - } - - template ::type = true> - static 
ngraph::fundamental_type_for value_in_range(const ValueT& value) { - const auto result = ngraph::fundamental_type_for(value); - NGRAPH_CHECK(-8 <= result && result <= 7, "assigned value out of range i4 values"); - return result; - } - - bool are_all_data_elements_bitwise_identical() const; - static constexpr size_t host_alignment() { - return 64; - } - - size_t mem_size() const { - const bool bitwidth_less_than_byte = m_element_type.bitwidth() < 8; - if (bitwidth_less_than_byte) { - const auto size = shape_size(m_shape); - const auto bitwidth = size * m_element_type.bitwidth(); - // for rounding by `(bitwidth + 7) / 8` will work for - // `bitwidth < numeric_limits::max() - 7` - return bitwidth / 8 + (bitwidth % 8 ? 1 : 0); - } - return shape_size(m_shape) * m_element_type.size(); - } - - element::Type m_element_type; - Shape m_shape{}; - std::shared_ptr m_data; - bool m_all_elements_bitwise_identical; - bool m_alloc_buffer_on_visit_attributes = true; -}; +using ov::op::v0::Constant; } // namespace v0 using v0::Constant; } // namespace op diff --git a/ngraph/core/include/ngraph/op/read_value.hpp b/ngraph/core/include/ngraph/op/read_value.hpp index 1737cfce678..8106aa9384a 100644 --- a/ngraph/core/include/ngraph/op/read_value.hpp +++ b/ngraph/core/include/ngraph/op/read_value.hpp @@ -7,84 +7,18 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/variable.hpp" #include "ngraph/op/util/variable_extension.hpp" +#include "openvino/op/read_value.hpp" namespace ngraph { namespace op { -class NGRAPH_API ReadValueBase : public Op, public VariableExtension { -public: - NGRAPH_RTTI_DECLARATION; - - ReadValueBase() = default; - - /// \brief Constructs an AssignBase operation. - explicit ReadValueBase(const OutputVector& arguments) : Op(arguments) {} -}; +using ov::op::util::ReadValueBase; namespace v3 { -/// \brief ReadValue operation creates the variable with `variable_id` and returns value -/// of this variable. 
-class NGRAPH_API ReadValue : public ReadValueBase { -public: - NGRAPH_RTTI_DECLARATION; - ReadValue() = default; - - /// \brief Constructs a ReadValue operation. - /// - /// \param init_value Node that produces the input tensor. - /// \param variable_id identificator of the variable to create. - ReadValue(const Output& init_value, const std::string& variable_id); - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::string get_variable_id() const override { - return m_variable_id; - } - -private: - std::string m_variable_id; -}; +using ov::op::v3::ReadValue; } // namespace v3 namespace v6 { -/// \brief ReadValue operation gets an input value from the variable with `variable_id` -/// and returns it as an output. -class NGRAPH_API ReadValue : public ReadValueBase { -public: - NGRAPH_RTTI_DECLARATION; - ReadValue() = default; - - /// \brief Constructs a ReadValue operation. - /// - /// \param init_value Node that produces the input tensor. - /// \param variable Class for storing and synchronizing element types, shapes and - /// identifiers - /// between pairs of Assign/ReadValue nodes. - ReadValue(const Output& init_value, const std::shared_ptr& variable); - - void validate_and_infer_types() override; - - void revalidate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::string get_variable_id() const override { - NGRAPH_CHECK(m_variable, "Variable is not initialized. 
Variable_id is unavailable"); - return m_variable->get_info().variable_id; - } - - bool evaluate(const HostTensorVector& outputs, - const HostTensorVector& inputs, - const EvaluationContext& evaluation_context) const override; - bool has_evaluate() const override; - - bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; -}; +using ov::op::v6::ReadValue; } // namespace v6 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/sink.hpp b/ngraph/core/include/ngraph/op/sink.hpp index 0dbec019079..53e9fa9d523 100644 --- a/ngraph/core/include/ngraph/op/sink.hpp +++ b/ngraph/core/include/ngraph/op/sink.hpp @@ -7,20 +7,11 @@ #include #include "ngraph/op/op.hpp" +#include "openvino/op/sink.hpp" namespace ngraph { namespace op { -/// Root of nodes that can be sink nodes -class NGRAPH_API Sink : public Op { -public: - virtual ~Sink() = 0; - NGRAPH_RTTI_DECLARATION; - -protected: - Sink() : Op() {} - - explicit Sink(const OutputVector& arguments) : Op(arguments) {} -}; +using ov::op::Sink; } // namespace op using SinkVector = std::vector>; } // namespace ngraph diff --git a/ngraph/core/include/ngraph/runtime/host_tensor.hpp b/ngraph/core/include/ngraph/runtime/host_tensor.hpp index 5f49c7f4159..157fb27be3b 100644 --- a/ngraph/core/include/ngraph/runtime/host_tensor.hpp +++ b/ngraph/core/include/ngraph/runtime/host_tensor.hpp @@ -13,13 +13,18 @@ namespace ov { class Node; -} -namespace ngraph { namespace op { namespace v0 { class Constant; } } // namespace op +} // namespace ov +namespace ngraph { +namespace op { +namespace v0 { +using ov::op::v0::Constant; +} +} // namespace op namespace runtime { class NGRAPH_API HostTensor : public ngraph::runtime::Tensor { public: diff --git a/ngraph/core/include/openvino/op/abs.hpp b/ngraph/core/include/openvino/op/abs.hpp new file mode 100644 index 00000000000..b3cc02d724f --- /dev/null +++ b/ngraph/core/include/openvino/op/abs.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 
2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise absolute value operation. +/// +class OPENVINO_API Abs : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs an absolute value operation. + Abs() = default; + bool visit_attributes(AttributeVisitor&) override { + return true; + } + /// \brief Constructs an absolute value operation. + /// + /// \param arg Output that produces the input tensor.
+ /// `[d1, ...]` + /// + /// Output `[d1, ...]` + /// + Abs(const Output& arg); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/acos.hpp b/ngraph/core/include/openvino/op/acos.hpp new file mode 100644 index 00000000000..0804926bdda --- /dev/null +++ b/ngraph/core/include/openvino/op/acos.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise inverse cosine (arccos) operation. +/// +class OPENVINO_API Acos : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs an arccos operation. + Acos() = default; + /// \brief Constructs an arccos operation. + /// + /// \param arg Output that produces the input tensor.
+ /// `[d1, ...]` + /// + /// Output `[d1, ...]` + /// + Acos(const Output& arg); + bool visit_attributes(AttributeVisitor&) override { + return true; + } + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/acosh.hpp b/ngraph/core/include/openvino/op/acosh.hpp new file mode 100644 index 00000000000..5e7e638a3f6 --- /dev/null +++ b/ngraph/core/include/openvino/op/acosh.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Elementwise inverse hyperbolic cos operation. +/// +class OPENVINO_API Acosh : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs an Acosh operation. + Acosh() = default; + /// \brief Constructs an Acosh operation. + /// + /// \param arg Output that produces the input tensor.
+ /// `[d1, ...]` + /// + /// Output `[d1, ...]` + /// + Acosh(const Output& arg); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor&) override { + return true; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/adaptive_avg_pool.hpp b/ngraph/core/include/openvino/op/adaptive_avg_pool.hpp new file mode 100644 index 00000000000..fd4419e5d8b --- /dev/null +++ b/ngraph/core/include/openvino/op/adaptive_avg_pool.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief Adaptive average pooling operation. +/// +class OPENVINO_API AdaptiveAvgPool : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + AdaptiveAvgPool() = default; + + /// + /// \brief Constructs adaptive average pooling operation. + /// + /// \param data Input data + /// + /// \param output_shape 1D tensor describing output shape for spatial + /// dimensions. 
+ /// + AdaptiveAvgPool(const Output& data, const Output& output_shape); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/adaptive_max_pool.hpp b/ngraph/core/include/openvino/op/adaptive_max_pool.hpp new file mode 100644 index 00000000000..512131ed0aa --- /dev/null +++ b/ngraph/core/include/openvino/op/adaptive_max_pool.hpp @@ -0,0 +1,50 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief Adaptive max pooling operation. +/// +class OPENVINO_API AdaptiveMaxPool : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + AdaptiveMaxPool() = default; + + /// + /// \brief Constructs adaptive max pooling operation. + /// + /// \param data Input data + /// + /// \param output_shape 1D tensor describing output shape for spatial + /// dimensions. 
+ /// + /// \param index_element_type Specifies the output tensor type for indices + /// output + /// + AdaptiveMaxPool(const Output& data, + const Output& output_shape, + const ngraph::element::Type& index_element_type = ngraph::element::i64); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + element::Type get_index_element_type() const { + return m_index_element_type; + } + +protected: + ngraph::element::Type m_index_element_type = ngraph::element::i64; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/add.hpp b/ngraph/core/include/openvino/op/add.hpp new file mode 100644 index 00000000000..38b3121ab0b --- /dev/null +++ b/ngraph/core/include/openvino/op/add.hpp @@ -0,0 +1,47 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise addition operation. +/// +class OPENVINO_API Add : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs an uninitialized addition operation + Add() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + + /// \brief Constructs an addition operation. + /// + /// \param arg0 Output that produces the first input tensor.
+ /// `[d0, ...]` + /// \param arg1 Output that produces the second input tensor.
+ /// `[d0, ...]` + /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style + /// implicit broadcasting. + /// + /// Output `[d0, ...]` + /// + Add(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/asin.hpp b/ngraph/core/include/openvino/op/asin.hpp new file mode 100644 index 00000000000..b08569cddee --- /dev/null +++ b/ngraph/core/include/openvino/op/asin.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise inverse sine (arcsin) operation. +/// +class OPENVINO_API Asin : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs an arcsin operation. + Asin() = default; + /// \brief Constructs an arcsin operation. + /// + /// \param arg Output that produces the input tensor.
+ /// `[d1, ...]` + /// + /// Output `[d1, ...]` + /// + Asin(const Output& arg); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor&) override { + return true; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/asinh.hpp b/ngraph/core/include/openvino/op/asinh.hpp new file mode 100644 index 00000000000..5ad07bddd26 --- /dev/null +++ b/ngraph/core/include/openvino/op/asinh.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Elementwise inverse hyperbolic sin operation. +/// +class OPENVINO_API Asinh : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs an Asinh operation. + Asinh() = default; + /// \brief Constructs an Asinh operation. + /// + /// \param arg Output that produces the input tensor.
+ /// `[d1, ...]` + /// + /// Output `[d1, ...]` + /// + Asinh(const Output& arg); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor&) override { + return true; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/assign.hpp b/ngraph/core/include/openvino/op/assign.hpp new file mode 100644 index 00000000000..01502de7410 --- /dev/null +++ b/ngraph/core/include/openvino/op/assign.hpp @@ -0,0 +1,72 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/assign_base.hpp" +#include "openvino/op/util/variable.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Assign operation sets an input value to the variable with `variable_id` +class OPENVINO_API Assign : public util::AssignBase { +public: + OPENVINO_RTTI_DECLARATION; + Assign() = default; + + /// \brief Constructs an Assign operation. + /// + /// \param new_value Node that produces the input tensor. + /// \param variable_id identifier of the variable to be updated. + Assign(const Output& new_value, const std::string& variable_id); + + void validate_and_infer_types() override; + std::string get_variable_id() const override { + return m_variable_id; + } + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool visit_attributes(AttributeVisitor& visitor) override; + +private: + std::string m_variable_id; +}; +} // namespace v3 + +namespace v6 { +/// \brief Assign operation sets an input value to the variable with `variable_id` +class OPENVINO_API Assign : public util::AssignBase { +public: + OPENVINO_RTTI_DECLARATION; + Assign() = default; + + /// \brief Constructs an Assign operation. 
+ /// + /// \param new_value Node that produces the input tensor. + /// \param variable Class for storing and synchronizing element types, shapes and + /// identifiers + /// between pairs of Assign/ReadValue nodes. + Assign(const Output& new_value, const std::shared_ptr& variable); + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::string get_variable_id() const override { + NGRAPH_CHECK(m_variable, "Variable is not initialized. Variable_id is unavailable"); + return m_variable->get_info().variable_id; + } + bool evaluate(const HostTensorVector& outputs, + const HostTensorVector& inputs, + const EvaluationContext& evaluation_context) const override; + bool has_evaluate() const override; + bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; +}; +} // namespace v6 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/atan.hpp b/ngraph/core/include/openvino/op/atan.hpp new file mode 100644 index 00000000000..3b336946c25 --- /dev/null +++ b/ngraph/core/include/openvino/op/atan.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise inverse tangent (arctan) operation. +/// +class OPENVINO_API Atan : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs an arctan operation. + Atan() = default; + + /// \brief Constructs an arctan operation. + /// + /// \param arg Output that produces the input tensor.
+ /// `[d1, ...]` + /// + /// Output `[d1, ...]` + /// + Atan(const Output& arg); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor&) override { + return true; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/atanh.hpp b/ngraph/core/include/openvino/op/atanh.hpp new file mode 100644 index 00000000000..5332636a9fd --- /dev/null +++ b/ngraph/core/include/openvino/op/atanh.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Elementwise inverse hyperbolic tangent operation. +/// +class OPENVINO_API Atanh : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs an Atanh operation. + Atanh() = default; + /// \brief Constructs an Atanh operation. + /// + /// \param arg Output that produces the input tensor.
+ /// `[d1, ...]` + /// + /// Output `[d1, ...]` + /// + Atanh(const Output& arg); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor&) override { + return true; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/avg_pool.hpp b/ngraph/core/include/openvino/op/avg_pool.hpp new file mode 100644 index 00000000000..66f94c7cf6f --- /dev/null +++ b/ngraph/core/include/openvino/op/avg_pool.hpp @@ -0,0 +1,88 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Batched average pooling operation. +/// +class OPENVINO_API AvgPool : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a batched average pooling operation. + AvgPool() = default; + + /// + /// \brief Constructs a batched average pooling operation. + /// + /// \param arg The output producing the input data batch tensor.
+ /// `[d1, dn]` + /// \param strides The strides.
`[n]` + /// \param pads_begin The beginning of padding shape.
`[n]` + /// \param pads_end The end of padding shape.
`[n]` + /// \param kernel The kernel shape.
`[n]` + /// \param exclude_pad If false then averages include padding elements, each + /// treated as the number zero. If true, padding + /// elements + /// are entirely ignored when computing averages. + /// \param rounding_type Whether to use ceiling or floor rounding type while + /// computing output shape. + /// \param auto_pad Padding type to use for additional padded dimensions + /// + AvgPool(const Output& arg, + const Strides& strides, + const ngraph::Shape& pads_begin, + const ngraph::Shape& pads_end, + const ngraph::Shape& kernel, + bool exclude_pad, + op::RoundingType rounding_type = op::RoundingType::FLOOR, + const PadType& auto_pad = op::PadType::EXPLICIT); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The kernel shape. + const ngraph::Shape& get_kernel() const; + void set_kernel(const ngraph::Shape& kernel); + /// \return The strides. + const Strides& get_strides() const; + void set_strides(const Strides& strides); + /// \return The beginning of padding shape. + const ngraph::Shape& get_pads_begin() const; + void set_pads_begin(const ngraph::Shape& pads_begin); + /// \return The end of padding shape. + const ngraph::Shape& get_pads_end() const; + void set_pads_end(const ngraph::Shape& pads_end); + bool get_exclude_pad() const; + void set_exclude_pad(bool exclude_pad); + /// \return The pad type for pooling. + const PadType& get_auto_pad() const; + void set_auto_pad(const PadType& auto_pad); + op::RoundingType get_rounding_type() const; + void set_rounding_type(op::RoundingType rounding_type); + /// \return The default value for AvgPool. 
+ OPENVINO_SUPPRESS_DEPRECATED_START + std::shared_ptr get_default_value() const override; + OPENVINO_SUPPRESS_DEPRECATED_END + +protected: + ngraph::Shape m_kernel; + Strides m_strides; + ngraph::Shape m_pads_begin; + ngraph::Shape m_pads_end; + bool m_exclude_pad{true}; + PadType m_auto_pad{PadType::EXPLICIT}; + op::RoundingType m_rounding_type{op::RoundingType::FLOOR}; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/constant.hpp b/ngraph/core/include/openvino/op/constant.hpp new file mode 100644 index 00000000000..f13899b31ca --- /dev/null +++ b/ngraph/core/include/openvino/op/constant.hpp @@ -0,0 +1,711 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "ngraph/runtime/aligned_buffer.hpp" +#include "ngraph/runtime/host_tensor.hpp" +#include "ngraph/runtime/shared_buffer.hpp" +#include "ngraph/util.hpp" +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/core/node.hpp" +#include "openvino/core/type/element_type.hpp" +#include "openvino/core/type/element_type_traits.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Class for constants. +class OPENVINO_API Constant : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Constant() = default; + + /// \brief Initialize a constant from tensor + /// \param tensor The tensor with data + Constant(const std::shared_ptr& tensor); + + /// \brief Constructs a tensor constant. + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param values A vector of literals for initializing the tensor constant. The + /// size of values must match the size of the shape. 
+ template + Constant(const element::Type& type, const ngraph::Shape& shape, const std::vector& values) + : Constant(type, shape) { + NODE_VALIDATION_CHECK(this, + values.size() == 1 || values.size() == shape_size(m_shape), + "Did not get the expected number of literals for a constant of shape ", + m_shape, + " (got ", + values.size(), + ", expected ", + (shape_size(m_shape) == 1 ? "" : "1 or "), + shape_size(m_shape), + ")."); + + if (values.size() == 1) { + fill_data(type, values.front()); + } else { + write_values(values); + } + m_all_elements_bitwise_identical = are_all_data_elements_bitwise_identical(); + } + + /// \brief Create uninitialized constant + Constant(const element::Type& type, const ngraph::Shape& shape); + /// \brief Constructs a uniform tensor constant. + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param value A scalar for initializing the uniform tensor constant. The + /// value is broadcast to the specified shape. 
+ template ::value>::type> + Constant(const element::Type& type, const ngraph::Shape& shape, T value) : Constant(type, shape) { + fill_data(type, value); + m_all_elements_bitwise_identical = true; + } + + template + void fill_data(const element::Type& type, T value) { + using Type_t = element::Type_t; +#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) +# pragma GCC diagnostic push +# pragma GCC diagnostic error "-Wswitch" +# pragma GCC diagnostic error "-Wswitch-enum" +#endif + switch (type) { + case Type_t::boolean: + fill_data(value); + break; + case Type_t::bf16: + fill_data(value); + break; + case Type_t::f16: + fill_data(value); + break; + case Type_t::f32: + fill_data(value); + break; + case Type_t::f64: + fill_data(value); + break; + case Type_t::i4: + fill_data(value); + break; + case Type_t::i8: + fill_data(value); + break; + case Type_t::i16: + fill_data(value); + break; + case Type_t::i32: + fill_data(value); + break; + case Type_t::i64: + fill_data(value); + break; + case Type_t::u1: + fill_data(value); + break; + case Type_t::u4: + fill_data(value); + break; + case Type_t::u8: + fill_data(value); + break; + case Type_t::u16: + fill_data(value); + break; + case Type_t::u32: + fill_data(value); + break; + case Type_t::u64: + fill_data(value); + break; + case Type_t::undefined: + case Type_t::dynamic: + throw std::runtime_error("unsupported type"); + } +#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) +# pragma GCC diagnostic pop +#endif + } + + /// \brief Constructs a tensor constant + /// This constructor is mainly to support deserialization of constants. + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param values A list of string values to use as the constant data. 
+ Constant(const element::Type& type, const ngraph::Shape& shape, const std::vector& values); + + /// \brief Constructs a tensor constant with the supplied data + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param data A void* to constant data. + Constant(const element::Type& type, const ngraph::Shape& shape, const void* data); + + /// \brief Constructs a tensor constant with the supplied data + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param data A pointer to pre-allocated shared data. + template + Constant(const element::Type& type, + const ngraph::Shape& shape, + std::shared_ptr> data) + : m_element_type(type), + m_shape(shape) { + m_data = data; + constructor_validate_and_infer_types(); + } + + Constant(const Constant& other); + Constant(const Constant& other, const ngraph::Shape& new_shape); + Constant& operator=(const Constant&) = delete; + + ~Constant() override; + + void validate_and_infer_types() override { + infer_element_type(); + set_output_type(0, m_element_type, m_shape); + } + + bool visit_attributes(AttributeVisitor& visitor) override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& outputs) const override; + bool evaluate_upper(const HostTensorVector& outputs) const override; + + // Don't constant fold a constant; it would make a copy + bool constant_fold(OutputVector& outputs, const OutputVector& inputs) override { + (void)outputs; + (void)inputs; + return false; + } + + /// \brief Returns the value of the constant node as a Shape object + /// Can only be used on element::i64 nodes and interprets + /// negative values as zeros. 
+ ngraph::Shape get_shape_val() const; + /// \brief Returns the value of the constant node as a Strides + /// object + /// Can only be used on element::i64 nodes and interprets + /// negative values as zeros. + Strides get_strides_val() const; + /// \brief Returns the value of the constant node as a Coordinate + /// object + /// Can only be used on element::i64 nodes and interprets + /// negative values as zeros. + Coordinate get_coordinate_val() const; + /// \brief Returns the value of the constant node as a + /// CoordinateDiff object + /// Can only be used on element::i64 nodes. + CoordinateDiff get_coordinate_diff_val() const; + /// \brief Returns the value of the constant node as an AxisVector + /// object + /// Can only be used on element::i64 nodes and interprets + /// negative values as zeros. + AxisVector get_axis_vector_val() const; + /// \brief Returns the value of the constant node as an AxisSet + /// object + /// Can only be used on element::i64 nodes and interprets + /// negative values as zeros. + /// Repeated values are allowed. + AxisSet get_axis_set_val() const; + + /// \brief Update Constant shape. New shape size must equal to the data elements + /// count + /// + /// \param shape The shape of the tensor constant. + OPENVINO_DEPRECATED("Use Constant c-tor with shape argument instead") + void set_data_shape(const ngraph::Shape& shape); + + /// \brief Wrapper around constructing a shared_ptr of a Constant + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param values A vector of values to use as the constant data. + template + static std::shared_ptr create(const element::Type& type, + const ngraph::Shape& shape, + const std::vector& values) { + return std::make_shared(type, shape, values); + } + + /// \brief Wrapper around constructing a shared_ptr of a Constant + /// + /// \param type The element type of the tensor constant. 
+ /// \param shape The shape of the tensor constant. + /// \param values An initializer_list of values to use as the constant data. + template + static std::shared_ptr create(const element::Type& type, + const ngraph::Shape& shape, + std::initializer_list values) { + return std::make_shared(type, shape, std::vector{values}); + } + + /// \brief Wrapper around constructing a shared_ptr of a Constant + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param memory An continues memory chunk which contains the constant data. + static std::shared_ptr create(const element::Type& type, const ngraph::Shape& shape, const void* memory) { + return std::make_shared(type, shape, memory); + } + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The initialization literals for the tensor constant. + std::vector get_value_strings() const; + + template + std::vector get_vector() const { + const T* p = get_data_ptr(); + if (p == nullptr) + throw std::runtime_error("Cannot create vector! Buffer is not allocated."); + return std::vector(p, p + shape_size(m_shape)); + } + + /// \brief Return the Constant's value as a vector cast to type T + /// + /// \tparam T Type to which data vector's entries will be cast. + /// \return Constant's data vector. 
+ template + std::vector cast_vector() const { + auto source_type = get_element_type(); + std::vector rc; + using Type_t = element::Type_t; +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4244) +#endif + switch (source_type) { + case Type_t::boolean: + cast_vector(rc); + break; + case Type_t::bf16: + cast_vector(rc); + break; + case Type_t::f16: + cast_vector(rc); + break; + case Type_t::f32: + cast_vector(rc); + break; + case Type_t::f64: + cast_vector(rc); + break; + case Type_t::i4: + cast_vector(rc); + break; + case Type_t::i8: + cast_vector(rc); + break; + case Type_t::i16: + cast_vector(rc); + break; + case Type_t::i32: + cast_vector(rc); + break; + case Type_t::i64: + cast_vector(rc); + break; + case Type_t::u1: + cast_vector(rc); + break; + case Type_t::u4: + cast_vector(rc); + break; + case Type_t::u8: + cast_vector(rc); + break; + case Type_t::u16: + cast_vector(rc); + break; + case Type_t::u32: + cast_vector(rc); + break; + case Type_t::u64: + cast_vector(rc); + break; + default: + throw std::runtime_error("unsupported type"); + } +#if defined(_MSC_VER) +# pragma warning(pop) +#endif + return rc; + } + + const void* get_data_ptr() const { + return (m_data ? 
m_data->get_ptr() : nullptr); + } + template + const T* get_data_ptr() const { + if (sizeof(T) > m_element_type.size() && shape_size(m_shape) > 0) { + throw ov::Exception("Buffer over-read"); + } + + return static_cast(get_data_ptr()); + } + + template + const typename element_type_traits::value_type* get_data_ptr() const { + NGRAPH_CHECK(ET == get_element_type(), "get_data_ptr() called for incorrect element type."); + return static_cast::value_type*>(get_data_ptr()); + } + + bool get_all_data_elements_bitwise_identical() const { + return m_all_elements_bitwise_identical; + } + std::string convert_value_to_string(size_t index) const; + + /** + * \brief Allows to avoid buffer allocation on the visit_attributes call + */ + void alloc_buffer_on_visit_attributes(bool val) { + m_alloc_buffer_on_visit_attributes = val; + } + +private: + template , + typename std::enable_if::type = true> + StorageDataType get_element_value(size_t index) const { + return get_data_ptr()[index]; + } + + template , + typename std::enable_if::type = true> + StorageDataType get_element_value(size_t index) const { + return (get_data_ptr()[index / 8] >> (7 - (index % 8))) & 1; + } + + template , + typename std::enable_if::type = true> + StorageDataType get_element_value(size_t index) const { + return (get_data_ptr()[index / 2] >> (index % 2 ? 0 : 4)) & 0x0F; + } + + template , + typename std::enable_if::type = true> + StorageDataType get_element_value(size_t index) const { + const uint8_t i4data = (get_data_ptr()[index / 2] >> (index % 2 ? 0 : 4)) & 0x0F; + const bool is_negative_number = (i4data >> 3) & 0x01; + const int8_t data = is_negative_number ? 
i4data | 0xF0 : i4data; + return data; + } + + template ::type = true> + void cast_vector(std::vector& output_vector) const { + // this function is workaround for waring during windows building + // build complains for vector creation based on iterators + // which point on different type than destination vector::value_type + using IN_T = fundamental_type_for; + auto source_vector = get_vector(); + output_vector.reserve(source_vector.size()); + + std::transform(source_vector.begin(), source_vector.end(), std::back_inserter(output_vector), [](IN_T c) { + return static_cast(c); + }); + } + + template ::type = true> + void cast_vector(std::vector& output) const { + using IN_T = fundamental_type_for; + const auto element_number = shape_size(m_shape); + const auto source_begin = get_data_ptr(); + const auto source_end = std::next(source_begin, (element_number + 7) / 8); + const auto round_element_no = element_number % 8 ? element_number - element_number % 8 + 8 : element_number; + output.reserve(round_element_no); // adds 7 more elements here? + std::for_each(source_begin, source_end, [&](IN_T c) { + for (const auto i : {7, 6, 5, 4, 3, 2, 1, 0}) { + const uint8_t data = (c >> i) & 0x01; + output.push_back(data); + } + }); + output.resize(element_number); + } + + template ::type = true> + void cast_vector(std::vector& output) const { + using IN_T = fundamental_type_for; + const auto element_number = shape_size(m_shape); + const auto source_begin = get_data_ptr(); + const auto source_end = std::next(source_begin, (element_number + 1) / 2); + const auto round_element_no = element_number % 2 ? element_number + 1 : element_number; + output.reserve(round_element_no); // adds 1 more elements here? 
+ std::for_each(source_begin, source_end, [&](IN_T c) { + for (const auto i : {4, 0}) { + const uint8_t data = (c >> i) & 0x0F; + output.push_back(data); + } + }); + output.resize(element_number); + } + template ::type = true> + void cast_vector(std::vector& output) const { + using IN_T = fundamental_type_for; + const auto element_number = shape_size(m_shape); + const auto source_begin = get_data_ptr(); + const auto source_end = std::next(source_begin, (element_number + 1) / 2); + const auto round_element_no = element_number % 2 ? element_number + 1 : element_number; + output.reserve(round_element_no); // adds 1 more elements here? + std::for_each(source_begin, source_end, [&](IN_T c) { + for (const auto i : {4, 0}) { + const uint8_t i4data = (c >> i) & 0x0F; + const bool is_negative_number = (i4data >> 3) & 0x01; + const int8_t data = is_negative_number ? i4data | 0xF0 : i4data; + output.push_back(data); + } + }); + output.resize(element_number); + } + + template , + typename std::enable_if::type = true> + void fill_data(const T& value) { + const auto size = shape_size(m_shape); + const auto v = static_cast(value); + std::fill_n(get_data_ptr_nc(), size, v); + } + + template , + typename std::enable_if::type = true> + void fill_data(const T& value) { + const StorageDataType v = value ? 0xFF : 0x00; + std::fill_n(get_data_ptr_nc(), mem_size(), v); + } + + template , + typename std::enable_if::type = true> + void fill_data(const T& value) { + uint8_t v = value_in_range(value); + v &= 0x0F; + v += v << 4; + std::fill_n(get_data_ptr_nc(), mem_size(), v); + } + + void allocate_buffer(); + + void* get_data_ptr_nc() { + return (m_data ? 
m_data->get_ptr() : nullptr); + } + + template + typename element_type_traits::value_type* get_data_ptr_nc() { + NGRAPH_CHECK(ET == get_element_type(), "get_data_ptr_nc() called for incorrect element type."); + return static_cast::value_type*>(get_data_ptr_nc()); + } + + Constant(const OutputVector& args) : Op(args), m_shape({}) {} + + virtual void infer_element_type() {} + template + void write_values(const std::vector& values) { + write_to_buffer(values); + } + + template , + typename std::enable_if::type = true> + void write_buffer(const std::vector& source) { + auto p = get_data_ptr_nc(); + for (size_t i = 0; i < source.size(); i++) { + p[i] = static_cast(source[i]); + } + } + + template , + typename std::enable_if::type = true> + void write_buffer(const std::vector& source) { + auto p = get_data_ptr_nc(); + size_t i = 0; + for (; i < source.size() / 2; i++) { + const auto v1 = value_in_range(source[i * 2]) & 0x0F; + const auto v2 = value_in_range(source[i * 2 + 1]) & 0x0F; + const auto v = (v1 << 4) | v2; + p[i] = static_cast(v); + } + if (source.size() % 2) { + const auto v1 = value_in_range(source[i * 2]) & 0x0F; + const auto v = v1 << 4; + p[i] = static_cast(v); + } + } + + template , + typename std::enable_if::type = true> + void write_buffer(const std::vector& source) { + auto p = get_data_ptr_nc(); + size_t i = 0; + for (; i < source.size() / 8; i++) { + uint8_t v{}; + for (int j = 0; j != 8; j++) { + const uint8_t b = source[i * 8 + j] ? 0x01 << (7 - j) : 0; + v |= b; + } + p[i] = static_cast(v); + } + uint8_t v{}; + for (unsigned j = 0; j != source.size() % 8; j++) { + const uint8_t b = source[i * 8 + j] ? 
0x01 << (7 - j) : 0; + v |= b; + } + p[i] = static_cast(v); + } + + template + void write_to_buffer(const std::vector& source) { + const auto& target_type = m_element_type; + size_t target_element_count = shape_size(m_shape); + if (source.size() != target_element_count) { + throw std::runtime_error("Constant initializer does not match shape"); + } + using Type_t = element::Type_t; +#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) +# pragma GCC diagnostic push +# pragma GCC diagnostic error "-Wswitch" +# pragma GCC diagnostic error "-Wswitch-enum" +#endif + switch (target_type) { + case Type_t::boolean: + write_buffer(source); + break; + case Type_t::bf16: + write_buffer(source); + break; + case Type_t::f16: + write_buffer(source); + break; + case Type_t::f32: + write_buffer(source); + break; + case Type_t::f64: + write_buffer(source); + break; + case Type_t::i4: + write_buffer(source); + break; + case Type_t::i8: + write_buffer(source); + break; + case Type_t::i16: + write_buffer(source); + break; + case Type_t::i32: + write_buffer(source); + break; + case Type_t::i64: + write_buffer(source); + break; + case Type_t::u1: + write_buffer(source); + break; + case Type_t::u4: + write_buffer(source); + break; + case Type_t::u8: + write_buffer(source); + break; + case Type_t::u16: + write_buffer(source); + break; + case Type_t::u32: + write_buffer(source); + break; + case Type_t::u64: + write_buffer(source); + break; + case element::Type_t::undefined: + case element::Type_t::dynamic: + throw std::runtime_error("unsupported type"); + } +#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) +# pragma GCC diagnostic pop +#endif + } + template ::type = true> + static ngraph::fundamental_type_for value_in_range(const ValueT& value) { + const auto result = ngraph::fundamental_type_for(value); + NGRAPH_CHECK(0 <= result && result <= 15, "assigned value out of range u4 values"); + return result; + } + + template ::type = true> + static 
ngraph::fundamental_type_for value_in_range(const ValueT& value) { + const auto result = ngraph::fundamental_type_for(value); + NGRAPH_CHECK(-8 <= result && result <= 7, "assigned value out of range i4 values"); + return result; + } + + bool are_all_data_elements_bitwise_identical() const; + static constexpr size_t host_alignment() { + return 64; + } + + size_t mem_size() const { + const bool bitwidth_less_than_byte = m_element_type.bitwidth() < 8; + if (bitwidth_less_than_byte) { + const auto size = shape_size(m_shape); + const auto bitwidth = size * m_element_type.bitwidth(); + // for rounding by `(bitwidth + 7) / 8` will work for + // `bitwidth < numeric_limits::max() - 7` + return bitwidth / 8 + (bitwidth % 8 ? 1 : 0); + } + return shape_size(m_shape) * m_element_type.size(); + } + + element::Type m_element_type; + ngraph::Shape m_shape{}; + std::shared_ptr m_data; + bool m_all_elements_bitwise_identical; + bool m_alloc_buffer_on_visit_attributes = true; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/logical_and.hpp b/ngraph/core/include/openvino/op/logical_and.hpp new file mode 100644 index 00000000000..a2d09829abe --- /dev/null +++ b/ngraph/core/include/openvino/op/logical_and.hpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/binary_elementwise_logical.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise logical-and operation. +/// +class OPENVINO_API LogicalAnd : public util::BinaryElementwiseLogical { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a logical-and operation. + LogicalAnd() = default; + + /// \brief Constructs a logical-and operation. + /// + /// \param arg0 Output that produces the first input tensor.
+ /// `[d0, ...]` + /// \param arg1 Output that produces the second input tensor.
+ /// `[d0, ...]` + /// \param auto_broadcast Auto broadcast specification + /// + /// Output `[d0, ...]` + /// + LogicalAnd(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor& visitor) override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/read_value.hpp b/ngraph/core/include/openvino/op/read_value.hpp new file mode 100644 index 00000000000..f3e83254a93 --- /dev/null +++ b/ngraph/core/include/openvino/op/read_value.hpp @@ -0,0 +1,79 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/read_value_base.hpp" +#include "openvino/op/util/variable.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief ReadValue operation creates the variable with `variable_id` and returns value +/// of this variable. +class OPENVINO_API ReadValue : public util::ReadValueBase { +public: + OPENVINO_RTTI_DECLARATION; + ReadValue() = default; + + /// \brief Constructs a ReadValue operation. + /// + /// \param init_value Node that produces the input tensor. + /// \param variable_id identificator of the variable to create. 
+ ReadValue(const Output& init_value, const std::string& variable_id); + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::string get_variable_id() const override { + return m_variable_id; + } + +private: + std::string m_variable_id; +}; +} // namespace v3 + +namespace v6 { +/// \brief ReadValue operation gets an input value from the variable with `variable_id` +/// and returns it as an output. +class OPENVINO_API ReadValue : public util::ReadValueBase { +public: + OPENVINO_RTTI_DECLARATION; + ReadValue() = default; + + /// \brief Constructs a ReadValue operation. + /// + /// \param init_value Node that produces the input tensor. + /// \param variable Class for storing and synchronizing element types, shapes and + /// identifiers + /// between pairs of Assign/ReadValue nodes. + ReadValue(const Output& init_value, const std::shared_ptr& variable); + + void validate_and_infer_types() override; + + void revalidate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::string get_variable_id() const override { + NGRAPH_CHECK(m_variable, "Variable is not initialized. 
Variable_id is unavailable"); + return m_variable->get_info().variable_id; + } + + bool evaluate(const HostTensorVector& outputs, + const HostTensorVector& inputs, + const EvaluationContext& evaluation_context) const override; + bool has_evaluate() const override; + + bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; +}; +} // namespace v6 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/sink.hpp b/ngraph/core/include/openvino/op/sink.hpp new file mode 100644 index 00000000000..e603378183d --- /dev/null +++ b/ngraph/core/include/openvino/op/sink.hpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +/// Root of nodes that can be sink nodes +class OPENVINO_API Sink : public Op { +public: + ~Sink() override = 0; + OPENVINO_RTTI_DECLARATION; + +protected: + Sink() : Op() {} + + explicit Sink(const OutputVector& arguments) : Op(arguments) {} +}; +} // namespace op +using SinkVector = std::vector>; +} // namespace ov diff --git a/ngraph/core/include/openvino/op/util/assign_base.hpp b/ngraph/core/include/openvino/op/util/assign_base.hpp new file mode 100644 index 00000000000..af195f6496a --- /dev/null +++ b/ngraph/core/include/openvino/op/util/assign_base.hpp @@ -0,0 +1,22 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/sink.hpp" +#include "openvino/op/util/variable_extension.hpp" + +namespace ov { +namespace op { +namespace util { +class OPENVINO_API AssignBase : public Sink, public VariableExtension { +public: + OPENVINO_RTTI_DECLARATION; + AssignBase() = default; + /// \brief Constructs an AssignBase operation. 
+ explicit AssignBase(const OutputVector& arguments) : Sink(arguments) {} +}; +} // namespace util +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/util/read_value_base.hpp b/ngraph/core/include/openvino/op/util/read_value_base.hpp new file mode 100644 index 00000000000..97e101c9394 --- /dev/null +++ b/ngraph/core/include/openvino/op/util/read_value_base.hpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/variable_extension.hpp" + +namespace ov { +namespace op { +namespace util { +class OPENVINO_API ReadValueBase : public Op, public VariableExtension { +public: + OPENVINO_RTTI_DECLARATION; + + ReadValueBase() = default; + + /// \brief Constructs an AssignBase operation. + explicit ReadValueBase(const OutputVector& arguments) : Op(arguments) {} +}; +} // namespace util +} // namespace op +} // namespace ov diff --git a/ngraph/core/src/op/abs.cpp b/ngraph/core/src/op/abs.cpp index de7cdf63499..d863b224b10 100644 --- a/ngraph/core/src/op/abs.cpp +++ b/ngraph/core/src/op/abs.cpp @@ -10,30 +10,27 @@ #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/abs.hpp" -using namespace std; -using namespace ngraph; +NGRAPH_RTTI_DEFINITION(ov::op::v0::Abs, "Abs", 0); -constexpr NodeTypeInfo op::Abs::type_info; - -op::Abs::Abs(const Output& arg) : UnaryElementwiseArithmetic(arg) { +ov::op::v0::Abs::Abs(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); } -shared_ptr op::Abs::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v0::Abs::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_Abs_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0)); + return std::make_shared(new_args.at(0)); } namespace absop { -template -inline bool evaluate(const 
HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) { - using T = typename element_type_traits::value_type; - runtime::reference::abs((arg0->get_data_ptr()), (out->get_data_ptr()), count); +template +inline bool evaluate(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out, const size_t count) { + using T = typename ov::element_type_traits::value_type; + ngraph::runtime::reference::abs((arg0->get_data_ptr()), (out->get_data_ptr()), count); return true; } -bool evaluate_abs(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) { +bool evaluate_abs(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out, const size_t count) { bool rc = true; out->set_unary(arg0); @@ -54,12 +51,12 @@ bool evaluate_abs(const HostTensorPtr& arg0, const HostTensorPtr& out, const siz } } // namespace absop -bool op::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::v0::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v0_Abs_evaluate); return absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0))); } -bool op::Abs::has_evaluate() const { +bool ov::op::v0::Abs::has_evaluate() const { NGRAPH_OP_SCOPE(v0_Abs_has_evaluate); switch (get_input_element_type(0)) { case ngraph::element::i32: diff --git a/ngraph/core/src/op/acos.cpp b/ngraph/core/src/op/acos.cpp index fb66557d4f4..53b56b6762c 100644 --- a/ngraph/core/src/op/acos.cpp +++ b/ngraph/core/src/op/acos.cpp @@ -18,30 +18,27 @@ #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/acos.hpp" -using namespace std; -using namespace ngraph; +NGRAPH_RTTI_DEFINITION(ov::op::v0::Acos, "Acos", 0); -constexpr NodeTypeInfo op::Acos::type_info; - -op::Acos::Acos(const Output& arg) : UnaryElementwiseArithmetic(arg) { +ov::op::v0::Acos::Acos(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); } -shared_ptr 
op::Acos::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v0::Acos::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_Acos_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0)); + return std::make_shared(new_args.at(0)); } namespace acosop { -template -inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) { - using T = typename element_type_traits::value_type; - runtime::reference::acos(arg0->get_data_ptr(), out->get_data_ptr(), count); +template +inline bool evaluate(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out, const size_t count) { + using T = typename ov::element_type_traits::value_type; + ngraph::runtime::reference::acos(arg0->get_data_ptr(), out->get_data_ptr(), count); return true; } -bool evaluate_acos(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) { +bool evaluate_acos(const ov::HostTensorPtr& arg0, const ov::HostTensorPtr& out, const size_t count) { bool rc = true; out->set_unary(arg0); @@ -61,12 +58,12 @@ bool evaluate_acos(const HostTensorPtr& arg0, const HostTensorPtr& out, const si } } // namespace acosop -bool op::Acos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::v0::Acos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v0_Acos_evaluate); return acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0))); } -bool op::Acos::has_evaluate() const { +bool ov::op::v0::Acos::has_evaluate() const { NGRAPH_OP_SCOPE(v0_Acos_has_evaluate); switch (get_input_element_type(0)) { case ngraph::element::i32: diff --git a/ngraph/core/src/op/acosh.cpp b/ngraph/core/src/op/acosh.cpp index 230167c9f9b..99f9bd03bca 100644 --- a/ngraph/core/src/op/acosh.cpp +++ b/ngraph/core/src/op/acosh.cpp @@ -12,29 +12,26 @@ #include "ngraph/runtime/reference/acosh.hpp" 
#include "ngraph/type/element_type.hpp" -using namespace std; -using namespace ngraph; +OPENVINO_RTTI_DEFINITION(ov::op::v3::Acosh, "Acosh", 3, util::UnaryElementwiseArithmetic); -NGRAPH_RTTI_DEFINITION(op::v3::Acosh, "Acosh", 3, util::UnaryElementwiseArithmetic); - -op::v3::Acosh::Acosh(const Output& arg) : UnaryElementwiseArithmetic(arg) { +ov::op::v3::Acosh::Acosh(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); } -shared_ptr op::v3::Acosh::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v3::Acosh::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v3_Acosh_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0)); + return std::make_shared(new_args.at(0)); } namespace acoshop { -template -bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out) { - runtime::reference::acosh(arg0->get_data_ptr(), out->get_data_ptr(), shape_size(arg0->get_shape())); +template +bool evaluate(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out) { + ngraph::runtime::reference::acosh(arg0->get_data_ptr(), out->get_data_ptr(), shape_size(arg0->get_shape())); return true; } -bool evaluate_acosh(const HostTensorPtr& arg0, const HostTensorPtr& out) { +bool evaluate_acosh(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out) { bool rc = true; out->set_unary(arg0); switch (arg0->get_element_type()) { @@ -52,12 +49,12 @@ bool evaluate_acosh(const HostTensorPtr& arg0, const HostTensorPtr& out) { } } // namespace acoshop -bool op::v3::Acosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::v3::Acosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v3_Acosh_evaluate); return acoshop::evaluate_acosh(inputs[0], outputs[0]); } -bool op::v3::Acosh::has_evaluate() const { +bool ov::op::v3::Acosh::has_evaluate() const 
{ NGRAPH_OP_SCOPE(v3_Acosh_has_evaluate); switch (get_input_element_type(0)) { case ngraph::element::i32: diff --git a/ngraph/core/src/op/adaptive_avg_pool.cpp b/ngraph/core/src/op/adaptive_avg_pool.cpp index e8989c9eceb..de8c00f5d01 100644 --- a/ngraph/core/src/op/adaptive_avg_pool.cpp +++ b/ngraph/core/src/op/adaptive_avg_pool.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v8::AdaptiveAvgPool, "AdaptiveAvgPool", 8); +OPENVINO_RTTI_DEFINITION(ov::op::v8::AdaptiveAvgPool, "AdaptiveAvgPool", 8); op::v8::AdaptiveAvgPool::AdaptiveAvgPool(const Output& data, const Output& output_shape) : Op({data, output_shape}) { diff --git a/ngraph/core/src/op/adaptive_max_pool.cpp b/ngraph/core/src/op/adaptive_max_pool.cpp index e5f57db4727..186e08799f1 100644 --- a/ngraph/core/src/op/adaptive_max_pool.cpp +++ b/ngraph/core/src/op/adaptive_max_pool.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v8::AdaptiveMaxPool, "AdaptiveMaxPool", 8); +OPENVINO_RTTI_DEFINITION(ov::op::v8::AdaptiveMaxPool, "AdaptiveMaxPool", 8); op::v8::AdaptiveMaxPool::AdaptiveMaxPool(const Output& data, const Output& output_shape, diff --git a/ngraph/core/src/op/add.cpp b/ngraph/core/src/op/add.cpp index d6a58e8efb1..35eec6a242f 100644 --- a/ngraph/core/src/op/add.cpp +++ b/ngraph/core/src/op/add.cpp @@ -54,7 +54,7 @@ bool evaluate_add(const HostTensorPtr& arg0, // ------------------------------- v1 ------------------------------------------ -NGRAPH_RTTI_DEFINITION(op::v1::Add, "Add", 1, util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(ov::op::v1::Add, "Add", 1, util::BinaryElementwiseArithmetic); op::v1::Add::Add(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/asin.cpp b/ngraph/core/src/op/asin.cpp index b7675408de4..06bf6904bcf 100644 --- a/ngraph/core/src/op/asin.cpp +++ 
b/ngraph/core/src/op/asin.cpp @@ -22,7 +22,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::Asin::type_info; +OPENVINO_RTTI_DEFINITION(ov::op::v0::Asin, "Asin", 0, util::BinaryElementwiseArithmetic); op::Asin::Asin(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/asinh.cpp b/ngraph/core/src/op/asinh.cpp index e6b78fc0f11..3c44b1fb953 100644 --- a/ngraph/core/src/op/asinh.cpp +++ b/ngraph/core/src/op/asinh.cpp @@ -16,7 +16,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v3::Asinh, "Asinh", 3, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(ov::op::v3::Asinh, "Asinh", 3, util::UnaryElementwiseArithmetic); op::v3::Asinh::Asinh(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/assign.cpp b/ngraph/core/src/op/assign.cpp index 3476abd5d87..2b7dc2c8741 100644 --- a/ngraph/core/src/op/assign.cpp +++ b/ngraph/core/src/op/assign.cpp @@ -13,9 +13,8 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::AssignBase, "AssignBase", 0); -NGRAPH_RTTI_DEFINITION(op::v3::Assign, "Assign", 3, op::Sink); -NGRAPH_RTTI_DEFINITION(op::v6::Assign, "Assign", 6, op::Sink); +OPENVINO_RTTI_DEFINITION(ov::op::v3::Assign, "Assign", 3, op::Sink); +OPENVINO_RTTI_DEFINITION(ov::op::v6::Assign, "Assign", 6, op::Sink); op::v3::Assign::Assign(const Output& new_value, const std::string& variable_id) : AssignBase({new_value}), diff --git a/ngraph/core/src/op/atan.cpp b/ngraph/core/src/op/atan.cpp index 95a7058164d..131324d18a7 100644 --- a/ngraph/core/src/op/atan.cpp +++ b/ngraph/core/src/op/atan.cpp @@ -11,7 +11,6 @@ #include "ngraph/axis_set.hpp" #include "ngraph/op/add.hpp" #include "ngraph/op/broadcast.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/op/divide.hpp" #include "ngraph/op/multiply.hpp" #include "ngraph/runtime/host_tensor.hpp" @@ -21,7 
+20,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Atan, "Atan", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(ov::op::v0::Atan, "Atan", 0, util::UnaryElementwiseArithmetic); op::Atan::Atan(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/atanh.cpp b/ngraph/core/src/op/atanh.cpp index 980ac6b7952..1a6c89c3aaa 100644 --- a/ngraph/core/src/op/atanh.cpp +++ b/ngraph/core/src/op/atanh.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v3::Atanh, "Atanh", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(ov::op::v3::Atanh, "Atanh", 0, util::UnaryElementwiseArithmetic); op::v3::Atanh::Atanh(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/avg_pool.cpp b/ngraph/core/src/op/avg_pool.cpp index 0f1a7f63d3e..b6cef6ad773 100644 --- a/ngraph/core/src/op/avg_pool.cpp +++ b/ngraph/core/src/op/avg_pool.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; // *** AvgPool OP SET 1 *** -NGRAPH_RTTI_DEFINITION(op::v1::AvgPool, "AvgPool", 1); +OPENVINO_RTTI_DEFINITION(op::v1::AvgPool, "AvgPool", 1); op::v1::AvgPool::AvgPool(const Output& arg, const Strides& strides, @@ -205,5 +205,5 @@ shared_ptr op::v1::AvgPool::clone_with_new_inputs(const OutputVector& new_ } shared_ptr op::v1::AvgPool::get_default_value() const { - return op::Constant::create(get_element_type(), get_shape(), {0}); + return op::v0::Constant::create(get_element_type(), get_shape(), {0}); } diff --git a/ngraph/core/src/op/constant.cpp b/ngraph/core/src/op/constant.cpp index 763f508d16d..4374b8691a9 100644 --- a/ngraph/core/src/op/constant.cpp +++ b/ngraph/core/src/op/constant.cpp @@ -15,7 +15,6 @@ #include "ngraph/op/util/attr_types.hpp" #include "ngraph/util.hpp" -using namespace ngraph; using namespace std; template @@ -33,15 +32,15 @@ static 
inline string to_cpp_string(T value) { return rc; } -NGRAPH_RTTI_DEFINITION(op::Constant, "Constant", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::Constant, "Constant", 0); -op::Constant::Constant(const shared_ptr& tensor) { +ov::op::v0::Constant::Constant(const shared_ptr& tensor) { m_element_type = tensor->get_element_type(); m_shape = tensor->get_shape(); // Share data from HostTensor if we work with it // And copy data in other cas - if (auto hostTensor = std::dynamic_pointer_cast(tensor)) { - m_data = make_shared>>( + if (auto hostTensor = std::dynamic_pointer_cast(tensor)) { + m_data = make_shared>>( static_cast(hostTensor->get_data_ptr()), tensor->get_size_in_bytes(), tensor); @@ -54,7 +53,9 @@ op::Constant::Constant(const shared_ptr& tensor) { constructor_validate_and_infer_types(); } -op::Constant::Constant(const element::Type& type, const Shape& shape, const std::vector& values) +ov::op::v0::Constant::Constant(const element::Type& type, + const ngraph::Shape& shape, + const std::vector& values) : Constant(type, shape) { NGRAPH_SUPPRESS_DEPRECATED_START NODE_VALIDATION_CHECK(this, @@ -76,49 +77,49 @@ op::Constant::Constant(const element::Type& type, const Shape& shape, const std: fill_data(stoi(values[0])); break; case Type_t::bf16: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::f16: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::f32: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::f64: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::i4: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::i8: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::i16: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); 
break; case Type_t::i32: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::i64: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::u1: fill_data(stoi(values[0])); break; case Type_t::u4: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::u8: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::u16: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::u32: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::u64: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::undefined: throw std::runtime_error("deserialize unsupported type undefined"); @@ -129,52 +130,52 @@ op::Constant::Constant(const element::Type& type, const Shape& shape, const std: } else { switch (m_element_type) { case Type_t::boolean: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::bf16: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::f16: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::f32: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::f64: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::i4: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::i8: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::i16: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::i32: - write_buffer(parse_string(values)); + 
write_buffer(ngraph::parse_string(values)); break; case Type_t::i64: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::u1: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::u4: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::u8: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::u16: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::u32: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::u64: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::undefined: throw std::runtime_error("deserialize unsupported type undefined"); @@ -186,23 +187,26 @@ op::Constant::Constant(const element::Type& type, const Shape& shape, const std: NGRAPH_SUPPRESS_DEPRECATED_END } -op::Constant::Constant(const element::Type& type, const Shape& shape) : m_element_type(type), m_shape(shape) { +ov::op::v0::Constant::Constant(const element::Type& type, const ngraph::Shape& shape) + : m_element_type(type), + m_shape(shape) { allocate_buffer(); constructor_validate_and_infer_types(); } -void op::Constant::allocate_buffer() { - m_data = make_shared(mem_size(), host_alignment()); +void ov::op::v0::Constant::allocate_buffer() { + m_data = make_shared(mem_size(), host_alignment()); std::memset(m_data->get_ptr(), 0, m_data->size()); } -op::Constant::Constant(const element::Type& type, const Shape& shape, const void* data) : Constant(type, shape) { +ov::op::v0::Constant::Constant(const element::Type& type, const ngraph::Shape& shape, const void* data) + : Constant(type, shape) { size_t size = ceil(shape_size(m_shape) * m_element_type.bitwidth() / 8.f); std::memcpy(get_data_ptr_nc(), data, size); m_all_elements_bitwise_identical = 
are_all_data_elements_bitwise_identical(); } -op::Constant::Constant(const Constant& other) { +ov::op::v0::Constant::Constant(const Constant& other) { m_element_type = other.m_element_type; m_shape = other.m_shape; m_data = other.m_data; @@ -210,7 +214,7 @@ op::Constant::Constant(const Constant& other) { constructor_validate_and_infer_types(); } -op::Constant::Constant(const Constant& other, const Shape& new_shape) { +ov::op::v0::Constant::Constant(const Constant& other, const ngraph::Shape& new_shape) { NGRAPH_CHECK(shape_size(other.m_shape) == shape_size(new_shape), "Shape size " + std::to_string(shape_size(new_shape)) + " is not equal to " + std::to_string(shape_size(other.m_shape))); @@ -221,9 +225,9 @@ op::Constant::Constant(const Constant& other, const Shape& new_shape) { constructor_validate_and_infer_types(); } -op::Constant::~Constant() {} +ov::op::v0::Constant::~Constant() = default; -string op::Constant::convert_value_to_string(size_t index) const { +string ov::op::v0::Constant::convert_value_to_string(size_t index) const { string rc; #if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) # pragma GCC diagnostic push @@ -291,7 +295,7 @@ string op::Constant::convert_value_to_string(size_t index) const { return rc; } -vector op::Constant::get_value_strings() const { +vector ov::op::v0::Constant::get_value_strings() const { vector rc; #if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) @@ -387,17 +391,17 @@ vector op::Constant::get_value_strings() const { return rc; } -Shape op::Constant::get_shape_val() const { +ngraph::Shape ov::op::v0::Constant::get_shape_val() const { NGRAPH_CHECK(m_element_type.is_integral_number()); std::vector out_shape = cast_vector(); - Shape output_shape(shape_size(m_shape)); + ngraph::Shape output_shape(shape_size(m_shape)); std::transform(out_shape.begin(), out_shape.end(), output_shape.begin(), [&](const int64_t& v) { return (v > 0) ? 
v : 0; }); return output_shape; } -Strides op::Constant::get_strides_val() const { +ov::Strides ov::op::v0::Constant::get_strides_val() const { NGRAPH_CHECK(m_element_type == element::i64); std::vector out_strides = cast_vector(); Strides output_strides(shape_size(m_shape)); @@ -407,7 +411,7 @@ Strides op::Constant::get_strides_val() const { return output_strides; } -Coordinate op::Constant::get_coordinate_val() const { +ov::Coordinate ov::op::v0::Constant::get_coordinate_val() const { NGRAPH_CHECK(m_element_type == element::i64); std::vector out_coordinate = cast_vector(); Coordinate output_coordinate(shape_size(m_shape)); @@ -417,7 +421,7 @@ Coordinate op::Constant::get_coordinate_val() const { return output_coordinate; } -CoordinateDiff op::Constant::get_coordinate_diff_val() const { +ov::CoordinateDiff ov::op::v0::Constant::get_coordinate_diff_val() const { NGRAPH_CHECK(m_element_type == element::i64); std::vector out_coordinate_diff = cast_vector(); CoordinateDiff output_coordinate_diff(shape_size(m_shape)); @@ -430,7 +434,7 @@ CoordinateDiff op::Constant::get_coordinate_diff_val() const { return output_coordinate_diff; } -AxisVector op::Constant::get_axis_vector_val() const { +ov::AxisVector ov::op::v0::Constant::get_axis_vector_val() const { NGRAPH_CHECK(m_element_type.is_integral_number()); std::vector out_axis_vector = cast_vector(); AxisVector output_axis_vector(shape_size(m_shape)); @@ -440,7 +444,7 @@ AxisVector op::Constant::get_axis_vector_val() const { return output_axis_vector; } -AxisSet op::Constant::get_axis_set_val() const { +ov::AxisSet ov::op::v0::Constant::get_axis_set_val() const { NGRAPH_CHECK(m_element_type.is_integral_number()); std::vector out_axis_set = cast_vector(); AxisSet output_axis_set; @@ -450,12 +454,12 @@ AxisSet op::Constant::get_axis_set_val() const { return output_axis_set; } -void op::Constant::set_data_shape(const Shape& shape) { +void ov::op::v0::Constant::set_data_shape(const ngraph::Shape& shape) { 
NGRAPH_CHECK(shape_size(shape) == shape_size(m_shape)); m_shape = shape; } -shared_ptr op::Constant::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr ov::op::v0::Constant::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_Constant_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(*this); @@ -476,7 +480,7 @@ static bool test_bitwise_identical(const T* data, const size_t size) { return data_is_constant; } -bool op::Constant::are_all_data_elements_bitwise_identical() const { +bool ov::op::v0::Constant::are_all_data_elements_bitwise_identical() const { bool rc = false; #if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) # pragma GCC diagnostic push @@ -522,9 +526,9 @@ bool op::Constant::are_all_data_elements_bitwise_identical() const { return rc; } -bool op::v0::Constant::visit_attributes(AttributeVisitor& visitor) { +bool ov::op::v0::Constant::visit_attributes(AttributeVisitor& visitor) { NGRAPH_OP_SCOPE(v0_Constant_visit_attributes); - Shape prev_shape = m_shape; + ngraph::Shape prev_shape = m_shape; element::Type prev_type = m_element_type; visitor.on_attribute("element_type", m_element_type); visitor.on_attribute("shape", m_shape); @@ -539,21 +543,21 @@ bool op::v0::Constant::visit_attributes(AttributeVisitor& visitor) { return true; } -bool op::v0::Constant::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::v0::Constant::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v0_Constant_evaluate); auto output = outputs[0]; output->write(get_data_ptr(), output->get_size_in_bytes()); return true; } -bool op::v0::Constant::has_evaluate() const { +bool ov::op::v0::Constant::has_evaluate() const { NGRAPH_OP_SCOPE(v0_Constant_has_evaluate); return true; } -bool op::v0::Constant::evaluate_lower(const HostTensorVector& outputs) const { +bool ov::op::v0::Constant::evaluate_lower(const 
HostTensorVector& outputs) const { return evaluate(outputs, {}); } -bool op::v0::Constant::evaluate_upper(const HostTensorVector& outputs) const { +bool ov::op::v0::Constant::evaluate_upper(const HostTensorVector& outputs) const { return evaluate(outputs, {}); } diff --git a/ngraph/core/src/op/and.cpp b/ngraph/core/src/op/logical_and.cpp similarity index 96% rename from ngraph/core/src/op/and.cpp rename to ngraph/core/src/op/logical_and.cpp index 174eb72653e..30bd9e43a1b 100644 --- a/ngraph/core/src/op/and.cpp +++ b/ngraph/core/src/op/logical_and.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/and.hpp" - #include "itt.hpp" +#include "ngraph/op/and.hpp" #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/and.hpp" #include "ngraph/validation_util.hpp" @@ -12,7 +11,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::LogicalAnd, "LogicalAnd", 1, util::BinaryElementwiseLogical); +OPENVINO_RTTI_DEFINITION(ov::op::v1::LogicalAnd, "LogicalAnd", 1, util::BinaryElementwiseLogical); op::v1::LogicalAnd::LogicalAnd(const Output& arg0, const Output& arg1, diff --git a/ngraph/core/src/op/read_value.cpp b/ngraph/core/src/op/read_value.cpp index 9b77406acd2..41c2242b2e1 100644 --- a/ngraph/core/src/op/read_value.cpp +++ b/ngraph/core/src/op/read_value.cpp @@ -11,9 +11,8 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::ReadValueBase, "ReadValueBase", 0); -NGRAPH_RTTI_DEFINITION(op::v3::ReadValue, "ReadValue", 3); -NGRAPH_RTTI_DEFINITION(op::v6::ReadValue, "ReadValue", 6); +OPENVINO_RTTI_DEFINITION(ov::op::v3::ReadValue, "ReadValue", 3); +OPENVINO_RTTI_DEFINITION(ov::op::v6::ReadValue, "ReadValue", 6); op::v3::ReadValue::ReadValue(const Output& init_value, const std::string& variable_id) : ReadValueBase({init_value}), @@ -101,7 +100,7 @@ bool op::v6::ReadValue::evaluate(const HostTensorVector& outputs, bool use_context = var_value != variable_values.end() && 
!var_value->second->get_reset(); // initial value (inputs[0]) is not supported, use zeros - auto zero_const = make_shared(inputs[0]->get_element_type(), inputs[0]->get_shape(), 0); + auto zero_const = make_shared(inputs[0]->get_element_type(), inputs[0]->get_shape(), 0); auto zero_tensor = make_shared(zero_const); const auto& input_tensor = use_context ? var_value->second->get_value() : zero_tensor; outputs[0]->set_unary(input_tensor); @@ -118,4 +117,4 @@ bool op::v6::ReadValue::has_evaluate() const { bool op::v6::ReadValue::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) { return false; -} \ No newline at end of file +} diff --git a/ngraph/core/src/op/util/assign_base.cpp b/ngraph/core/src/op/util/assign_base.cpp new file mode 100644 index 00000000000..63e5ab51b8b --- /dev/null +++ b/ngraph/core/src/op/util/assign_base.cpp @@ -0,0 +1,7 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/util/assign_base.hpp" + +OPENVINO_RTTI_DEFINITION(ov::op::util::AssignBase, "AssignBase", 0); diff --git a/ngraph/core/src/op/util/read_value_base.cpp b/ngraph/core/src/op/util/read_value_base.cpp new file mode 100644 index 00000000000..c2c00ef84c1 --- /dev/null +++ b/ngraph/core/src/op/util/read_value_base.cpp @@ -0,0 +1,7 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/util/read_value_base.hpp" + +OPENVINO_RTTI_DEFINITION(ov::op::util::ReadValueBase, "ReadValueBase", 0); From bf8113c05671090adc041eb8c01f25ab18a7e757 Mon Sep 17 00:00:00 2001 From: Egor Duplensky Date: Fri, 3 Sep 2021 11:05:20 +0300 Subject: [PATCH 04/52] [CPU] Fix graph serialization, use ngraph serialization directly (#7261) --- .../src/mkldnn_plugin/mkldnn_graph_dumper.cpp | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph_dumper.cpp 
b/inference-engine/src/mkldnn_plugin/mkldnn_graph_dumper.cpp index 523d5dce81b..55e50a24286 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_graph_dumper.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph_dumper.cpp @@ -3,13 +3,17 @@ // #include "mkldnn_graph_dumper.h" + +#include "utils/debug_capabilities.h" #include #include "exec_graph_info.hpp" #include "ie_common.h" #include "mkldnn_debug.h" #include #include "ngraph/ngraph.hpp" -#include "utils/debug_capabilities.h" +#include +#include + #include #include #include @@ -222,7 +226,12 @@ void serializeToXML(const MKLDNNGraph &graph, const std::string& path) { if (path.empty()) return; - graph.dump().serialize(path); + std::string binPath; + ngraph::pass::Manager manager; + manager.register_pass(path, + binPath, + ngraph::pass::Serialize::Version::IR_V10); + manager.run_passes(graph.dump()); } void serializeToCout(const MKLDNNGraph &graph) { From 1eca8a6e49e1db80ed2c50c9ccae1aa0adbeae70 Mon Sep 17 00:00:00 2001 From: Mikhail Nosov Date: Fri, 3 Sep 2021 11:27:14 +0300 Subject: [PATCH 05/52] Combine all PDPD model generation scripts into one python command (#7349) --- .../test/frontend/paddlepaddle/CMakeLists.txt | 25 +++++++------------ .../paddlepaddle/test_models/gen_wrapper.py | 22 ++++++++++------ 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/ngraph/test/frontend/paddlepaddle/CMakeLists.txt b/ngraph/test/frontend/paddlepaddle/CMakeLists.txt index 52b898192d0..dc29868bd0e 100644 --- a/ngraph/test/frontend/paddlepaddle/CMakeLists.txt +++ b/ngraph/test/frontend/paddlepaddle/CMakeLists.txt @@ -30,23 +30,16 @@ target_compile_definitions(${TARGET_NAME} PRIVATE -D TEST_PADDLE_MODELS_DIRNAME= if (paddlepaddle_FOUND) set(TEST_PADDLE_MODELS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TEST_PADDLE_MODELS_DIRNAME}/) - file(GLOB_RECURSE PADDLE_GEN_SCRIPTS ${CMAKE_CURRENT_SOURCE_DIR}/test_models/gen_scripts/generate_*.py) file(GLOB_RECURSE PADDLE_ALL_SCRIPTS ${CMAKE_CURRENT_SOURCE_DIR}/*.py) - 
set(OUT_FILES "") - foreach(GEN_SCRIPT ${PADDLE_GEN_SCRIPTS}) - get_filename_component(FILE_WE ${GEN_SCRIPT} NAME_WE) - set(OUT_DONE_FILE ${TEST_PADDLE_MODELS}/${FILE_WE}_done.txt) - set(OUT_FILES ${OUT_DONE_FILE} ${OUT_FILES}) - add_custom_command(OUTPUT ${OUT_DONE_FILE} - COMMAND ${PYTHON_EXECUTABLE} - ${CMAKE_CURRENT_SOURCE_DIR}/test_models/gen_wrapper.py - ${GEN_SCRIPT} - ${TEST_PADDLE_MODELS} - ${OUT_DONE_FILE} - DEPENDS ${PADDLE_ALL_SCRIPTS} - ) - endforeach() - add_custom_target(paddlepaddle_test_models DEPENDS ${OUT_FILES}) + set(OUT_FILE ${TEST_PADDLE_MODELS}/generate_done.txt) + add_custom_command(OUTPUT ${OUT_FILE} + COMMAND ${PYTHON_EXECUTABLE} + ${CMAKE_CURRENT_SOURCE_DIR}/test_models/gen_wrapper.py + ${CMAKE_CURRENT_SOURCE_DIR}/test_models/gen_scripts + ${TEST_PADDLE_MODELS} + DEPENDS ${PADDLE_ALL_SCRIPTS} + ) + add_custom_target(paddlepaddle_test_models DEPENDS ${OUT_FILE}) install(DIRECTORY ${TEST_PADDLE_MODELS} DESTINATION tests/${TEST_PADDLE_MODELS_DIRNAME} diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_wrapper.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_wrapper.py index bb982baca18..fc860357ce7 100644 --- a/ngraph/test/frontend/paddlepaddle/test_models/gen_wrapper.py +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_wrapper.py @@ -1,19 +1,25 @@ +import glob import os import subprocess - import sys print(sys.argv) -if len(sys.argv) < 4: - print("Script, output folder and mark file must be specified as arguments") - exit(1) +if len(sys.argv) < 3: + print("Gen folder and output folder must be specified as arguments") + sys.exit(1) -gen_script = sys.argv[1] +gen_folder = sys.argv[1] out_folder = sys.argv[2] -mark_file = sys.argv[3] +mark_file = os.path.join(out_folder, "generate_done.txt") -print("Processing: {} ".format(gen_script)) -subprocess.run([sys.executable, gen_script, out_folder], env=os.environ) +gen_files = glob.glob(os.path.join(gen_folder, '**/generate_*.py'), recursive=True) + +for gen_script in 
gen_files: + print("Processing: {} ".format(gen_script)) + status = subprocess.run([sys.executable, gen_script, out_folder], env=os.environ) + if status.returncode != 0: + print("ERROR: PaddlePaddle model gen script FAILED: {}".format(gen_script)) + sys.exit(1) # Create mark file indicating that script was executed with open(mark_file, "w") as fp: From 6dd14bfe910effa50458ace62c68a672047c847c Mon Sep 17 00:00:00 2001 From: Krzysztof Bruniecki Date: Fri, 3 Sep 2021 11:56:51 +0200 Subject: [PATCH 06/52] [GNA] Fixes for GNA 3.0 library (#7236) * Cherry-picked (PR #7180) (ce21344585dff1fc2e59b34d59e98e4724eae153) from releases/2021/4 branch * Pass compileTarget to am_intel_dnn * Enable tests for GNA lib version prefix 3.0 * Fix conv split transform for 2d cnn tests * Apply review --- .../src/gna_plugin/backend/am_intel_dnn.cpp | 18 ++++++- .../src/gna_plugin/backend/am_intel_dnn.hpp | 5 ++ .../gna_plugin/backend/gna_limitations.cpp | 2 +- .../src/gna_plugin/gna_device.cpp | 34 +++++++++++-- .../src/gna_plugin/gna_device.hpp | 2 + .../src/gna_plugin/gna_plugin.cpp | 8 ++- ...lit_convolution_with_large_buffer_size.cpp | 49 ++++++++++++++----- .../skip_tests_check.hpp | 4 +- 8 files changed, 100 insertions(+), 22 deletions(-) diff --git a/inference-engine/src/gna_plugin/backend/am_intel_dnn.cpp b/inference-engine/src/gna_plugin/backend/am_intel_dnn.cpp index 8b3e0901ba2..322dbd2f9d3 100644 --- a/inference-engine/src/gna_plugin/backend/am_intel_dnn.cpp +++ b/inference-engine/src/gna_plugin/backend/am_intel_dnn.cpp @@ -23,6 +23,7 @@ #include "dnn.hpp" #include "am_intel_dnn.hpp" #include "dnn_types.h" +#include "gna/gna_config.hpp" #include "gna_types.h" #include "gna_limitations.hpp" #include "layers/gna_convolution_layer.hpp" @@ -248,6 +249,16 @@ void GNAPluginNS::backend::AMIntelDNN::InitConvolutional2DComponentPrivate(intel ptr_inputs = &comp.ptr_inputs; ptr_outputs = &comp.ptr_outputs; } + +bool GNAPluginNS::backend::AMIntelDNN::isOperationCnnLegacySpecific(const 
Gna2Operation& op) { + // GNA compile target GNA_TARGET_3_0 does not support pooling window < pooling stride + return op.Type == Gna2OperationTypeConvolution && + op.NumberOfParameters > std::max(PoolStrideParamIdx, PoolWinParamIdx) && + op.Parameters[PoolStrideParamIdx] != nullptr && + op.Parameters[PoolWinParamIdx] != nullptr && + static_cast(op.Parameters[PoolStrideParamIdx])->NumberOfDimensions == 1 && + static_cast(op.Parameters[PoolStrideParamIdx])->Dimensions[0] > static_cast(op.Parameters[PoolWinParamIdx])->Dimensions[0]; +} #endif void GNAPluginNS::backend::AMIntelDNN::InitMaxpoolComponentPrivate(intel_dnn_component_t &comp, @@ -1677,7 +1688,12 @@ void GNAPluginNS::backend::AMIntelDNN::InitGNAStruct(intel_nnet_type_t *ptr_nnet const auto fltStride = fltStrideShape->Dimensions[0]; const auto outFromConv = outputFromConv(inVecCnt, nFltSize, fltStride); // FLAT input matrix, pooled outputs per filter - if (gnaCompileTarget == InferenceEngine::GNAConfigParams::GNA_TARGET_3_0) { + + auto effectiveCompileTarget = gnaCompileTarget; + if (isOperationCnnLegacySpecific(*gnaOperation)) { + effectiveCompileTarget = InferenceEngine::GNAConfigParams::GNA_TARGET_2_0; + } + if (effectiveCompileTarget == InferenceEngine::GNAConfigParams::GNA_TARGET_3_0) { outputTensor.Shape.Dimensions[1] = outputFromPooling(outFromConv, poolWindow->Dimensions[0], poolStride->Dimensions[0]); } else { outputTensor.Shape.Dimensions[1] = outputFromPoolingLegacy(outFromConv, poolStride->Dimensions[0]); diff --git a/inference-engine/src/gna_plugin/backend/am_intel_dnn.hpp b/inference-engine/src/gna_plugin/backend/am_intel_dnn.hpp index 19ca045647f..5099e090188 100644 --- a/inference-engine/src/gna_plugin/backend/am_intel_dnn.hpp +++ b/inference-engine/src/gna_plugin/backend/am_intel_dnn.hpp @@ -10,6 +10,7 @@ #include "dnn_types.h" #include "gna_types.h" +#include "gna/gna_config.hpp" #include "gna_plugin_log.hpp" @@ -153,6 +154,10 @@ public: (void*&)ptr_filters, (void*&)ptr_biases); } + + // 
Checks whether operation is Convolution and its parameters makes it specific to GNA1/GNA2 targets + // It does not guarantee that operation fully compatible to GNA1/GNA2, but for sure is not comaptible with GNA3 target + static bool isOperationCnnLegacySpecific(const Gna2Operation& operation); #endif template diff --git a/inference-engine/src/gna_plugin/backend/gna_limitations.cpp b/inference-engine/src/gna_plugin/backend/gna_limitations.cpp index 6afe55bd043..34fd7534ba6 100644 --- a/inference-engine/src/gna_plugin/backend/gna_limitations.cpp +++ b/inference-engine/src/gna_plugin/backend/gna_limitations.cpp @@ -32,7 +32,7 @@ bool RangeLimit2D::isValid(const uint32_t h, const uint32_t w) const { } std::string RangeLimit2D::GetErrorOrEmpty(const uint32_t h, const uint32_t w) const { - return hLimit.GetErrorOrEmpty(h) + hLimit.GetErrorOrEmpty(w); + return hLimit.GetErrorOrEmpty(h) + wLimit.GetErrorOrEmpty(w); } RangeMultipleLimit::RangeMultipleLimit(RangeLimit rlIn, uint32_t multiplierIn) : RangeLimit(rlIn), multiplier(multiplierIn) { diff --git a/inference-engine/src/gna_plugin/gna_device.cpp b/inference-engine/src/gna_plugin/gna_device.cpp index 85a246ea34f..af561cc0c47 100644 --- a/inference-engine/src/gna_plugin/gna_device.cpp +++ b/inference-engine/src/gna_plugin/gna_device.cpp @@ -24,6 +24,7 @@ #include "gna-api.h" #endif +#include "backend/am_intel_dnn.hpp" #include "gna/gna_config.hpp" #include "gna_plugin_log.hpp" @@ -115,13 +116,26 @@ uint32_t GNADeviceHelper::propagate(const uint32_t requestConfigId, Gna2Accelera return reqId; } +void enforceLegacyCnn(Gna2Operation& operation) { + snprintf( + const_cast(operation.Operands[1]->Layout), + sizeof(operation.Operands[1]->Layout) / sizeof(char), + "GNA1"); +} + void GNADeviceHelper::enforceLegacyCnns(Gna2Model& gnaModel) { for (uint32_t i = 0; i < gnaModel.NumberOfOperations; i++) { if (gnaModel.Operations[i].Type == Gna2OperationTypeConvolution) { - snprintf( - 
const_cast(gnaModel.Operations[i].Operands[1]->Layout), - sizeof(gnaModel.Operations[i].Operands[1]->Layout) / sizeof(char), - "GNA1"); + enforceLegacyCnn(gnaModel.Operations[i]); + } + } +} + +void GNADeviceHelper::enforceLegacyCnnsWhenNeeded(Gna2Model& gnaModel) { + for (uint32_t i = 0; i < gnaModel.NumberOfOperations; i++) { + auto& op = gnaModel.Operations[i]; + if (GNAPluginNS::backend::AMIntelDNN::isOperationCnnLegacySpecific(op)) { + enforceLegacyCnn(op); } } } @@ -132,6 +146,7 @@ uint32_t GNADeviceHelper::createModel(Gna2Model& gnaModel) const { if (enforceLegacyCnnNeeded()) { enforceLegacyCnns(gnaModel); } + enforceLegacyCnnsWhenNeeded(gnaModel); #if GNA_LIB_VER == 2 && defined MODEL_DUMP std::string path = #ifdef _WIN32 @@ -581,3 +596,14 @@ void GNADeviceHelper::getGnaPerfCounters(std::map& retPerfCounters); static std::string GetGnaLibraryVersion(); + std::string getEffectiveGnaCompileTarget() const; private: void open(uint8_t const n_threads); @@ -190,6 +191,7 @@ public: static const std::map , const std::string > operandTypes; static void enforceLegacyCnns(Gna2Model& gnaModel); + static void enforceLegacyCnnsWhenNeeded(Gna2Model& gnaModel); Gna2DeviceVersion parseDeclaredTarget(std::string target, const bool execTarget) const; Gna2DeviceVersion getDefaultTarget() const; Gna2DeviceVersion getTargetDevice(bool execTarget) const; diff --git a/inference-engine/src/gna_plugin/gna_plugin.cpp b/inference-engine/src/gna_plugin/gna_plugin.cpp index fb9f406446e..3f61d3289c7 100644 --- a/inference-engine/src/gna_plugin/gna_plugin.cpp +++ b/inference-engine/src/gna_plugin/gna_plugin.cpp @@ -1032,10 +1032,14 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) { #else nnets.emplace_back(make_shared>(), -1, InferenceEngine::BlobMap()); #endif + std::string effectiveGnaCompileTarget = config.gnaCompileTarget; + if (gnadevice) { + effectiveGnaCompileTarget = gnadevice->getEffectiveGnaCompileTarget(); + } if (!gnaFlags->sw_fp32 && 
!graphCompiler.dnnComponents.components.empty()) { // number of layer gets calculated inside that InitGNAStruct function #if GNA_LIB_VER == 2 - dnn->InitGNAStruct(&std::get<0>(gnaModels.front())->obj, config.gnaCompileTarget); + dnn->InitGNAStruct(&std::get<0>(gnaModels.front())->obj, effectiveGnaCompileTarget); #else dnn->InitGNAStruct(&std::get<0>(nnets.front())->obj); #endif @@ -1046,7 +1050,7 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) { #if GNA_LIB_VER == 2 gnaModels.push_back(std::make_tuple(make_shared>())); // this can be improved by just copy all structures, but we are too lazy - dnn->InitGNAStruct(&std::get<0>(gnaModels.back())->obj, config.gnaCompileTarget); + dnn->InitGNAStruct(&std::get<0>(gnaModels.back())->obj, effectiveGnaCompileTarget); #else nnets.emplace_back(make_shared>(), -1, InferenceEngine::BlobMap()); dnn->InitGNAStruct(&std::get<0>(nnets.back())->obj); diff --git a/inference-engine/src/gna_plugin/transformations/split_convolution_with_large_buffer_size.cpp b/inference-engine/src/gna_plugin/transformations/split_convolution_with_large_buffer_size.cpp index b29cc04dac0..1f757740274 100644 --- a/inference-engine/src/gna_plugin/transformations/split_convolution_with_large_buffer_size.cpp +++ b/inference-engine/src/gna_plugin/transformations/split_convolution_with_large_buffer_size.cpp @@ -13,6 +13,7 @@ #include #include "backend/gna_limitations.hpp" #include "layers/gna_split_layer.hpp" +#include "layers/gna_convolution_layer.hpp" using namespace GNAPluginNS; @@ -20,6 +21,34 @@ NGRAPH_RTTI_DEFINITION(SplitConvolution, "SplitConvolution", 0); NGRAPH_RTTI_DEFINITION(SplitConvolutionWithBias, "SplitConvolutionWithBias", 0); NGRAPH_RTTI_DEFINITION(SplitConvolutionWithFq, "SplitConvolutionWithFq", 0); +// Don't split when convolution is 2D and is not mappable to 1D +static bool shouldSplitCnn(const ngraph::Output& node) { + auto convolution = dynamic_cast(node.get_node()); + IE_ASSERT(convolution != nullptr); + auto& input = 
convolution->get_input_shape(0); + auto& filters = convolution->get_input_shape(1); + uint32_t width = input.back(); + uint32_t in_channels = input.at(1); + if (input.size() >= 4 && filters.size() >= 4) { + uint32_t height = input.at(2); + auto kH = filters.at(2); + auto kW = filters.at(3); + auto sW = convolution->get_strides().at(1); + if (GNAConvolutionLayer::isConv2D(height, width, in_channels, kH, kW) && + !GNAConvolutionLayer::isMappableFrom2DTo1D(height, width, kW, sW)) { + return false; + } + } + return true; +} + +std::shared_ptr getConvForMatcher() { + return ngraph::pattern::wrap_type({ ngraph::pattern::any_input(), + ngraph::pattern::any_input() }, [](const ngraph::Output& convolution) { + return shouldSplitCnn(convolution); + }); +} + static bool Convert(std::shared_ptr conv, std::shared_ptr add, std::shared_ptr bias, @@ -29,9 +58,9 @@ static bool Convert(std::shared_ptr conv, if (input_size <= GNALimitations::bufferMaxSize) { return false; } - - uint32_t width = conv->get_input_shape(0).back(); - uint32_t in_channels = conv->get_input_shape(0).at(1); + auto& input = conv->get_input_shape(0); + uint32_t width = input.back(); + uint32_t in_channels = input.at(1); auto split_sizes = GetAlignedSplitSizes(width, GNALimitations::bufferMaxSize / in_channels); IE_ASSERT(split_sizes.size() > 1); std::vector split_sizes_casted(split_sizes.size()); @@ -41,7 +70,7 @@ static bool Convert(std::shared_ptr conv, /* TODO check if it's NHWC convolution wrapped with transposes or all input dimensions except of width == 1, otherwise this split axis isn't supported */ - const int64_t width_axis = conv->get_input_shape(0).size() - 1; + const int64_t width_axis = input.size() - 1; auto split_node = std::make_shared(conv->input_value(0), ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape({1}), std::vector{width_axis}), ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape({split_sizes_casted.size()}), split_sizes_casted)); @@ -75,9 +104,7 @@ 
static bool Convert(std::shared_ptr conv, SplitConvolution::SplitConvolution() { MATCHER_SCOPE(SplitConvolution); - auto conv = ngraph::pattern::wrap_type({ngraph::pattern::any_input(), - ngraph::pattern::any_input()}); - + auto conv = getConvForMatcher(); ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher &m) { const auto& pattern_map = m.get_pattern_value_map(); return Convert(pattern_map.at(conv).get_node_shared_ptr(), nullptr, nullptr, nullptr); @@ -89,8 +116,7 @@ SplitConvolution::SplitConvolution() { SplitConvolutionWithBias::SplitConvolutionWithBias() { MATCHER_SCOPE(SplitConvolutionWithBias); - auto conv = ngraph::pattern::wrap_type({ngraph::pattern::any_input(), - ngraph::pattern::any_input()}); + auto conv = getConvForMatcher(); auto bias = ngraph::pattern::wrap_type(); auto add = ngraph::pattern::wrap_type({conv, bias}); @@ -106,8 +132,7 @@ SplitConvolutionWithBias::SplitConvolutionWithBias() { SplitConvolutionWithFq::SplitConvolutionWithFq() { MATCHER_SCOPE(SplitConvolutionWithFq); - auto conv = ngraph::pattern::wrap_type({ngraph::pattern::any_input(), - ngraph::pattern::any_input()}); + auto conv = getConvForMatcher(); auto bias = ngraph::pattern::wrap_type(); auto add = ngraph::pattern::wrap_type({conv, bias}); auto conv_output = std::make_shared(ngraph::OutputVector{conv, add}); @@ -128,4 +153,4 @@ SplitConvolutionWithFq::SplitConvolutionWithFq() { auto m = std::make_shared(out_fq, matcher_name); this->register_matcher(m, callback); -} \ No newline at end of file +} diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_check.hpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_check.hpp index 501b7134c60..22a6771ea0d 100644 --- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_check.hpp +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_check.hpp @@ -16,8 +16,8 @@ protected: if 
(std::find(metrics.begin(), metrics.end(), METRIC_KEY(GNA_LIBRARY_FULL_VERSION)) != metrics.end()) { std::string gnaLibVer = ie_core.GetMetric(targetDevice, METRIC_KEY(GNA_LIBRARY_FULL_VERSION)); - if (gnaLibVer.rfind("2.1", 0) != 0) { - GTEST_SKIP() << "Disabled test due to GNA library version being < 2.1" << std::endl; + if (gnaLibVer.rfind("2.1", 0) != 0 && gnaLibVer.rfind("3.0", 0) != 0) { + GTEST_SKIP() << "Disabled test due to GNA library version being not 2.1 or 3.0" << std::endl; } skipTest = false; } From 7e9d98fc620bcb82ad18b26d3c3cce62a221d9cd Mon Sep 17 00:00:00 2001 From: Konstantin Satunin Date: Fri, 3 Sep 2021 13:29:10 +0300 Subject: [PATCH 07/52] Revert "Azure CI: Remove IncrediBuild on Windows (#7085)" (#7358) This reverts commit 1aca3019eca6fa27f94a366a6265220711e2c2e7. --- .ci/azure/windows.yml | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml index 3d0936c5411..e5ec0486f9b 100644 --- a/.ci/azure/windows.yml +++ b/.ci/azure/windows.yml @@ -16,7 +16,7 @@ jobs: timeoutInMinutes: 120 pool: - name: WIN_VMSS_VENV_F16S_WU2 + name: WIN_VMSS_VENV_F8S_WU2 variables: system.debug: true @@ -34,6 +34,8 @@ jobs: INSTALL_DIR: $(WORK_DIR)\install_pkg INSTALL_TEST_DIR: $(INSTALL_DIR)\tests SETUPVARS: $(INSTALL_DIR)\bin\setupvars.bat + IB_DIR: C:\Program Files (x86)\IncrediBuild + IB_TESTCONSOLE: $(IB_DIR)\IBTestConsole.exe steps: - script: | @@ -57,6 +59,12 @@ jobs: rd /Q /S $(BUILD_SAMPLES_DIR) & mkdir $(BUILD_SAMPLES_DIR) displayName: 'Make dir' + - script: | + certutil -urlcache -split -f https://openvinoweb.z5.web.core.windows.net/incredibuild/install_ib_console.bat install_ib_console.bat + call install_ib_console.bat + workingDirectory: $(WORK_DIR) + displayName: 'Install IncrediBuild' + - checkout: self clean: true lfs: false @@ -101,7 +109,9 @@ jobs: - script: dir $(REPO_DIR)\inference-engine\temp\ /s displayName: 'List temp SDKs' - - script: call 
"$(MSVS_VARS_PATH)" && $(WORK_DIR)\ninja-win\ninja + - script: | + set PATH=$(WORK_DIR)\ninja-win;%PATH% + call "$(MSVS_VARS_PATH)" && "C:\Program Files (x86)\IncrediBuild\BuildConsole.exe" /COMMAND="ninja" workingDirectory: $(BUILD_DIR) displayName: 'Build Win' @@ -143,8 +153,10 @@ jobs: displayName: 'PaddlePaddle Frontend UT' continueOnError: false - - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\InferenceEngineUnitTests.exe --gtest_output=xml:TEST-InferenceEngineUnitTests.xml - displayName: 'IE UT old' + - script: | + set PATH=$(IB_DIR);%PATH% + call $(SETUPVARS) && "$(IB_TESTCONSOLE)" $(INSTALL_TEST_DIR)\InferenceEngineUnitTests.exe --gtest_output=xml:TEST-InferenceEngineUnitTests-IB.xml + displayName: 'IE UT old - IB' continueOnError: false - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml @@ -175,8 +187,11 @@ jobs: displayName: 'TEMPLATE FuncTests' continueOnError: false - - script: $(SETUPVARS) && $(INSTALL_TEST_DIR)\cpuFuncTests.exe --gtest_filter=*smoke* --gtest_output=xml:TEST-cpuFuncTests.xml - displayName: 'CPU FuncTests' + # call $(SETUPVARS) && $(INSTALL_TEST_DIR)\cpuFuncTests.exe --gtest_filter=*smoke* --gtest_output=xml:TEST-cpuFuncTests.xml + - script: | + set PATH=$(IB_DIR);%PATH% + call $(SETUPVARS) && "$(IB_TESTCONSOLE)" $(INSTALL_TEST_DIR)\cpuFuncTests.exe --gtest_filter=*smoke*:-*CompareWithRefs/base_size=16_pre_nms_topn=100_post_nms_topn=100_nms_thresh=0.7_feat_stride=1_min_size=1_ratio*:*smoke_GRUSequenceCommonZeroClip/GRUSequenceTest.CompareWithRefs/mode=CONVERT_TO_TI_MAX_SEQ_LEN_CONST_seq_lengths* --gtest_output=xml:TEST-cpuFuncTests-IB.xml /testlevel=24 + displayName: 'CPU FuncTests - IB' continueOnError: false - script: | @@ -198,3 +213,8 @@ jobs: buildPlatform: 'x64' # Optional buildConfiguration: 'Windows' # Optional #publishRunAttachments: true # Optional + + - script: echo Stop IncrediBuild_Agent && net stop IncrediBuild_Agent + displayName: Stop IncrediBuild + continueOnError: 
true + enabled: false From 63cb9899550e9010018e47ba9a2ba7104c9b779b Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 3 Sep 2021 14:03:10 +0300 Subject: [PATCH 08/52] Fixed leftovers after PR 7336 (#7355) --- ngraph/core/src/op/abs.cpp | 2 +- ngraph/core/src/op/acos.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ngraph/core/src/op/abs.cpp b/ngraph/core/src/op/abs.cpp index d863b224b10..2dca43fd013 100644 --- a/ngraph/core/src/op/abs.cpp +++ b/ngraph/core/src/op/abs.cpp @@ -10,7 +10,7 @@ #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/abs.hpp" -NGRAPH_RTTI_DEFINITION(ov::op::v0::Abs, "Abs", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::Abs, "Abs", 0, util::UnaryElementwiseArithmetic); ov::op::v0::Abs::Abs(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/acos.cpp b/ngraph/core/src/op/acos.cpp index 53b56b6762c..fc668804eaa 100644 --- a/ngraph/core/src/op/acos.cpp +++ b/ngraph/core/src/op/acos.cpp @@ -18,7 +18,7 @@ #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/acos.hpp" -NGRAPH_RTTI_DEFINITION(ov::op::v0::Acos, "Acos", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::Acos, "Acos", 0, util::UnaryElementwiseArithmetic); ov::op::v0::Acos::Acos(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); From f68f4236ec40e24a591b43f1cbf6b820e2f3a958 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 3 Sep 2021 17:28:09 +0300 Subject: [PATCH 09/52] All operations from B and C symbols moved to ov namespace (#7338) * Moved ngraph::Node to ov namespace * Fixed code style * Fixed VPU * Fixed GNA * Fixed tests * Added aliases for backward compatibility * Fix clDNN * Try to fix build * Fixed comment * Renamed RTTI macros * Moved op utils to ov namespace * Fixed ngraph library build * Fixed unit-tests * Changed src folder * Fixed recurrent_sequence * Changed low latency * Fixed serialize 
* Fixed ieFuncTests * Try to fix windows * Remove custom operator<< from tests * Fixed build * Moved operations from A to ov namespace * Moved operations from B and C to ov namespace --- ngraph/core/include/ngraph/op/batch_norm.hpp | 80 +------ .../core/include/ngraph/op/batch_to_space.hpp | 36 +-- .../include/ngraph/op/binary_convolution.hpp | 133 +---------- ngraph/core/include/ngraph/op/broadcast.hpp | 116 +-------- ngraph/core/include/ngraph/op/bucketize.hpp | 45 +--- ngraph/core/include/ngraph/op/ceiling.hpp | 20 +- ngraph/core/include/ngraph/op/clamp.hpp | 38 +-- ngraph/core/include/ngraph/op/concat.hpp | 51 +--- ngraph/core/include/ngraph/op/convert.hpp | 39 +-- .../core/include/ngraph/op/convert_like.hpp | 24 +- ngraph/core/include/ngraph/op/convolution.hpp | 218 +---------------- ngraph/core/include/ngraph/op/cos.hpp | 19 +- ngraph/core/include/ngraph/op/cosh.hpp | 19 +- .../include/ngraph/op/ctc_greedy_decoder.hpp | 25 +- .../ngraph/op/ctc_greedy_decoder_seq_len.hpp | 88 +------ ngraph/core/include/ngraph/op/ctc_loss.hpp | 59 +---- ngraph/core/include/ngraph/op/cum_sum.hpp | 47 +--- .../core/include/openvino/op/batch_norm.hpp | 94 ++++++++ .../include/openvino/op/batch_to_space.hpp | 48 ++++ .../openvino/op/binary_convolution.hpp | 143 +++++++++++ ngraph/core/include/openvino/op/broadcast.hpp | 133 +++++++++++ ngraph/core/include/openvino/op/bucketize.hpp | 57 +++++ ngraph/core/include/openvino/op/ceiling.hpp | 32 +++ ngraph/core/include/openvino/op/clamp.hpp | 50 ++++ ngraph/core/include/openvino/op/concat.hpp | 65 +++++ ngraph/core/include/openvino/op/convert.hpp | 52 ++++ .../core/include/openvino/op/convert_like.hpp | 33 +++ .../core/include/openvino/op/convolution.hpp | 225 ++++++++++++++++++ ngraph/core/include/openvino/op/cos.hpp | 31 +++ ngraph/core/include/openvino/op/cosh.hpp | 31 +++ .../openvino/op/ctc_greedy_decoder.hpp | 37 +++ .../op/ctc_greedy_decoder_seq_len.hpp | 100 ++++++++ ngraph/core/include/openvino/op/ctc_loss.hpp | 71 ++++++ 
ngraph/core/include/openvino/op/cum_sum.hpp | 60 +++++ ngraph/core/src/op/batch_norm.cpp | 4 +- ngraph/core/src/op/batch_to_space.cpp | 2 +- ngraph/core/src/op/binary_convolution.cpp | 67 +++--- ngraph/core/src/op/broadcast.cpp | 4 +- ngraph/core/src/op/bucketize.cpp | 2 +- ngraph/core/src/op/ceiling.cpp | 2 +- ngraph/core/src/op/clamp.cpp | 2 +- ngraph/core/src/op/concat.cpp | 2 +- ngraph/core/src/op/convert.cpp | 2 +- ngraph/core/src/op/convert_like.cpp | 4 +- ngraph/core/src/op/convolution.cpp | 6 +- ngraph/core/src/op/cos.cpp | 2 +- ngraph/core/src/op/cosh.cpp | 2 +- ngraph/core/src/op/ctc_greedy_decoder.cpp | 2 +- .../src/op/ctc_greedy_decoder_seq_len.cpp | 2 +- ngraph/core/src/op/ctc_loss.cpp | 2 +- ngraph/core/src/op/cum_sum.cpp | 4 +- 51 files changed, 1357 insertions(+), 1073 deletions(-) create mode 100644 ngraph/core/include/openvino/op/batch_norm.hpp create mode 100644 ngraph/core/include/openvino/op/batch_to_space.hpp create mode 100644 ngraph/core/include/openvino/op/binary_convolution.hpp create mode 100644 ngraph/core/include/openvino/op/broadcast.hpp create mode 100644 ngraph/core/include/openvino/op/bucketize.hpp create mode 100644 ngraph/core/include/openvino/op/ceiling.hpp create mode 100644 ngraph/core/include/openvino/op/clamp.hpp create mode 100644 ngraph/core/include/openvino/op/concat.hpp create mode 100644 ngraph/core/include/openvino/op/convert.hpp create mode 100644 ngraph/core/include/openvino/op/convert_like.hpp create mode 100644 ngraph/core/include/openvino/op/convolution.hpp create mode 100644 ngraph/core/include/openvino/op/cos.hpp create mode 100644 ngraph/core/include/openvino/op/cosh.hpp create mode 100644 ngraph/core/include/openvino/op/ctc_greedy_decoder.hpp create mode 100644 ngraph/core/include/openvino/op/ctc_greedy_decoder_seq_len.hpp create mode 100644 ngraph/core/include/openvino/op/ctc_loss.hpp create mode 100644 ngraph/core/include/openvino/op/cum_sum.hpp diff --git a/ngraph/core/include/ngraph/op/batch_norm.hpp 
b/ngraph/core/include/ngraph/op/batch_norm.hpp index 20f8ebc30de..749d7fdeb6c 100644 --- a/ngraph/core/include/ngraph/op/batch_norm.hpp +++ b/ngraph/core/include/ngraph/op/batch_norm.hpp @@ -6,91 +6,17 @@ #include -#include "ngraph/deprecated.hpp" #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/batch_norm.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API BatchNormInference : public Op { -public: - NGRAPH_RTTI_DECLARATION; - BatchNormInference() = default; - /// \param input [., C, ...] - /// \param gamma gamma scaling for normalized value. [C] - /// \param beta bias added to the scaled normalized value [C] - /// \param mean value for mean normalization [C] - /// \param variance value for variance normalization [C] - /// \param epsilon Avoids divsion by 0 if input has 0 variance - BatchNormInference(const Output& input, - const Output& gamma, - const Output& beta, - const Output& mean, - const Output& variance, - double epsilon); - - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - double get_eps_value() const { - return m_epsilon; - } - void set_eps_value(double epsilon) { - m_epsilon = epsilon; - } - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - -private: - static constexpr size_t INPUT_GAMMA = 0; - static constexpr size_t INPUT_BETA = 1; - static constexpr size_t INPUT_DATA = 2; - static constexpr size_t INPUT_MEAN = 3; - static constexpr size_t INPUT_VARIANCE = 4; - - double m_epsilon; -}; +using ov::op::v0::BatchNormInference; } // namespace v0 namespace v5 { -class NGRAPH_API BatchNormInference : public Op { -public: - NGRAPH_RTTI_DECLARATION; - BatchNormInference() = default; - /// \param input [., C, ...] - /// \param gamma gamma scaling for normalized value. 
[C] - /// \param beta bias added to the scaled normalized value [C] - /// \param mean value for mean normalization [C] - /// \param variance value for variance normalization [C] - /// \param epsilon Avoids divsion by 0 if input has 0 variance - BatchNormInference(const Output& input, - const Output& gamma, - const Output& beta, - const Output& mean, - const Output& variance, - double epsilon); - - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - double get_eps_value() const { - return m_epsilon; - } - void set_eps_value(double epsilon) { - m_epsilon = epsilon; - } - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - -private: - static constexpr size_t INPUT_DATA = 0; - static constexpr size_t INPUT_GAMMA = 1; - static constexpr size_t INPUT_BETA = 2; - static constexpr size_t INPUT_MEAN = 3; - static constexpr size_t INPUT_VARIANCE = 4; - - double m_epsilon; -}; +using ov::op::v5::BatchNormInference; } // namespace v5 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/batch_to_space.hpp b/ngraph/core/include/ngraph/op/batch_to_space.hpp index 836ccbc4c9d..7f18a376ac6 100644 --- a/ngraph/core/include/ngraph/op/batch_to_space.hpp +++ b/ngraph/core/include/ngraph/op/batch_to_space.hpp @@ -6,44 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/batch_to_space.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief BatchToSpace permutes data from the batch dimension of the data tensor into -/// spatial dimensions. -/// -/// \note Values from the batch dimension are moved in spatial blocks dimensions. -/// -/// Output node produces a tensor with shape: -/// `[batch / (block_shape[0] * block_shape[1] * ... 
* block_shape[N - 1]), -/// D_1 * block_shape[1] - crops_begin[1] - crops_end[1], -/// D_2 * block_shape[2] - crops_begin[2] - crops_end[2], ..., -/// D_{N - 1} * block_shape[N - 1] - crops_begin[N - 1] - crops_end[N - 1]` -/// of the same type as `data` input. -class NGRAPH_API BatchToSpace : public Op { -public: - NGRAPH_RTTI_DECLARATION; - BatchToSpace() = default; - /// \brief Constructs a BatchToSpace operation. - /// - /// \param data Node producing the data tensor - /// \param block_shape The sizes of the block of values to be moved - /// \param crops_begin Specifies the amount to crop from the beginning along each - /// axis of `data` input - /// \param crops_end Specifies the amount to crop from the ending along each axis of - /// `data` input. - BatchToSpace(const Output& data, - const Output& block_shape, - const Output& crops_begin, - const Output& crops_end); - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - - void validate_and_infer_types() override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor& visitor) override; -}; +using ov::op::v1::BatchToSpace; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/binary_convolution.hpp b/ngraph/core/include/ngraph/op/binary_convolution.hpp index 14f7ccf6c05..c795309fa0d 100644 --- a/ngraph/core/include/ngraph/op/binary_convolution.hpp +++ b/ngraph/core/include/ngraph/op/binary_convolution.hpp @@ -7,141 +7,12 @@ #include "ngraph/coordinate_diff.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/binary_convolution.hpp" namespace ngraph { namespace op { namespace v1 { -class NGRAPH_API BinaryConvolution : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - enum class BinaryConvolutionMode { - // Interpret input data and kernel values: 0 as -1, 1 as 1 - 
XNOR_POPCOUNT - }; - - /// \brief Constructs a binary convolution operation. - BinaryConvolution() = default; - /// \brief Constructs a binary convolution operation. - /// \param data The node producing the input data batch tensor. - /// \param kernel The node producing the filters tensor. - /// \param strides The strides. - /// \param pads_begin The beginning of padding shape. - /// \param pads_end The end of padding shape. - /// \param dilations The dilations. - /// \param mode Defines how input tensor 0/1 values and weights 0/1 are interpreted. - /// \param pad_value Floating-point value used to fill pad area. - /// \param auto_pad The pad type for automatically computing padding sizes. - /// - /// Output `[N, C_OUT, R1, ... Rf]` - BinaryConvolution(const Output& data, - const Output& kernel, - const Strides& strides, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const Strides& dilations, - BinaryConvolutionMode mode, - float pad_value, - const PadType& auto_pad = PadType::EXPLICIT); - - BinaryConvolution(const Output& data, - const Output& kernel, - const Strides& strides, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const Strides& dilations, - const std::string& mode, - float pad_value, - const PadType& auto_pad = PadType::EXPLICIT); - - void validate_and_infer_types() override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The strides. - const Strides& get_strides() const { - return m_strides; - } - void set_strides(const Strides& strides) { - m_strides = strides; - } - /// \return The dilations. - const Strides& get_dilations() const { - return m_dilations; - } - void set_dilations(const Strides& dilations) { - m_dilations = dilations; - } - /// \return The padding-below sizes (possibly negative). 
- const CoordinateDiff& get_pads_begin() const { - return m_pads_begin; - } - void set_pads_begin(const CoordinateDiff& pads_begin) { - m_pads_begin = pads_begin; - } - /// \return The padding-above sizes (possibly negative). - const CoordinateDiff& get_pads_end() const { - return m_pads_end; - } - void set_adding_above(const CoordinateDiff& pads_end) { - m_pads_end = pads_end; - } - /// \return The pad type for convolution. - const PadType& get_auto_pad() const { - return m_auto_pad; - } - void set_auto_pad(const PadType& auto_pad) { - m_auto_pad = auto_pad; - } - /// \return The mode of convolution. - const BinaryConvolutionMode& get_mode() const { - return m_mode; - } - void set_mode(const BinaryConvolutionMode& mode) { - m_mode = mode; - } - /// \return The pad value. - float get_pad_value() const { - return m_pad_value; - } - void set_pad_value(float pad_value) { - m_pad_value = pad_value; - } - -protected: - BinaryConvolutionMode mode_from_string(const std::string& mode) const; - Strides m_strides; - Strides m_dilations; - CoordinateDiff m_pads_begin; - CoordinateDiff m_pads_end; - BinaryConvolutionMode m_mode; - float m_pad_value; - PadType m_auto_pad; -}; +using ov::op::v1::BinaryConvolution; } // namespace v1 } // namespace op - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v1::BinaryConvolution::BinaryConvolutionMode& type); - } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v1::BinaryConvolution::BinaryConvolutionMode& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", - 0}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/broadcast.hpp b/ngraph/core/include/ngraph/op/broadcast.hpp index 5637ea720a0..34eef26e703 100644 --- 
a/ngraph/core/include/ngraph/op/broadcast.hpp +++ b/ngraph/core/include/ngraph/op/broadcast.hpp @@ -8,126 +8,16 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" #include "ngraph/op/util/broadcast_base.hpp" +#include "openvino/op/broadcast.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief Operation which "adds" axes to an input tensor, replicating elements from the -/// input as needed along the new axes. -class NGRAPH_API Broadcast : public util::BroadcastBase { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a broadcast operation. - Broadcast() = default; - /// \brief Constructs a broadcast operation. - /// - /// \param arg The input tensor to be broadcast. - /// \param target_shape The shape of the output tensor. - /// \param axes_mapping The axis positions (0-based) in the result that correspond - /// to input axes. 'Arg' tensor is broadcast along the - /// remaining axes. - /// E.g., Input Shape - [3, 4], Target Shape - [3, 5, 4, 4] - /// axes_mapping - [0, 2] => Broadcast along axes 1 and 3. - /// axes_mapping - [0, 3] => Broadcast along axes 1 and 2. - /// \param broadcast_spec Broadcast specification to use for determining broadcast - /// axes. 'axes_mapping' should not be provided if mode other - /// than explicit (none) is used. - Broadcast(const Output& arg, - const Output& target_shape, - const Output& axes_mapping, - const BroadcastModeSpec& broadcast_spec = BroadcastType::EXPLICIT); - - /// \brief Constructs a broadcast operation. - /// - /// \param arg The input tensor to be broadcast. - /// \param target_shape The shape of the output tensor. 
- /// \param broadcast_spec Broadcast specification to use for determining broadcast - /// axes - Broadcast(const Output& arg, - const Output& target_shape, - const BroadcastModeSpec& broadcast_spec = BroadcastType::NUMPY); - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - // \return Broadcast Specification. - const BroadcastModeSpec& get_broadcast_spec() const { - return m_mode; - } - void set_broadcast_spec(const BroadcastModeSpec& broadcast_spec) { - m_mode = broadcast_spec; - } - - void validate_and_infer_types() override; - - /// \return true and the AxisSet if broadcast axes can be fully determined. - std::pair get_broadcast_axes() const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - bool broadcast_evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const; -}; +using ov::op::v3::Broadcast; } // namespace v3 namespace v1 { -/// \brief Operation which "adds" axes to an input tensor, replicating elements from the -/// input as needed along the new axes. -class NGRAPH_API Broadcast : public util::BroadcastBase { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a broadcast operation. - Broadcast() = default; - /// \brief Constructs a broadcast operation. - /// - /// \param arg The input tensor to be broadcast. - /// \param target_shape The shape of the output tensor. - /// \param axes_mapping The axis positions (0-based) in the result that correspond - /// to input axes. 'Arg' tensor is broadcast along the - /// remaining axes. - /// E.g., Input Shape - [3, 4], Target Shape - [3, 5, 4, 4] - /// axes_mapping - [0, 2] => Broadcast along axes 1 and 3. - /// axes_mapping - [0, 3] => Broadcast along axes 1 and 2. - /// \param broadcast_spec Broadcast specification to use for determining broadcast - /// axes. 
'axes_mapping' is ignored if broadcast_spec is not - /// NONE - Broadcast(const Output& arg, - const Output& target_shape, - const Output& axes_mapping, - const AutoBroadcastSpec& broadcast_spec = AutoBroadcastSpec()); - - /// \brief Constructs a broadcast operation. - /// - /// \param arg The input tensor to be broadcast. - /// \param target_shape The shape of the output tensor. - /// \param broadcast_spec Broadcast specification to use for determining broadcast - /// axes - Broadcast(const Output& arg, - const Output& target_shape, - const AutoBroadcastSpec& broadcast_spec = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return Broadcast Specification. - const AutoBroadcastSpec& get_broadcast_spec() const { - return m_broadcast_spec; - } - void set_broadcast_spec(const AutoBroadcastSpec& broadcast_spec) { - m_broadcast_spec = broadcast_spec; - } - - void validate_and_infer_types() override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - AutoBroadcastSpec m_broadcast_spec; -}; +using ov::op::v1::Broadcast; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/bucketize.hpp b/ngraph/core/include/ngraph/op/bucketize.hpp index 354fe8733bd..693e3f44d4a 100644 --- a/ngraph/core/include/ngraph/op/bucketize.hpp +++ b/ngraph/core/include/ngraph/op/bucketize.hpp @@ -5,53 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/bucketize.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief Operation that bucketizes the input based on boundaries -class NGRAPH_API Bucketize : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - Bucketize() = default; - /// \brief Constructs a Bucketize node - - /// \param data Input data to bucketize - /// 
\param buckets 1-D of sorted unique boundaries for buckets - /// \param output_type Output tensor type, "i64" or "i32", defaults to i64 - /// \param with_right_bound indicates whether bucket includes the right or left - /// edge of interval. default true = includes right edge - Bucketize(const Output& data, - const Output& buckets, - const element::Type output_type = element::i64, - const bool with_right_bound = true); - - virtual void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; - - element::Type get_output_type() const { - return m_output_type; - } - void set_output_type(element::Type output_type) { - m_output_type = output_type; - } - // Overload collision with method on Node - using Node::set_output_type; - - bool get_with_right_bound() const { - return m_with_right_bound; - } - void set_with_right_bound(bool with_right_bound) { - m_with_right_bound = with_right_bound; - } - -private: - element::Type m_output_type; - bool m_with_right_bound; -}; +using ov::op::v3::Bucketize; } // namespace v3 using v3::Bucketize; } // namespace op diff --git a/ngraph/core/include/ngraph/op/ceiling.hpp b/ngraph/core/include/ngraph/op/ceiling.hpp index eac8cde6f30..7c015d140f1 100644 --- a/ngraph/core/include/ngraph/op/ceiling.hpp +++ b/ngraph/core/include/ngraph/op/ceiling.hpp @@ -5,28 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/ceiling.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise ceiling operation. -class NGRAPH_API Ceiling : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a ceiling operation. - Ceiling() = default; - /// \brief Constructs a ceiling operation. - /// - /// \param arg Node that produces the input tensor. 
- Ceiling(const Output& arg); - - bool visit_attributes(AttributeVisitor&) override { - return true; - } - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Ceiling; } // namespace v0 using v0::Ceiling; } // namespace op diff --git a/ngraph/core/include/ngraph/op/clamp.hpp b/ngraph/core/include/ngraph/op/clamp.hpp index c22829fadbb..f288956008e 100644 --- a/ngraph/core/include/ngraph/op/clamp.hpp +++ b/ngraph/core/include/ngraph/op/clamp.hpp @@ -6,46 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/clamp.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Performs a clipping operation on all elements of the input node -/// -/// All input values that are outside of the range are set to 'min' or 'max' -/// depending on which side of the range they are. The values that fall into -/// this range remain unchanged. -class NGRAPH_API Clamp : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - - Clamp(); - /// \brief Constructs a Clamp node. 
- /// - /// \param data - Node producing the input tensor - /// \param min - the lower bound of the range - /// \param max - the upper bound of the range - Clamp(const Output& data, const double min, const double max); - - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - double get_min() const { - return m_min; - } - double get_max() const { - return m_max; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - double m_min; - double m_max; -}; +using ov::op::v0::Clamp; } // namespace v0 using v0::Clamp; } // namespace op diff --git a/ngraph/core/include/ngraph/op/concat.hpp b/ngraph/core/include/ngraph/op/concat.hpp index 80e941be4f8..a597fdf6035 100644 --- a/ngraph/core/include/ngraph/op/concat.hpp +++ b/ngraph/core/include/ngraph/op/concat.hpp @@ -7,59 +7,12 @@ #include #include "ngraph/op/op.hpp" +#include "openvino/op/concat.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Concatenation operation. -class NGRAPH_API Concat : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a concatenation operation. - Concat() = default; - /// \brief Constructs a concatenation operation. - /// - /// \param args The outputs producing the input tensors. - /// \param axis The axis along which to concatenate the input tensors. - Concat(const OutputVector& args, int64_t axis); - - /// \brief Constructs a concatenation operation. - /// - /// \param args The nodes producing the input tensors. - /// \param axis The axis along which to concatenate the input tensors. 
- Concat(const NodeVector& args, int64_t axis); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The concatenation axis. - int64_t get_concatenation_axis() const { - return m_concat_axis; - } - void set_concatenation_axis(int64_t concatenation_axis) { - m_concat_axis = concatenation_axis; - } - /// \return The concatenation axis. - int64_t get_axis() const { - return m_axis; - } - void set_axis(int64_t axis) { - m_axis = axis; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& output_values) const override; - bool evaluate_upper(const HostTensorVector& output_values) const override; - -protected: - /// \ brief m_axis stores default value for all iterations - int64_t m_axis; - /// \brief m_concat_axis stores m_axis plus the number of rank for each iteration - int64_t m_concat_axis = -1; -}; +using ov::op::v0::Concat; } // namespace v0 using v0::Concat; } // namespace op diff --git a/ngraph/core/include/ngraph/op/convert.hpp b/ngraph/core/include/ngraph/op/convert.hpp index db29280bf62..cc5181da8ee 100644 --- a/ngraph/core/include/ngraph/op/convert.hpp +++ b/ngraph/core/include/ngraph/op/convert.hpp @@ -6,47 +6,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/runtime/host_tensor.hpp" +#include "openvino/op/convert.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise type conversion operation. -class NGRAPH_API Convert : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a conversion operation. - Convert() = default; - /// \brief Constructs a conversion operation. - /// - /// \param arg Node that produces the input tensor. - /// \param destination_type Element type for the output tensor. 
- Convert(const Output& arg, const ngraph::element::Type& destination_type); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - const element::Type& get_destination_type() const { - return m_destination_type; - } - void set_destination_type(const element::Type& destination_type) { - m_destination_type = destination_type; - } - const element::Type& get_convert_element_type() const { - return m_destination_type; - } - void set_convert_element_type(const element::Type& destination_type) { - m_destination_type = destination_type; - } - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& outputs) const override; - bool evaluate_upper(const HostTensorVector& outputs) const override; - -protected: - ngraph::element::Type m_destination_type; -}; +using ov::op::v0::Convert; } // namespace v0 using v0::Convert; } // namespace op diff --git a/ngraph/core/include/ngraph/op/convert_like.hpp b/ngraph/core/include/ngraph/op/convert_like.hpp index 068abf4d5db..47f20f87646 100644 --- a/ngraph/core/include/ngraph/op/convert_like.hpp +++ b/ngraph/core/include/ngraph/op/convert_like.hpp @@ -5,32 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/convert_like.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise type conversion operation. -class NGRAPH_API ConvertLike : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a conversion operation. - ConvertLike() = default; - /// \brief Constructs a conversion operation. - /// \param data Node that produces the input tensor. - /// \param like Node which provides the target type information for the conversion. 
- ConvertLike(const Output& data, const Output& like); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool constant_fold(OutputVector& output_values, const OutputVector& input_values) override; -}; - +using ov::op::v1::ConvertLike; } // namespace v1 - } // namespace op - } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/convolution.hpp b/ngraph/core/include/ngraph/op/convolution.hpp index 1db18078f66..e974e695e87 100644 --- a/ngraph/core/include/ngraph/op/convolution.hpp +++ b/ngraph/core/include/ngraph/op/convolution.hpp @@ -7,219 +7,13 @@ #include "ngraph/coordinate_diff.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/convolution.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Batched convolution operation, with optional window dilation and stride. -/// -class NGRAPH_API Convolution : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a batched convolution operation. - Convolution() = default; - /// \brief Constructs a batched convolution operation. - /// - /// \param data_batch The node producing the input data batch tensor.
- /// `[N, C_IN, D1, ... Df]` - /// \param filters The node producing the filters tensor.
- /// `[C_OUT, C_IN, F1, ... Ff]` - /// \param strides The strides.
- /// `[f]` - /// \param dilations The dilations.
- /// `[f]` - /// \param pads_begin The beginning of padding shape.
- /// `[f]` - /// \param pads_end The end of padding shape.
- /// `[f]` - /// \param auto_pad The pad type for automatically computing padding sizes.
- /// `[f]` - /// - /// Output `[N, C_OUT, R1, ... Rf]` - /// - Convolution(const Output& data_batch, - const Output& filters, - const Strides& strides, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const Strides& dilations, - const PadType& auto_pad = PadType::EXPLICIT); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The strides. - const Strides& get_strides() const { - return m_strides; - } - void set_strides(const Strides& strides) { - m_strides = strides; - } - /// \return The dilations. - const Strides& get_dilations() const { - return m_dilations; - } - void set_dilations(const Strides& dilations) { - m_dilations = dilations; - } - /// \return The padding-below sizes (possibly negative). - const CoordinateDiff& get_pads_begin() const { - return m_pads_begin; - } - void set_pads_begin(const CoordinateDiff& pads_begin) { - m_pads_begin = pads_begin; - } - /// \return The padding-above sizes (possibly negative). - const CoordinateDiff& get_pads_end() const { - return m_pads_end; - } - void set_adding_above(const CoordinateDiff& pads_end) { - m_pads_end = pads_end; - } - /// \return The pad type for convolution. - const PadType& get_auto_pad() const { - return m_auto_pad; - } - void set_auto_pad(const PadType& auto_pad) { - m_auto_pad = auto_pad; - } - /// \return The default value for Convolution. - NGRAPH_SUPPRESS_DEPRECATED_START - virtual std::shared_ptr get_default_value() const override; - NGRAPH_SUPPRESS_DEPRECATED_END - -protected: - Strides m_strides; - Strides m_dilations; - CoordinateDiff m_pads_begin; - CoordinateDiff m_pads_end; - PadType m_auto_pad; -}; - -/// \brief Data batch backprop for batched convolution operation. 
-class NGRAPH_API ConvolutionBackpropData : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a batched-convolution data batch-backprop operation. - ConvolutionBackpropData() = default; - // clang-format off - // - // \brief Constructs a batched-convolution data batch-backprop operation. - // - // \param data The node producing data from forward-prop. Shape: [N, - // C_INPUT, X1, ..., XD]. - // \param filters The node producing the filter from forward-prop. Shape: - // [C_INPUT, C_OUTPUT, K_D, ..., K_1] - // \param output_shape The shape of the data batch from forward-prop. It's size - // should be equal to number of data spatial dimensions. - // \param strides The strides from forward-prop. - // \param pads_begin The padding-below sizes from forward-prop. - // \param pads_end The padding-above sizes from forward-prop. - // \param dilations The dilations from forward-prop. - // \param auto_pad The pad type for automatically computing padding sizes. - // \param output_padding The output padding adds additional amount of paddings per - // each spatial axis in the output tensor. clang-format on - // - ConvolutionBackpropData(const Output& data, - const Output& filters, - const Output& output_shape, - const Strides& strides, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const Strides& dilations, - const PadType& auto_pad = PadType::EXPLICIT, - const CoordinateDiff& output_padding = {}); - - // clang-format off - // - // \brief Constructs a batched-convolution data batch-backprop operation. - // - // \param data The node producing data from forward-prop. Shape: [N, - // C_INPUT, X1, ..., XD]. - // \param filters The node producing the filter from forward-prop. Shape: - // [C_INPUT, C_OUTPUT, K_D, ..., K_1] - // \param strides The strides from forward-prop. - // \param pads_begin The padding-below sizes from forward-prop. - // \param pads_end The padding-above sizes from forward-prop. 
- // \param dilations The dilations from forward-prop. - // \param auto_pad The pad type for automatically computing padding sizes. - // \param output_padding The output padding adds additional amount of paddings per - // each spatial axis in the output tensor. clang-format on - // - ConvolutionBackpropData(const Output& data, - const Output& filters, - const Strides& strides, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const Strides& dilations, - const PadType& auto_pad = PadType::EXPLICIT, - const CoordinateDiff& output_padding = {}); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - virtual bool is_dynamic() const override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The output spatial dimensions shape. - const PartialShape get_output_shape() const; - void set_output_shape(const Shape& output_shape); - /// \return The strides from the forward prop. - const Strides& get_strides() const { return m_strides; } - void set_strides(const Strides& strides) { m_strides = strides; } - /// \return The dilations from the forward prop. - const Strides& get_dilations() const { return m_dilations; } - void set_dilations(const Strides& dilations) { m_dilations = dilations; } - /// \return The padding-below sizes (possibly negative) from the forward prop. - const CoordinateDiff& get_pads_begin() const { return m_pads_begin; } - void set_pads_begin(const CoordinateDiff& pads_begin) { m_pads_begin = pads_begin; } - /// \return The padding-above sizes (possibly negative) from the forward prop. - const CoordinateDiff& get_pads_end() const { return m_pads_end; } - void set_pads_end(const CoordinateDiff& pads_end) { m_pads_end = pads_end; } - /// \return The auto pad. 
- const PadType& get_auto_pad() const { return m_auto_pad; } - void set_auto_pad(const PadType& auto_pad) { m_auto_pad = auto_pad; } - /// \return The output padding. - const CoordinateDiff& get_output_padding() const { return m_output_padding; } - void set_output_padding(const CoordinateDiff& output_padding) - { - m_output_padding = output_padding; - } - /// \brief Calculates output spatial features size. - /// - /// \param[in] input_data_shape The input data partial shape - /// \param[in] filters_shape The filters partial shape - /// \param[in] strides The strides values. - /// \param[in] dilations The dilations values. - /// \param[in] pads_begin The paddings at the beginning of axis. - /// \param[in] pads_end The paddings at the end of axis. - /// \param[in] output_padding The output padding values. - /// \param output_spatial_shape The placeholder for computed output spatial partial - /// shape. - /// - void - infer_conv_backprop_output_spatial_shape(const std::vector& input_data_shape, - const std::vector& filters_shape, - const Strides& strides, - const Strides& dilations, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const CoordinateDiff& output_padding, - std::vector& output_spatial_shape); - - protected: - Strides m_strides; - Strides m_dilations; - CoordinateDiff m_pads_begin; - CoordinateDiff m_pads_end; - PadType m_auto_pad; - CoordinateDiff m_output_padding; - }; - } // namespace v1 - } // namespace op -} // namespace ngraph +using ov::op::v1::Convolution; +using ov::op::v1::ConvolutionBackpropData; +} // namespace v1 +} // namespace op +} // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/cos.hpp b/ngraph/core/include/ngraph/op/cos.hpp index 902b763588a..200d95ce9ad 100644 --- a/ngraph/core/include/ngraph/op/cos.hpp +++ b/ngraph/core/include/ngraph/op/cos.hpp @@ -5,27 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/cos.hpp" namespace ngraph { namespace op { 
namespace v0 { -/// \brief Elementwise cosine operation. -class NGRAPH_API Cos : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a cosine operation. - Cos() = default; - /// \brief Constructs a cosine operation. - /// - /// \param arg Node that produces the input tensor. - Cos(const Output& arg); - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Cos; } // namespace v0 using v0::Cos; } // namespace op diff --git a/ngraph/core/include/ngraph/op/cosh.hpp b/ngraph/core/include/ngraph/op/cosh.hpp index f279a857951..bea0453b73f 100644 --- a/ngraph/core/include/ngraph/op/cosh.hpp +++ b/ngraph/core/include/ngraph/op/cosh.hpp @@ -5,27 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/cosh.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise hyperbolic cosine (cosh) operation. -class NGRAPH_API Cosh : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a hyperbolic cosine operation. - Cosh() = default; - /// \brief Constructs a hyperbolic cosine operation. - /// - /// \param arg Node that produces the input tensor. 
- Cosh(const Output& arg); - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Cosh; } // namespace v0 using v0::Cosh; } // namespace op diff --git a/ngraph/core/include/ngraph/op/ctc_greedy_decoder.hpp b/ngraph/core/include/ngraph/op/ctc_greedy_decoder.hpp index 3749f2832e2..1b2d0784a9e 100644 --- a/ngraph/core/include/ngraph/op/ctc_greedy_decoder.hpp +++ b/ngraph/core/include/ngraph/op/ctc_greedy_decoder.hpp @@ -5,33 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/ctc_greedy_decoder.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API CTCGreedyDecoder : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - CTCGreedyDecoder() = default; - /// \brief Constructs a CTCGreedyDecoder operation - /// - /// \param input Logits on which greedy decoding is performed - /// \param seq_len Sequence lengths - /// \param ctc_merge_repeated Whether to merge repeated labels - CTCGreedyDecoder(const Output& input, const Output& seq_len, const bool ctc_merge_repeated); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool get_ctc_merge_repeated() const { - return m_ctc_merge_repeated; - } - -private: - bool m_ctc_merge_repeated; -}; +using ov::op::v0::CTCGreedyDecoder; } // namespace v0 using v0::CTCGreedyDecoder; } // namespace op diff --git a/ngraph/core/include/ngraph/op/ctc_greedy_decoder_seq_len.hpp b/ngraph/core/include/ngraph/op/ctc_greedy_decoder_seq_len.hpp index c1131ec3b40..4e74038047d 100644 --- a/ngraph/core/include/ngraph/op/ctc_greedy_decoder_seq_len.hpp +++ 
b/ngraph/core/include/ngraph/op/ctc_greedy_decoder_seq_len.hpp @@ -5,96 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/ctc_greedy_decoder_seq_len.hpp" namespace ngraph { namespace op { namespace v6 { -/// \brief Operator performing CTCGreedyDecoder -/// -class NGRAPH_API CTCGreedyDecoderSeqLen : public Op { -public: - NGRAPH_RTTI_DECLARATION; - CTCGreedyDecoderSeqLen() = default; - /// \brief Constructs a CTCGreedyDecoderSeqLen operation - /// - /// \param input 3-D tensor of logits on which greedy decoding is - /// performed - /// \param seq_len 1-D tensor of sequence lengths - /// \param merge_repeated Whether to merge repeated labels - /// \param classes_index_type Specifies the output classes_index tensor type - /// \param sequence_length_type Specifies the output sequence_length tensor type - CTCGreedyDecoderSeqLen(const Output& input, - const Output& seq_len, - const bool merge_repeated = true, - const element::Type& classes_index_type = element::i32, - const element::Type& sequence_length_type = element::i32); - /// \brief Constructs a CTCGreedyDecoderSeqLen operation - /// - /// \param input 3-D tensor of logits on which greedy decoding is - /// performed - /// \param seq_len 1-D tensor of sequence lengths - /// \param blank_index Scalar or 1-D tensor with 1 element used to mark a - /// blank index - /// \param merge_repeated Whether to merge repeated labels - /// \param classes_index_type Specifies the output classes_index tensor type - /// \param sequence_length_type Specifies the output sequence_length tensor type - CTCGreedyDecoderSeqLen(const Output& input, - const Output& seq_len, - const Output& blank_index, - const bool merge_repeated = true, - const element::Type& classes_index_type = element::i32, - const element::Type& sequence_length_type = element::i32); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const 
OutputVector& new_args) const override; - - /// \brief Get merge_repeated attribute - /// - /// \return Current value of merge_repeated attribute - /// - bool get_merge_repeated() const { - return m_merge_repeated; - } - /// \brief Get classes_index_type attribute - /// - /// \return Current value of classes_index_type attribute - /// - const element::Type& get_classes_index_type() const { - return m_classes_index_type; - } - /// \brief Set classes_index_type attribute - /// - /// \param classes_index_type Type of classes_index - /// - void set_classes_index_type(const element::Type& classes_index_type) { - m_classes_index_type = classes_index_type; - validate_and_infer_types(); - } - - /// \brief Get sequence_length_type attribute - /// - /// \return Current value of sequence_length_type attribute - /// - const element::Type& get_sequence_length_type() const { - return m_sequence_length_type; - } - - /// \brief Set sequence_length_type attribute - /// - /// \param sequence_length_type Type of sequence length - /// - void set_sequence_length_type(const element::Type& sequence_length_type) { - m_sequence_length_type = sequence_length_type; - validate_and_infer_types(); - } - -private: - bool m_merge_repeated; - element::Type m_classes_index_type{element::i32}; - element::Type m_sequence_length_type{element::i32}; -}; +using ov::op::v6::CTCGreedyDecoderSeqLen; } // namespace v6 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/ctc_loss.hpp b/ngraph/core/include/ngraph/op/ctc_loss.hpp index b4ddf80d5eb..63f989d7479 100644 --- a/ngraph/core/include/ngraph/op/ctc_loss.hpp +++ b/ngraph/core/include/ngraph/op/ctc_loss.hpp @@ -5,67 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/ctc_loss.hpp" namespace ngraph { namespace op { namespace v4 { -class NGRAPH_API CTCLoss : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - CTCLoss() = default; - /// \brief Constructs a CTCLoss operation - /// - /// \param logits 3-D 
tensor of logits - /// \param logit_length 1-D tensor of length for each object from - /// a batch - /// \param labels 2-D tensor of labels for which likelyhood - /// is estimated using logist - /// \param label_length 1-D tensor of length for each label - /// sequence - /// \param blank_index Scalar used to mark a blank index - /// \param preprocess_collapse_repeated Flag for preprocessing labels before loss - /// calculation - /// \param ctc_merge_repeated Flag for merging repeated characters in a - /// potential alignment - /// \param unique Flag to find unique elements in a target - /// before matching with alignment - CTCLoss(const Output& logits, - const Output& logit_length, - const Output& labels, - const Output& label_length, - const bool preprocess_collapse_repeated = false, - const bool ctc_merge_repeated = true, - const bool unique = false); - - CTCLoss(const Output& logits, - const Output& logit_length, - const Output& labels, - const Output& label_length, - const Output& blank_index, - const bool preprocess_collapse_repeated = false, - const bool ctc_merge_repeated = true, - const bool unique = false); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool get_preprocess_collapse_repeated() const { - return preprocess_collapse_repeated_; - } - bool get_ctc_merge_repeated() const { - return ctc_merge_repeated_; - } - bool get_unique() const { - return unique_; - } - -private: - bool preprocess_collapse_repeated_; - bool ctc_merge_repeated_; - bool unique_; -}; +using ov::op::v4::CTCLoss; } // namespace v4 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/cum_sum.hpp b/ngraph/core/include/ngraph/op/cum_sum.hpp index a2edb446a4d..7ed1580f80e 100644 --- a/ngraph/core/include/ngraph/op/cum_sum.hpp +++ b/ngraph/core/include/ngraph/op/cum_sum.hpp @@ -6,55 +6,12 @@ #include 
"ngraph/axis_set.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/cum_sum.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Tensor cumulative sum operation. -/// -/// Compute the cumulative sum of the input tensor along the axis specified. -/// -class NGRAPH_API CumSum : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a cumulative summation operation. - CumSum() = default; - - /// \brief Constructs a cumulative summation operation. - /// - /// \param arg The tensor to be summed. - /// \param axis zero dimension tensor specifying axis position along which - /// cumulative sum must be performed - /// \param exclusive if set to true, the top element is not included - /// \param reverse if set to true, will perform the sums in reverse direction - CumSum(const Output& arg, const Output& axis, const bool exclusive = false, const bool reverse = false); - - /// \brief Constructs a cumulative summation operation with axis = 0 - /// - /// \param arg The tensor to be summed - CumSum(const Output& arg, const bool exclusive = false, const bool reverse = false); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - /// \return The default value for CumSum. 
- NGRAPH_SUPPRESS_DEPRECATED_START - virtual std::shared_ptr get_default_value() const override; - NGRAPH_SUPPRESS_DEPRECATED_END - bool is_exclusive() const { - return m_exclusive; - } - bool is_reverse() const { - return m_reverse; - } - -private: - bool m_exclusive = false; - bool m_reverse = false; -}; +using ov::op::v0::CumSum; } // namespace v0 using v0::CumSum; } // namespace op diff --git a/ngraph/core/include/openvino/op/batch_norm.hpp b/ngraph/core/include/openvino/op/batch_norm.hpp new file mode 100644 index 00000000000..77f45f3f90f --- /dev/null +++ b/ngraph/core/include/openvino/op/batch_norm.hpp @@ -0,0 +1,94 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API BatchNormInference : public Op { +public: + OPENVINO_RTTI_DECLARATION; + BatchNormInference() = default; + /// \param input [., C, ...] + /// \param gamma gamma scaling for normalized value. 
[C] + /// \param beta bias added to the scaled normalized value [C] + /// \param mean value for mean normalization [C] + /// \param variance value for variance normalization [C] + /// \param epsilon Avoids divsion by 0 if input has 0 variance + BatchNormInference(const Output& input, + const Output& gamma, + const Output& beta, + const Output& mean, + const Output& variance, + double epsilon); + + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + double get_eps_value() const { + return m_epsilon; + } + void set_eps_value(double epsilon) { + m_epsilon = epsilon; + } + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + +private: + static constexpr size_t INPUT_GAMMA = 0; + static constexpr size_t INPUT_BETA = 1; + static constexpr size_t INPUT_DATA = 2; + static constexpr size_t INPUT_MEAN = 3; + static constexpr size_t INPUT_VARIANCE = 4; + + double m_epsilon; +}; +} // namespace v0 +namespace v5 { +class OPENVINO_API BatchNormInference : public Op { +public: + OPENVINO_RTTI_DECLARATION; + BatchNormInference() = default; + /// \param input [., C, ...] + /// \param gamma gamma scaling for normalized value. 
[C] + /// \param beta bias added to the scaled normalized value [C] + /// \param mean value for mean normalization [C] + /// \param variance value for variance normalization [C] + /// \param epsilon Avoids divsion by 0 if input has 0 variance + BatchNormInference(const Output& input, + const Output& gamma, + const Output& beta, + const Output& mean, + const Output& variance, + double epsilon); + + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + double get_eps_value() const { + return m_epsilon; + } + void set_eps_value(double epsilon) { + m_epsilon = epsilon; + } + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + +private: + static constexpr size_t INPUT_DATA = 0; + static constexpr size_t INPUT_GAMMA = 1; + static constexpr size_t INPUT_BETA = 2; + static constexpr size_t INPUT_MEAN = 3; + static constexpr size_t INPUT_VARIANCE = 4; + + double m_epsilon; +}; +} // namespace v5 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/batch_to_space.hpp b/ngraph/core/include/openvino/op/batch_to_space.hpp new file mode 100644 index 00000000000..11a72013f3b --- /dev/null +++ b/ngraph/core/include/openvino/op/batch_to_space.hpp @@ -0,0 +1,48 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief BatchToSpace permutes data from the batch dimension of the data tensor into +/// spatial dimensions. +/// +/// \note Values from the batch dimension are moved in spatial blocks dimensions. +/// +/// Output node produces a tensor with shape: +/// `[batch / (block_shape[0] * block_shape[1] * ... 
* block_shape[N - 1]), +/// D_1 * block_shape[1] - crops_begin[1] - crops_end[1], +/// D_2 * block_shape[2] - crops_begin[2] - crops_end[2], ..., +/// D_{N - 1} * block_shape[N - 1] - crops_begin[N - 1] - crops_end[N - 1]` +/// of the same type as `data` input. +class OPENVINO_API BatchToSpace : public Op { +public: + OPENVINO_RTTI_DECLARATION; + BatchToSpace() = default; + /// \brief Constructs a BatchToSpace operation. + /// + /// \param data Node producing the data tensor + /// \param block_shape The sizes of the block of values to be moved + /// \param crops_begin Specifies the amount to crop from the beginning along each + /// axis of `data` input + /// \param crops_end Specifies the amount to crop from the ending along each axis of + /// `data` input. + BatchToSpace(const Output& data, + const Output& block_shape, + const Output& crops_begin, + const Output& crops_end); + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor& visitor) override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/binary_convolution.hpp b/ngraph/core/include/openvino/op/binary_convolution.hpp new file mode 100644 index 00000000000..d7ef7493bcd --- /dev/null +++ b/ngraph/core/include/openvino/op/binary_convolution.hpp @@ -0,0 +1,143 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v1 { +class OPENVINO_API BinaryConvolution : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + enum class BinaryConvolutionMode { + // Interpret input data and kernel 
values: 0 as -1, 1 as 1 + XNOR_POPCOUNT + }; + + /// \brief Constructs a binary convolution operation. + BinaryConvolution() = default; + /// \brief Constructs a binary convolution operation. + /// \param data The node producing the input data batch tensor. + /// \param kernel The node producing the filters tensor. + /// \param strides The strides. + /// \param pads_begin The beginning of padding shape. + /// \param pads_end The end of padding shape. + /// \param dilations The dilations. + /// \param mode Defines how input tensor 0/1 values and weights 0/1 are interpreted. + /// \param pad_value Floating-point value used to fill pad area. + /// \param auto_pad The pad type for automatically computing padding sizes. + /// + /// Output `[N, C_OUT, R1, ... Rf]` + BinaryConvolution(const Output& data, + const Output& kernel, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + BinaryConvolutionMode mode, + float pad_value, + const PadType& auto_pad = PadType::EXPLICIT); + + BinaryConvolution(const Output& data, + const Output& kernel, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + const std::string& mode, + float pad_value, + const PadType& auto_pad = PadType::EXPLICIT); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The strides. + const Strides& get_strides() const { + return m_strides; + } + void set_strides(const Strides& strides) { + m_strides = strides; + } + /// \return The dilations. + const Strides& get_dilations() const { + return m_dilations; + } + void set_dilations(const Strides& dilations) { + m_dilations = dilations; + } + /// \return The padding-below sizes (possibly negative). 
+ const CoordinateDiff& get_pads_begin() const { + return m_pads_begin; + } + void set_pads_begin(const CoordinateDiff& pads_begin) { + m_pads_begin = pads_begin; + } + /// \return The padding-above sizes (possibly negative). + const CoordinateDiff& get_pads_end() const { + return m_pads_end; + } + void set_adding_above(const CoordinateDiff& pads_end) { + m_pads_end = pads_end; + } + /// \return The pad type for convolution. + const PadType& get_auto_pad() const { + return m_auto_pad; + } + void set_auto_pad(const PadType& auto_pad) { + m_auto_pad = auto_pad; + } + /// \return The mode of convolution. + const BinaryConvolutionMode& get_mode() const { + return m_mode; + } + void set_mode(const BinaryConvolutionMode& mode) { + m_mode = mode; + } + /// \return The pad value. + float get_pad_value() const { + return m_pad_value; + } + void set_pad_value(float pad_value) { + m_pad_value = pad_value; + } + +protected: + BinaryConvolutionMode mode_from_string(const std::string& mode) const; + Strides m_strides; + Strides m_dilations; + CoordinateDiff m_pads_begin; + CoordinateDiff m_pads_end; + BinaryConvolutionMode m_mode; + float m_pad_value; + PadType m_auto_pad; +}; +} // namespace v1 +} // namespace op + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v1::BinaryConvolution::BinaryConvolutionMode& type); + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v1::BinaryConvolution::BinaryConvolutionMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", + 0}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/broadcast.hpp b/ngraph/core/include/openvino/op/broadcast.hpp new file mode 100644 index 00000000000..2b605a7afed --- /dev/null +++ b/ngraph/core/include/openvino/op/broadcast.hpp @@ -0,0 +1,133 @@ +// Copyright (C) 
2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/axis_set.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" +#include "openvino/op/util/broadcast_base.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Operation which "adds" axes to an input tensor, replicating elements from the +/// input as needed along the new axes. +class OPENVINO_API Broadcast : public util::BroadcastBase { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a broadcast operation. + Broadcast() = default; + /// \brief Constructs a broadcast operation. + /// + /// \param arg The input tensor to be broadcast. + /// \param target_shape The shape of the output tensor. + /// \param axes_mapping The axis positions (0-based) in the result that correspond + /// to input axes. 'Arg' tensor is broadcast along the + /// remaining axes. + /// E.g., Input Shape - [3, 4], Target Shape - [3, 5, 4, 4] + /// axes_mapping - [0, 2] => Broadcast along axes 1 and 3. + /// axes_mapping - [0, 3] => Broadcast along axes 1 and 2. + /// \param broadcast_spec Broadcast specification to use for determining broadcast + /// axes. 'axes_mapping' should not be provided if mode other + /// than explicit (none) is used. + Broadcast(const Output& arg, + const Output& target_shape, + const Output& axes_mapping, + const BroadcastModeSpec& broadcast_spec = BroadcastType::EXPLICIT); + + /// \brief Constructs a broadcast operation. + /// + /// \param arg The input tensor to be broadcast. + /// \param target_shape The shape of the output tensor. 
+ /// \param broadcast_spec Broadcast specification to use for determining broadcast + /// axes + Broadcast(const Output& arg, + const Output& target_shape, + const BroadcastModeSpec& broadcast_spec = BroadcastType::NUMPY); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + // \return Broadcast Specification. + const BroadcastModeSpec& get_broadcast_spec() const { + return m_mode; + } + void set_broadcast_spec(const BroadcastModeSpec& broadcast_spec) { + m_mode = broadcast_spec; + } + + void validate_and_infer_types() override; + + /// \return true and the AxisSet if broadcast axes can be fully determined. + std::pair get_broadcast_axes() const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool broadcast_evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const; +}; +} // namespace v3 + +namespace v1 { +/// \brief Operation which "adds" axes to an input tensor, replicating elements from the +/// input as needed along the new axes. +class OPENVINO_API Broadcast : public util::BroadcastBase { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a broadcast operation. + Broadcast() = default; + /// \brief Constructs a broadcast operation. + /// + /// \param arg The input tensor to be broadcast. + /// \param target_shape The shape of the output tensor. + /// \param axes_mapping The axis positions (0-based) in the result that correspond + /// to input axes. 'Arg' tensor is broadcast along the + /// remaining axes. + /// E.g., Input Shape - [3, 4], Target Shape - [3, 5, 4, 4] + /// axes_mapping - [0, 2] => Broadcast along axes 1 and 3. + /// axes_mapping - [0, 3] => Broadcast along axes 1 and 2. + /// \param broadcast_spec Broadcast specification to use for determining broadcast + /// axes. 
'axes_mapping' is ignored if broadcast_spec is not + /// NONE + Broadcast(const Output& arg, + const Output& target_shape, + const Output& axes_mapping, + const AutoBroadcastSpec& broadcast_spec = AutoBroadcastSpec()); + + /// \brief Constructs a broadcast operation. + /// + /// \param arg The input tensor to be broadcast. + /// \param target_shape The shape of the output tensor. + /// \param broadcast_spec Broadcast specification to use for determining broadcast + /// axes + Broadcast(const Output& arg, + const Output& target_shape, + const AutoBroadcastSpec& broadcast_spec = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return Broadcast Specification. + const AutoBroadcastSpec& get_broadcast_spec() const { + return m_broadcast_spec; + } + void set_broadcast_spec(const AutoBroadcastSpec& broadcast_spec) { + m_broadcast_spec = broadcast_spec; + } + + void validate_and_infer_types() override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + AutoBroadcastSpec m_broadcast_spec; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/bucketize.hpp b/ngraph/core/include/openvino/op/bucketize.hpp new file mode 100644 index 00000000000..dbc89f188ba --- /dev/null +++ b/ngraph/core/include/openvino/op/bucketize.hpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Operation that bucketizes the input based on boundaries +class OPENVINO_API Bucketize : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Bucketize() = default; + /// \brief Constructs a Bucketize node + + /// \param data Input data to 
bucketize + /// \param buckets 1-D of sorted unique boundaries for buckets + /// \param output_type Output tensor type, "i64" or "i32", defaults to i64 + /// \param with_right_bound indicates whether bucket includes the right or left + /// edge of interval. default true = includes right edge + Bucketize(const Output& data, + const Output& buckets, + const element::Type output_type = element::i64, + const bool with_right_bound = true); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; + + element::Type get_output_type() const { + return m_output_type; + } + void set_output_type(element::Type output_type) { + m_output_type = output_type; + } + // Overload collision with method on Node + using Node::set_output_type; + + bool get_with_right_bound() const { + return m_with_right_bound; + } + void set_with_right_bound(bool with_right_bound) { + m_with_right_bound = with_right_bound; + } + +private: + element::Type m_output_type; + bool m_with_right_bound; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/ceiling.hpp b/ngraph/core/include/openvino/op/ceiling.hpp new file mode 100644 index 00000000000..95b6359e8e2 --- /dev/null +++ b/ngraph/core/include/openvino/op/ceiling.hpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise ceiling operation. +class OPENVINO_API Ceiling : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a ceiling operation. + Ceiling() = default; + /// \brief Constructs a ceiling operation. + /// + /// \param arg Node that produces the input tensor. 
+ Ceiling(const Output& arg); + + bool visit_attributes(AttributeVisitor&) override { + return true; + } + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/clamp.hpp b/ngraph/core/include/openvino/op/clamp.hpp new file mode 100644 index 00000000000..42f9bc08cd4 --- /dev/null +++ b/ngraph/core/include/openvino/op/clamp.hpp @@ -0,0 +1,50 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Performs a clipping operation on all elements of the input node +/// +/// All input values that are outside of the range are set to 'min' or 'max' +/// depending on which side of the range they are. The values that fall into +/// this range remain unchanged. +class OPENVINO_API Clamp : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Clamp(); + /// \brief Constructs a Clamp node. 
+ /// + /// \param data - Node producing the input tensor + /// \param min - the lower bound of the range + /// \param max - the upper bound of the range + Clamp(const Output& data, const double min, const double max); + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + double get_min() const { + return m_min; + } + double get_max() const { + return m_max; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + double m_min; + double m_max; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/concat.hpp b/ngraph/core/include/openvino/op/concat.hpp new file mode 100644 index 00000000000..e21ab5fc5ea --- /dev/null +++ b/ngraph/core/include/openvino/op/concat.hpp @@ -0,0 +1,65 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Concatenation operation. +class OPENVINO_API Concat : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a concatenation operation. + Concat() = default; + /// \brief Constructs a concatenation operation. + /// + /// \param args The outputs producing the input tensors. + /// \param axis The axis along which to concatenate the input tensors. + Concat(const OutputVector& args, int64_t axis); + + /// \brief Constructs a concatenation operation. + /// + /// \param args The nodes producing the input tensors. + /// \param axis The axis along which to concatenate the input tensors. 
+ Concat(const NodeVector& args, int64_t axis); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The concatenation axis. + int64_t get_concatenation_axis() const { + return m_concat_axis; + } + void set_concatenation_axis(int64_t concatenation_axis) { + m_concat_axis = concatenation_axis; + } + /// \return The concatenation axis. + int64_t get_axis() const { + return m_axis; + } + void set_axis(int64_t axis) { + m_axis = axis; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& output_values) const override; + bool evaluate_upper(const HostTensorVector& output_values) const override; + +protected: + /// \ brief m_axis stores default value for all iterations + int64_t m_axis; + /// \brief m_concat_axis stores m_axis plus the number of rank for each iteration + int64_t m_concat_axis = -1; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/convert.hpp b/ngraph/core/include/openvino/op/convert.hpp new file mode 100644 index 00000000000..6626f494abe --- /dev/null +++ b/ngraph/core/include/openvino/op/convert.hpp @@ -0,0 +1,52 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise type conversion operation. +class OPENVINO_API Convert : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a conversion operation. + Convert() = default; + /// \brief Constructs a conversion operation. + /// + /// \param arg Node that produces the input tensor. + /// \param destination_type Element type for the output tensor. 
+ Convert(const Output& arg, const ov::element::Type& destination_type); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + const element::Type& get_destination_type() const { + return m_destination_type; + } + void set_destination_type(const element::Type& destination_type) { + m_destination_type = destination_type; + } + const element::Type& get_convert_element_type() const { + return m_destination_type; + } + void set_convert_element_type(const element::Type& destination_type) { + m_destination_type = destination_type; + } + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& outputs) const override; + bool evaluate_upper(const HostTensorVector& outputs) const override; + +protected: + ov::element::Type m_destination_type; +}; +} // namespace v0 +using v0::Convert; +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/convert_like.hpp b/ngraph/core/include/openvino/op/convert_like.hpp new file mode 100644 index 00000000000..38c36df44ab --- /dev/null +++ b/ngraph/core/include/openvino/op/convert_like.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise type conversion operation. +class OPENVINO_API ConvertLike : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a conversion operation. + ConvertLike() = default; + /// \brief Constructs a conversion operation. + /// \param data Node that produces the input tensor. + /// \param like Node which provides the target type information for the conversion. 
+ ConvertLike(const Output& data, const Output& like); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool constant_fold(OutputVector& output_values, const OutputVector& input_values) override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/convolution.hpp b/ngraph/core/include/openvino/op/convolution.hpp new file mode 100644 index 00000000000..8c5dc16b1c2 --- /dev/null +++ b/ngraph/core/include/openvino/op/convolution.hpp @@ -0,0 +1,225 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Batched convolution operation, with optional window dilation and stride. +/// +class OPENVINO_API Convolution : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a batched convolution operation. + Convolution() = default; + /// \brief Constructs a batched convolution operation. + /// + /// \param data_batch The node producing the input data batch tensor.
+ /// `[N, C_IN, D1, ... Df]` + /// \param filters The node producing the filters tensor.
+ /// `[C_OUT, C_IN, F1, ... Ff]` + /// \param strides The strides.
+ /// `[f]` + /// \param dilations The dilations.
+ /// `[f]` + /// \param pads_begin The beginning of padding shape.
+ /// `[f]` + /// \param pads_end The end of padding shape.
+ /// `[f]` + /// \param auto_pad The pad type for automatically computing padding sizes.
+ /// `[f]` + /// + /// Output `[N, C_OUT, R1, ... Rf]` + /// + Convolution(const Output& data_batch, + const Output& filters, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + const PadType& auto_pad = PadType::EXPLICIT); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The strides. + const Strides& get_strides() const { + return m_strides; + } + void set_strides(const Strides& strides) { + m_strides = strides; + } + /// \return The dilations. + const Strides& get_dilations() const { + return m_dilations; + } + void set_dilations(const Strides& dilations) { + m_dilations = dilations; + } + /// \return The padding-below sizes (possibly negative). + const CoordinateDiff& get_pads_begin() const { + return m_pads_begin; + } + void set_pads_begin(const CoordinateDiff& pads_begin) { + m_pads_begin = pads_begin; + } + /// \return The padding-above sizes (possibly negative). + const CoordinateDiff& get_pads_end() const { + return m_pads_end; + } + void set_adding_above(const CoordinateDiff& pads_end) { + m_pads_end = pads_end; + } + /// \return The pad type for convolution. + const PadType& get_auto_pad() const { + return m_auto_pad; + } + void set_auto_pad(const PadType& auto_pad) { + m_auto_pad = auto_pad; + } + /// \return The default value for Convolution. + OPENVINO_SUPPRESS_DEPRECATED_START + std::shared_ptr get_default_value() const override; + OPENVINO_SUPPRESS_DEPRECATED_END + +protected: + Strides m_strides; + Strides m_dilations; + CoordinateDiff m_pads_begin; + CoordinateDiff m_pads_end; + PadType m_auto_pad; +}; + +/// \brief Data batch backprop for batched convolution operation. 
+class OPENVINO_API ConvolutionBackpropData : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a batched-convolution data batch-backprop operation. + ConvolutionBackpropData() = default; + // clang-format off + // + // \brief Constructs a batched-convolution data batch-backprop operation. + // + // \param data The node producing data from forward-prop. Shape: [N, + // C_INPUT, X1, ..., XD]. + // \param filters The node producing the filter from forward-prop. Shape: + // [C_INPUT, C_OUTPUT, K_D, ..., K_1] + // \param output_shape The shape of the data batch from forward-prop. It's size + // should be equal to number of data spatial dimensions. + // \param strides The strides from forward-prop. + // \param pads_begin The padding-below sizes from forward-prop. + // \param pads_end The padding-above sizes from forward-prop. + // \param dilations The dilations from forward-prop. + // \param auto_pad The pad type for automatically computing padding sizes. + // \param output_padding The output padding adds additional amount of paddings per + // each spatial axis in the output tensor. clang-format on + // + ConvolutionBackpropData(const Output& data, + const Output& filters, + const Output& output_shape, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + const PadType& auto_pad = PadType::EXPLICIT, + const CoordinateDiff& output_padding = {}); + + // clang-format off + // + // \brief Constructs a batched-convolution data batch-backprop operation. + // + // \param data The node producing data from forward-prop. Shape: [N, + // C_INPUT, X1, ..., XD]. + // \param filters The node producing the filter from forward-prop. Shape: + // [C_INPUT, C_OUTPUT, K_D, ..., K_1] + // \param strides The strides from forward-prop. + // \param pads_begin The padding-below sizes from forward-prop. + // \param pads_end The padding-above sizes from forward-prop. 
+ // \param dilations The dilations from forward-prop. + // \param auto_pad The pad type for automatically computing padding sizes. + // \param output_padding The output padding adds additional amount of paddings per + // each spatial axis in the output tensor. clang-format on + // + ConvolutionBackpropData(const Output& data, + const Output& filters, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + const PadType& auto_pad = PadType::EXPLICIT, + const CoordinateDiff& output_padding = {}); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + bool is_dynamic() const override; + + std::shared_ptr + clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The output spatial dimensions shape. + const PartialShape get_output_shape() const; + void set_output_shape(const ngraph::Shape& output_shape); + /// \return The strides from the forward prop. + const Strides& get_strides() const { return m_strides; } + void set_strides(const Strides& strides) { m_strides = strides; } + /// \return The dilations from the forward prop. + const Strides& get_dilations() const { return m_dilations; } + void set_dilations(const Strides& dilations) { m_dilations = dilations; } + /// \return The padding-below sizes (possibly negative) from the forward prop. + const CoordinateDiff& get_pads_begin() const { return m_pads_begin; } + void set_pads_begin(const CoordinateDiff& pads_begin) { m_pads_begin = pads_begin; } + /// \return The padding-above sizes (possibly negative) from the forward prop. + const CoordinateDiff& get_pads_end() const { return m_pads_end; } + void set_pads_end(const CoordinateDiff& pads_end) { m_pads_end = pads_end; } + /// \return The auto pad. + const PadType& get_auto_pad() const { return m_auto_pad; } + void set_auto_pad(const PadType& auto_pad) { m_auto_pad = auto_pad; } + /// \return The output padding. 
+ const CoordinateDiff& get_output_padding() const { return m_output_padding; } + void set_output_padding(const CoordinateDiff& output_padding) + { + m_output_padding = output_padding; + } + /// \brief Calculates output spatial features size. + /// + /// \param[in] input_data_shape The input data partial shape + /// \param[in] filters_shape The filters partial shape + /// \param[in] strides The strides values. + /// \param[in] dilations The dilations values. + /// \param[in] pads_begin The paddings at the beginning of axis. + /// \param[in] pads_end The paddings at the end of axis. + /// \param[in] output_padding The output padding values. + /// \param output_spatial_shape The placeholder for computed output spatial partial + /// shape. + /// + void + infer_conv_backprop_output_spatial_shape(const std::vector& input_data_shape, + const std::vector& filters_shape, + const Strides& strides, + const Strides& dilations, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const CoordinateDiff& output_padding, + std::vector& output_spatial_shape); + + protected: + Strides m_strides; + Strides m_dilations; + CoordinateDiff m_pads_begin; + CoordinateDiff m_pads_end; + PadType m_auto_pad; + CoordinateDiff m_output_padding; + }; + } // namespace v1 + } // namespace op +} // namespace ngraph diff --git a/ngraph/core/include/openvino/op/cos.hpp b/ngraph/core/include/openvino/op/cos.hpp new file mode 100644 index 00000000000..1e251e0f4e0 --- /dev/null +++ b/ngraph/core/include/openvino/op/cos.hpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise cosine operation. +class OPENVINO_API Cos : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a cosine operation. 
+ Cos() = default; + /// \brief Constructs a cosine operation. + /// + /// \param arg Node that produces the input tensor. + Cos(const Output& arg); + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/cosh.hpp b/ngraph/core/include/openvino/op/cosh.hpp new file mode 100644 index 00000000000..844f4328299 --- /dev/null +++ b/ngraph/core/include/openvino/op/cosh.hpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise hyperbolic cosine (cosh) operation. +class OPENVINO_API Cosh : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a hyperbolic cosine operation. + Cosh() = default; + /// \brief Constructs a hyperbolic cosine operation. + /// + /// \param arg Node that produces the input tensor. 
+ Cosh(const Output& arg); + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/ctc_greedy_decoder.hpp b/ngraph/core/include/openvino/op/ctc_greedy_decoder.hpp new file mode 100644 index 00000000000..d5b6ff5c483 --- /dev/null +++ b/ngraph/core/include/openvino/op/ctc_greedy_decoder.hpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API CTCGreedyDecoder : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + CTCGreedyDecoder() = default; + /// \brief Constructs a CTCGreedyDecoder operation + /// + /// \param input Logits on which greedy decoding is performed + /// \param seq_len Sequence lengths + /// \param ctc_merge_repeated Whether to merge repeated labels + CTCGreedyDecoder(const Output& input, const Output& seq_len, const bool ctc_merge_repeated); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool get_ctc_merge_repeated() const { + return m_ctc_merge_repeated; + } + +private: + bool m_ctc_merge_repeated; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/ctc_greedy_decoder_seq_len.hpp b/ngraph/core/include/openvino/op/ctc_greedy_decoder_seq_len.hpp new file mode 100644 index 00000000000..9338e2ebe3e --- /dev/null +++ b/ngraph/core/include/openvino/op/ctc_greedy_decoder_seq_len.hpp @@ -0,0 +1,100 @@ +// Copyright (C) 2018-2021 Intel Corporation +// 
SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v6 { +/// \brief Operator performing CTCGreedyDecoder +/// +class OPENVINO_API CTCGreedyDecoderSeqLen : public Op { +public: + OPENVINO_RTTI_DECLARATION; + CTCGreedyDecoderSeqLen() = default; + /// \brief Constructs a CTCGreedyDecoderSeqLen operation + /// + /// \param input 3-D tensor of logits on which greedy decoding is + /// performed + /// \param seq_len 1-D tensor of sequence lengths + /// \param merge_repeated Whether to merge repeated labels + /// \param classes_index_type Specifies the output classes_index tensor type + /// \param sequence_length_type Specifies the output sequence_length tensor type + CTCGreedyDecoderSeqLen(const Output& input, + const Output& seq_len, + const bool merge_repeated = true, + const element::Type& classes_index_type = element::i32, + const element::Type& sequence_length_type = element::i32); + /// \brief Constructs a CTCGreedyDecoderSeqLen operation + /// + /// \param input 3-D tensor of logits on which greedy decoding is + /// performed + /// \param seq_len 1-D tensor of sequence lengths + /// \param blank_index Scalar or 1-D tensor with 1 element used to mark a + /// blank index + /// \param merge_repeated Whether to merge repeated labels + /// \param classes_index_type Specifies the output classes_index tensor type + /// \param sequence_length_type Specifies the output sequence_length tensor type + CTCGreedyDecoderSeqLen(const Output& input, + const Output& seq_len, + const Output& blank_index, + const bool merge_repeated = true, + const element::Type& classes_index_type = element::i32, + const element::Type& sequence_length_type = element::i32); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \brief Get merge_repeated attribute + /// + /// 
\return Current value of merge_repeated attribute + /// + bool get_merge_repeated() const { + return m_merge_repeated; + } + /// \brief Get classes_index_type attribute + /// + /// \return Current value of classes_index_type attribute + /// + const element::Type& get_classes_index_type() const { + return m_classes_index_type; + } + /// \brief Set classes_index_type attribute + /// + /// \param classes_index_type Type of classes_index + /// + void set_classes_index_type(const element::Type& classes_index_type) { + m_classes_index_type = classes_index_type; + validate_and_infer_types(); + } + + /// \brief Get sequence_length_type attribute + /// + /// \return Current value of sequence_length_type attribute + /// + const element::Type& get_sequence_length_type() const { + return m_sequence_length_type; + } + + /// \brief Set sequence_length_type attribute + /// + /// \param sequence_length_type Type of sequence length + /// + void set_sequence_length_type(const element::Type& sequence_length_type) { + m_sequence_length_type = sequence_length_type; + validate_and_infer_types(); + } + +private: + bool m_merge_repeated; + element::Type m_classes_index_type{element::i32}; + element::Type m_sequence_length_type{element::i32}; +}; +} // namespace v6 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/ctc_loss.hpp b/ngraph/core/include/openvino/op/ctc_loss.hpp new file mode 100644 index 00000000000..5407fd0d113 --- /dev/null +++ b/ngraph/core/include/openvino/op/ctc_loss.hpp @@ -0,0 +1,71 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v4 { +class OPENVINO_API CTCLoss : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + CTCLoss() = default; + /// \brief Constructs a CTCLoss operation + /// + /// \param logits 3-D tensor of logits + /// \param logit_length 1-D tensor of length for each object from + /// a 
batch + /// \param labels 2-D tensor of labels for which likelyhood + /// is estimated using logist + /// \param label_length 1-D tensor of length for each label + /// sequence + /// \param blank_index Scalar used to mark a blank index + /// \param preprocess_collapse_repeated Flag for preprocessing labels before loss + /// calculation + /// \param ctc_merge_repeated Flag for merging repeated characters in a + /// potential alignment + /// \param unique Flag to find unique elements in a target + /// before matching with alignment + CTCLoss(const Output& logits, + const Output& logit_length, + const Output& labels, + const Output& label_length, + const bool preprocess_collapse_repeated = false, + const bool ctc_merge_repeated = true, + const bool unique = false); + + CTCLoss(const Output& logits, + const Output& logit_length, + const Output& labels, + const Output& label_length, + const Output& blank_index, + const bool preprocess_collapse_repeated = false, + const bool ctc_merge_repeated = true, + const bool unique = false); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool get_preprocess_collapse_repeated() const { + return preprocess_collapse_repeated_; + } + bool get_ctc_merge_repeated() const { + return ctc_merge_repeated_; + } + bool get_unique() const { + return unique_; + } + +private: + bool preprocess_collapse_repeated_; + bool ctc_merge_repeated_; + bool unique_; +}; +} // namespace v4 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/cum_sum.hpp b/ngraph/core/include/openvino/op/cum_sum.hpp new file mode 100644 index 00000000000..073a168bfbc --- /dev/null +++ b/ngraph/core/include/openvino/op/cum_sum.hpp @@ -0,0 +1,60 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/axis_set.hpp" 
+#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Tensor cumulative sum operation. +/// +/// Compute the cumulative sum of the input tensor along the axis specified. +/// +class OPENVINO_API CumSum : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a cumulative summation operation. + CumSum() = default; + + /// \brief Constructs a cumulative summation operation. + /// + /// \param arg The tensor to be summed. + /// \param axis zero dimension tensor specifying axis position along which + /// cumulative sum must be performed + /// \param exclusive if set to true, the top element is not included + /// \param reverse if set to true, will perform the sums in reverse direction + CumSum(const Output& arg, const Output& axis, const bool exclusive = false, const bool reverse = false); + + /// \brief Constructs a cumulative summation operation with axis = 0 + /// + /// \param arg The tensor to be summed + CumSum(const Output& arg, const bool exclusive = false, const bool reverse = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + /// \return The default value for CumSum. 
+ OPENVINO_SUPPRESS_DEPRECATED_START + std::shared_ptr get_default_value() const override; + OPENVINO_SUPPRESS_DEPRECATED_END + bool is_exclusive() const { + return m_exclusive; + } + bool is_reverse() const { + return m_reverse; + } + +private: + bool m_exclusive = false; + bool m_reverse = false; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/src/op/batch_norm.cpp b/ngraph/core/src/op/batch_norm.cpp index 753a59a18b4..c053c2d704e 100644 --- a/ngraph/core/src/op/batch_norm.cpp +++ b/ngraph/core/src/op/batch_norm.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::BatchNormInference, "batchNormInference", 0); +OPENVINO_RTTI_DEFINITION(op::v0::BatchNormInference, "batchNormInference", 0); op::v0::BatchNormInference::BatchNormInference(const Output& input, const Output& gamma, @@ -71,7 +71,7 @@ std::shared_ptr op::v0::BatchNormInference::clone_with_new_inputs(const Ou m_epsilon); } -NGRAPH_RTTI_DEFINITION(op::v5::BatchNormInference, "BatchNormInference", 5); +OPENVINO_RTTI_DEFINITION(op::v5::BatchNormInference, "BatchNormInference", 5); op::v5::BatchNormInference::BatchNormInference(const Output& input, const Output& gamma, diff --git a/ngraph/core/src/op/batch_to_space.cpp b/ngraph/core/src/op/batch_to_space.cpp index 0fc96d69f7a..8bbac08abbf 100644 --- a/ngraph/core/src/op/batch_to_space.cpp +++ b/ngraph/core/src/op/batch_to_space.cpp @@ -23,7 +23,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::BatchToSpace, "BatchToSpace", 1); +OPENVINO_RTTI_DEFINITION(op::v1::BatchToSpace, "BatchToSpace", 1); ngraph::op::v1::BatchToSpace::BatchToSpace(const ngraph::Output& data, const ngraph::Output& block_shape, diff --git a/ngraph/core/src/op/binary_convolution.cpp b/ngraph/core/src/op/binary_convolution.cpp index f6439d3bd1a..a3e420a6454 100644 --- a/ngraph/core/src/op/binary_convolution.cpp +++ b/ngraph/core/src/op/binary_convolution.cpp @@ -13,19 +13,18 @@ 
#include "ngraph/validation_util.hpp" using namespace std; -using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::BinaryConvolution, "BinaryConvolution", 1); +OPENVINO_RTTI_DEFINITION(ov::op::v1::BinaryConvolution, "BinaryConvolution", 1); -op::v1::BinaryConvolution::BinaryConvolution(const Output& data, - const Output& kernel, - const Strides& strides, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const Strides& dilations, - BinaryConvolutionMode mode, - float pad_value, - const PadType& auto_pad) +ov::op::v1::BinaryConvolution::BinaryConvolution(const Output& data, + const Output& kernel, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + BinaryConvolutionMode mode, + float pad_value, + const PadType& auto_pad) : Op({data, kernel}), m_strides(strides), m_dilations(dilations), @@ -37,15 +36,15 @@ op::v1::BinaryConvolution::BinaryConvolution(const Output& data, constructor_validate_and_infer_types(); } -op::v1::BinaryConvolution::BinaryConvolution(const Output& data, - const Output& kernel, - const Strides& strides, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const Strides& dilations, - const std::string& mode, - float pad_value, - const PadType& auto_pad) +ov::op::v1::BinaryConvolution::BinaryConvolution(const Output& data, + const Output& kernel, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + const std::string& mode, + float pad_value, + const PadType& auto_pad) : Op({data, kernel}), m_strides(strides), m_dilations(dilations), @@ -57,7 +56,7 @@ op::v1::BinaryConvolution::BinaryConvolution(const Output& data, constructor_validate_and_infer_types(); } -void op::v1::BinaryConvolution::validate_and_infer_types() { +void ov::op::v1::BinaryConvolution::validate_and_infer_types() { NGRAPH_OP_SCOPE(v1_BinaryConvolution_validate_and_infer_types); const PartialShape& 
data_batch_pshape = get_input_partial_shape(0); element::Type data_batch_et = get_input_element_type(0); @@ -79,19 +78,19 @@ void op::v1::BinaryConvolution::validate_and_infer_types() { " and ", filters_pshape); - PartialShape result_shape = validate_and_infer_convolution_forward_output_shape(this, - result_ps_rank, - data_batch_pshape, - filters_pshape, - m_auto_pad, - m_strides, - m_dilations, - m_pads_begin, - m_pads_end); + PartialShape result_shape = ngraph::validate_and_infer_convolution_forward_output_shape(this, + result_ps_rank, + data_batch_pshape, + filters_pshape, + m_auto_pad, + m_strides, + m_dilations, + m_pads_begin, + m_pads_end); set_output_type(0, data_batch_et, result_shape); } -shared_ptr op::v1::BinaryConvolution::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr ov::op::v1::BinaryConvolution::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v1_BinaryConvolution_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), @@ -105,7 +104,7 @@ shared_ptr op::v1::BinaryConvolution::clone_with_new_inputs(const OutputVe m_auto_pad); } -bool op::v1::BinaryConvolution::visit_attributes(AttributeVisitor& visitor) { +bool ov::op::v1::BinaryConvolution::visit_attributes(AttributeVisitor& visitor) { NGRAPH_OP_SCOPE(v1_BinaryConvolution_visit_attributes); visitor.on_attribute("strides", m_strides); visitor.on_attribute("pads_begin", m_pads_begin); @@ -130,11 +129,11 @@ EnumNames::get() { constexpr DiscreteTypeInfo AttributeAdapter::type_info; } // namespace ov -std::ostream& operator<<(std::ostream& s, const op::v1::BinaryConvolution::BinaryConvolutionMode& type) { +std::ostream& ov::operator<<(std::ostream& s, const ov::op::v1::BinaryConvolution::BinaryConvolutionMode& type) { return s << as_string(type); } -op::v1::BinaryConvolution::BinaryConvolutionMode op::v1::BinaryConvolution::mode_from_string( +ov::op::v1::BinaryConvolution::BinaryConvolutionMode 
ov::op::v1::BinaryConvolution::mode_from_string( const std::string& mode) const { return as_enum(mode); } diff --git a/ngraph/core/src/op/broadcast.cpp b/ngraph/core/src/op/broadcast.cpp index 38454b35324..a7ee65bc51d 100644 --- a/ngraph/core/src/op/broadcast.cpp +++ b/ngraph/core/src/op/broadcast.cpp @@ -17,7 +17,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v3::Broadcast, "Broadcast", 3, op::util::BroadcastBase); +OPENVINO_RTTI_DEFINITION(op::v3::Broadcast, "Broadcast", 3, op::util::BroadcastBase); op::v3::Broadcast::Broadcast(const Output& arg, const Output& target_shape, @@ -203,7 +203,7 @@ BroadcastModeSpec to_broadcast_mode(const AutoBroadcastSpec& bs) { } } // namespace -NGRAPH_RTTI_DEFINITION(op::v1::Broadcast, "Broadcast", 1, op::util::BroadcastBase); +OPENVINO_RTTI_DEFINITION(op::v1::Broadcast, "Broadcast", 1, op::util::BroadcastBase); op::v1::Broadcast::Broadcast(const Output& arg, const Output& target_shape, diff --git a/ngraph/core/src/op/bucketize.cpp b/ngraph/core/src/op/bucketize.cpp index 5966ce549ef..0ac5682041c 100644 --- a/ngraph/core/src/op/bucketize.cpp +++ b/ngraph/core/src/op/bucketize.cpp @@ -9,7 +9,7 @@ using namespace ngraph; using namespace std; -NGRAPH_RTTI_DEFINITION(op::v3::Bucketize, "Bucketize", 3); +OPENVINO_RTTI_DEFINITION(op::v3::Bucketize, "Bucketize", 3); op::v3::Bucketize::Bucketize(const Output& data, const Output& buckets, diff --git a/ngraph/core/src/op/ceiling.cpp b/ngraph/core/src/op/ceiling.cpp index 75c43e8629a..7c13facafbe 100644 --- a/ngraph/core/src/op/ceiling.cpp +++ b/ngraph/core/src/op/ceiling.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Ceiling, "Ceiling", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Ceiling, "Ceiling", 0, util::UnaryElementwiseArithmetic); op::Ceiling::Ceiling(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git 
a/ngraph/core/src/op/clamp.cpp b/ngraph/core/src/op/clamp.cpp index ed500f0a884..2a591d1d2c2 100644 --- a/ngraph/core/src/op/clamp.cpp +++ b/ngraph/core/src/op/clamp.cpp @@ -97,7 +97,7 @@ bool op::v0::Clamp::has_evaluate() const { return false; } -NGRAPH_RTTI_DEFINITION(op::v0::Clamp, "Clamp", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Clamp, "Clamp", 0); op::Clamp::Clamp() : Op(), m_min(), m_max() {} diff --git a/ngraph/core/src/op/concat.cpp b/ngraph/core/src/op/concat.cpp index 0595083adb1..0e4ba6f6126 100644 --- a/ngraph/core/src/op/concat.cpp +++ b/ngraph/core/src/op/concat.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::Concat, "Concat", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::Concat, "Concat", 0); op::Concat::Concat(const OutputVector& args, int64_t axis) : Op(args), m_axis(axis) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/convert.cpp b/ngraph/core/src/op/convert.cpp index 82bd6722680..1bd2a639d43 100644 --- a/ngraph/core/src/op/convert.cpp +++ b/ngraph/core/src/op/convert.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Convert, "Convert", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Convert, "Convert", 0); op::Convert::Convert(const Output& arg, const element::Type& destination_type) : Op({arg}), diff --git a/ngraph/core/src/op/convert_like.cpp b/ngraph/core/src/op/convert_like.cpp index b036e5e194a..e839d3b53e1 100644 --- a/ngraph/core/src/op/convert_like.cpp +++ b/ngraph/core/src/op/convert_like.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::ConvertLike, "ConvertLike", 1); +OPENVINO_RTTI_DEFINITION(op::v1::ConvertLike, "ConvertLike", 1); op::v1::ConvertLike::ConvertLike(const Output& data, const Output& like) : Op({data, like}) { constructor_validate_and_infer_types(); @@ -37,7 +37,7 @@ shared_ptr op::v1::ConvertLike::clone_with_new_inputs(const OutputVector& bool 
op::v1::ConvertLike::constant_fold(OutputVector& output_values, const OutputVector& input_values) { OV_ITT_SCOPED_TASK(ov::itt::domains::nGraph, "op::v1::ConvertLike::constant_fold"); - if (auto data_const = std::dynamic_pointer_cast(input_values[0].get_node_shared_ptr())) { + if (auto data_const = std::dynamic_pointer_cast(input_values[0].get_node_shared_ptr())) { auto convert = make_shared(input_values[0], input_values[1].get_element_type()); convert->constant_fold(output_values, OutputVector{data_const}); return true; diff --git a/ngraph/core/src/op/convolution.cpp b/ngraph/core/src/op/convolution.cpp index c521a29bb71..2c8dbb8c4db 100644 --- a/ngraph/core/src/op/convolution.cpp +++ b/ngraph/core/src/op/convolution.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; // *** Convolution OP SET 1 *** -NGRAPH_RTTI_DEFINITION(op::v1::Convolution, "Convolution", 1); +OPENVINO_RTTI_DEFINITION(op::v1::Convolution, "Convolution", 1); op::v1::Convolution::Convolution(const Output& data_batch, const Output& filters, @@ -185,7 +185,7 @@ const PartialShape op::v1::ConvolutionBackpropData::get_output_shape() const { void op::v1::ConvolutionBackpropData::set_output_shape(const Shape& shape) { this->input(2).replace_source_output( - op::Constant::create(this->get_input_element_type(2), Shape{shape.size()}, shape)->output(0)); + op::v0::Constant::create(this->get_input_element_type(2), Shape{shape.size()}, shape)->output(0)); } void op::v1::ConvolutionBackpropData::infer_conv_backprop_output_spatial_shape( @@ -208,7 +208,7 @@ void op::v1::ConvolutionBackpropData::infer_conv_backprop_output_spatial_shape( int64_t val = strides[i] * (input_data_shape[i].get_length() - 1) + dilations[i] * (filters_shape[i].get_length() - 1) + 1 - pads_begin[i] - pads_end[i] + output_padding[i]; - output_spatial_shape.push_back(val); + output_spatial_shape.emplace_back(val); } else { output_spatial_shape.push_back(Dimension::dynamic()); } diff --git a/ngraph/core/src/op/cos.cpp 
b/ngraph/core/src/op/cos.cpp index 82dc09d4aed..47a45bc5fbb 100644 --- a/ngraph/core/src/op/cos.cpp +++ b/ngraph/core/src/op/cos.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Cos, "Cos", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Cos, "Cos", 0, util::UnaryElementwiseArithmetic); op::Cos::Cos(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/cosh.cpp b/ngraph/core/src/op/cosh.cpp index e53e4c073d8..997ea2a0647 100644 --- a/ngraph/core/src/op/cosh.cpp +++ b/ngraph/core/src/op/cosh.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Cosh, "Cosh", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Cosh, "Cosh", 0, util::UnaryElementwiseArithmetic); op::Cosh::Cosh(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/ctc_greedy_decoder.cpp b/ngraph/core/src/op/ctc_greedy_decoder.cpp index a320dc9d92b..63fd3cf8810 100644 --- a/ngraph/core/src/op/ctc_greedy_decoder.cpp +++ b/ngraph/core/src/op/ctc_greedy_decoder.cpp @@ -9,7 +9,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::CTCGreedyDecoder, "CTCGreedyDecoder", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::CTCGreedyDecoder, "CTCGreedyDecoder", 0); op::CTCGreedyDecoder::CTCGreedyDecoder(const Output& input, const Output& seq_len, diff --git a/ngraph/core/src/op/ctc_greedy_decoder_seq_len.cpp b/ngraph/core/src/op/ctc_greedy_decoder_seq_len.cpp index e08cc5e7d34..5d66411b636 100644 --- a/ngraph/core/src/op/ctc_greedy_decoder_seq_len.cpp +++ b/ngraph/core/src/op/ctc_greedy_decoder_seq_len.cpp @@ -9,7 +9,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v6::CTCGreedyDecoderSeqLen, "CTCGreedyDecoderSeqLen", 6); +OPENVINO_RTTI_DEFINITION(op::v6::CTCGreedyDecoderSeqLen, 
"CTCGreedyDecoderSeqLen", 6); op::v6::CTCGreedyDecoderSeqLen::CTCGreedyDecoderSeqLen(const Output& input, const Output& seq_len, diff --git a/ngraph/core/src/op/ctc_loss.cpp b/ngraph/core/src/op/ctc_loss.cpp index 6f0da3bfc08..a1ba244fe16 100644 --- a/ngraph/core/src/op/ctc_loss.cpp +++ b/ngraph/core/src/op/ctc_loss.cpp @@ -9,7 +9,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v4::CTCLoss, "CTCLoss", 4); +OPENVINO_RTTI_DEFINITION(op::v4::CTCLoss, "CTCLoss", 4); op::v4::CTCLoss::CTCLoss(const Output& logits, const Output& logit_length, diff --git a/ngraph/core/src/op/cum_sum.cpp b/ngraph/core/src/op/cum_sum.cpp index 26c7177920a..d7995e55dce 100644 --- a/ngraph/core/src/op/cum_sum.cpp +++ b/ngraph/core/src/op/cum_sum.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::CumSum, "CumSum", 0); +OPENVINO_RTTI_DEFINITION(op::v0::CumSum, "CumSum", 0); op::v0::CumSum::CumSum(const Output& arg, const Output& axis, const bool exclusive, const bool reverse) : Op({arg, axis}), @@ -23,7 +23,7 @@ op::v0::CumSum::CumSum(const Output& arg, const Output& axis, const } op::v0::CumSum::CumSum(const Output& arg, const bool exclusive, const bool reverse) - : Op({arg, op::Constant::create(element::i32, Shape{}, {0})}), + : Op({arg, op::v0::Constant::create(element::i32, Shape{}, {0})}), m_exclusive(exclusive), m_reverse(reverse) { constructor_validate_and_infer_types(); From 6fa6e488930f85663109cf3468ce1e27b59dbc02 Mon Sep 17 00:00:00 2001 From: Dmitry Pigasin Date: Fri, 3 Sep 2021 18:50:13 +0300 Subject: [PATCH 10/52] [IE Python Speech Sample] Enable `-oname` for a imported model (`-rg` option) (#7254) * Enable `-oname` for a imported model * Refactor `get_output_layer_list` func * refactor: Update inference-engine/ie_bridges/python/sample/speech_sample/speech_sample.py Co-authored-by: Kate Generalova --- .../sample/speech_sample/speech_sample.py | 52 ++++++++++++------- 1 file changed, 32 insertions(+), 20 
deletions(-) diff --git a/inference-engine/ie_bridges/python/sample/speech_sample/speech_sample.py b/inference-engine/ie_bridges/python/sample/speech_sample/speech_sample.py index 8019746d177..c1edad6e9ea 100755 --- a/inference-engine/ie_bridges/python/sample/speech_sample/speech_sample.py +++ b/inference-engine/ie_bridges/python/sample/speech_sample/speech_sample.py @@ -2,15 +2,17 @@ # -*- coding: utf-8 -*- # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import argparse import logging as log import re import sys from timeit import default_timer +from typing import Union import numpy as np from arg_parser import parse_args from file_options import read_utterance_file, write_utterance_file -from openvino.inference_engine import ExecutableNetwork, IECore +from openvino.inference_engine import ExecutableNetwork, IECore, IENetwork def get_scale_factor(matrix: np.ndarray) -> float: @@ -75,6 +77,28 @@ def compare_with_reference(result: np.ndarray, reference: np.ndarray): log.info(f'stdev error: {stdev_error:.7f}') +def get_input_layer_list(net: Union[IENetwork, ExecutableNetwork], args: argparse.Namespace) -> list: + """Get a list of input layer names""" + return re.split(', |,', args.input_layers) if args.input_layers else [next(iter(net.input_info))] + + +def get_output_layer_list(net: Union[IENetwork, ExecutableNetwork], + args: argparse.Namespace, with_ports: bool) -> list: + """Get a list of output layer names""" + if args.output_layers: + output_name_port = [output.split(':') for output in re.split(', |,', args.output_layers)] + if with_ports: + try: + return [(blob_name, int(port)) for blob_name, port in output_name_port] + except ValueError: + log.error('Incorrect value for -oname/--output_layers option, please specify a port for output layer.') + sys.exit(-4) + else: + return [blob_name for blob_name, _ in output_name_port] + else: + return [list(net.outputs.keys())[-1]] + + def main(): log.basicConfig(format='[ %(levelname)s ] 
%(message)s', level=log.INFO, stream=sys.stdout) args = parse_args() @@ -91,25 +115,13 @@ def main(): # ---------------------------Step 3. Configure input & output---------------------------------------------------------- log.info('Configuring input and output blobs') - # Get names of input and output blobs - if args.input_layers: - input_blobs = re.split(', |,', args.input_layers) - else: - input_blobs = [next(iter(net.input_info))] - + # Mark layers from args.output_layers as outputs if args.output_layers: - output_name_port = [output.split(':') for output in re.split(', |,', args.output_layers)] - try: - output_name_port = [(blob_name, int(port)) for blob_name, port in output_name_port] - except ValueError: - log.error('Output Parameter does not have a port.') - sys.exit(-4) + net.add_outputs(get_output_layer_list(net, args, with_ports=True)) - net.add_outputs(output_name_port) - - output_blobs = [blob_name for blob_name, port in output_name_port] - else: - output_blobs = [list(net.outputs.keys())[-1]] + # Get names of input and output blobs + input_blobs = get_input_layer_list(net, args) + output_blobs = get_output_layer_list(net, args, with_ports=False) # Set input and output precision manually for blob_name in input_blobs: @@ -153,8 +165,8 @@ def main(): exec_net = ie.load_network(net, device_str, plugin_config) else: exec_net = ie.import_network(args.import_gna_model, device_str, plugin_config) - input_blobs = [next(iter(exec_net.input_info))] - output_blobs = [list(exec_net.outputs.keys())[-1]] + input_blobs = get_input_layer_list(exec_net, args) + output_blobs = get_output_layer_list(exec_net, args, with_ports=False) if args.input: input_files = re.split(', |,', args.input) From 781dcdf571a882ff11a88b9343328f6158b2e8be Mon Sep 17 00:00:00 2001 From: Alexander Zhogov Date: Fri, 3 Sep 2021 20:28:53 +0300 Subject: [PATCH 11/52] Azure CI: Run tests on Mac from install dir (#7356) * Azure CI: Run tests on Mac from install dir * Fix --- .ci/azure/linux.yml | 13 
++++++------ .ci/azure/mac.yml | 51 ++++++++++++++++++++++++++++++--------------- 2 files changed, 41 insertions(+), 23 deletions(-) diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index 493d492d04a..d949cf656b4 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -103,6 +103,7 @@ jobs: workingDirectory: $(WORK_DIR) displayName: 'Install dependencies' + # Should be after 'Install dependencies' because Git lfs is not installed - checkout: testdata clean: true lfs: true @@ -140,18 +141,18 @@ jobs: - script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P cmake_install.cmake workingDirectory: $(BUILD_DIR) displayName: 'Install' - + - task: CMake@1 inputs: cmakeArgs: > -GNinja $(REPO_DIR)/tests/layer_tests workingDirectory: $(BUILD_LAYER_TESTS_DIR) - + - script: ninja workingDirectory: $(BUILD_LAYER_TESTS_DIR) displayName: 'Build Layer Tests' - + - script: cmake -DCOMPONENT=tests -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P cmake_install.cmake workingDirectory: $(BUILD_LAYER_TESTS_DIR) displayName: 'Install Layer Tests' @@ -166,7 +167,7 @@ jobs: cp -R $(REPO_DIR)/inference-engine/temp/opencv_4.5.2_ubuntu20/opencv/* $(INSTALL_DIR)/opencv/ workingDirectory: $(BUILD_DIR) displayName: 'Install tests' - + - script: ls -alR $(INSTALL_DIR) displayName: 'List install files' @@ -177,7 +178,7 @@ jobs: - script: $(INSTALL_DIR)/deployment_tools/inference_engine/samples/c/build_samples.sh workingDirectory: $(BUILD_SAMPLES_DIR) displayName: 'Build c samples' - + - script: rm -fr $(BUILD_DIR) displayName: 'Clean build dir' continueOnError: false @@ -253,7 +254,7 @@ jobs: . $(SETUPVARS) -pyver 3.8 && python3 -m pytest --junitxml=TEST-PythonAPI.xml displayName: 'Python API Tests' continueOnError: false - + - script: | . 
$(SETUPVARS) python3 -m pip install -r requirements.txt diff --git a/.ci/azure/mac.yml b/.ci/azure/mac.yml index b07ff48f78c..cffca56ccfa 100644 --- a/.ci/azure/mac.yml +++ b/.ci/azure/mac.yml @@ -28,19 +28,19 @@ jobs: MODELS_PATH: $(REPO_DIR)/../testdata WORK_DIR: $(Pipeline.Workspace)/_w BUILD_DIR: $(WORK_DIR)/build - BIN_DIR: $(REPO_DIR)/bin/intel64/$(BUILD_TYPE) INSTALL_DIR: $(WORK_DIR)/install_pkg + INSTALL_TEST_DIR: $(INSTALL_DIR)/tests SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh steps: - script: | whoami uname -a - which python3 - python3 --version - which java - java -version - gcc --version + echo Python3 info ; which python3 ; python3 --version + echo Python info ; which python ; python --version + echo Java info ; which java ; java -version + echo gcc info ; which gcc ; gcc --version + echo cmake info ; which cmake ; cmake --version xcrun --sdk macosx --show-sdk-version env sysctl -a @@ -91,47 +91,64 @@ jobs: workingDirectory: $(BUILD_DIR) displayName: 'CMake' + - script: ls -alR $(REPO_DIR)/inference-engine/temp/ + displayName: 'List temp SDKs' + - script: ninja workingDirectory: $(BUILD_DIR) displayName: 'Build Mac' - script: ls -alR $(REPO_DIR)/bin/ - displayName: 'List files' + displayName: 'List bin files' - script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P cmake_install.cmake workingDirectory: $(BUILD_DIR) displayName: 'Install' - - script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU*:IE_CPU.onnx_model_sigmoid:IE_CPU/GRUSequenceOp.onnx_model_gru* --gtest_output=xml:TEST-NGraphUT.xml - workingDirectory: $(BIN_DIR) + - script: ls -alR $(INSTALL_DIR) + displayName: 'List install files' + + - script: | + set -e + mkdir $(INSTALL_DIR)/opencv/ + cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -DCOMPONENT=tests -P cmake_install.cmake + cp -R $(REPO_DIR)/inference-engine/temp/opencv_4.5.2_osx/opencv/* $(INSTALL_DIR)/opencv/ + workingDirectory: $(BUILD_DIR) + displayName: 'Install tests' + + - script: ls 
-alR $(INSTALL_DIR) + displayName: 'List install files' + + - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU*:IE_CPU.onnx_model_sigmoid:IE_CPU/GRUSequenceOp.onnx_model_gru* --gtest_output=xml:TEST-NGraphUT.xml + workingDirectory: $(INSTALL_TEST_DIR) displayName: 'nGraph UT' continueOnError: false - - script: $(BIN_DIR)/InferenceEngineUnitTests --gtest_print_time=1 --gtest_filter=-MKLDNNGraphStructureTests.TestNoRedundantReordersBeforeDWConvolution:TestConvolution/MKLDNNGraphConvolutionTests.TestsConvolution/0:TestConvolutionDefaultPrimitivesPriority/MKLDNNGraphConvolutionTests.TestsConvolution/0 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml + - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/InferenceEngineUnitTests --gtest_print_time=1 --gtest_filter=-MKLDNNGraphStructureTests.TestNoRedundantReordersBeforeDWConvolution:TestConvolution/MKLDNNGraphConvolutionTests.TestsConvolution/0:TestConvolutionDefaultPrimitivesPriority/MKLDNNGraphConvolutionTests.TestsConvolution/0 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml displayName: 'IE UT old' continueOnError: false - - script: $(BIN_DIR)/ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml + - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml displayName: 'IE UT' continueOnError: false - - script: $(BIN_DIR)/cpuUnitTests --gtest_output=xml:TEST-cpuUnitTests.xml + - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/cpuUnitTests --gtest_output=xml:TEST-cpuUnitTests.xml displayName: 'CPU UT' continueOnError: false - - script: $(BIN_DIR)/vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml + - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml displayName: 'VPU UT' continueOnError: false - - script: $(BIN_DIR)/onnxImporterUnitTests --gtest_output=xml:TEST-onnxImporterUnitTests.xml + - script: . 
$(SETUPVARS) && $(INSTALL_TEST_DIR)/onnxImporterUnitTests --gtest_output=xml:TEST-onnxImporterUnitTests.xml displayName: 'ONNX Importer UT' continueOnError: false - - script: $(BIN_DIR)/ieFuncTests --gtest_output=xml:TEST-ieFuncTests.xml + - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ieFuncTests --gtest_output=xml:TEST-ieFuncTests.xml displayName: 'IE FuncTests' continueOnError: false - - script: $(BIN_DIR)/cpuFuncTests --gtest_filter=*smoke*:-smoke_LPT/ReduceMinTransformation.CompareWithRefImpl/f32_Shape* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml + - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/cpuFuncTests --gtest_filter=*smoke*:-smoke_LPT/ReduceMinTransformation.CompareWithRefImpl/f32_Shape* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml displayName: 'CPU FuncTests' continueOnError: false enabled: false @@ -139,7 +156,7 @@ jobs: - script: | export DATA_PATH=$(MODELS_PATH) export MODELS_PATH=$(MODELS_PATH) - $(BIN_DIR)/InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml + . 
$(SETUPVARS) && $(INSTALL_TEST_DIR)/InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml displayName: 'IE CAPITests' continueOnError: false From b86984fb3029e36850a869642891a2ced3b06641 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Do=C5=82bniak?= Date: Fri, 3 Sep 2021 20:22:07 +0200 Subject: [PATCH 12/52] MaxPool-8 evaluate() (#7363) --- ngraph/core/include/ngraph/op/max_pool.hpp | 3 + ngraph/core/src/itt.hpp | 7 + ngraph/core/src/op/max_pool.cpp | 137 ++++++++++++++++++ .../runtime/interpreter/evaluates_map.cpp | 42 ------ .../runtime/interpreter/opset_int_tbl.hpp | 1 - 5 files changed, 147 insertions(+), 43 deletions(-) diff --git a/ngraph/core/include/ngraph/op/max_pool.hpp b/ngraph/core/include/ngraph/op/max_pool.hpp index 53096055b64..e870a1e0303 100644 --- a/ngraph/core/include/ngraph/op/max_pool.hpp +++ b/ngraph/core/include/ngraph/op/max_pool.hpp @@ -120,6 +120,9 @@ public: m_axis = axis; } + bool has_evaluate() const override; + bool evaluate(const HostTensorVector&, const HostTensorVector&) const override; + private: Strides m_dilations; element::Type m_index_element_type{element::i64}; diff --git a/ngraph/core/src/itt.hpp b/ngraph/core/src/itt.hpp index 114b3e7aa93..0e37005b853 100644 --- a/ngraph/core/src/itt.hpp +++ b/ngraph/core/src/itt.hpp @@ -49,6 +49,13 @@ OV_ITT_DOMAIN(SIMPLE_ngraph_pass); } \ } break +#define NGRAPH_2_TYPES_CASE(region, a, b, ...) \ + case element::Type_t::a: { \ + OV_SCOPE(ngraph_op, OV_PP_CAT4(region, _, a, b)) { \ + rc = evaluate(__VA_ARGS__); \ + } \ + } break + #define NGRAPH_COPY_TENSOR(region, a, ...) 
\ case ov::element::Type_t::a: { \ OV_SCOPE(ngraph_op, OV_PP_CAT3(region, _, a)) { \ diff --git a/ngraph/core/src/op/max_pool.cpp b/ngraph/core/src/op/max_pool.cpp index 5e5755fa485..6398db7fc48 100644 --- a/ngraph/core/src/op/max_pool.cpp +++ b/ngraph/core/src/op/max_pool.cpp @@ -160,6 +160,96 @@ bool op::v1::MaxPool::has_evaluate() const { // ------------------------------ V8 ------------------------------ +namespace maxpool_v8 { +template +inline bool evaluate(const HostTensorPtr& data, + const HostTensorPtr& values, + const HostTensorPtr& indices, + const Shape& out_shape, + const Shape& kernel, + const Strides& strides, + const Strides& dilations, + const Shape& pads_begin, + const Shape& pads_end, + const int64_t axis) { + using Values_t = typename element_type_traits::value_type; + using Indices_t = typename element_type_traits::value_type; + runtime::reference::max_pool(data->get_data_ptr(), + values->get_data_ptr(), + indices->get_data_ptr(), + data->get_shape(), + out_shape, + kernel, + strides, + dilations, + pads_begin, + pads_end, + axis); + return true; +} + +bool evaluate_maxpool(const HostTensorPtr& data, + const HostTensorPtr& values, + const HostTensorPtr& indices, + const Shape& out_shape, + const Shape& kernel, + const Strides& strides, + const Strides& dilations, + const Shape& pads_begin, + const Shape& pads_end, + const int64_t axis) { +#define EVAL_MAX_POOL_8(data_et, index_et) \ + NGRAPH_2_TYPES_CASE(maxpool_v8::evaluate_maxpool, \ + data_et, \ + index_et, \ + data, \ + values, \ + indices, \ + out_shape, \ + kernel, \ + strides, \ + dilations, \ + pads_begin, \ + pads_end, \ + axis) + + bool rc = true; + switch (indices->get_element_type()) { + case element::Type_t::i32: { + switch (data->get_element_type()) { + EVAL_MAX_POOL_8(i32, i32); + EVAL_MAX_POOL_8(i64, i32); + EVAL_MAX_POOL_8(u32, i32); + EVAL_MAX_POOL_8(u64, i32); + EVAL_MAX_POOL_8(f16, i32); + EVAL_MAX_POOL_8(f32, i32); + default: + rc = false; + break; + } + } break; + case 
element::Type_t::i64: { + switch (data->get_element_type()) { + EVAL_MAX_POOL_8(i32, i64); + EVAL_MAX_POOL_8(i64, i64); + EVAL_MAX_POOL_8(u32, i64); + EVAL_MAX_POOL_8(u64, i64); + EVAL_MAX_POOL_8(f16, i64); + EVAL_MAX_POOL_8(f32, i64); + default: + rc = false; + break; + } + } break; + default: + rc = false; + break; + } + + return rc; +} +} // namespace maxpool_v8 + NGRAPH_RTTI_DEFINITION(op::v8::MaxPool, "MaxPool", 8, op::util::MaxPoolBase); op::v8::MaxPool::MaxPool(const Output& arg, @@ -223,3 +313,50 @@ shared_ptr op::v8::MaxPool::clone_with_new_inputs(const OutputVector& new_ m_index_element_type, m_axis); } + +bool op::v8::MaxPool::has_evaluate() const { + NGRAPH_OP_SCOPE(v8_MaxPool_has_evaluate); + switch (get_input_element_type(0)) { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: + return true; + default: + break; + } + return false; +} + +bool op::v8::MaxPool::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { + NGRAPH_OP_SCOPE(v8_MaxPool_evaluate); + + const auto arg_shape = inputs[0]->get_partial_shape(); + auto pads_begin_s = get_pads_begin(); + auto pads_end_s = get_pads_end(); + update_auto_padding(arg_shape, get_dilations(), pads_begin_s, pads_end_s); + CoordinateDiff pads_begin(pads_begin_s.begin(), pads_begin_s.end()); + CoordinateDiff pads_end(pads_end_s.begin(), pads_end_s.end()); + auto out_shape = infer_batched_pooling_forward(this, + arg_shape, + pads_begin, + pads_end, + get_kernel(), + get_strides(), + true, + get_rounding_type() == op::RoundingType::CEIL, + get_dilations()); + + return maxpool_v8::evaluate_maxpool(inputs[0], + outputs[0], + outputs[1], + out_shape.get_shape(), + get_kernel(), + get_strides(), + get_dilations(), + get_pads_begin(), + get_pads_end(), + get_axis()); +} diff --git a/ngraph/test/runtime/interpreter/evaluates_map.cpp 
b/ngraph/test/runtime/interpreter/evaluates_map.cpp index 781c252fa2a..08d5c3171d0 100644 --- a/ngraph/test/runtime/interpreter/evaluates_map.cpp +++ b/ngraph/test/runtime/interpreter/evaluates_map.cpp @@ -50,7 +50,6 @@ #include #include #include -#include #include #include #include @@ -2923,47 +2922,6 @@ namespace return true; } - template - bool evaluate(const shared_ptr& op, - const HostTensorVector& outputs, - const HostTensorVector& inputs) - { - using T = typename element_type_traits::value_type; - if (op->get_index_element_type() == element::i32) - { - runtime::reference::max_pool(inputs[0]->get_data_ptr(), - outputs[0]->get_data_ptr(), - outputs[1]->get_data_ptr(), - inputs[0]->get_shape(), - outputs[0]->get_shape(), - op->get_kernel(), - op->get_strides(), - op->get_dilations(), - op->get_pads_begin(), - op->get_pads_end(), - op->get_axis()); - } - else if (op->get_index_element_type() == element::i64) - { - runtime::reference::max_pool(inputs[0]->get_data_ptr(), - outputs[0]->get_data_ptr(), - outputs[1]->get_data_ptr(), - inputs[0]->get_shape(), - outputs[0]->get_shape(), - op->get_kernel(), - op->get_strides(), - op->get_dilations(), - op->get_pads_begin(), - op->get_pads_end(), - op->get_axis()); - } - else - { - return false; - } - return true; - } - template bool evaluate_node(std::shared_ptr node, const HostTensorVector& outputs, diff --git a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp index 7dea5e56ffc..b24b8676961 100644 --- a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp +++ b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp @@ -101,5 +101,4 @@ NGRAPH_OP(AdaptiveAvgPool, ngraph::op::v8) NGRAPH_OP(AdaptiveMaxPool, ngraph::op::v8) NGRAPH_OP(Gather, op::v8) NGRAPH_OP(MatrixNms, op::v8) -NGRAPH_OP(MaxPool, op::v8) NGRAPH_OP(MulticlassNms, op::v8) From 005e7da325adad81e7cbcdc82bc0b2118cd10252 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 3 Sep 2021 22:01:18 +0300 Subject: [PATCH 
13/52] Removed auto plugin (#7310) * Try to remove auto plugin * Auto is registered as MULTI * Register MultiDevicePlugin as AUTO * More explicit config --- .../plugins/create_plugin_file.cmake | 5 +- cmake/developer_package/plugins/plugins.cmake | 138 +++++++++--------- .../ie_bridges/python/wheel/setup.py | 6 - inference-engine/src/CMakeLists.txt | 2 - .../src/auto_plugin/CMakeLists.txt | 19 --- .../src/auto_plugin/auto_plugin.cpp | 11 -- .../src/auto_plugin/auto_plugin.hpp | 17 --- .../src/inference_engine/src/ie_core.cpp | 22 +-- .../src/multi_device/CMakeLists.txt | 5 + .../multi_device_exec_network.cpp | 5 +- .../src/multi_device/multi_device_plugin.cpp | 32 ++-- .../interface/ie_internal_plugin_config.hpp | 13 +- .../inference_engine/CMakeLists.txt | 1 - .../functional/plugin/cpu/CMakeLists.txt | 2 +- .../functional/plugin/gpu/CMakeLists.txt | 1 - .../deployment_manager/configs/darwin.json | 1 - scripts/deployment_manager/configs/linux.json | 1 - .../deployment_manager/configs/windows.json | 1 - 18 files changed, 114 insertions(+), 168 deletions(-) delete mode 100644 inference-engine/src/auto_plugin/CMakeLists.txt delete mode 100644 inference-engine/src/auto_plugin/auto_plugin.cpp delete mode 100644 inference-engine/src/auto_plugin/auto_plugin.hpp diff --git a/cmake/developer_package/plugins/create_plugin_file.cmake b/cmake/developer_package/plugins/create_plugin_file.cmake index 88f33904bee..cb28967d796 100644 --- a/cmake/developer_package/plugins/create_plugin_file.cmake +++ b/cmake/developer_package/plugins/create_plugin_file.cmake @@ -2,6 +2,8 @@ # SPDX-License-Identifier: Apache-2.0 # +cmake_policy(SET CMP0007 NEW) + set(newContent " ") if(IE_PLUGIN_PROPERTIES) @@ -9,10 +11,11 @@ if(IE_PLUGIN_PROPERTIES) ") foreach(props IN LISTS IE_PLUGIN_PROPERTIES) - string(REPLACE "," ";" props "${props}") + string(REPLACE ":" ";" props "${props}") list(GET props 0 key) list(GET props 1 value) + set(newContent "${newContent} ") endforeach() diff --git 
a/cmake/developer_package/plugins/plugins.cmake b/cmake/developer_package/plugins/plugins.cmake index cec023f3062..3f83954cfa7 100644 --- a/cmake/developer_package/plugins/plugins.cmake +++ b/cmake/developer_package/plugins/plugins.cmake @@ -20,19 +20,18 @@ endif() # # ie_add_plugin(NAME # DEVICE_NAME -# SOURCES -# OBJECT_LIBRARIES -# VERSION_DEFINES_FOR -# SKIP_INSTALL +# [PSEUDO] +# [DEFAULT_CONFIG ] +# [SOURCES ] +# [OBJECT_LIBRARIES ] +# [VERSION_DEFINES_FOR ] +# [SKIP_INSTALL] # ) # function(ie_add_plugin) - set(options - SKIP_INSTALL - ADD_CLANG_FORMAT - ) + set(options SKIP_INSTALL ADD_CLANG_FORMAT PSEUDO_PLUGIN) set(oneValueArgs NAME DEVICE_NAME VERSION_DEFINES_FOR) - set(multiValueArgs SOURCES OBJECT_LIBRARIES CPPLINT_FILTERS) + set(multiValueArgs DEFAULT_CONFIG SOURCES OBJECT_LIBRARIES CPPLINT_FILTERS) cmake_parse_arguments(IE_PLUGIN "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) if(NOT IE_PLUGIN_NAME) @@ -45,41 +44,73 @@ function(ie_add_plugin) # create and configure target - if(IE_PLUGIN_VERSION_DEFINES_FOR) - addVersionDefines(${IE_PLUGIN_VERSION_DEFINES_FOR} CI_BUILD_NUMBER) - endif() + if(NOT IE_PLUGIN_PSEUDO_PLUGIN) + if(IE_PLUGIN_VERSION_DEFINES_FOR) + addVersionDefines(${IE_PLUGIN_VERSION_DEFINES_FOR} CI_BUILD_NUMBER) + endif() - set(input_files ${IE_PLUGIN_SOURCES}) - foreach(obj_lib IN LISTS IE_PLUGIN_OBJECT_LIBRARIES) - list(APPEND input_files $) - add_cpplint_target(${obj_lib}_cpplint FOR_TARGETS ${obj_lib}) - endforeach() + set(input_files ${IE_PLUGIN_SOURCES}) + foreach(obj_lib IN LISTS IE_PLUGIN_OBJECT_LIBRARIES) + list(APPEND input_files $) + add_cpplint_target(${obj_lib}_cpplint FOR_TARGETS ${obj_lib}) + endforeach() - add_library(${IE_PLUGIN_NAME} MODULE ${input_files}) - target_compile_definitions(${IE_PLUGIN_NAME} PRIVATE IMPLEMENT_INFERENCE_ENGINE_PLUGIN) + add_library(${IE_PLUGIN_NAME} MODULE ${input_files}) + target_compile_definitions(${IE_PLUGIN_NAME} PRIVATE IMPLEMENT_INFERENCE_ENGINE_PLUGIN) - 
ie_add_vs_version_file(NAME ${IE_PLUGIN_NAME} - FILEDESCRIPTION "Inference Engine ${IE_PLUGIN_DEVICE_NAME} device plugin library") + ie_add_vs_version_file(NAME ${IE_PLUGIN_NAME} + FILEDESCRIPTION "Inference Engine ${IE_PLUGIN_DEVICE_NAME} device plugin library") - if(TARGET IE::inference_engine_plugin_api) - target_link_libraries(${IE_PLUGIN_NAME} PRIVATE IE::inference_engine_plugin_api) - else() - target_link_libraries(${IE_PLUGIN_NAME} PRIVATE inference_engine_plugin_api) - endif() + if(TARGET IE::inference_engine_plugin_api) + target_link_libraries(${IE_PLUGIN_NAME} PRIVATE IE::inference_engine_plugin_api) + else() + target_link_libraries(${IE_PLUGIN_NAME} PRIVATE inference_engine_plugin_api) + endif() - if(WIN32) - set_target_properties(${IE_PLUGIN_NAME} PROPERTIES COMPILE_PDB_NAME ${IE_PLUGIN_NAME}) - endif() + if(WIN32) + set_target_properties(${IE_PLUGIN_NAME} PROPERTIES COMPILE_PDB_NAME ${IE_PLUGIN_NAME}) + endif() - set(custom_filter "") - foreach(filter IN LISTS IE_PLUGIN_CPPLINT_FILTERS) - string(CONCAT custom_filter "${custom_filter}" "," "${filter}") - endforeach() + set(custom_filter "") + foreach(filter IN LISTS IE_PLUGIN_CPPLINT_FILTERS) + string(CONCAT custom_filter "${custom_filter}" "," "${filter}") + endforeach() - if (IE_PLUGIN_ADD_CLANG_FORMAT) - add_clang_format_target(${IE_PLUGIN_NAME}_clang FOR_TARGETS ${IE_PLUGIN_NAME}) - else() - add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter}) + if (IE_PLUGIN_ADD_CLANG_FORMAT) + add_clang_format_target(${IE_PLUGIN_NAME}_clang FOR_TARGETS ${IE_PLUGIN_NAME}) + else() + add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter}) + endif() + + add_dependencies(ie_plugins ${IE_PLUGIN_NAME}) + if(TARGET inference_engine_preproc) + add_dependencies(${IE_PLUGIN_NAME} inference_engine_preproc) + endif() + + # fake dependencies to build in the following order: + # IE -> IE readers -> IE inference plugins -> 
IE-based apps + if(TARGET inference_engine_ir_reader) + add_dependencies(${IE_PLUGIN_NAME} inference_engine_ir_reader) + endif() + if(TARGET inference_engine_ir_v7_reader) + add_dependencies(${IE_PLUGIN_NAME} inference_engine_ir_v7_reader) + endif() + if(TARGET onnx_ngraph_frontend) + add_dependencies(${IE_PLUGIN_NAME} onnx_ngraph_frontend) + endif() + if(TARGET paddlepaddle_ngraph_frontend) + add_dependencies(${IE_PLUGIN_NAME} paddlepaddle_ngraph_frontend) + endif() + + # install rules + if(NOT IE_PLUGIN_SKIP_INSTALL) + string(TOLOWER "${IE_PLUGIN_DEVICE_NAME}" install_component) + ie_cpack_add_component(${install_component} REQUIRED DEPENDS core) + + install(TARGETS ${IE_PLUGIN_NAME} + LIBRARY DESTINATION ${IE_CPACK_RUNTIME_PATH} + COMPONENT ${install_component}) + endif() endif() # check that plugin with such name is not registered @@ -98,33 +129,7 @@ function(ie_add_plugin) list(APPEND PLUGIN_FILES "${IE_PLUGIN_DEVICE_NAME}:${IE_PLUGIN_NAME}") set(PLUGIN_FILES "${PLUGIN_FILES}" CACHE INTERNAL "" FORCE) - - add_dependencies(ie_plugins ${IE_PLUGIN_NAME}) - if(TARGET inference_engine_preproc) - add_dependencies(${IE_PLUGIN_NAME} inference_engine_preproc) - endif() - - # fake dependencies to build in the following order: - # IE -> IE readers -> IE inference plugins -> IE-based apps - if(TARGET inference_engine_ir_reader) - add_dependencies(${IE_PLUGIN_NAME} inference_engine_ir_reader) - endif() - if(TARGET inference_engine_ir_v7_reader) - add_dependencies(${IE_PLUGIN_NAME} inference_engine_ir_v7_reader) - endif() - if(TARGET onnx_ngraph_frontend) - add_dependencies(${IE_PLUGIN_NAME} onnx_ngraph_frontend) - endif() - - # install rules - - if(NOT IE_PLUGIN_SKIP_INSTALL) - string(TOLOWER "${IE_PLUGIN_DEVICE_NAME}" install_component) - ie_cpack_add_component(${install_component} REQUIRED DEPENDS core) - - install(TARGETS ${IE_PLUGIN_NAME} - LIBRARY DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ${install_component}) - endif() + set(${IE_PLUGIN_DEVICE_NAME}_CONFIG 
"${IE_PLUGIN_DEFAULT_CONFIG}" CACHE INTERNAL "" FORCE) endfunction() # @@ -168,7 +173,7 @@ macro(ie_register_plugins) list(GET name 1 name) # create plugin file - set(config_file_name "${CMAKE_BINARY_DIR}/plugins/${name}.xml") + set(config_file_name "${CMAKE_BINARY_DIR}/plugins/${device_name}.xml") ie_plugin_get_file_name(${name} library_name) add_custom_command(TARGET ${IE_REGISTER_MAIN_TARGET} POST_BUILD @@ -176,9 +181,10 @@ macro(ie_register_plugins) "${CMAKE_COMMAND}" -D "IE_CONFIG_OUTPUT_FILE=${config_file_name}" -D "IE_DEVICE_NAME=${device_name}" + -D "IE_PLUGIN_PROPERTIES=${${device_name}_CONFIG}" -D "IE_PLUGIN_LIBRARY_NAME=${library_name}" -P "${IEDevScripts_DIR}/plugins/create_plugin_file.cmake" - COMMENT "Register ${name} plugin" + COMMENT "Register ${device_name} device as ${library_name}" VERBATIM) list(APPEND plugin_files_local "${config_file_name}") diff --git a/inference-engine/ie_bridges/python/wheel/setup.py b/inference-engine/ie_bridges/python/wheel/setup.py index 6b233ff4284..a84280a8671 100644 --- a/inference-engine/ie_bridges/python/wheel/setup.py +++ b/inference-engine/ie_bridges/python/wheel/setup.py @@ -65,12 +65,6 @@ LIB_INSTALL_CFG = { 'install_dir': PLUGINS_LIBS_DIR, 'rpath': LIBS_RPATH, }, - 'auto_plugin': { - 'name': 'auto', - 'prefix': 'libs.plugins', - 'install_dir': PLUGINS_LIBS_DIR, - 'rpath': LIBS_RPATH, - }, 'myriad_plugin': { 'name': 'myriad', 'prefix': 'libs.plugins', diff --git a/inference-engine/src/CMakeLists.txt b/inference-engine/src/CMakeLists.txt index 4ac1a0da14b..ceb077de7a7 100644 --- a/inference-engine/src/CMakeLists.txt +++ b/inference-engine/src/CMakeLists.txt @@ -30,8 +30,6 @@ endif() add_subdirectory(hetero_plugin) -add_subdirectory(auto_plugin) - add_subdirectory(multi_device) add_subdirectory(transformations) diff --git a/inference-engine/src/auto_plugin/CMakeLists.txt b/inference-engine/src/auto_plugin/CMakeLists.txt deleted file mode 100644 index 7d430d64961..00000000000 --- 
a/inference-engine/src/auto_plugin/CMakeLists.txt +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -set (TARGET_NAME "AutoPlugin") - -file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) -file(GLOB HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp) - -ie_add_plugin(NAME ${TARGET_NAME} - DEVICE_NAME "AUTO" - SOURCES ${SOURCES} ${HEADERS} - VERSION_DEFINES_FOR auto_plugin.cpp) - -target_link_libraries(${TARGET_NAME} PRIVATE ngraph inference_engine inference_engine_transformations) - -ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) - -set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) diff --git a/inference-engine/src/auto_plugin/auto_plugin.cpp b/inference-engine/src/auto_plugin/auto_plugin.cpp deleted file mode 100644 index 5ad9d46984a..00000000000 --- a/inference-engine/src/auto_plugin/auto_plugin.cpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "auto_plugin.hpp" - -namespace AutoPlugin { -// define CreatePluginEngine to create plugin instance -static const InferenceEngine::Version version = {{2, 1}, CI_BUILD_NUMBER, "AutoPlugin"}; -IE_DEFINE_PLUGIN_CREATE_FUNCTION(AutoInferencePlugin, version) -} // namespace AutoPlugin diff --git a/inference-engine/src/auto_plugin/auto_plugin.hpp b/inference-engine/src/auto_plugin/auto_plugin.hpp deleted file mode 100644 index 77e22d1ab03..00000000000 --- a/inference-engine/src/auto_plugin/auto_plugin.hpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -namespace AutoPlugin { -class AutoInferencePlugin : public InferenceEngine::IInferencePlugin { -public: - AutoInferencePlugin() = default; - ~AutoInferencePlugin() = default; -}; - -} // namespace AutoPlugin diff --git 
a/inference-engine/src/inference_engine/src/ie_core.cpp b/inference-engine/src/inference_engine/src/ie_core.cpp index 9ede317d1cd..a568d2f6217 100644 --- a/inference-engine/src/inference_engine/src/ie_core.cpp +++ b/inference-engine/src/inference_engine/src/ie_core.cpp @@ -64,16 +64,14 @@ Parsed parseDeviceNameIntoConfig(const std::string& deviceName, const std::ma deviceName_ = "MULTI"; config_[InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES] = deviceName.substr(6); } else if (deviceName.find("AUTO") == 0) { - deviceName_ = "MULTI"; + deviceName_ = "AUTO"; if (deviceName.find("AUTO:") == 0) { config_[InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES] = deviceName.substr(std::string("AUTO:").size()); } - config_.insert({CONFIG_KEY_INTERNAL(WORK_MODE), ""}); } else { - if (deviceName_ == "AUTO") { - deviceName_ = "MULTI"; - config_.insert({CONFIG_KEY_INTERNAL(WORK_MODE), ""}); + if (deviceName_.empty()) { + deviceName_ = "AUTO"; } InferenceEngine::DeviceIDParser parser(deviceName_); deviceName_ = parser.getDeviceName(); @@ -584,17 +582,12 @@ public: { if (deviceName.find("AUTO:") == 0) { IE_THROW() - << "You can get specific metrics with the GetMetric only for the MULTI itself (without devices). " + << "You can get specific metrics with the GetMetric only for the AUTO itself (without devices). 
" "To get individual devices's metrics call GetMetric for each device separately"; } } - std::string pluginName = deviceName; - if (pluginName == "AUTO") { - pluginName = "MULTI"; - } - - auto parsed = parseDeviceNameIntoConfig(pluginName); + auto parsed = parseDeviceNameIntoConfig(deviceName); // we need to return a copy of Parameter object which is created on Core side, // not in InferenceEngine plugin side, which can be unloaded from Core in a parallel thread @@ -649,9 +642,6 @@ public: std::lock_guard lock(pluginsMutex); auto deviceName = pluginName; - if (deviceName == "AUTO") { - deviceName = "MULTI"; - } auto it = pluginRegistry.find(deviceName); if (it == pluginRegistry.end()) { IE_THROW() << "Device with \"" << deviceName << "\" name is not registered in the InferenceEngine"; @@ -876,7 +866,7 @@ public: if (pos != std::string::npos) { deviceNames = InferenceEngine::DeviceIDParser::getMultiDevices(deviceName.substr(pos + 1)); } - deviceNames.emplace_back("MULTI"); + deviceNames.emplace_back("AUTO"); } else { deviceNames.push_back(deviceName); } diff --git a/inference-engine/src/multi_device/CMakeLists.txt b/inference-engine/src/multi_device/CMakeLists.txt index 75c6d43b615..3c6ffbf72e1 100644 --- a/inference-engine/src/multi_device/CMakeLists.txt +++ b/inference-engine/src/multi_device/CMakeLists.txt @@ -12,6 +12,11 @@ ie_add_plugin(NAME ${TARGET_NAME} SOURCES ${SOURCES} ${HEADERS} VERSION_DEFINES_FOR multi_device_plugin.cpp) +ie_add_plugin(NAME ${TARGET_NAME} + DEVICE_NAME "AUTO" + PSEUDO_PLUGIN + DEFAULT_CONFIG "MULTI_WORK_MODE_AS_AUTO:YES") + target_link_libraries(${TARGET_NAME} PRIVATE inference_engine ngraph inference_engine_transformations) set_ie_threading_interface_for(${TARGET_NAME}) diff --git a/inference-engine/src/multi_device/multi_device_exec_network.cpp b/inference-engine/src/multi_device/multi_device_exec_network.cpp index 08ff59237e1..63d88bde967 100644 --- a/inference-engine/src/multi_device/multi_device_exec_network.cpp +++ 
b/inference-engine/src/multi_device/multi_device_exec_network.cpp @@ -169,7 +169,7 @@ std::shared_ptr MultiDeviceExecutableNetwork::G } catch (const NotImplemented&) {} } IE_THROW(NotImplemented) << "None of the devices in the MULTI has an associated remote context." - << " Current list of devices allowed via the DEVICE_PRIORITIES config: " << devices_names; + << " Current list of devices allowed via the DEVICE_PRIORITIES config: " << devices_names; } InferenceEngine::IInferRequestInternal::Ptr MultiDeviceExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs, @@ -221,8 +221,7 @@ void MultiDeviceExecutableNetwork::SetConfig(const std::map supported_configKeys = { + const std::vector supported_configKeys = { MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - CONFIG_KEY_INTERNAL(WORK_MODE) + CONFIG_KEY_INTERNAL(MULTI_WORK_MODE_AS_AUTO) }; } // namespace @@ -167,7 +167,7 @@ InferenceEngine::Parameter MultiDeviceInferencePlugin::GetMetric(const std::stri metrics.push_back(METRIC_KEY(SUPPORTED_CONFIG_KEYS)); IE_SET_METRIC_RETURN(SUPPORTED_METRICS, metrics); } else if (name == METRIC_KEY(FULL_DEVICE_NAME)) { - std::string device_name = { "MULTI" }; + std::string device_name = { GetName() }; IE_SET_METRIC_RETURN(FULL_DEVICE_NAME, device_name); } else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) { IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, supported_configKeys); @@ -185,7 +185,7 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetwork(const st IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadExeNetworkImpl(const CNNNetwork &network, const std::map& config) { if (network.getFunction() == nullptr) { - IE_THROW() << "MULTI device supports just ngraph network representation"; + IE_THROW() << GetName() << " device supports just ngraph network representation"; } auto networkPrecision = GetNetworkPrecision(network); @@ -197,23 +197,24 @@ IExecutableNetworkInternal::Ptr 
MultiDeviceInferencePlugin::LoadNetworkImpl(cons const std::map& config, const std::string &networkPrecision) { if (GetCore() == nullptr) { - IE_THROW() << "Please, work with MULTI device via InferenceEngine::Core object"; + IE_THROW() << "Please, work with " << GetName() << " device via InferenceEngine::Core object"; } if (modelPath.empty() && network.getFunction() == nullptr) { - IE_THROW() << "MULTI device supports just ngraph network representation"; + IE_THROW() << GetName() << " device supports just ngraph network representation"; } auto fullConfig = mergeConfigs(_config, config); // collect the settings that are applicable to the devices we are loading the network to std::unordered_map multiNetworkConfig; std::vector metaDevices; - auto workMode = fullConfig.find(CONFIG_KEY_INTERNAL(WORK_MODE)); + auto workMode = fullConfig.find(CONFIG_KEY_INTERNAL(MULTI_WORK_MODE_AS_AUTO)); + bool workModeAuto = workMode != fullConfig.end() && workMode->second == InferenceEngine::PluginConfigParams::YES; auto priorities = fullConfig.find(MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES); // not found device priorities for -d AUTO use case if (priorities == fullConfig.end()) { - if (workMode != fullConfig.end()) { + if (workModeAuto) { std::string allDevices; auto availableDevices = GetCore()->GetAvailableDevices(); if (availableDevices.empty()) { @@ -226,16 +227,15 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons metaDevices = ParseMetaDevices(allDevices, fullConfig); multiNetworkConfig.insert({MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, allDevices}); } else { - IE_THROW() << "KEY_MULTI_DEVICE_PRIORITIES key is not set for MULTI device"; + IE_THROW() << "KEY_MULTI_DEVICE_PRIORITIES key is not set for " << GetName() << " device"; } } else { // for use case -d MULTI:xPU or -d AUTO:xPU metaDevices = ParseMetaDevices(priorities->second, fullConfig); multiNetworkConfig.insert(*priorities); } // check if it is -d AUTO or -d 
AUTO:xPU use case - if (workMode != fullConfig.end()) { + if (workModeAuto) { auto targetDevice = SelectDevice(metaDevices, networkPrecision); - // std::cout << "!!! DEBUG: select device is " << targetDevice.deviceName << std::endl; metaDevices = { targetDevice }; } @@ -271,7 +271,7 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons executor->runAndWait(loads); if (executableNetworkPerDevice.empty()) IE_THROW(NotFound) << "Failed to load network to any device " - << "that the MULTI device is initialized to work with"; + << "that the " << GetName() << " device is initialized to work with"; // checking the perf counters config from the loaded network to respect both device's plugin and load-specific setting size_t num_plugins_supporting_perf_counters = 0; @@ -302,11 +302,11 @@ QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const CNNNetwork& QueryNetworkResult queryResult; if (GetCore() == nullptr) { - IE_THROW() << "Please, work with MULTI device via InferencEngine::Core object"; + IE_THROW() << "Please, work with " << GetName() << " device via InferencEngine::Core object"; } if (network.getFunction() == nullptr) { - IE_THROW() << "MULTI device supports just ngraph network representation"; + IE_THROW() << GetName() << " device supports just ngraph network representation"; } queryResult.rc = StatusCode::OK; @@ -315,7 +315,7 @@ QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const CNNNetwork& auto fullConfig = mergeConfigs(_config, config); auto priorities = fullConfig.find(MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES); if (priorities == fullConfig.end()) { - IE_THROW() << "KEY_MULTI_DEVICE_PRIORITIES key is not set for MULTI device"; + IE_THROW() << "KEY_MULTI_DEVICE_PRIORITIES key is not set for " << GetName() << " device"; } auto metaDevices = ParseMetaDevices(priorities->second, fullConfig); std::unordered_set supportedLayers; @@ -338,7 +338,7 @@ QueryNetworkResult 
MultiDeviceInferencePlugin::QueryNetwork(const CNNNetwork& DeviceInformation MultiDeviceInferencePlugin::SelectDevice(const std::vector& metaDevices, const std::string& networkPrecision) { if (metaDevices.empty()) { - IE_THROW(NotFound) << "No available device to select in AUTO plugin"; + IE_THROW(NotFound) << "No available device to select in " << GetName() << " plugin"; } if (metaDevices.size() == 1) { return metaDevices.at(0); diff --git a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_internal_plugin_config.hpp b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_internal_plugin_config.hpp index 913ea531305..69c1c9a3daa 100644 --- a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_internal_plugin_config.hpp +++ b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_internal_plugin_config.hpp @@ -26,6 +26,13 @@ namespace PluginConfigInternalParams { */ #define CONFIG_KEY_INTERNAL(name) ::InferenceEngine::PluginConfigInternalParams::_CONFIG_KEY(name) +/** + * @def CONFIG_VALUE_INTERNAL(name) + * @ingroup ie_dev_api_plugin_api + * @brief Shortcut for defining internal configuration values + */ +#define CONFIG_VALUE_INTERNAL(name) ::InferenceEngine::PluginConfigInternalParams::name + /** * @brief Defines a low precision mode key * @ingroup ie_dev_api_plugin_api @@ -47,12 +54,8 @@ DECLARE_CONFIG_KEY(FORCE_DISABLE_CACHE); /** * @brief The name for setting work mode internal in MULTI device plugin option. 
- * - * This option should be used with value only: - * PluginConfigInternalParams::MULTI_MODE_AUTO or PluginConfigInternalParams::MULTI_MODE_LEGACY */ -DECLARE_CONFIG_KEY(WORK_MODE); -DECLARE_CONFIG_VALUE(MULTI_MODE_AUTO); +DECLARE_CONFIG_KEY(MULTI_WORK_MODE_AS_AUTO); } // namespace PluginConfigInternalParams diff --git a/inference-engine/tests/functional/inference_engine/CMakeLists.txt b/inference-engine/tests/functional/inference_engine/CMakeLists.txt index d5a9d2fd2d7..7d54c16d4ff 100644 --- a/inference-engine/tests/functional/inference_engine/CMakeLists.txt +++ b/inference-engine/tests/functional/inference_engine/CMakeLists.txt @@ -26,7 +26,6 @@ set(DEPENDENCIES inference_engine_ir_reader inference_engine_ir_v7_reader HeteroPlugin - AutoPlugin MultiDevicePlugin template_extension lptNgraphFunctions diff --git a/inference-engine/tests/functional/plugin/cpu/CMakeLists.txt b/inference-engine/tests/functional/plugin/cpu/CMakeLists.txt index 761ddb19580..6680a202f96 100644 --- a/inference-engine/tests/functional/plugin/cpu/CMakeLists.txt +++ b/inference-engine/tests/functional/plugin/cpu/CMakeLists.txt @@ -9,7 +9,7 @@ add_library(cpuSpecificRtInfo STATIC ${IE_MAIN_SOURCE_DIR}/src/mkldnn_plugin/uti target_link_libraries(cpuSpecificRtInfo PRIVATE ngraph) set(INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} ${IE_MAIN_SOURCE_DIR}/src/mkldnn_plugin) -set(DEPENDENCIES MKLDNNPlugin AutoPlugin) +set(DEPENDENCIES MKLDNNPlugin) set(LINK_LIBRARIES funcSharedTests cpuSpecificRtInfo) if (NGRAPH_ONNX_FRONTEND_ENABLE) list(APPEND INCLUDES "${OpenVINO_SOURCE_DIR}/docs/onnx_custom_op") diff --git a/inference-engine/tests/functional/plugin/gpu/CMakeLists.txt b/inference-engine/tests/functional/plugin/gpu/CMakeLists.txt index 8191fb4e905..6e5df4bfd78 100644 --- a/inference-engine/tests/functional/plugin/gpu/CMakeLists.txt +++ b/inference-engine/tests/functional/plugin/gpu/CMakeLists.txt @@ -13,7 +13,6 @@ addIeTargetTest( ${CMAKE_CURRENT_SOURCE_DIR} DEPENDENCIES clDNNPlugin - AutoPlugin 
LINK_LIBRARIES funcSharedTests OpenCL diff --git a/scripts/deployment_manager/configs/darwin.json b/scripts/deployment_manager/configs/darwin.json index 09be8f75ed9..b00e38cc041 100644 --- a/scripts/deployment_manager/configs/darwin.json +++ b/scripts/deployment_manager/configs/darwin.json @@ -22,7 +22,6 @@ "deployment_tools/inference_engine/lib/intel64/libinference_engine_preproc.so", "deployment_tools/inference_engine/lib/intel64/libinference_engine_ir_reader.so", "deployment_tools/inference_engine/lib/intel64/libinference_engine_c_api.dylib", - "deployment_tools/inference_engine/lib/intel64/libAutoPlugin.so", "deployment_tools/inference_engine/lib/intel64/libHeteroPlugin.so", "deployment_tools/inference_engine/lib/intel64/libMultiDevicePlugin.so", "deployment_tools/inference_engine/lib/intel64/plugins.xml", diff --git a/scripts/deployment_manager/configs/linux.json b/scripts/deployment_manager/configs/linux.json index 0c39eeb82a4..cbcd82c850f 100644 --- a/scripts/deployment_manager/configs/linux.json +++ b/scripts/deployment_manager/configs/linux.json @@ -28,7 +28,6 @@ "deployment_tools/inference_engine/lib/intel64/libinference_engine_preproc.so", "deployment_tools/inference_engine/lib/intel64/libinference_engine_ir_reader.so", "deployment_tools/inference_engine/lib/intel64/libinference_engine_c_api.so", - "deployment_tools/inference_engine/lib/intel64/libAutoPlugin.so", "deployment_tools/inference_engine/lib/intel64/libHeteroPlugin.so", "deployment_tools/inference_engine/lib/intel64/libMultiDevicePlugin.so", "deployment_tools/inference_engine/lib/intel64/plugins.xml", diff --git a/scripts/deployment_manager/configs/windows.json b/scripts/deployment_manager/configs/windows.json index 14ceedbff8a..6ee8e12cc6e 100644 --- a/scripts/deployment_manager/configs/windows.json +++ b/scripts/deployment_manager/configs/windows.json @@ -22,7 +22,6 @@ "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_preproc.dll", 
"deployment_tools/inference_engine/bin/intel64/Release/inference_engine_ir_reader.dll", "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_c_api.dll", - "deployment_tools/inference_engine/bin/intel64/Release/AutoPlugin.dll", "deployment_tools/inference_engine/lib/intel64/Release/HeteroPlugin.dll", "deployment_tools/inference_engine/lib/intel64/Release/MultiDevicePlugin.dll", "deployment_tools/inference_engine/bin/intel64/Release/plugins.xml", From bb84d11313e6af52b48d2a7fc28707cdade65dac Mon Sep 17 00:00:00 2001 From: Dmitry Pigasin Date: Fri, 3 Sep 2021 23:23:00 +0300 Subject: [PATCH 14/52] [IE Python Speech Sample] Add `--scale_factor` and `--performance_counter` options (#6663) * Adds perf counters, and scale factor args * Adding defined choices for arch type for -a/--arch option * changing print to logger, frequencies are now global consts * change to log info formatting * Fix style issues * doc: Update inference-engine/ie_bridges/python/sample/speech_sample/README.md * doc: Update inference-engine/ie_bridges/python/sample/speech_sample/README.md * doc: Update inference-engine/ie_bridges/python/sample/speech_sample/arg_parser.py * doc: Update inference-engine/ie_bridges/python/sample/speech_sample/arg_parser.py Co-authored-by: Koyanagi, Ken Co-authored-by: Kate Generalova --- .../python/sample/speech_sample/README.md | 17 +++++++-- .../python/sample/speech_sample/arg_parser.py | 6 ++++ .../sample/speech_sample/speech_sample.py | 35 +++++++++++++++++-- 3 files changed, 53 insertions(+), 5 deletions(-) diff --git a/inference-engine/ie_bridges/python/sample/speech_sample/README.md b/inference-engine/ie_bridges/python/sample/speech_sample/README.md index 2f7fd4323aa..54403416bc4 100644 --- a/inference-engine/ie_bridges/python/sample/speech_sample/README.md +++ b/inference-engine/ie_bridges/python/sample/speech_sample/README.md @@ -80,7 +80,8 @@ Usage message: usage: speech_sample.py [-h] (-m MODEL | -rg IMPORT_GNA_MODEL) -i INPUT [-o OUTPUT] 
[-r REFERENCE] [-d DEVICE] [-bs BATCH_SIZE] [-qb QUANTIZATION_BITS] - [-wg EXPORT_GNA_MODEL] [-iname INPUT_LAYERS] + [-sf SCALE_FACTOR] [-wg EXPORT_GNA_MODEL] [-pc] + [-a {CORE,ATOM}] [-iname INPUT_LAYERS] [-oname OUTPUT_LAYERS] optional arguments: @@ -94,9 +95,10 @@ optional arguments: Options: -h, --help Show this help message and exit. -i INPUT, --input INPUT - Required. Path to an input file (.ark or .npz). + Required. Path to an input file (.ark or .npz). -o OUTPUT, --output OUTPUT - Optional. Output file name to save inference results (.ark or .npz). + Optional. Output file name to save inference results + (.ark or .npz). -r REFERENCE, --reference REFERENCE Optional. Read reference score file and compare scores. @@ -113,9 +115,18 @@ Options: -qb QUANTIZATION_BITS, --quantization_bits QUANTIZATION_BITS Optional. Weight bits for quantization: 8 or 16 (default 16). + -sf SCALE_FACTOR, --scale_factor SCALE_FACTOR + Optional. The user-specified input scale factor for + quantization. -wg EXPORT_GNA_MODEL, --export_gna_model EXPORT_GNA_MODEL Optional. Write GNA model to file using path/filename provided. + -pc, --performance_counter + Optional. Enables performance report (specify -a to + ensure arch accurate results). + -a {CORE,ATOM}, --arch {CORE,ATOM} + Optional. Specify architecture. CORE, ATOM with the + combination of -pc. -iname INPUT_LAYERS, --input_layers INPUT_LAYERS Optional. Layer names for input blobs. The names are separated with ",". Allows to change the order of diff --git a/inference-engine/ie_bridges/python/sample/speech_sample/arg_parser.py b/inference-engine/ie_bridges/python/sample/speech_sample/arg_parser.py index cfc20dfb425..1d2ad5c7d71 100644 --- a/inference-engine/ie_bridges/python/sample/speech_sample/arg_parser.py +++ b/inference-engine/ie_bridges/python/sample/speech_sample/arg_parser.py @@ -28,10 +28,16 @@ def parse_args() -> argparse.Namespace: args.add_argument('-bs', '--batch_size', default=1, type=int, help='Optional. 
Batch size 1-8 (default 1).') args.add_argument('-qb', '--quantization_bits', default=16, type=int, help='Optional. Weight bits for quantization: 8 or 16 (default 16).') + args.add_argument('-sf', '--scale_factor', type=float, + help='Optional. The user-specified input scale factor for quantization.') args.add_argument('-wg', '--export_gna_model', type=str, help='Optional. Write GNA model to file using path/filename provided.') args.add_argument('-we', '--export_embedded_gna_model', type=str, help=argparse.SUPPRESS) args.add_argument('-we_gen', '--embedded_gna_configuration', default='GNA1', type=str, help=argparse.SUPPRESS) + args.add_argument('-pc', '--performance_counter', action='store_true', + help='Optional. Enables performance report (specify -a to ensure arch accurate results).') + args.add_argument('-a', '--arch', default='CORE', type=str.upper, choices=['CORE', 'ATOM'], + help='Optional. Specify architecture. CORE, ATOM with the combination of -pc.') args.add_argument('-iname', '--input_layers', type=str, help='Optional. Layer names for input blobs. The names are separated with ",". ' 'Allows to change the order of input layers for -i flag. 
Example: Input1,Input2') diff --git a/inference-engine/ie_bridges/python/sample/speech_sample/speech_sample.py b/inference-engine/ie_bridges/python/sample/speech_sample/speech_sample.py index c1edad6e9ea..bb2fa27e43b 100755 --- a/inference-engine/ie_bridges/python/sample/speech_sample/speech_sample.py +++ b/inference-engine/ie_bridges/python/sample/speech_sample/speech_sample.py @@ -14,6 +14,10 @@ from arg_parser import parse_args from file_options import read_utterance_file, write_utterance_file from openvino.inference_engine import ExecutableNetwork, IECore, IENetwork +# Operating Frequency for GNA HW devices for Core and Atom architecture +GNA_CORE_FREQUENCY = 400 +GNA_ATOM_FREQUENCY = 200 + def get_scale_factor(matrix: np.ndarray) -> float: """Get scale factor for quantization using utterance matrix""" @@ -143,21 +147,26 @@ def main(): plugin_config['GNA_DEVICE_MODE'] = gna_device_mode plugin_config['GNA_PRECISION'] = f'I{args.quantization_bits}' - # Get a GNA scale factor + # Set a GNA scale factor if args.import_gna_model: log.info(f'Using scale factor from the imported GNA model: {args.import_gna_model}') + elif args.scale_factor: + log.info(f'Using scale factor of {args.scale_factor:.7f} specified by user.') + plugin_config['GNA_SCALE_FACTOR'] = str(args.scale_factor) else: utterances = read_utterance_file(args.input.split(',')[0]) key = sorted(utterances)[0] scale_factor = get_scale_factor(utterances[key]) log.info(f'Using scale factor of {scale_factor:.7f} calculated from first utterance.') - plugin_config['GNA_SCALE_FACTOR'] = str(scale_factor) if args.export_embedded_gna_model: plugin_config['GNA_FIRMWARE_MODEL_IMAGE'] = args.export_embedded_gna_model plugin_config['GNA_FIRMWARE_MODEL_IMAGE_GENERATION'] = args.embedded_gna_configuration + if args.performance_counter: + plugin_config['PERF_COUNT'] = 'YES' + device_str = f'HETERO:{",".join(devices)}' if 'HETERO' in args.device else devices[0] log.info('Loading the model to the plugin') @@ -220,6 +229,7 @@ 
def main(): log.info('Starting inference in synchronous mode') results = {blob_name: {} for blob_name in output_blobs} infer_times = [] + perf_counters = [] for key in sorted(input_data): start_infer_time = default_timer() @@ -235,6 +245,7 @@ def main(): results[blob_name][key] = result[blob_name] infer_times.append(default_timer() - start_infer_time) + perf_counters.append(exec_net.requests[0].get_perf_counts()) # ---------------------------Step 8. Process output-------------------------------------------------------------------- for blob_name in output_blobs: @@ -247,6 +258,26 @@ def main(): if args.reference: compare_with_reference(results[blob_name][key], references[blob_name][key]) + if args.performance_counter: + if 'GNA' in args.device: + pc = perf_counters[i] + total_cycles = int(pc['1.1 Total scoring time in HW']['real_time']) + stall_cycles = int(pc['1.2 Stall scoring time in HW']['real_time']) + active_cycles = total_cycles - stall_cycles + frequency = 10**6 + if args.arch == 'CORE': + frequency *= GNA_CORE_FREQUENCY + else: + frequency *= GNA_ATOM_FREQUENCY + total_inference_time = total_cycles / frequency + active_time = active_cycles / frequency + stall_time = stall_cycles / frequency + log.info('') + log.info('Performance Statistics of GNA Hardware') + log.info(f' Total Inference Time: {(total_inference_time * 1000):.4f} ms') + log.info(f' Active Time: {(active_time * 1000):.4f} ms') + log.info(f' Stall Time: {(stall_time * 1000):.4f} ms') + log.info('') log.info(f'Total sample time: {sum(infer_times) * 1000:.2f}ms') From 35fef3deeaf70699e1cb4f0a9b8aa191505ad047 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Mon, 6 Sep 2021 08:15:28 +0300 Subject: [PATCH 15/52] Moved operations D-F to ov namespace (#7341) * Moved ngraph::Node to ov namespace * Fixed code style * Fixed VPU * Fixed GNA * Fixed tests * Added aliases for backward compatibility * Fix clDNN * Try to fix build * Fixed comment * Renamed RTTI macros * Moved op utils to ov namespace * Fixed 
ngraph library build * Fixed unit-tests * Changed src folder * Fixed recurrent_sequence * Changed low latency * Fixed serialize * Fixed ieFuncTests * Try to fix windows * Remove custom operator<< from tests * Fixed build * Moved operations from A to ov namespace * Moved operations from B and C to ov namespace * Moved operations D-F to ov namespace * Update ngraph/core/src/op/embeddingbag_offsets_sum.cpp Co-authored-by: Katarzyna Mitrus * Update ngraph/core/src/op/embeddingbag_packedsum.cpp Co-authored-by: Katarzyna Mitrus Co-authored-by: Ilya Lavrenov Co-authored-by: Katarzyna Mitrus --- .../ngraph/op/deformable_convolution.hpp | 158 +--------------- .../ngraph/op/deformable_psroi_pooling.hpp | 95 +--------- .../core/include/ngraph/op/depth_to_space.hpp | 67 +------ .../include/ngraph/op/detection_output.hpp | 66 +------ ngraph/core/include/ngraph/op/dft.hpp | 25 +-- ngraph/core/include/ngraph/op/divide.hpp | 43 +---- ngraph/core/include/ngraph/op/einsum.hpp | 59 +----- ngraph/core/include/ngraph/op/elu.hpp | 29 +-- .../ngraph/op/embedding_segments_sum.hpp | 67 +------ .../ngraph/op/embeddingbag_offsets_sum.hpp | 44 +---- .../ngraph/op/embeddingbag_packedsum.hpp | 33 +--- ngraph/core/include/ngraph/op/equal.hpp | 39 +--- ngraph/core/include/ngraph/op/erf.hpp | 18 +- ngraph/core/include/ngraph/op/exp.hpp | 20 +- ...xperimental_detectron_detection_output.hpp | 57 +----- ...erimental_detectron_generate_proposals.hpp | 47 +---- ...imental_detectron_prior_grid_generator.hpp | 50 +---- .../op/experimental_detectron_roi_feature.hpp | 41 +--- .../op/experimental_detectron_topkrois.hpp | 28 +-- .../include/ngraph/op/extractimagepatches.hpp | 58 +----- .../core/include/ngraph/op/fake_quantize.hpp | 68 +------ ngraph/core/include/ngraph/op/floor.hpp | 18 +- ngraph/core/include/ngraph/op/floor_mod.hpp | 31 +--- .../openvino/op/deformable_convolution.hpp | 175 ++++++++++++++++++ .../openvino/op/deformable_psroi_pooling.hpp | 107 +++++++++++ 
.../include/openvino/op/depth_to_space.hpp | 78 ++++++++ .../include/openvino/op/detection_output.hpp | 78 ++++++++ ngraph/core/include/openvino/op/dft.hpp | 53 ++++++ ngraph/core/include/openvino/op/divide.hpp | 55 ++++++ ngraph/core/include/openvino/op/einsum.hpp | 71 +++++++ ngraph/core/include/openvino/op/elu.hpp | 41 ++++ .../openvino/op/embedding_segments_sum.hpp | 77 ++++++++ .../openvino/op/embeddingbag_offsets_sum.hpp | 55 ++++++ .../openvino/op/embeddingbag_packedsum.hpp | 44 +++++ ngraph/core/include/openvino/op/equal.hpp | 51 +++++ ngraph/core/include/openvino/op/erf.hpp | 30 +++ ngraph/core/include/openvino/op/exp.hpp | 32 ++++ ...xperimental_detectron_detection_output.hpp | 74 ++++++++ ...erimental_detectron_generate_proposals.hpp | 64 +++++++ ...imental_detectron_prior_grid_generator.hpp | 67 +++++++ .../op/experimental_detectron_roi_feature.hpp | 59 ++++++ .../op/experimental_detectron_topkrois.hpp | 45 +++++ .../openvino/op/extractimagepatches.hpp | 70 +++++++ .../include/openvino/op/fake_quantize.hpp | 81 ++++++++ ngraph/core/include/openvino/op/floor.hpp | 30 +++ ngraph/core/include/openvino/op/floor_mod.hpp | 42 +++++ .../op/util/embeddingbag_offsets_base.hpp | 5 +- .../op/util/embeddingbag_packed_base.hpp | 5 +- ngraph/core/src/op/deformable_convolution.cpp | 10 +- .../core/src/op/deformable_psroi_pooling.cpp | 2 +- ngraph/core/src/op/depth_to_space.cpp | 4 +- ngraph/core/src/op/detection_output.cpp | 31 ++-- ngraph/core/src/op/dft.cpp | 2 +- ngraph/core/src/op/divide.cpp | 2 +- ngraph/core/src/op/einsum.cpp | 2 +- ngraph/core/src/op/elu.cpp | 2 +- ngraph/core/src/op/embedding_segments_sum.cpp | 2 +- .../core/src/op/embeddingbag_offsets_sum.cpp | 2 +- ngraph/core/src/op/embeddingbag_packedsum.cpp | 2 +- ngraph/core/src/op/equal.cpp | 2 +- ngraph/core/src/op/erf.cpp | 2 +- ngraph/core/src/op/exp.cpp | 2 +- ...xperimental_detectron_detection_output.cpp | 2 +- ...erimental_detectron_generate_proposals.cpp | 6 +- 
...imental_detectron_prior_grid_generator.cpp | 2 +- .../op/experimental_detectron_roi_feature.cpp | 4 +- .../op/experimental_detectron_topkrois.cpp | 2 +- ngraph/core/src/op/extractimagepatches.cpp | 2 +- ngraph/core/src/op/fake_quantize.cpp | 2 +- ngraph/core/src/op/floor.cpp | 2 +- ngraph/core/src/op/floor_mod.cpp | 2 +- .../src/op/util/embeddingbag_offsets_base.cpp | 2 +- .../src/op/util/embeddingbag_packed_base.cpp | 2 +- 73 files changed, 1580 insertions(+), 1165 deletions(-) create mode 100644 ngraph/core/include/openvino/op/deformable_convolution.hpp create mode 100644 ngraph/core/include/openvino/op/deformable_psroi_pooling.hpp create mode 100644 ngraph/core/include/openvino/op/depth_to_space.hpp create mode 100644 ngraph/core/include/openvino/op/detection_output.hpp create mode 100644 ngraph/core/include/openvino/op/dft.hpp create mode 100644 ngraph/core/include/openvino/op/divide.hpp create mode 100644 ngraph/core/include/openvino/op/einsum.hpp create mode 100644 ngraph/core/include/openvino/op/elu.hpp create mode 100644 ngraph/core/include/openvino/op/embedding_segments_sum.hpp create mode 100644 ngraph/core/include/openvino/op/embeddingbag_offsets_sum.hpp create mode 100644 ngraph/core/include/openvino/op/embeddingbag_packedsum.hpp create mode 100644 ngraph/core/include/openvino/op/equal.hpp create mode 100644 ngraph/core/include/openvino/op/erf.hpp create mode 100644 ngraph/core/include/openvino/op/exp.hpp create mode 100644 ngraph/core/include/openvino/op/experimental_detectron_detection_output.hpp create mode 100644 ngraph/core/include/openvino/op/experimental_detectron_generate_proposals.hpp create mode 100644 ngraph/core/include/openvino/op/experimental_detectron_prior_grid_generator.hpp create mode 100644 ngraph/core/include/openvino/op/experimental_detectron_roi_feature.hpp create mode 100644 ngraph/core/include/openvino/op/experimental_detectron_topkrois.hpp create mode 100644 ngraph/core/include/openvino/op/extractimagepatches.hpp create mode 
100644 ngraph/core/include/openvino/op/fake_quantize.hpp create mode 100644 ngraph/core/include/openvino/op/floor.hpp create mode 100644 ngraph/core/include/openvino/op/floor_mod.hpp diff --git a/ngraph/core/include/ngraph/op/deformable_convolution.hpp b/ngraph/core/include/ngraph/op/deformable_convolution.hpp index 2d95b83eafd..88141dcd39c 100644 --- a/ngraph/core/include/ngraph/op/deformable_convolution.hpp +++ b/ngraph/core/include/ngraph/op/deformable_convolution.hpp @@ -8,168 +8,16 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" #include "ngraph/op/util/deformable_convolution_base.hpp" +#include "openvino/op/deformable_convolution.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief DeformableConvolution operation. -class NGRAPH_API DeformableConvolution : public op::util::DeformableConvolutionBase { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a conversion operation. - DeformableConvolution() = default; - /// \brief Constructs a conversion operation. - /// - /// \param arg Node that produces the input tensor. - /// \param offsets Node producing the deformable values tensor. - /// \param filters Node producing the filters(kernels) tensor with OIZYX - /// layout. - /// \param strides Convolution strides. - /// \param pads_begin Amount of padding to be added to the beginning along - /// each axis. For example in case of a 2D input the value - /// of (1, 2) means that 1 element will be added to the - /// top and 2 elements to the left. - /// \param pads_end Amount of padding to be added to the end along each - /// axis. - /// \param dilations The distance in width and height between the weights - /// in the filters tensor. - /// \param auto_pad Specifies how the automatic calculation of padding - /// should be done. - /// \param group The number of groups which both output and input - /// should be split into. 
- /// \param deformable_group The number of groups which deformable values and - /// output should be split into along the channel axis. - DeformableConvolution(const Output& arg, - const Output& offsets, - const Output& filters, - const Strides& strides, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const Strides& dilations, - const PadType& auto_pad = PadType::EXPLICIT, - const int64_t group = 1, - const int64_t deformable_group = 1); - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v1::DeformableConvolution; } // namespace v1 namespace v8 { -class NGRAPH_API DeformableConvolution : public op::util::DeformableConvolutionBase { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a conversion operation. - DeformableConvolution() = default; - /// \brief Constructs a conversion operation. - /// - /// \param arg Node that produces the input tensor. - /// \param offsets Node producing the deformable values tensor. - /// \param filters Node producing the filters(kernels) tensor with OIZYX - /// layout. - /// \param strides Convolution strides. - /// \param pads_begin Amount of padding to be added to the beginning along - /// each axis. For example in case of a 2D input the value - /// of (1, 2) means that 1 element will be added to the - /// top and 2 elements to the left. - /// \param pads_end Amount of padding to be added to the end along each - /// axis. - /// \param dilations The distance in width and height between the weights - /// in the filters tensor. - /// \param auto_pad Specifies how the automatic calculation of padding - /// should be done. - /// \param group The number of groups which both output and input - /// should be split into. - /// \param deformable_group The number of groups which deformable values and - /// output should be split into along the channel axis. 
- /// \param bilinear_interpolation_pad - /// The flag that determines the mode of bilinear - /// interpolation execution. - /// If the flag is `true` and the sampling location is - /// within one pixel outside of the feature map boundary, - /// then bilinear interpolation is performed on the zero - /// padded feature map. If the flag is `false` and the - /// sampling location is within one pixel outside of the - /// feature map boundary, then the sampling location - /// shifts to the inner boundary of the feature map.` - DeformableConvolution(const Output& arg, - const Output& offsets, - const Output& filters, - const Strides& strides, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const Strides& dilations, - const PadType& auto_pad = PadType::EXPLICIT, - const int64_t group = 1, - const int64_t deformable_group = 1, - const bool bilinear_interpolation_pad = false); - - /// \brief Constructs a conversion operation. - /// - /// \param arg Node that produces the input tensor. - /// \param offsets Node producing the deformable values tensor. - /// \param filters Node producing the filters(kernels) tensor with OIZYX - /// layout. - /// \param mask Node producing the mask(mask) tensor. - /// \param strides Convolution strides. - /// \param pads_begin Amount of padding to be added to the beginning along - /// each axis. For example in case of a 2D input the value - /// of (1, 2) means that 1 element will be added to the - /// top and 2 elements to the left. - /// \param pads_end Amount of padding to be added to the end along each - /// axis. - /// \param dilations The distance in width and height between the weights - /// in the filters tensor. - /// \param auto_pad Specifies how the automatic calculation of padding - /// should be done. - /// \param group The number of groups which both output and input - /// should be split into. 
- /// \param deformable_group The number of groups which deformable values and - /// output should be split into along the channel axis. - /// \param bilinear_interpolation_pad - /// The flag that determines the mode of bilinear - /// interpolation execution. - /// If the flag is `true` and the sampling location is - /// within one pixel outside of the feature map boundary, - /// then bilinear interpolation is performed on the zero - /// padded feature map. If the flag is `false` and the - /// sampling location is within one pixel outside of the - /// feature map boundary, then the sampling location - /// shifts to the inner boundary of the feature map. - DeformableConvolution(const Output& arg, - const Output& offsets, - const Output& filters, - const Output& mask, - const Strides& strides, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const Strides& dilations, - const PadType& auto_pad = PadType::EXPLICIT, - const int64_t group = 1, - const int64_t deformable_group = 1, - const bool bilinear_interpolation_pad = false); - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - - bool has_evaluate() const override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool get_bilinear_interpolation_pad() const { - return m_bilinear_interpolation_pad; - } - - void set_bilinear_interpolation_pad(const bool bilinear_interpolation_pad) { - m_bilinear_interpolation_pad = bilinear_interpolation_pad; - } - -private: - bool m_bilinear_interpolation_pad; -}; +using ov::op::v8::DeformableConvolution; } // namespace v8 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/deformable_psroi_pooling.hpp b/ngraph/core/include/ngraph/op/deformable_psroi_pooling.hpp index 362d325ef33..6e47d387a98 100644 --- 
a/ngraph/core/include/ngraph/op/deformable_psroi_pooling.hpp +++ b/ngraph/core/include/ngraph/op/deformable_psroi_pooling.hpp @@ -5,103 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/deformable_psroi_pooling.hpp" namespace ngraph { namespace op { namespace v1 { -class NGRAPH_API DeformablePSROIPooling : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - DeformablePSROIPooling() = default; - /// \brief Constructs a DeformablePSROIPooling operation - /// - /// \param input Input tensor with position sensitive score maps - /// \param coords Input tensor with list of five element tuples - /// describing ROI coordinates - /// \param offsets Input tensor with transformation values - /// \param output_dim Pooled output channel number - /// \param group_size Number of horizontal bins per row to divide ROI area, - /// it defines output width and height - /// \param spatial_scale Multiplicative spatial scale factor to translate ROI - /// coordinates from their input scale to the scale used when - /// pooling - /// \param mode Specifies mode for pooling. - /// \param spatial_bins_x Specifies numbers of bins to divide ROI single - /// bin over width - /// \param spatial_bins_y Specifies numbers of bins to divide ROI single - /// bin over height - /// \param no_trans The flag that specifies whenever third input exists - /// and contains transformation (offset) values - /// \param trans_std The value that all transformation (offset) values are - /// multiplied with - /// \param part_size The number of parts the output tensor spatial dimensions - /// are divided into. 
Basically it is the height - /// and width of the third input - DeformablePSROIPooling(const Output& input, - const Output& coords, - const Output& offsets, - const int64_t output_dim, - const float spatial_scale, - const int64_t group_size = 1, - const std::string mode = "bilinear_deformable", - int64_t spatial_bins_x = 1, - int64_t spatial_bins_y = 1, - float trans_std = 1, - int64_t part_size = 1); - - DeformablePSROIPooling(const Output& input, - const Output& coords, - const int64_t output_dim, - const float spatial_scale, - const int64_t group_size = 1, - const std::string mode = "bilinear_deformable", - int64_t spatial_bins_x = 1, - int64_t spatial_bins_y = 1, - float trans_std = 1, - int64_t part_size = 1); - - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - int64_t get_output_dim() const { - return m_output_dim; - } - int64_t get_group_size() const { - return m_group_size; - } - float get_spatial_scale() const { - return m_spatial_scale; - } - const std::string& get_mode() const { - return m_mode; - } - int64_t get_spatial_bins_x() const { - return m_spatial_bins_x; - } - int64_t get_spatial_bins_y() const { - return m_spatial_bins_y; - } - float get_trans_std() const { - return m_trans_std; - } - int64_t get_part_size() const { - return m_part_size; - } - -private: - int64_t m_output_dim; - float m_spatial_scale; - int64_t m_group_size = 1; - std::string m_mode = "bilinear_deformable"; - int64_t m_spatial_bins_x = 1; - int64_t m_spatial_bins_y = 1; - float m_trans_std = 1.f; - int64_t m_part_size = 1; -}; +using ov::op::v1::DeformablePSROIPooling; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/depth_to_space.hpp b/ngraph/core/include/ngraph/op/depth_to_space.hpp index 0b57d0476db..8fd67c9eda5 100644 --- 
a/ngraph/core/include/ngraph/op/depth_to_space.hpp +++ b/ngraph/core/include/ngraph/op/depth_to_space.hpp @@ -6,76 +6,13 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/depth_to_space.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief DepthToSpace permutes data from the depth dimension of the input blob into -/// spatial dimensions. -/// -/// \note Values from the depth dimension (assuming NCHW layout) are moved in -/// spatial blocks to the height and width dimensions. -/// -/// Output node produces a tensor with shape: -/// [N, C/(blocksize * blocksize), H * blocksize, W * blocksize] -class NGRAPH_API DepthToSpace : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - enum class DepthToSpaceMode { - // The input depth is divided to [block_size, ..., block_size, new_depth] - BLOCKS_FIRST, - // The input depth is divided to [new_depth, block_size, ..., block_size] - DEPTH_FIRST - }; - - DepthToSpace() = default; - /// \brief Constructs a DepthToSpace operation. 
- /// - /// \param data Node producing the input tensor - /// \param mode Specifies how the input depth dimension is split to block - /// coordinates - /// \param block_size The size of the block of values to be moved - DepthToSpace(const Output& data, const DepthToSpaceMode& mode, std::size_t block_size = 1); - - DepthToSpace(const Output& data, const std::string& mode, std::size_t block_size = 1); - bool visit_attributes(AttributeVisitor& visitor) override; - - std::size_t get_block_size() const { - return m_blocksize; - } - DepthToSpaceMode get_mode() const { - return m_mode; - } - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - void validate_and_infer_types() override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - std::size_t m_blocksize; - DepthToSpaceMode m_mode; -}; +using ov::op::v0::DepthToSpace; } // namespace v0 using v0::DepthToSpace; } // namespace op - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v0::DepthToSpace::DepthToSpaceMode& type); } // namespace ngraph -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v0::DepthToSpace::DepthToSpaceMode& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/detection_output.hpp b/ngraph/core/include/ngraph/op/detection_output.hpp index b00a41096c7..5400d5b22bf 100644 --- a/ngraph/core/include/ngraph/op/detection_output.hpp +++ b/ngraph/core/include/ngraph/op/detection_output.hpp @@ -5,74 +5,14 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/detection_output.hpp" namespace ngraph { namespace op { -struct DetectionOutputAttrs { - 
int num_classes; - int background_label_id = 0; - int top_k = -1; - bool variance_encoded_in_target = false; - std::vector keep_top_k; - std::string code_type = std::string{"caffe.PriorBoxParameter.CORNER"}; - bool share_location = true; - float nms_threshold; - float confidence_threshold = 0; - bool clip_after_nms = false; - bool clip_before_nms = false; - bool decrease_label_id = false; - bool normalized = false; - size_t input_height = 1; - size_t input_width = 1; - float objectness_score = 0; -}; +using DetectionOutputAttrs = ov::op::v0::DetectionOutput::Attributes; namespace v0 { -/// \brief Layer which performs non-max suppression to -/// generate detection output using location and confidence predictions -class NGRAPH_API DetectionOutput : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - DetectionOutput() = default; - /// \brief Constructs a DetectionOutput operation - /// - /// \param box_logits Box logits - /// \param class_preds Class predictions - /// \param proposals Proposals - /// \param aux_class_preds Auxilary class predictions - /// \param aux_box_preds Auxilary box predictions - /// \param attrs Detection Output attributes - DetectionOutput(const Output& box_logits, - const Output& class_preds, - const Output& proposals, - const Output& aux_class_preds, - const Output& aux_box_preds, - const DetectionOutputAttrs& attrs); - - /// \brief Constructs a DetectionOutput operation - /// - /// \param box_logits Box logits - /// \param class_preds Class predictions - /// \param proposals Proposals - /// \param attrs Detection Output attributes - DetectionOutput(const Output& box_logits, - const Output& class_preds, - const Output& proposals, - const DetectionOutputAttrs& attrs); - - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - const DetectionOutputAttrs& get_attrs() const { - return m_attrs; - } - bool visit_attributes(AttributeVisitor& visitor) override; - 
-private: - DetectionOutputAttrs m_attrs; -}; +using ov::op::v0::DetectionOutput; } // namespace v0 using v0::DetectionOutput; } // namespace op diff --git a/ngraph/core/include/ngraph/op/dft.hpp b/ngraph/core/include/ngraph/op/dft.hpp index 3065d128df7..abd217511bc 100644 --- a/ngraph/core/include/ngraph/op/dft.hpp +++ b/ngraph/core/include/ngraph/op/dft.hpp @@ -23,33 +23,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" #include "ngraph/op/util/fft_base.hpp" +#include "openvino/op/dft.hpp" namespace ngraph { namespace op { namespace v7 { -/// \brief An operation DFT that computes the discrete Fourier transformation. -class NGRAPH_API DFT : public util::FFTBase { -public: - NGRAPH_RTTI_DECLARATION; - DFT() = default; - - /// \brief Constructs a DFT operation. DFT is performed for full size axes. - /// - /// \param data Input data - /// \param axes Axes to perform DFT - DFT(const Output& data, const Output& axes); - - /// \brief Constructs a DFT operation. - /// - /// \param data Input data - /// \param axes Axes to perform DFT - /// \param signal_size Signal sizes for 'axes' - DFT(const Output& data, const Output& axes, const Output& signal_size); - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v7::DFT; } // namespace v7 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/divide.hpp b/ngraph/core/include/ngraph/op/divide.hpp index 9f9364e8181..9b74d17e149 100644 --- a/ngraph/core/include/ngraph/op/divide.hpp +++ b/ngraph/core/include/ngraph/op/divide.hpp @@ -5,51 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/divide.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise division operation. 
-class NGRAPH_API Divide : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a division operation. - Divide() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs a division operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. - /// \param pythondiv Use Python style rounding for integral type - /// \param auto_broadcast Auto broadcast specification - Divide(const Output& arg0, - const Output& arg1, - bool pythondiv, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - /// \brief Constructs a division operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. - /// \param auto_broadcast Auto broadcast specification - Divide(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - bool visit_attributes(AttributeVisitor& visitor) override; - bool is_pythondiv() const { - return m_pythondiv; - } - void set_is_pythondiv(bool pythondiv) { - m_pythondiv = pythondiv; - } - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - bool m_pythondiv{true}; -}; +using ov::op::v1::Divide; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/einsum.hpp b/ngraph/core/include/ngraph/op/einsum.hpp index a23c2c0f7c5..71de175f8dd 100644 --- a/ngraph/core/include/ngraph/op/einsum.hpp +++ b/ngraph/core/include/ngraph/op/einsum.hpp @@ -6,67 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/einsum.hpp" namespace ngraph { namespace op { namespace v7 { 
-/// \brief Einsum operation. -class NGRAPH_API Einsum : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - Einsum() = default; - - /// - /// \brief Constructs Einsum operation. - /// - /// \param inputs Input nodes on which Einsum operation performs - /// contraction - /// - /// \param equation Einstein summation convention - /// - Einsum(const OutputVector& inputs, const std::string& equation); - - void validate_and_infer_types() override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \brief Get an equation of Einsum operation - /// - /// \return Einsum equation - /// - std::string get_equation() const { - return m_equation; - } - - /// \brief Check correctness of equation format and extract input subscripts - /// and output subscript - /// - /// \param equation Equation to be parsed and checked - /// - /// \param input_subscripts A vector of extracted input subscripts - /// - /// \param output_subscript An output subscript - /// - static void parse_equation(const std::string& equation, - std::vector& input_subscripts, - std::string& output_subscript); - - /// \brief Extract labels (from subscript) that can be alphabetic letters or - /// ellipsis - /// - /// \param subscript Subscript - /// - /// \return A vector of extracted labels from the input subscript in the order - /// of appearence - /// - static std::vector extract_labels(const std::string& subscript); - -private: - std::string m_equation; -}; +using ov::op::v7::Einsum; } // namespace v7 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/elu.hpp b/ngraph/core/include/ngraph/op/elu.hpp index 173cb9b02a6..0e1356c130f 100644 --- a/ngraph/core/include/ngraph/op/elu.hpp +++ b/ngraph/core/include/ngraph/op/elu.hpp @@ -6,37 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/elu.hpp" namespace ngraph { namespace op { namespace v0 { 
-/// \brief Exponential Linear Unit -/// x < 0 => f(x) = alpha * (exp(x) - 1.) -/// x >= 0 => f(x) = x -/// -class NGRAPH_API Elu : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - - Elu() = default; - /// \brief Constructs an Elu operation. - /// - /// \param data Input tensor - /// \param alpha Multiplier for negative values - Elu(const Output& data, const double alpha); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - double get_alpha() const { - return m_alpha; - } - -private: - double m_alpha; -}; +using ov::op::v0::Elu; } // namespace v0 using v0::Elu; } // namespace op diff --git a/ngraph/core/include/ngraph/op/embedding_segments_sum.hpp b/ngraph/core/include/ngraph/op/embedding_segments_sum.hpp index ebca3b5164b..0d264a28cf5 100644 --- a/ngraph/core/include/ngraph/op/embedding_segments_sum.hpp +++ b/ngraph/core/include/ngraph/op/embedding_segments_sum.hpp @@ -6,75 +6,12 @@ #include "ngraph/axis_set.hpp" #include "ngraph/op/util/index_reduction.hpp" +#include "openvino/op/embedding_segments_sum.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief Returns embeddings for given indices -class NGRAPH_API EmbeddingSegmentsSum : public Op { -public: - static constexpr NodeTypeInfo type_info{"EmbeddingSegmentsSum", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - /// \brief Constructs a EmbeddingSegmentsSum operation. - EmbeddingSegmentsSum() = default; - /// \brief Constructs a EmbeddingSegmentsSum operation. - /// - /// EmbeddingSegmentsSum constructs an output tensor by replacing every index in a - /// given - /// input tensor with a row (from the weights matrix) at that index - /// - /// \param 'emb_table' tensor containing the embedding lookup table of the module of - /// shape [num_emb, emb_dim1, emb_dim2, ...] 
and of type T - /// \param 'indices' tensor of shape [num_indices] and of type T_IND. Required - /// \param `segment_ids` tensor of shape `[num_indices]` and of type *T_IND* with - /// indices - /// into the output Tensor. Values should be sorted and can be repeated. Required. - /// \param `num_segments` scalar of type *T_IND* indicating the number of segments. - /// Required. - /// \param 'default_index' scalar of type T_IND containing default index in - /// embedding - /// table to fill empty "bags". If not provided empty "bags" - /// are filled with zeros. Optional. - /// \param 'per_sample_weights' tensor of the same shape as indices and of type T. - /// Each value in this tensor are multiplied with each - /// value pooled from embedding table for each index. Optional. - - EmbeddingSegmentsSum(const Output& emb_table, - const Output& indices, - const Output& segment_ids, - const Output& num_segments, - const Output& default_index, - const Output& per_sample_weights); - - EmbeddingSegmentsSum(const Output& emb_table, - const Output& indices, - const Output& segment_ids, - const Output& num_segments, - const Output& default_index); - - EmbeddingSegmentsSum(const Output& emb_table, - const Output& indices, - const Output& segment_ids, - const Output& num_segments); - - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool visit_attributes(AttributeVisitor&) override { - return true; - } - -private: - static constexpr int EMB_TABLE = 0; - static constexpr int INDICES = 1; - static constexpr int SEGMENT_IDS = 2; - static constexpr int NUM_SEGMENTS = 3; - static constexpr int DEFAULT_INDEX = 4; - static constexpr int PER_SAMPLE_WEIGHTS = 5; -}; +using ov::op::v3::EmbeddingSegmentsSum; } // namespace v3 using v3::EmbeddingSegmentsSum; } // namespace op diff --git a/ngraph/core/include/ngraph/op/embeddingbag_offsets_sum.hpp 
b/ngraph/core/include/ngraph/op/embeddingbag_offsets_sum.hpp index f47def5fee5..6fc907a272d 100644 --- a/ngraph/core/include/ngraph/op/embeddingbag_offsets_sum.hpp +++ b/ngraph/core/include/ngraph/op/embeddingbag_offsets_sum.hpp @@ -7,52 +7,12 @@ #include "ngraph/axis_set.hpp" #include "ngraph/op/util/embeddingbag_offsets_base.hpp" #include "ngraph/op/util/index_reduction.hpp" +#include "openvino/op/embeddingbag_offsets_sum.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief Returns embeddings for given indices -class NGRAPH_API EmbeddingBagOffsetsSum : public util::EmbeddingBagOffsetsBase { -public: - static constexpr NodeTypeInfo type_info{"EmbeddingBagOffsetsSum", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - /// \brief Constructs a EmbeddingBagOffsetsSum operation. - EmbeddingBagOffsetsSum() = default; - /// \brief Constructs a EmbeddingBagOffsetsSum operation. - /// - /// EmbeddingBagOffsetsSum constructs an output tensor by replacing every index in a - /// given - /// input tensor with a row (from the weights matrix) at that index - /// - /// \param emb_table tensor containing the embedding lookup table of the module of - /// shape [num_emb, emb_dim1, emb_dim2, ...] and of type T - /// \param tensor of shape [num_indices] and of type T_IND. Required - /// \param offsets tensor of shape [batch] and of type T_IND containing the starting - /// index positions of each "bag" in indices. Required. - /// \param default_index scalar of type T_IND containing default index in embedding - /// table to fill empty "bags". If not provided empty "bags" - /// are filled with zeros. Optional. - /// \param per_sample_weigths tensor of the same shape as indices and of type T. - /// Each value in this tensor are multiplied with each - /// value pooled from embedding table for each index. Optional. 
- - EmbeddingBagOffsetsSum(const Output& emb_table, - const Output& indices, - const Output& offsets, - const Output& default_index, - const Output& per_sample_weights); - - EmbeddingBagOffsetsSum(const Output& emb_table, - const Output& indices, - const Output& offsets, - const Output& default_index); - - EmbeddingBagOffsetsSum(const Output& emb_table, const Output& indices, const Output& offsets); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v3::EmbeddingBagOffsetsSum; } // namespace v3 using v3::EmbeddingBagOffsetsSum; } // namespace op diff --git a/ngraph/core/include/ngraph/op/embeddingbag_packedsum.hpp b/ngraph/core/include/ngraph/op/embeddingbag_packedsum.hpp index 159f100a610..8952ce1be9d 100644 --- a/ngraph/core/include/ngraph/op/embeddingbag_packedsum.hpp +++ b/ngraph/core/include/ngraph/op/embeddingbag_packedsum.hpp @@ -7,41 +7,12 @@ #include "ngraph/axis_set.hpp" #include "ngraph/op/util/embeddingbag_packed_base.hpp" #include "ngraph/op/util/index_reduction.hpp" +#include "openvino/op/embeddingbag_packedsum.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief Returns embeddings for given indices -class NGRAPH_API EmbeddingBagPackedSum : public util::EmbeddingBagPackedBase { -public: - static constexpr NodeTypeInfo type_info{"EmbeddingBagPackedSum", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - /// \brief Constructs a EmbeddingBagPackedSum operation. - EmbeddingBagPackedSum() = default; - /// \brief Constructs a EmbeddingBagPackedSum operation. - /// - /// EmbeddingBagPackedSum constructs an output tensor by replacing every index in a - /// given - /// input tensor with a row (from the weights matrix) at that index - /// - /// \param emb_table Tensor containing the embedding lookup table of the module of - /// shape [num_emb, emb_dim1, emb_dim2, ...] 
and of type T - /// \param indices Tensor of shape `[batch, indices_per_bag]` and of type *T_IND*. - /// Required. - /// \param per_sample_weigths tensor of the same shape as indices and of type T. - /// Each value in this tensor are multiplied with each - /// value pooled from embedding table for each index. Optional. - - EmbeddingBagPackedSum(const Output& emb_table, - const Output& indices, - const Output& per_sample_weights); - - EmbeddingBagPackedSum(const Output& emb_table, const Output& indices); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v3::EmbeddingBagPackedSum; } // namespace v3 using v3::EmbeddingBagPackedSum; } // namespace op diff --git a/ngraph/core/include/ngraph/op/equal.hpp b/ngraph/core/include/ngraph/op/equal.hpp index 81766834f0b..6751bd0b8eb 100644 --- a/ngraph/core/include/ngraph/op/equal.hpp +++ b/ngraph/core/include/ngraph/op/equal.hpp @@ -5,47 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_comparison.hpp" +#include "openvino/op/equal.hpp" namespace ngraph { namespace op { namespace v1 { -// clang-format off - /// \brief Elementwise is-equal operation. - /// - /// ## Inputs - /// - /// | | Type | Description | - /// | ------ | --------------------------------- | ------------------------------------------------------ | - /// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. | - /// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. | - /// | `autob`| AutoBroadcastSpec | Auto broadcast specification. 
| - /// - /// ## Output - /// - /// | Type | Description | - /// | ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | - /// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] = \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ | -// clang-format on -class NGRAPH_API Equal : public util::BinaryElementwiseComparison { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs an equal operation. - Equal() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} - /// \brief Constructs an equal operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. - /// \param auto_broadcast Auto broadcast specification - Equal(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Equal; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/erf.hpp b/ngraph/core/include/ngraph/op/erf.hpp index 3bb5392bbfe..22a4a7246f5 100644 --- a/ngraph/core/include/ngraph/op/erf.hpp +++ b/ngraph/core/include/ngraph/op/erf.hpp @@ -5,26 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/erf.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise erf operation. -class NGRAPH_API Erf : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a floor operation. 
- Erf() = default; - /// \brief Constructs a floor operation. - /// - /// \param arg Node that produces the input tensor. - Erf(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Erf; } // namespace v0 using v0::Erf; } // namespace op diff --git a/ngraph/core/include/ngraph/op/exp.hpp b/ngraph/core/include/ngraph/op/exp.hpp index f8170c66249..8172778087e 100644 --- a/ngraph/core/include/ngraph/op/exp.hpp +++ b/ngraph/core/include/ngraph/op/exp.hpp @@ -5,28 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/exp.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise natural exponential (exp) operation. -class NGRAPH_API Exp : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs an exponential operation. - Exp() = default; - /// \brief Constructs an exponential operation. - /// - /// \param arg Node that produces the input tensor. 
- Exp(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Exp; } // namespace v0 using v0::Exp; } // namespace op diff --git a/ngraph/core/include/ngraph/op/experimental_detectron_detection_output.hpp b/ngraph/core/include/ngraph/op/experimental_detectron_detection_output.hpp index 0231d20eba9..bd3689ee662 100644 --- a/ngraph/core/include/ngraph/op/experimental_detectron_detection_output.hpp +++ b/ngraph/core/include/ngraph/op/experimental_detectron_detection_output.hpp @@ -10,65 +10,12 @@ #include "ngraph/attribute_adapter.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/experimental_detectron_detection_output.hpp" namespace ngraph { namespace op { namespace v6 { -/// \brief An operation ExperimentalDetectronDetectionOutput performs -/// non-maximum suppression to generate the detection output using -/// information on location and score predictions. 
-class NGRAPH_API ExperimentalDetectronDetectionOutput : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Structure that specifies attributes of the operation - struct Attributes { - // specifies score threshold - float score_threshold; - // specifies NMS threshold - float nms_threshold; - // specifies maximal delta of logarithms for width and height - float max_delta_log_wh; - // specifies number of detected classes - int64_t num_classes; - // specifies maximal number of detections per class - int64_t post_nms_count; - // specifies maximual number of detections per image - size_t max_detections_per_image; - // a flag specifies whether to delete background classes or not - // `true` means background classes should be deleted, - // `false` means background classes shouldn't be deleted. - bool class_agnostic_box_regression; - // specifies deltas of weights - std::vector deltas_weights; - }; - - ExperimentalDetectronDetectionOutput() = default; - /// \brief Constructs a ExperimentalDetectronDetectionOutput operation. 
- /// - /// \param input_rois Input rois - /// \param input_deltas Input deltas - /// \param input_scores Input scores - /// \param input_im_info Input image info - /// \param attrs Attributes attributes - ExperimentalDetectronDetectionOutput(const Output& input_rois, - const Output& input_deltas, - const Output& input_scores, - const Output& input_im_info, - const Attributes& attrs); - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - /// \brief Returns attributes of the operation ExperimentalDetectronDetectionOutput - const Attributes& get_attrs() const { - return m_attrs; - } - -private: - Attributes m_attrs; -}; +using ov::op::v6::ExperimentalDetectronDetectionOutput; } // namespace v6 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/experimental_detectron_generate_proposals.hpp b/ngraph/core/include/ngraph/op/experimental_detectron_generate_proposals.hpp index 2abb9d5d593..26cf040ec39 100644 --- a/ngraph/core/include/ngraph/op/experimental_detectron_generate_proposals.hpp +++ b/ngraph/core/include/ngraph/op/experimental_detectron_generate_proposals.hpp @@ -10,55 +10,12 @@ #include "ngraph/attribute_adapter.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/experimental_detectron_generate_proposals.hpp" namespace ngraph { namespace op { namespace v6 { -/// \brief An operation ExperimentalDetectronGenerateProposalsSingleImage -/// computes ROIs and their scores based on input data. 
-class NGRAPH_API ExperimentalDetectronGenerateProposalsSingleImage : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Structure that specifies attributes of the operation - struct Attributes { - // minimum box width & height - float min_size; - // specifies NMS threshold - float nms_threshold; - // number of top-n proposals after NMS - int64_t post_nms_count; - // number of top-n proposals before NMS - int64_t pre_nms_count; - }; - - ExperimentalDetectronGenerateProposalsSingleImage() = default; - /// \brief Constructs a ExperimentalDetectronGenerateProposalsSingleImage operation. - /// - /// \param im_info Input image info - /// \param anchors Input anchors - /// \param deltas Input deltas - /// \param scores Input scores - /// \param attrs Operation attributes - ExperimentalDetectronGenerateProposalsSingleImage(const Output& im_info, - const Output& anchors, - const Output& deltas, - const Output& scores, - const Attributes& attrs); - - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - const Attributes& get_attrs() const { - return m_attrs; - } - -private: - Attributes m_attrs; -}; +using ov::op::v6::ExperimentalDetectronGenerateProposalsSingleImage; } // namespace v6 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/experimental_detectron_prior_grid_generator.hpp b/ngraph/core/include/ngraph/op/experimental_detectron_prior_grid_generator.hpp index c109412cc78..ded6e775721 100644 --- a/ngraph/core/include/ngraph/op/experimental_detectron_prior_grid_generator.hpp +++ b/ngraph/core/include/ngraph/op/experimental_detectron_prior_grid_generator.hpp @@ -10,58 +10,12 @@ #include "ngraph/attribute_adapter.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/experimental_detectron_prior_grid_generator.hpp" namespace ngraph { namespace 
op { namespace v6 { -/// \brief An operation ExperimentalDetectronPriorGridGenerator generates prior -/// grids of specified sizes. -class NGRAPH_API ExperimentalDetectronPriorGridGenerator : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Structure that specifies attributes of the operation - struct Attributes { - // Specifies whether the output tensor should be 2D or 4D - // `true` means the output tensor should be 2D tensor, - // `false` means the output tensor should be 4D tensor. - bool flatten; - // Specifies number of cells of the generated grid with respect to height. - int64_t h; - // Specifies number of cells of the generated grid with respect to width. - int64_t w; - // Specifies the step of generated grid with respect to x coordinate - float stride_x; - // Specifies the step of generated grid with respect to y coordinate - float stride_y; - }; - - ExperimentalDetectronPriorGridGenerator() = default; - /// \brief Constructs a ExperimentalDetectronDetectionOutput operation. - /// - /// \param priors Input priors - /// \param feature_map Input feature map - /// \param im_data Image data - /// \param attrs attributes - ExperimentalDetectronPriorGridGenerator(const Output& priors, - const Output& feature_map, - const Output& im_data, - const Attributes& attrs); - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - /// \brief Returns attributes of this operation. 
- const Attributes& get_attrs() const { - return m_attrs; - } - -private: - Attributes m_attrs; - - void validate(); -}; +using ov::op::v6::ExperimentalDetectronPriorGridGenerator; } // namespace v6 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/experimental_detectron_roi_feature.hpp b/ngraph/core/include/ngraph/op/experimental_detectron_roi_feature.hpp index 675573ba254..6c4043456f7 100644 --- a/ngraph/core/include/ngraph/op/experimental_detectron_roi_feature.hpp +++ b/ngraph/core/include/ngraph/op/experimental_detectron_roi_feature.hpp @@ -11,49 +11,12 @@ #include "ngraph/attribute_adapter.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/experimental_detectron_roi_feature.hpp" namespace ngraph { namespace op { namespace v6 { -/// \brief An operation ExperimentalDetectronROIFeatureExtractor -/// is the ROIAlign operation applied over a feature pyramid. -class NGRAPH_API ExperimentalDetectronROIFeatureExtractor : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Structure that specifies attributes of the operation - struct Attributes { - int64_t output_size; - int64_t sampling_ratio; - std::vector pyramid_scales; - bool aligned; - }; - - ExperimentalDetectronROIFeatureExtractor() = default; - /// \brief Constructs a ExperimentalDetectronROIFeatureExtractor operation. - /// - /// \param args Inputs of ExperimentalDetectronROIFeatureExtractor - /// \param attrs Operation attributes - ExperimentalDetectronROIFeatureExtractor(const OutputVector& args, const Attributes& attrs); - - /// \brief Constructs a ExperimentalDetectronROIFeatureExtractor operation. 
- /// - /// \param args Inputs of ExperimentalDetectronROIFeatureExtractor - /// \param attrs Operation attributes - ExperimentalDetectronROIFeatureExtractor(const NodeVector& args, const Attributes& attrs); - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - /// \brief Returns attributes of the operation. - const Attributes& get_attrs() const { - return m_attrs; - } - -private: - Attributes m_attrs; -}; +using ov::op::v6::ExperimentalDetectronROIFeatureExtractor; } // namespace v6 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/experimental_detectron_topkrois.hpp b/ngraph/core/include/ngraph/op/experimental_detectron_topkrois.hpp index 17f6bd591f3..91f56825d90 100644 --- a/ngraph/core/include/ngraph/op/experimental_detectron_topkrois.hpp +++ b/ngraph/core/include/ngraph/op/experimental_detectron_topkrois.hpp @@ -10,36 +10,12 @@ #include "ngraph/attribute_adapter.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/experimental_detectron_topkrois.hpp" namespace ngraph { namespace op { namespace v6 { -/// \brief An operation ExperimentalDetectronTopKROIs, according to the repository -/// is TopK operation applied to probabilities of input ROIs. -class NGRAPH_API ExperimentalDetectronTopKROIs : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - ExperimentalDetectronTopKROIs() = default; - /// \brief Constructs a ExperimentalDetectronTopKROIs operation. 
- /// - /// \param input_rois Input rois - /// \param rois_probs Probabilities for input rois - /// \param max_rois Maximal numbers of output rois - ExperimentalDetectronTopKROIs(const Output& input_rois, const Output& rois_probs, size_t max_rois = 0); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - size_t get_max_rois() const { - return m_max_rois; - } - -private: - size_t m_max_rois; -}; +using ov::op::v6::ExperimentalDetectronTopKROIs; } // namespace v6 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/extractimagepatches.hpp b/ngraph/core/include/ngraph/op/extractimagepatches.hpp index ae3eb3ed62b..8c427d10876 100644 --- a/ngraph/core/include/ngraph/op/extractimagepatches.hpp +++ b/ngraph/core/include/ngraph/op/extractimagepatches.hpp @@ -5,66 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/extractimagepatches.hpp" namespace ngraph { namespace op { namespace v3 { -class NGRAPH_API ExtractImagePatches : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - ExtractImagePatches() = default; - /// \brief Constructs a ExtractImagePatches operation - /// - /// \param data 4-D Input data to extract image patches - /// \param sizes Patch size in the format of [size_rows, size_cols] - /// \param strides Patch movement stride in the format of [stride_rows, stride_cols] - /// \param rates Element seleciton rate for creating a patch. in the format of - /// [rate_rows, rate_cols] - /// \param auto_pad Padding type. 
it can be any value from - /// valid, same_lower, same_upper - ExtractImagePatches(const Output& image, - const Shape& sizes, - const Strides& strides, - const Shape& rates, - const PadType& auto_pad); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - const Shape& get_sizes() const { - return m_patch_sizes; - } - void set_sizes(const Shape& sizes) { - m_patch_sizes = sizes; - } - const Strides& get_strides() const { - return m_patch_movement_strides; - } - void set_strides(const Strides& strides) { - m_patch_movement_strides = strides; - } - const Shape& get_rates() const { - return m_patch_selection_rates; - } - void set_rates(const Shape& rates) { - m_patch_selection_rates = rates; - } - const PadType& get_auto_pad() const { - return m_padding; - } - void set_auto_pad(PadType& padding) { - m_padding = padding; - } - -private: - Shape m_patch_sizes; - Strides m_patch_movement_strides; - Shape m_patch_selection_rates; - PadType m_padding; -}; +using ov::op::v3::ExtractImagePatches; } // namespace v3 using v3::ExtractImagePatches; } // namespace op diff --git a/ngraph/core/include/ngraph/op/fake_quantize.hpp b/ngraph/core/include/ngraph/op/fake_quantize.hpp index 00c970bf8fd..d36772c2dc9 100644 --- a/ngraph/core/include/ngraph/op/fake_quantize.hpp +++ b/ngraph/core/include/ngraph/op/fake_quantize.hpp @@ -7,76 +7,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/fake_quantize.hpp" namespace ngraph { namespace op { namespace v0 { -/// -/// \brief Class performing element-wise linear quantization. -/// -/// \note Input floating point values are quantized into a discrete -/// set of floating point values. 
-/// -/// \paragraph Implementation This class creates a node which performs the following -/// operation: -/// -/// round((data - input_low) / (input_high - input_low) * (levels-1)) / -/// (levels-1) * (output_high - output_low) + output_low -/// -/// -class NGRAPH_API FakeQuantize : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - - FakeQuantize(); - /// - /// \brief Constructs a FakeQuantize operation node. - /// - /// \param[in] data The input data tensor. - /// \param[in] input_low The minimum limit for input values. - /// \param[in] input_high The maximum limit for input values. - /// \param[in] output_low The minimum quantized value. - /// \param[in] output_high The maximum quantized value. - /// \param[in] levels The number of quantization levels. - /// \param[in] auto_broadcast AutoBroadcast mode to be used for broadcasting - /// limit values - /// - FakeQuantize(const Output& data, - const Output& input_low, - const Output& input_high, - const Output& output_low, - const Output& output_high, - std::size_t levels, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - std::size_t get_levels() const { - return m_levels; - } - void set_levels(std::size_t levels) { - m_levels = levels; - } - const AutoBroadcastSpec& get_auto_broadcast() const { - return m_auto_broadcast; - } - void set_auto_broadcast(const AutoBroadcastSpec& auto_broadcast) { - m_auto_broadcast = auto_broadcast; - } - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override { - return false; - } - -private: - std::size_t m_levels; - AutoBroadcastSpec 
m_auto_broadcast = op::AutoBroadcastType::NUMPY; -}; +using ov::op::v0::FakeQuantize; } // namespace v0 using v0::FakeQuantize; } // namespace op diff --git a/ngraph/core/include/ngraph/op/floor.hpp b/ngraph/core/include/ngraph/op/floor.hpp index 6196f8f689c..e58ce80b042 100644 --- a/ngraph/core/include/ngraph/op/floor.hpp +++ b/ngraph/core/include/ngraph/op/floor.hpp @@ -5,26 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/floor.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise floor operation. -class NGRAPH_API Floor : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a floor operation. - Floor() = default; - /// \brief Constructs a floor operation. - /// - /// \param arg Node that produces the input tensor. - Floor(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Floor; } // namespace v0 using v0::Floor; } // namespace op diff --git a/ngraph/core/include/ngraph/op/floor_mod.hpp b/ngraph/core/include/ngraph/op/floor_mod.hpp index 2ce6441af99..b4fd0f9309e 100644 --- a/ngraph/core/include/ngraph/op/floor_mod.hpp +++ b/ngraph/core/include/ngraph/op/floor_mod.hpp @@ -7,40 +7,13 @@ #include #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/floor_mod.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise FloorMod operation. 
-/// -class NGRAPH_API FloorMod : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs an uninitialized addition operation - FloorMod() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY){}; - - /// \brief Constructs an Floor Mod operation. - /// - /// \param arg0 Output that produces the first input tensor.
- /// `[d0, ...]` - /// \param arg1 Output that produces the second input tensor.
- /// `[d0, ...]` - /// \param auto_broadcast Auto broadcast specification - /// - /// Output `[d0, ...]` - /// - FloorMod(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastType::NUMPY); - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor& visitor) override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::FloorMod; } // namespace v1 - using v1::FloorMod; } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/openvino/op/deformable_convolution.hpp b/ngraph/core/include/openvino/op/deformable_convolution.hpp new file mode 100644 index 00000000000..3670ee1a059 --- /dev/null +++ b/ngraph/core/include/openvino/op/deformable_convolution.hpp @@ -0,0 +1,175 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" +#include "openvino/op/util/deformable_convolution_base.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief DeformableConvolution operation. +class OPENVINO_API DeformableConvolution : public op::util::DeformableConvolutionBase { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a conversion operation. + DeformableConvolution() = default; + /// \brief Constructs a conversion operation. + /// + /// \param arg Node that produces the input tensor. + /// \param offsets Node producing the deformable values tensor. + /// \param filters Node producing the filters(kernels) tensor with OIZYX + /// layout. + /// \param strides Convolution strides. + /// \param pads_begin Amount of padding to be added to the beginning along + /// each axis. 
For example in case of a 2D input the value + /// of (1, 2) means that 1 element will be added to the + /// top and 2 elements to the left. + /// \param pads_end Amount of padding to be added to the end along each + /// axis. + /// \param dilations The distance in width and height between the weights + /// in the filters tensor. + /// \param auto_pad Specifies how the automatic calculation of padding + /// should be done. + /// \param group The number of groups which both output and input + /// should be split into. + /// \param deformable_group The number of groups which deformable values and + /// output should be split into along the channel axis. + DeformableConvolution(const Output& arg, + const Output& offsets, + const Output& filters, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + const PadType& auto_pad = PadType::EXPLICIT, + const int64_t group = 1, + const int64_t deformable_group = 1); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v1 + +namespace v8 { +class OPENVINO_API DeformableConvolution : public op::util::DeformableConvolutionBase { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a conversion operation. + DeformableConvolution() = default; + /// \brief Constructs a conversion operation. + /// + /// \param arg Node that produces the input tensor. + /// \param offsets Node producing the deformable values tensor. + /// \param filters Node producing the filters(kernels) tensor with OIZYX + /// layout. + /// \param strides Convolution strides. + /// \param pads_begin Amount of padding to be added to the beginning along + /// each axis. For example in case of a 2D input the value + /// of (1, 2) means that 1 element will be added to the + /// top and 2 elements to the left. + /// \param pads_end Amount of padding to be added to the end along each + /// axis. 
+ /// \param dilations The distance in width and height between the weights + /// in the filters tensor. + /// \param auto_pad Specifies how the automatic calculation of padding + /// should be done. + /// \param group The number of groups which both output and input + /// should be split into. + /// \param deformable_group The number of groups which deformable values and + /// output should be split into along the channel axis. + /// \param bilinear_interpolation_pad + /// The flag that determines the mode of bilinear + /// interpolation execution. + /// If the flag is `true` and the sampling location is + /// within one pixel outside of the feature map boundary, + /// then bilinear interpolation is performed on the zero + /// padded feature map. If the flag is `false` and the + /// sampling location is within one pixel outside of the + /// feature map boundary, then the sampling location + /// shifts to the inner boundary of the feature map.` + DeformableConvolution(const Output& arg, + const Output& offsets, + const Output& filters, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + const PadType& auto_pad = PadType::EXPLICIT, + const int64_t group = 1, + const int64_t deformable_group = 1, + const bool bilinear_interpolation_pad = false); + + /// \brief Constructs a conversion operation. + /// + /// \param arg Node that produces the input tensor. + /// \param offsets Node producing the deformable values tensor. + /// \param filters Node producing the filters(kernels) tensor with OIZYX + /// layout. + /// \param mask Node producing the mask(mask) tensor. + /// \param strides Convolution strides. + /// \param pads_begin Amount of padding to be added to the beginning along + /// each axis. For example in case of a 2D input the value + /// of (1, 2) means that 1 element will be added to the + /// top and 2 elements to the left. 
+ /// \param pads_end Amount of padding to be added to the end along each + /// axis. + /// \param dilations The distance in width and height between the weights + /// in the filters tensor. + /// \param auto_pad Specifies how the automatic calculation of padding + /// should be done. + /// \param group The number of groups which both output and input + /// should be split into. + /// \param deformable_group The number of groups which deformable values and + /// output should be split into along the channel axis. + /// \param bilinear_interpolation_pad + /// The flag that determines the mode of bilinear + /// interpolation execution. + /// If the flag is `true` and the sampling location is + /// within one pixel outside of the feature map boundary, + /// then bilinear interpolation is performed on the zero + /// padded feature map. If the flag is `false` and the + /// sampling location is within one pixel outside of the + /// feature map boundary, then the sampling location + /// shifts to the inner boundary of the feature map. 
+ DeformableConvolution(const Output& arg, + const Output& offsets, + const Output& filters, + const Output& mask, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + const PadType& auto_pad = PadType::EXPLICIT, + const int64_t group = 1, + const int64_t deformable_group = 1, + const bool bilinear_interpolation_pad = false); + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + + bool has_evaluate() const override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool get_bilinear_interpolation_pad() const { + return m_bilinear_interpolation_pad; + } + + void set_bilinear_interpolation_pad(const bool bilinear_interpolation_pad) { + m_bilinear_interpolation_pad = bilinear_interpolation_pad; + } + +private: + bool m_bilinear_interpolation_pad; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/deformable_psroi_pooling.hpp b/ngraph/core/include/openvino/op/deformable_psroi_pooling.hpp new file mode 100644 index 00000000000..06ee5c551f4 --- /dev/null +++ b/ngraph/core/include/openvino/op/deformable_psroi_pooling.hpp @@ -0,0 +1,107 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +class OPENVINO_API DeformablePSROIPooling : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + DeformablePSROIPooling() = default; + /// \brief Constructs a DeformablePSROIPooling operation + /// + /// \param input Input tensor with position sensitive score maps + /// \param coords Input tensor with list of five element tuples + /// describing ROI coordinates + /// \param offsets Input tensor with transformation values + /// 
\param output_dim Pooled output channel number + /// \param group_size Number of horizontal bins per row to divide ROI area, + /// it defines output width and height + /// \param spatial_scale Multiplicative spatial scale factor to translate ROI + /// coordinates from their input scale to the scale used when + /// pooling + /// \param mode Specifies mode for pooling. + /// \param spatial_bins_x Specifies numbers of bins to divide ROI single + /// bin over width + /// \param spatial_bins_y Specifies numbers of bins to divide ROI single + /// bin over height + /// \param no_trans The flag that specifies whenever third input exists + /// and contains transformation (offset) values + /// \param trans_std The value that all transformation (offset) values are + /// multiplied with + /// \param part_size The number of parts the output tensor spatial dimensions + /// are divided into. Basically it is the height + /// and width of the third input + DeformablePSROIPooling(const Output& input, + const Output& coords, + const Output& offsets, + const int64_t output_dim, + const float spatial_scale, + const int64_t group_size = 1, + const std::string mode = "bilinear_deformable", + int64_t spatial_bins_x = 1, + int64_t spatial_bins_y = 1, + float trans_std = 1, + int64_t part_size = 1); + + DeformablePSROIPooling(const Output& input, + const Output& coords, + const int64_t output_dim, + const float spatial_scale, + const int64_t group_size = 1, + const std::string mode = "bilinear_deformable", + int64_t spatial_bins_x = 1, + int64_t spatial_bins_y = 1, + float trans_std = 1, + int64_t part_size = 1); + + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + int64_t get_output_dim() const { + return m_output_dim; + } + int64_t get_group_size() const { + return m_group_size; + } + float get_spatial_scale() const { + return m_spatial_scale; 
+ } + const std::string& get_mode() const { + return m_mode; + } + int64_t get_spatial_bins_x() const { + return m_spatial_bins_x; + } + int64_t get_spatial_bins_y() const { + return m_spatial_bins_y; + } + float get_trans_std() const { + return m_trans_std; + } + int64_t get_part_size() const { + return m_part_size; + } + +private: + int64_t m_output_dim; + float m_spatial_scale; + int64_t m_group_size = 1; + std::string m_mode = "bilinear_deformable"; + int64_t m_spatial_bins_x = 1; + int64_t m_spatial_bins_y = 1; + float m_trans_std = 1.f; + int64_t m_part_size = 1; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/depth_to_space.hpp b/ngraph/core/include/openvino/op/depth_to_space.hpp new file mode 100644 index 00000000000..216359dcf58 --- /dev/null +++ b/ngraph/core/include/openvino/op/depth_to_space.hpp @@ -0,0 +1,78 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief DepthToSpace permutes data from the depth dimension of the input blob into +/// spatial dimensions. +/// +/// \note Values from the depth dimension (assuming NCHW layout) are moved in +/// spatial blocks to the height and width dimensions. +/// +/// Output node produces a tensor with shape: +/// [N, C/(blocksize * blocksize), H * blocksize, W * blocksize] +class OPENVINO_API DepthToSpace : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + enum class DepthToSpaceMode { + // The input depth is divided to [block_size, ..., block_size, new_depth] + BLOCKS_FIRST, + // The input depth is divided to [new_depth, block_size, ..., block_size] + DEPTH_FIRST + }; + + DepthToSpace() = default; + /// \brief Constructs a DepthToSpace operation. 
+ /// + /// \param data Node producing the input tensor + /// \param mode Specifies how the input depth dimension is split to block + /// coordinates + /// \param block_size The size of the block of values to be moved + DepthToSpace(const Output& data, const DepthToSpaceMode& mode, std::size_t block_size = 1); + + DepthToSpace(const Output& data, const std::string& mode, std::size_t block_size = 1); + bool visit_attributes(AttributeVisitor& visitor) override; + + std::size_t get_block_size() const { + return m_blocksize; + } + DepthToSpaceMode get_mode() const { + return m_mode; + } + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + void validate_and_infer_types() override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + std::size_t m_blocksize; + DepthToSpaceMode m_mode; +}; +} // namespace v0 +} // namespace op + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v0::DepthToSpace::DepthToSpaceMode& type); + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v0::DepthToSpace::DepthToSpaceMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/detection_output.hpp b/ngraph/core/include/openvino/op/detection_output.hpp new file mode 100644 index 00000000000..26c9d9828a9 --- /dev/null +++ b/ngraph/core/include/openvino/op/detection_output.hpp @@ -0,0 +1,78 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Layer which performs non-max suppression to +/// generate detection output 
using location and confidence predictions +class OPENVINO_API DetectionOutput : public Op { +public: + struct Attributes { + int num_classes; + int background_label_id = 0; + int top_k = -1; + bool variance_encoded_in_target = false; + std::vector keep_top_k; + std::string code_type = std::string{"caffe.PriorBoxParameter.CORNER"}; + bool share_location = true; + float nms_threshold; + float confidence_threshold = 0; + bool clip_after_nms = false; + bool clip_before_nms = false; + bool decrease_label_id = false; + bool normalized = false; + size_t input_height = 1; + size_t input_width = 1; + float objectness_score = 0; + }; + + OPENVINO_RTTI_DECLARATION; + + DetectionOutput() = default; + /// \brief Constructs a DetectionOutput operation + /// + /// \param box_logits Box logits + /// \param class_preds Class predictions + /// \param proposals Proposals + /// \param aux_class_preds Auxilary class predictions + /// \param aux_box_preds Auxilary box predictions + /// \param attrs Detection Output attributes + DetectionOutput(const Output& box_logits, + const Output& class_preds, + const Output& proposals, + const Output& aux_class_preds, + const Output& aux_box_preds, + const Attributes& attrs); + + /// \brief Constructs a DetectionOutput operation + /// + /// \param box_logits Box logits + /// \param class_preds Class predictions + /// \param proposals Proposals + /// \param attrs Detection Output attributes + DetectionOutput(const Output& box_logits, + const Output& class_preds, + const Output& proposals, + const Attributes& attrs); + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + const Attributes& get_attrs() const { + return m_attrs; + } + bool visit_attributes(AttributeVisitor& visitor) override; + +private: + Attributes m_attrs; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/dft.hpp 
b/ngraph/core/include/openvino/op/dft.hpp new file mode 100644 index 00000000000..954bcb7f5f6 --- /dev/null +++ b/ngraph/core/include/openvino/op/dft.hpp @@ -0,0 +1,53 @@ +//***************************************************************************** +// Copyright 2017-2021 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#pragma once + +#include +#include + +#include "openvino/op/op.hpp" +#include "openvino/op/util/fft_base.hpp" + +namespace ov { +namespace op { +namespace v7 { +/// \brief An operation DFT that computes the discrete Fourier transformation. +class OPENVINO_API DFT : public util::FFTBase { +public: + OPENVINO_RTTI_DECLARATION; + DFT() = default; + + /// \brief Constructs a DFT operation. DFT is performed for full size axes. + /// + /// \param data Input data + /// \param axes Axes to perform DFT + DFT(const Output& data, const Output& axes); + + /// \brief Constructs a DFT operation. 
+ /// + /// \param data Input data + /// \param axes Axes to perform DFT + /// \param signal_size Signal sizes for 'axes' + DFT(const Output& data, const Output& axes, const Output& signal_size); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v7 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/divide.hpp b/ngraph/core/include/openvino/op/divide.hpp new file mode 100644 index 00000000000..065c5f641e0 --- /dev/null +++ b/ngraph/core/include/openvino/op/divide.hpp @@ -0,0 +1,55 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise division operation. +class OPENVINO_API Divide : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a division operation. + Divide() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + + /// \brief Constructs a division operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. + /// \param pythondiv Use Python style rounding for integral type + /// \param auto_broadcast Auto broadcast specification + Divide(const Output& arg0, + const Output& arg1, + bool pythondiv, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + /// \brief Constructs a division operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. 
+ /// \param auto_broadcast Auto broadcast specification + Divide(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + bool visit_attributes(AttributeVisitor& visitor) override; + bool is_pythondiv() const { + return m_pythondiv; + } + void set_is_pythondiv(bool pythondiv) { + m_pythondiv = pythondiv; + } + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + bool m_pythondiv{true}; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/einsum.hpp b/ngraph/core/include/openvino/op/einsum.hpp new file mode 100644 index 00000000000..14f10ba8b88 --- /dev/null +++ b/ngraph/core/include/openvino/op/einsum.hpp @@ -0,0 +1,71 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v7 { +/// \brief Einsum operation. +class OPENVINO_API Einsum : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Einsum() = default; + + /// + /// \brief Constructs Einsum operation. 
+ /// + /// \param inputs Input nodes on which Einsum operation performs + /// contraction + /// + /// \param equation Einstein summation convention + /// + Einsum(const OutputVector& inputs, const std::string& equation); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \brief Get an equation of Einsum operation + /// + /// \return Einsum equation + /// + std::string get_equation() const { + return m_equation; + } + + /// \brief Check correctness of equation format and extract input subscripts + /// and output subscript + /// + /// \param equation Equation to be parsed and checked + /// + /// \param input_subscripts A vector of extracted input subscripts + /// + /// \param output_subscript An output subscript + /// + static void parse_equation(const std::string& equation, + std::vector& input_subscripts, + std::string& output_subscript); + + /// \brief Extract labels (from subscript) that can be alphabetic letters or + /// ellipsis + /// + /// \param subscript Subscript + /// + /// \return A vector of extracted labels from the input subscript in the order + /// of appearence + /// + static std::vector extract_labels(const std::string& subscript); + +private: + std::string m_equation; +}; +} // namespace v7 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/elu.hpp b/ngraph/core/include/openvino/op/elu.hpp new file mode 100644 index 00000000000..eb9c8aaa093 --- /dev/null +++ b/ngraph/core/include/openvino/op/elu.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Exponential Linear Unit +/// x < 0 => f(x) = alpha * (exp(x) - 1.) 
+/// x >= 0 => f(x) = x +/// +class OPENVINO_API Elu : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Elu() = default; + /// \brief Constructs an Elu operation. + /// + /// \param data Input tensor + /// \param alpha Multiplier for negative values + Elu(const Output& data, const double alpha); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + double get_alpha() const { + return m_alpha; + } + +private: + double m_alpha; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/embedding_segments_sum.hpp b/ngraph/core/include/openvino/op/embedding_segments_sum.hpp new file mode 100644 index 00000000000..fb73228343e --- /dev/null +++ b/ngraph/core/include/openvino/op/embedding_segments_sum.hpp @@ -0,0 +1,77 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/axis_set.hpp" +#include "openvino/op/util/index_reduction.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Returns embeddings for given indices +class OPENVINO_API EmbeddingSegmentsSum : public Op { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a EmbeddingSegmentsSum operation. + EmbeddingSegmentsSum() = default; + /// \brief Constructs a EmbeddingSegmentsSum operation. + /// + /// EmbeddingSegmentsSum constructs an output tensor by replacing every index in a + /// given + /// input tensor with a row (from the weights matrix) at that index + /// + /// \param 'emb_table' tensor containing the embedding lookup table of the module of + /// shape [num_emb, emb_dim1, emb_dim2, ...] and of type T + /// \param 'indices' tensor of shape [num_indices] and of type T_IND. Required + /// \param `segment_ids` tensor of shape `[num_indices]` and of type *T_IND* with + /// indices + /// into the output Tensor. 
Values should be sorted and can be repeated. Required. + /// \param `num_segments` scalar of type *T_IND* indicating the number of segments. + /// Required. + /// \param 'default_index' scalar of type T_IND containing default index in + /// embedding + /// table to fill empty "bags". If not provided empty "bags" + /// are filled with zeros. Optional. + /// \param 'per_sample_weights' tensor of the same shape as indices and of type T. + /// Each value in this tensor are multiplied with each + /// value pooled from embedding table for each index. Optional. + + EmbeddingSegmentsSum(const Output& emb_table, + const Output& indices, + const Output& segment_ids, + const Output& num_segments, + const Output& default_index, + const Output& per_sample_weights); + + EmbeddingSegmentsSum(const Output& emb_table, + const Output& indices, + const Output& segment_ids, + const Output& num_segments, + const Output& default_index); + + EmbeddingSegmentsSum(const Output& emb_table, + const Output& indices, + const Output& segment_ids, + const Output& num_segments); + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool visit_attributes(AttributeVisitor&) override { + return true; + } + +private: + static constexpr int EMB_TABLE = 0; + static constexpr int INDICES = 1; + static constexpr int SEGMENT_IDS = 2; + static constexpr int NUM_SEGMENTS = 3; + static constexpr int DEFAULT_INDEX = 4; + static constexpr int PER_SAMPLE_WEIGHTS = 5; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/embeddingbag_offsets_sum.hpp b/ngraph/core/include/openvino/op/embeddingbag_offsets_sum.hpp new file mode 100644 index 00000000000..0d2326c3a57 --- /dev/null +++ b/ngraph/core/include/openvino/op/embeddingbag_offsets_sum.hpp @@ -0,0 +1,55 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include 
"openvino/core/axis_set.hpp" +#include "openvino/op/util/embeddingbag_offsets_base.hpp" +#include "openvino/op/util/index_reduction.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Returns embeddings for given indices +class OPENVINO_API EmbeddingBagOffsetsSum : public util::EmbeddingBagOffsetsBase { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a EmbeddingBagOffsetsSum operation. + EmbeddingBagOffsetsSum() = default; + /// \brief Constructs a EmbeddingBagOffsetsSum operation. + /// + /// EmbeddingBagOffsetsSum constructs an output tensor by replacing every index in a + /// given + /// input tensor with a row (from the weights matrix) at that index + /// + /// \param emb_table tensor containing the embedding lookup table of the module of + /// shape [num_emb, emb_dim1, emb_dim2, ...] and of type T + /// \param indices tensor of shape [num_indices] and of type T_IND. Required + /// \param offsets tensor of shape [batch] and of type T_IND containing the starting + /// index positions of each "bag" in indices. Required. + /// \param default_index scalar of type T_IND containing default index in embedding + /// table to fill empty "bags". If not provided empty "bags" + /// are filled with zeros. Optional. + /// \param per_sample_weights tensor of the same shape as indices and of type T. + /// Each value in this tensor is multiplied with each + /// value pooled from embedding table for each index. Optional. 
+ + EmbeddingBagOffsetsSum(const Output& emb_table, + const Output& indices, + const Output& offsets, + const Output& default_index, + const Output& per_sample_weights); + + EmbeddingBagOffsetsSum(const Output& emb_table, + const Output& indices, + const Output& offsets, + const Output& default_index); + + EmbeddingBagOffsetsSum(const Output& emb_table, const Output& indices, const Output& offsets); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/embeddingbag_packedsum.hpp b/ngraph/core/include/openvino/op/embeddingbag_packedsum.hpp new file mode 100644 index 00000000000..68c3dc3f96f --- /dev/null +++ b/ngraph/core/include/openvino/op/embeddingbag_packedsum.hpp @@ -0,0 +1,44 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/axis_set.hpp" +#include "openvino/op/util/embeddingbag_packed_base.hpp" +#include "openvino/op/util/index_reduction.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Returns embeddings for given indices +class OPENVINO_API EmbeddingBagPackedSum : public util::EmbeddingBagPackedBase { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a EmbeddingBagPackedSum operation. + EmbeddingBagPackedSum() = default; + /// \brief Constructs a EmbeddingBagPackedSum operation. + /// + /// EmbeddingBagPackedSum constructs an output tensor by replacing every index in a + /// given + /// input tensor with a row (from the weights matrix) at that index + /// + /// \param emb_table Tensor containing the embedding lookup table of the module of + /// shape [num_emb, emb_dim1, emb_dim2, ...] and of type T + /// \param indices Tensor of shape `[batch, indices_per_bag]` and of type *T_IND*. + /// Required. + /// \param per_sample_weights tensor of the same shape as indices and of type T. 
+ /// Each value in this tensor are multiplied with each + /// value pooled from embedding table for each index. Optional. + + EmbeddingBagPackedSum(const Output& emb_table, + const Output& indices, + const Output& per_sample_weights); + + EmbeddingBagPackedSum(const Output& emb_table, const Output& indices); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/equal.hpp b/ngraph/core/include/openvino/op/equal.hpp new file mode 100644 index 00000000000..ce73248ffcb --- /dev/null +++ b/ngraph/core/include/openvino/op/equal.hpp @@ -0,0 +1,51 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_comparison.hpp" + +namespace ov { +namespace op { +namespace v1 { +// clang-format off +/// \brief Elementwise is-equal operation. +/// +/// ## Inputs +/// +/// | | Type | Description | +/// | ------ | --------------------------------- | ------------------------------------------------------ | +/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. | +/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. | +/// | `autob`| AutoBroadcastSpec | Auto broadcast specification. 
| +/// +/// ## Output +/// +/// | Type | Description | +/// | ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] = \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ | +// clang-format on +class OPENVINO_API Equal : public util::BinaryElementwiseComparison { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs an equal operation. + Equal() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} + /// \brief Constructs an equal operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. + /// \param auto_broadcast Auto broadcast specification + Equal(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/erf.hpp b/ngraph/core/include/openvino/op/erf.hpp new file mode 100644 index 00000000000..b3be867dda9 --- /dev/null +++ b/ngraph/core/include/openvino/op/erf.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise erf operation. 
+class OPENVINO_API Erf : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a floor operation. + Erf() = default; + /// \brief Constructs a floor operation. + /// + /// \param arg Node that produces the input tensor. + Erf(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/exp.hpp b/ngraph/core/include/openvino/op/exp.hpp new file mode 100644 index 00000000000..7bb23dbd3f6 --- /dev/null +++ b/ngraph/core/include/openvino/op/exp.hpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise natural exponential (exp) operation. +class OPENVINO_API Exp : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs an exponential operation. + Exp() = default; + /// \brief Constructs an exponential operation. + /// + /// \param arg Node that produces the input tensor. 
+ Exp(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/experimental_detectron_detection_output.hpp b/ngraph/core/include/openvino/op/experimental_detectron_detection_output.hpp new file mode 100644 index 00000000000..4f04bf2a0bf --- /dev/null +++ b/ngraph/core/include/openvino/op/experimental_detectron_detection_output.hpp @@ -0,0 +1,74 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/core/attribute_adapter.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v6 { +/// \brief An operation ExperimentalDetectronDetectionOutput performs +/// non-maximum suppression to generate the detection output using +/// information on location and score predictions. 
+class OPENVINO_API ExperimentalDetectronDetectionOutput : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Structure that specifies attributes of the operation + struct Attributes { + // specifies score threshold + float score_threshold; + // specifies NMS threshold + float nms_threshold; + // specifies maximal delta of logarithms for width and height + float max_delta_log_wh; + // specifies number of detected classes + int64_t num_classes; + // specifies maximal number of detections per class + int64_t post_nms_count; + // specifies maximal number of detections per image + size_t max_detections_per_image; + // a flag specifies whether to delete background classes or not + // `true` means background classes should be deleted, + // `false` means background classes shouldn't be deleted. + bool class_agnostic_box_regression; + // specifies deltas of weights + std::vector deltas_weights; + }; + + ExperimentalDetectronDetectionOutput() = default; + /// \brief Constructs a ExperimentalDetectronDetectionOutput operation. 
+ /// + /// \param input_rois Input rois + /// \param input_deltas Input deltas + /// \param input_scores Input scores + /// \param input_im_info Input image info + /// \param attrs Attributes attributes + ExperimentalDetectronDetectionOutput(const Output& input_rois, + const Output& input_deltas, + const Output& input_scores, + const Output& input_im_info, + const Attributes& attrs); + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + /// \brief Returns attributes of the operation ExperimentalDetectronDetectionOutput + const Attributes& get_attrs() const { + return m_attrs; + } + +private: + Attributes m_attrs; +}; +} // namespace v6 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/experimental_detectron_generate_proposals.hpp b/ngraph/core/include/openvino/op/experimental_detectron_generate_proposals.hpp new file mode 100644 index 00000000000..c17fdcae970 --- /dev/null +++ b/ngraph/core/include/openvino/op/experimental_detectron_generate_proposals.hpp @@ -0,0 +1,64 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/core/attribute_adapter.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v6 { +/// \brief An operation ExperimentalDetectronGenerateProposalsSingleImage +/// computes ROIs and their scores based on input data. 
+class OPENVINO_API ExperimentalDetectronGenerateProposalsSingleImage : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Structure that specifies attributes of the operation + struct Attributes { + // minimum box width & height + float min_size; + // specifies NMS threshold + float nms_threshold; + // number of top-n proposals after NMS + int64_t post_nms_count; + // number of top-n proposals before NMS + int64_t pre_nms_count; + }; + + ExperimentalDetectronGenerateProposalsSingleImage() = default; + /// \brief Constructs a ExperimentalDetectronGenerateProposalsSingleImage operation. + /// + /// \param im_info Input image info + /// \param anchors Input anchors + /// \param deltas Input deltas + /// \param scores Input scores + /// \param attrs Operation attributes + ExperimentalDetectronGenerateProposalsSingleImage(const Output& im_info, + const Output& anchors, + const Output& deltas, + const Output& scores, + const Attributes& attrs); + + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + const Attributes& get_attrs() const { + return m_attrs; + } + +private: + Attributes m_attrs; +}; +} // namespace v6 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/experimental_detectron_prior_grid_generator.hpp b/ngraph/core/include/openvino/op/experimental_detectron_prior_grid_generator.hpp new file mode 100644 index 00000000000..14933e22dff --- /dev/null +++ b/ngraph/core/include/openvino/op/experimental_detectron_prior_grid_generator.hpp @@ -0,0 +1,67 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/core/attribute_adapter.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v6 { +/// \brief An operation 
ExperimentalDetectronPriorGridGenerator generates prior +/// grids of specified sizes. +class OPENVINO_API ExperimentalDetectronPriorGridGenerator : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Structure that specifies attributes of the operation + struct Attributes { + // Specifies whether the output tensor should be 2D or 4D + // `true` means the output tensor should be 2D tensor, + // `false` means the output tensor should be 4D tensor. + bool flatten; + // Specifies number of cells of the generated grid with respect to height. + int64_t h; + // Specifies number of cells of the generated grid with respect to width. + int64_t w; + // Specifies the step of generated grid with respect to x coordinate + float stride_x; + // Specifies the step of generated grid with respect to y coordinate + float stride_y; + }; + + ExperimentalDetectronPriorGridGenerator() = default; + /// \brief Constructs a ExperimentalDetectronDetectionOutput operation. + /// + /// \param priors Input priors + /// \param feature_map Input feature map + /// \param im_data Image data + /// \param attrs attributes + ExperimentalDetectronPriorGridGenerator(const Output& priors, + const Output& feature_map, + const Output& im_data, + const Attributes& attrs); + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + /// \brief Returns attributes of this operation. 
+ const Attributes& get_attrs() const { + return m_attrs; + } + +private: + Attributes m_attrs; + + void validate(); +}; +} // namespace v6 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/experimental_detectron_roi_feature.hpp b/ngraph/core/include/openvino/op/experimental_detectron_roi_feature.hpp new file mode 100644 index 00000000000..052b7c0bc4b --- /dev/null +++ b/ngraph/core/include/openvino/op/experimental_detectron_roi_feature.hpp @@ -0,0 +1,59 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "openvino/core/attribute_adapter.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v6 { +/// \brief An operation ExperimentalDetectronROIFeatureExtractor +/// is the ROIAlign operation applied over a feature pyramid. +class OPENVINO_API ExperimentalDetectronROIFeatureExtractor : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Structure that specifies attributes of the operation + struct Attributes { + int64_t output_size; + int64_t sampling_ratio; + std::vector pyramid_scales; + bool aligned; + }; + + ExperimentalDetectronROIFeatureExtractor() = default; + /// \brief Constructs a ExperimentalDetectronROIFeatureExtractor operation. + /// + /// \param args Inputs of ExperimentalDetectronROIFeatureExtractor + /// \param attrs Operation attributes + ExperimentalDetectronROIFeatureExtractor(const OutputVector& args, const Attributes& attrs); + + /// \brief Constructs a ExperimentalDetectronROIFeatureExtractor operation. 
+ /// + /// \param args Inputs of ExperimentalDetectronROIFeatureExtractor + /// \param attrs Operation attributes + ExperimentalDetectronROIFeatureExtractor(const NodeVector& args, const Attributes& attrs); + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + /// \brief Returns attributes of the operation. + const Attributes& get_attrs() const { + return m_attrs; + } + +private: + Attributes m_attrs; +}; +} // namespace v6 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/experimental_detectron_topkrois.hpp b/ngraph/core/include/openvino/op/experimental_detectron_topkrois.hpp new file mode 100644 index 00000000000..50f73ac5588 --- /dev/null +++ b/ngraph/core/include/openvino/op/experimental_detectron_topkrois.hpp @@ -0,0 +1,45 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/core/attribute_adapter.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v6 { +/// \brief An operation ExperimentalDetectronTopKROIs, according to the repository +/// is TopK operation applied to probabilities of input ROIs. +class OPENVINO_API ExperimentalDetectronTopKROIs : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + ExperimentalDetectronTopKROIs() = default; + /// \brief Constructs a ExperimentalDetectronTopKROIs operation. 
+ /// + /// \param input_rois Input rois + /// \param rois_probs Probabilities for input rois + /// \param max_rois Maximal numbers of output rois + ExperimentalDetectronTopKROIs(const Output& input_rois, const Output& rois_probs, size_t max_rois = 0); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + size_t get_max_rois() const { + return m_max_rois; + } + +private: + size_t m_max_rois; +}; +} // namespace v6 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/extractimagepatches.hpp b/ngraph/core/include/openvino/op/extractimagepatches.hpp new file mode 100644 index 00000000000..bd738d0cf10 --- /dev/null +++ b/ngraph/core/include/openvino/op/extractimagepatches.hpp @@ -0,0 +1,70 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v3 { +class OPENVINO_API ExtractImagePatches : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + ExtractImagePatches() = default; + /// \brief Constructs a ExtractImagePatches operation + /// + /// \param data 4-D Input data to extract image patches + /// \param sizes Patch size in the format of [size_rows, size_cols] + /// \param strides Patch movement stride in the format of [stride_rows, stride_cols] + /// \param rates Element selection rate for creating a patch. in the format of + /// [rate_rows, rate_cols] + /// \param auto_pad Padding type. 
it can be any value from + /// valid, same_lower, same_upper + ExtractImagePatches(const Output& image, + const ngraph::Shape& sizes, + const Strides& strides, + const ngraph::Shape& rates, + const PadType& auto_pad); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + const ngraph::Shape& get_sizes() const { + return m_patch_sizes; + } + void set_sizes(const ngraph::Shape& sizes) { + m_patch_sizes = sizes; + } + const Strides& get_strides() const { + return m_patch_movement_strides; + } + void set_strides(const Strides& strides) { + m_patch_movement_strides = strides; + } + const ngraph::Shape& get_rates() const { + return m_patch_selection_rates; + } + void set_rates(const ngraph::Shape& rates) { + m_patch_selection_rates = rates; + } + const PadType& get_auto_pad() const { + return m_padding; + } + void set_auto_pad(PadType& padding) { + m_padding = padding; + } + +private: + ngraph::Shape m_patch_sizes; + Strides m_patch_movement_strides; + ngraph::Shape m_patch_selection_rates; + PadType m_padding; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/fake_quantize.hpp b/ngraph/core/include/openvino/op/fake_quantize.hpp new file mode 100644 index 00000000000..86f10ec3778 --- /dev/null +++ b/ngraph/core/include/openvino/op/fake_quantize.hpp @@ -0,0 +1,81 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// +/// \brief Class performing element-wise linear quantization. +/// +/// \note Input floating point values are quantized into a discrete +/// set of floating point values. 
+/// +/// \paragraph Implementation This class creates a node which performs the following +/// operation: +/// +/// round((data - input_low) / (input_high - input_low) * (levels-1)) / +/// (levels-1) * (output_high - output_low) + output_low +/// +/// +class OPENVINO_API FakeQuantize : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + FakeQuantize(); + /// + /// \brief Constructs a FakeQuantize operation node. + /// + /// \param[in] data The input data tensor. + /// \param[in] input_low The minimum limit for input values. + /// \param[in] input_high The maximum limit for input values. + /// \param[in] output_low The minimum quantized value. + /// \param[in] output_high The maximum quantized value. + /// \param[in] levels The number of quantization levels. + /// \param[in] auto_broadcast AutoBroadcast mode to be used for broadcasting + /// limit values + /// + FakeQuantize(const Output& data, + const Output& input_low, + const Output& input_high, + const Output& output_low, + const Output& output_high, + std::size_t levels, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + std::size_t get_levels() const { + return m_levels; + } + void set_levels(std::size_t levels) { + m_levels = levels; + } + const AutoBroadcastSpec& get_auto_broadcast() const { + return m_auto_broadcast; + } + void set_auto_broadcast(const AutoBroadcastSpec& auto_broadcast) { + m_auto_broadcast = auto_broadcast; + } + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override { + return false; + } + +private: + std::size_t m_levels; + AutoBroadcastSpec m_auto_broadcast = 
op::AutoBroadcastType::NUMPY; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/floor.hpp b/ngraph/core/include/openvino/op/floor.hpp new file mode 100644 index 00000000000..a5a759d4d94 --- /dev/null +++ b/ngraph/core/include/openvino/op/floor.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise floor operation. +class OPENVINO_API Floor : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a floor operation. + Floor() = default; + /// \brief Constructs a floor operation. + /// + /// \param arg Node that produces the input tensor. + Floor(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/floor_mod.hpp b/ngraph/core/include/openvino/op/floor_mod.hpp new file mode 100644 index 00000000000..a26bbe34bdf --- /dev/null +++ b/ngraph/core/include/openvino/op/floor_mod.hpp @@ -0,0 +1,42 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise FloorMod operation. 
+/// +class OPENVINO_API FloorMod : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs an uninitialized addition operation + FloorMod() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY){}; + + /// \brief Constructs an Floor Mod operation. + /// + /// \param arg0 Output that produces the first input tensor.
+ /// `[d0, ...]` + /// \param arg1 Output that produces the second input tensor.
+ /// `[d0, ...]` + /// \param auto_broadcast Auto broadcast specification + /// + /// Output `[d0, ...]` + /// + FloorMod(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastType::NUMPY); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor& visitor) override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/util/embeddingbag_offsets_base.hpp b/ngraph/core/include/openvino/op/util/embeddingbag_offsets_base.hpp index c7a6f5d7147..ea755e81a57 100644 --- a/ngraph/core/include/openvino/op/util/embeddingbag_offsets_base.hpp +++ b/ngraph/core/include/openvino/op/util/embeddingbag_offsets_base.hpp @@ -13,10 +13,7 @@ namespace util { /// \brief Returns embeddings for given indices class OPENVINO_API EmbeddingBagOffsetsBase : public Op { public: - static constexpr NodeTypeInfo type_info{"EmbeddingBagOffsetsBase", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } + OPENVINO_RTTI_DECLARATION; /// \brief Constructs a EmbeddingBagOffsetsBase operation. EmbeddingBagOffsetsBase() = default; /// \brief Constructs a EmbeddingBagOffsetsBase operation. 
diff --git a/ngraph/core/include/openvino/op/util/embeddingbag_packed_base.hpp b/ngraph/core/include/openvino/op/util/embeddingbag_packed_base.hpp index 9e23eb38962..89929fa3171 100644 --- a/ngraph/core/include/openvino/op/util/embeddingbag_packed_base.hpp +++ b/ngraph/core/include/openvino/op/util/embeddingbag_packed_base.hpp @@ -13,10 +13,7 @@ namespace util { /// \brief Returns embeddings for given indices class OPENVINO_API EmbeddingBagPackedBase : public Op { public: - static constexpr NodeTypeInfo type_info{"EmbeddingBagPackedBase", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } + OPENVINO_RTTI_DECLARATION; /// \brief Constructs a EmbeddingBagPackedBase operation. EmbeddingBagPackedBase() = default; /// \brief Constructs a EmbeddingBagPackedBase operation. diff --git a/ngraph/core/src/op/deformable_convolution.cpp b/ngraph/core/src/op/deformable_convolution.cpp index ad993c06cae..54ecd9cc3e1 100644 --- a/ngraph/core/src/op/deformable_convolution.cpp +++ b/ngraph/core/src/op/deformable_convolution.cpp @@ -14,8 +14,14 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::DeformableConvolution, "DeformableConvolution", 1, op::util::DeformableConvolutionBase); -NGRAPH_RTTI_DEFINITION(op::v8::DeformableConvolution, "DeformableConvolution", 8, op::util::DeformableConvolutionBase); +OPENVINO_RTTI_DEFINITION(op::v1::DeformableConvolution, + "DeformableConvolution", + 1, + op::util::DeformableConvolutionBase); +OPENVINO_RTTI_DEFINITION(op::v8::DeformableConvolution, + "DeformableConvolution", + 8, + op::util::DeformableConvolutionBase); op::v8::DeformableConvolution::DeformableConvolution(const Output& arg, const Output& offsets, diff --git a/ngraph/core/src/op/deformable_psroi_pooling.cpp b/ngraph/core/src/op/deformable_psroi_pooling.cpp index 5e6821112de..ac82f76745b 100644 --- a/ngraph/core/src/op/deformable_psroi_pooling.cpp +++ b/ngraph/core/src/op/deformable_psroi_pooling.cpp @@ -9,7 +9,7 @@ using 
namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::DeformablePSROIPooling, "DeformablePSROIPooling", 1); +OPENVINO_RTTI_DEFINITION(op::v1::DeformablePSROIPooling, "DeformablePSROIPooling", 1); op::v1::DeformablePSROIPooling::DeformablePSROIPooling(const Output& input, const Output& coords, diff --git a/ngraph/core/src/op/depth_to_space.cpp b/ngraph/core/src/op/depth_to_space.cpp index 3fd9aff4097..2b24cc8daa0 100644 --- a/ngraph/core/src/op/depth_to_space.cpp +++ b/ngraph/core/src/op/depth_to_space.cpp @@ -16,7 +16,7 @@ using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::DepthToSpace, "DepthToSpace", 0); +OPENVINO_RTTI_DEFINITION(op::v0::DepthToSpace, "DepthToSpace", 0); op::DepthToSpace::DepthToSpace(const Output& data, const DepthToSpaceMode& mode, const size_t block_size) : Op({data}), @@ -113,7 +113,7 @@ bool op::DepthToSpace::has_evaluate() const { return !get_input_partial_shape(0).is_dynamic(); } -std::ostream& ngraph::operator<<(std::ostream& s, const op::DepthToSpace::DepthToSpaceMode& type) { +std::ostream& ov::operator<<(std::ostream& s, const ov::op::v0::DepthToSpace::DepthToSpaceMode& type) { return s << as_string(type); } diff --git a/ngraph/core/src/op/detection_output.cpp b/ngraph/core/src/op/detection_output.cpp index bd49fcf8072..5ade2c7ff14 100644 --- a/ngraph/core/src/op/detection_output.cpp +++ b/ngraph/core/src/op/detection_output.cpp @@ -7,31 +7,30 @@ #include "itt.hpp" using namespace std; -using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::DetectionOutput, "DetectionOutput", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::DetectionOutput, "DetectionOutput", 0); -op::DetectionOutput::DetectionOutput(const Output& box_logits, - const Output& class_preds, - const Output& proposals, - const Output& aux_class_preds, - const Output& aux_box_preds, - const DetectionOutputAttrs& attrs) +ov::op::v0::DetectionOutput::DetectionOutput(const Output& box_logits, + const Output& class_preds, + const Output& proposals, + const Output& 
aux_class_preds, + const Output& aux_box_preds, + const Attributes& attrs) : Op({box_logits, class_preds, proposals, aux_class_preds, aux_box_preds}), m_attrs(attrs) { constructor_validate_and_infer_types(); } -op::DetectionOutput::DetectionOutput(const Output& box_logits, - const Output& class_preds, - const Output& proposals, - const DetectionOutputAttrs& attrs) +ov::op::v0::DetectionOutput::DetectionOutput(const Output& box_logits, + const Output& class_preds, + const Output& proposals, + const Attributes& attrs) : Op({box_logits, class_preds, proposals}), m_attrs(attrs) { constructor_validate_and_infer_types(); } -void op::DetectionOutput::validate_and_infer_types() { +void ov::op::v0::DetectionOutput::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_DetectionOutput_validate_and_infer_types); NODE_VALIDATION_CHECK(this, m_attrs.num_classes > 0, "Number of classes must be greater than zero"); @@ -205,12 +204,12 @@ void op::DetectionOutput::validate_and_infer_types() { } else { output_shape.push_back(num_images * num_prior_boxes * m_attrs.num_classes); } - output_shape.push_back(7); + output_shape.emplace_back(7); set_output_type(0, box_logits_et, output_shape); } -shared_ptr op::DetectionOutput::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr ov::op::v0::DetectionOutput::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_DetectionOutput_clone_with_new_inputs); check_new_args_count(this, new_args); @@ -230,7 +229,7 @@ shared_ptr op::DetectionOutput::clone_with_new_inputs(const OutputVector& } } -bool op::DetectionOutput::visit_attributes(AttributeVisitor& visitor) { +bool ov::op::v0::DetectionOutput::visit_attributes(AttributeVisitor& visitor) { NGRAPH_OP_SCOPE(v0_DetectionOutput_visit_attributes); visitor.on_attribute("num_classes", m_attrs.num_classes); visitor.on_attribute("background_label_id", m_attrs.background_label_id); diff --git a/ngraph/core/src/op/dft.cpp b/ngraph/core/src/op/dft.cpp index 
a6f31b5b7d8..b473e165e19 100644 --- a/ngraph/core/src/op/dft.cpp +++ b/ngraph/core/src/op/dft.cpp @@ -31,7 +31,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v7::DFT, "DFT", 7, util::FFTBase); +OPENVINO_RTTI_DEFINITION(op::v7::DFT, "DFT", 7, util::FFTBase); op::v7::DFT::DFT(const Output& data, const Output& axes) : FFTBase(data, axes) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/divide.cpp b/ngraph/core/src/op/divide.cpp index b6f07f90b98..3620fe54c96 100644 --- a/ngraph/core/src/op/divide.cpp +++ b/ngraph/core/src/op/divide.cpp @@ -53,7 +53,7 @@ bool evaluate_divide(const HostTensorPtr& arg0, // ------------------------------ v1 ------------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::Divide, "Divide", 1, util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v1::Divide, "Divide", 1, util::BinaryElementwiseArithmetic); op::v1::Divide::Divide(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/einsum.cpp b/ngraph/core/src/op/einsum.cpp index dd7ceba0b41..2cc2fd3ef14 100644 --- a/ngraph/core/src/op/einsum.cpp +++ b/ngraph/core/src/op/einsum.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v7::Einsum, "Einsum", 7); +OPENVINO_RTTI_DEFINITION(op::v7::Einsum, "Einsum", 7); op::v7::Einsum::Einsum(const OutputVector& inputs, const std::string& equation) : Op(inputs), m_equation(equation) { // normalize input equation by removing extra white-spaces from the equation diff --git a/ngraph/core/src/op/elu.cpp b/ngraph/core/src/op/elu.cpp index fad636d9b4e..76d27b58943 100644 --- a/ngraph/core/src/op/elu.cpp +++ b/ngraph/core/src/op/elu.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::Elu, "Elu", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Elu, "Elu", 0); op::Elu::Elu(const Output& 
data, const double alpha) : Op({data}), m_alpha{alpha} { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/embedding_segments_sum.cpp b/ngraph/core/src/op/embedding_segments_sum.cpp index f95facfec27..96ca10ba481 100644 --- a/ngraph/core/src/op/embedding_segments_sum.cpp +++ b/ngraph/core/src/op/embedding_segments_sum.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::v3::EmbeddingSegmentsSum::type_info; +OPENVINO_RTTI_DEFINITION(op::v3::EmbeddingSegmentsSum, "EmbeddingSegmentsSum", 3); op::v3::EmbeddingSegmentsSum::EmbeddingSegmentsSum(const Output& emb_table, const Output& indices, diff --git a/ngraph/core/src/op/embeddingbag_offsets_sum.cpp b/ngraph/core/src/op/embeddingbag_offsets_sum.cpp index 25d84725361..98b40f48ef2 100644 --- a/ngraph/core/src/op/embeddingbag_offsets_sum.cpp +++ b/ngraph/core/src/op/embeddingbag_offsets_sum.cpp @@ -10,7 +10,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::v3::EmbeddingBagOffsetsSum::type_info; +OPENVINO_RTTI_DEFINITION(op::v3::EmbeddingBagOffsetsSum, "EmbeddingBagOffsetsSum", 3, util::EmbeddingBagOffsetsBase); op::v3::EmbeddingBagOffsetsSum::EmbeddingBagOffsetsSum(const Output& emb_table, const Output& indices, diff --git a/ngraph/core/src/op/embeddingbag_packedsum.cpp b/ngraph/core/src/op/embeddingbag_packedsum.cpp index d3da20a33e4..d45b13ca853 100644 --- a/ngraph/core/src/op/embeddingbag_packedsum.cpp +++ b/ngraph/core/src/op/embeddingbag_packedsum.cpp @@ -10,7 +10,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::v3::EmbeddingBagPackedSum::type_info; +OPENVINO_RTTI_DEFINITION(op::v3::EmbeddingBagPackedSum, "EmbeddingBagPackedSum", 3, util::EmbeddingBagPackedBase); op::v3::EmbeddingBagPackedSum::EmbeddingBagPackedSum(const Output& emb_table, const Output& indices, diff --git a/ngraph/core/src/op/equal.cpp b/ngraph/core/src/op/equal.cpp index 7cf02c9d39f..f024f7b6af4 100644 --- 
a/ngraph/core/src/op/equal.cpp +++ b/ngraph/core/src/op/equal.cpp @@ -50,7 +50,7 @@ bool evaluate_equal(const HostTensorPtr& arg0, //------------------------------- v1 ------------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::Equal, "Equal", 1, op::util::BinaryElementwiseComparison); +OPENVINO_RTTI_DEFINITION(op::v1::Equal, "Equal", 1, op::util::BinaryElementwiseComparison); op::v1::Equal::Equal(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseComparison(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/erf.cpp b/ngraph/core/src/op/erf.cpp index 8fdfe5222cd..5315f447715 100644 --- a/ngraph/core/src/op/erf.cpp +++ b/ngraph/core/src/op/erf.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Erf, "Erf", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Erf, "Erf", 0, util::UnaryElementwiseArithmetic); bool ngraph::op::v0::Erf::visit_attributes(AttributeVisitor& visitor) { NGRAPH_OP_SCOPE(v0_Erf_visit_attributes); diff --git a/ngraph/core/src/op/exp.cpp b/ngraph/core/src/op/exp.cpp index 5e030eeadb8..8d841b23563 100644 --- a/ngraph/core/src/op/exp.cpp +++ b/ngraph/core/src/op/exp.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::Exp, "Exp", 0, UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Exp, "Exp", 0, UnaryElementwiseArithmetic); op::Exp::Exp(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/experimental_detectron_detection_output.cpp b/ngraph/core/src/op/experimental_detectron_detection_output.cpp index 25d1c8b4554..a0b7717fd4e 100644 --- a/ngraph/core/src/op/experimental_detectron_detection_output.cpp +++ b/ngraph/core/src/op/experimental_detectron_detection_output.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; 
-NGRAPH_RTTI_DEFINITION(op::v6::ExperimentalDetectronDetectionOutput, "ExperimentalDetectronDetectionOutput", 6); +OPENVINO_RTTI_DEFINITION(op::v6::ExperimentalDetectronDetectionOutput, "ExperimentalDetectronDetectionOutput", 6); op::v6::ExperimentalDetectronDetectionOutput::ExperimentalDetectronDetectionOutput(const Output& input_rois, const Output& input_deltas, diff --git a/ngraph/core/src/op/experimental_detectron_generate_proposals.cpp b/ngraph/core/src/op/experimental_detectron_generate_proposals.cpp index 114542ea880..d27c6bb2b98 100644 --- a/ngraph/core/src/op/experimental_detectron_generate_proposals.cpp +++ b/ngraph/core/src/op/experimental_detectron_generate_proposals.cpp @@ -12,9 +12,9 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v6::ExperimentalDetectronGenerateProposalsSingleImage, - "ExperimentalDetectronGenerateProposalsSingleImage", - 6); +OPENVINO_RTTI_DEFINITION(op::v6::ExperimentalDetectronGenerateProposalsSingleImage, + "ExperimentalDetectronGenerateProposalsSingleImage", + 6); op::v6::ExperimentalDetectronGenerateProposalsSingleImage::ExperimentalDetectronGenerateProposalsSingleImage( const Output& im_info, diff --git a/ngraph/core/src/op/experimental_detectron_prior_grid_generator.cpp b/ngraph/core/src/op/experimental_detectron_prior_grid_generator.cpp index ab93a19ba6f..42ea322e950 100644 --- a/ngraph/core/src/op/experimental_detectron_prior_grid_generator.cpp +++ b/ngraph/core/src/op/experimental_detectron_prior_grid_generator.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v6::ExperimentalDetectronPriorGridGenerator, "ExperimentalDetectronPriorGridGenerator", 6); +OPENVINO_RTTI_DEFINITION(op::v6::ExperimentalDetectronPriorGridGenerator, "ExperimentalDetectronPriorGridGenerator", 6); op::v6::ExperimentalDetectronPriorGridGenerator::ExperimentalDetectronPriorGridGenerator( const Output& priors, diff --git a/ngraph/core/src/op/experimental_detectron_roi_feature.cpp 
b/ngraph/core/src/op/experimental_detectron_roi_feature.cpp index bd9b55ba632..bd158e5388f 100644 --- a/ngraph/core/src/op/experimental_detectron_roi_feature.cpp +++ b/ngraph/core/src/op/experimental_detectron_roi_feature.cpp @@ -14,7 +14,9 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v6::ExperimentalDetectronROIFeatureExtractor, "ExperimentalDetectronROIFeatureExtractor", 6); +OPENVINO_RTTI_DEFINITION(op::v6::ExperimentalDetectronROIFeatureExtractor, + "ExperimentalDetectronROIFeatureExtractor", + 6); op::v6::ExperimentalDetectronROIFeatureExtractor::ExperimentalDetectronROIFeatureExtractor(const OutputVector& args, const Attributes& attrs) diff --git a/ngraph/core/src/op/experimental_detectron_topkrois.cpp b/ngraph/core/src/op/experimental_detectron_topkrois.cpp index c92fb94defb..6e6c0783862 100644 --- a/ngraph/core/src/op/experimental_detectron_topkrois.cpp +++ b/ngraph/core/src/op/experimental_detectron_topkrois.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v6::ExperimentalDetectronTopKROIs, "ExperimentalDetectronTopKROIs", 6); +OPENVINO_RTTI_DEFINITION(op::v6::ExperimentalDetectronTopKROIs, "ExperimentalDetectronTopKROIs", 6); op::v6::ExperimentalDetectronTopKROIs::ExperimentalDetectronTopKROIs(const Output& input_rois, const Output& rois_probs, diff --git a/ngraph/core/src/op/extractimagepatches.cpp b/ngraph/core/src/op/extractimagepatches.cpp index 941aa1c70e8..36203524827 100644 --- a/ngraph/core/src/op/extractimagepatches.cpp +++ b/ngraph/core/src/op/extractimagepatches.cpp @@ -12,7 +12,7 @@ using namespace ngraph; // ExtractImagePatches v3 -NGRAPH_RTTI_DEFINITION(op::v3::ExtractImagePatches, "ExtractImagePatches", 3); +OPENVINO_RTTI_DEFINITION(op::v3::ExtractImagePatches, "ExtractImagePatches", 3); op::v3::ExtractImagePatches::ExtractImagePatches(const Output& image, const Shape& sizes, diff --git a/ngraph/core/src/op/fake_quantize.cpp b/ngraph/core/src/op/fake_quantize.cpp 
index 324b87b651f..2282612009f 100644 --- a/ngraph/core/src/op/fake_quantize.cpp +++ b/ngraph/core/src/op/fake_quantize.cpp @@ -18,7 +18,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::FakeQuantize, "FakeQuantize", 0); +OPENVINO_RTTI_DEFINITION(op::v0::FakeQuantize, "FakeQuantize", 0); op::FakeQuantize::FakeQuantize() : Op(), m_levels() {} diff --git a/ngraph/core/src/op/floor.cpp b/ngraph/core/src/op/floor.cpp index 22554b7808b..efe732ebe66 100644 --- a/ngraph/core/src/op/floor.cpp +++ b/ngraph/core/src/op/floor.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Floor, "Floor", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Floor, "Floor", 0, util::UnaryElementwiseArithmetic); op::Floor::Floor(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/floor_mod.cpp b/ngraph/core/src/op/floor_mod.cpp index 6e72f11be28..7795900e70b 100644 --- a/ngraph/core/src/op/floor_mod.cpp +++ b/ngraph/core/src/op/floor_mod.cpp @@ -11,7 +11,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::FloorMod, "FloorMod", 1, op::util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v1::FloorMod, "FloorMod", 1, op::util::BinaryElementwiseArithmetic); op::v1::FloorMod::FloorMod(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp b/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp index 8a554ab9813..358ba88c043 100644 --- a/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp +++ b/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp @@ -9,7 +9,7 @@ using namespace std; -constexpr ov::NodeTypeInfo ov::op::util::EmbeddingBagOffsetsBase::type_info; +OPENVINO_RTTI_DEFINITION(ov::op::util::EmbeddingBagOffsetsBase, 
"EmbeddingBagOffsetsBase", 3); ov::op::util::EmbeddingBagOffsetsBase::EmbeddingBagOffsetsBase(const Output& emb_table, const Output& indices, diff --git a/ngraph/core/src/op/util/embeddingbag_packed_base.cpp b/ngraph/core/src/op/util/embeddingbag_packed_base.cpp index 35b99a00f41..734fbd5ff1d 100644 --- a/ngraph/core/src/op/util/embeddingbag_packed_base.cpp +++ b/ngraph/core/src/op/util/embeddingbag_packed_base.cpp @@ -9,7 +9,7 @@ using namespace std; -constexpr ov::NodeTypeInfo ov::op::util::EmbeddingBagPackedBase::type_info; +OPENVINO_RTTI_DEFINITION(ov::op::util::EmbeddingBagPackedBase, "EmbeddingBagPackedBase", 3); ov::op::util::EmbeddingBagPackedBase::EmbeddingBagPackedBase(const Output& emb_table, const Output& indices, From e3aed9854bc5c1f2fb437edefb8033289b3c0928 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Mon, 6 Sep 2021 11:07:20 +0300 Subject: [PATCH 16/52] Moved operations G-L to ov namespace (#7344) * Moved ngraph::Node to ov namespace * Fixed code style * Fixed VPU * Fixed GNA * Fixed tests * Added aliases for backward compatibility * Fix clDNN * Try to fix build * Fixed comment * Renamed RTTI macros * Moved op utils to ov namespace * Fixed ngraph library build * Fixed unit-tests * Changed src folder * Fixed recurrent_sequence * Changed low latency * Fixed serialize * Fixed ieFuncTests * Try to fix windows * Remove custom operator<< from tests * Fixed build * Moved operations from A to ov namespace * Moved operations from B and C to ov namespace * Moved operations D-F to ov namespace * Update ngraph/core/src/op/embeddingbag_offsets_sum.cpp Co-authored-by: Katarzyna Mitrus * Update ngraph/core/src/op/embeddingbag_packedsum.cpp Co-authored-by: Katarzyna Mitrus * Fixed RTTI * Moved operations G-L to ov namespace * Fixed RTTI Co-authored-by: Ilya Lavrenov Co-authored-by: Katarzyna Mitrus --- ngraph/core/include/ngraph/graph_util.hpp | 9 +- ngraph/core/include/ngraph/node.hpp | 9 +- ngraph/core/include/ngraph/op/gather.hpp | 66 +-- 
.../include/ngraph/op/gather_elements.hpp | 27 +- ngraph/core/include/ngraph/op/gather_nd.hpp | 28 +- ngraph/core/include/ngraph/op/gather_tree.hpp | 26 +- ngraph/core/include/ngraph/op/gelu.hpp | 70 +-- ngraph/core/include/ngraph/op/greater.hpp | 21 +- ngraph/core/include/ngraph/op/greater_eq.hpp | 21 +- ngraph/core/include/ngraph/op/grn.hpp | 27 +- ngraph/core/include/ngraph/op/group_conv.hpp | 261 +----------- ngraph/core/include/ngraph/op/gru_cell.hpp | 147 +------ .../core/include/ngraph/op/gru_sequence.hpp | 37 +- .../core/include/ngraph/op/hard_sigmoid.hpp | 23 +- ngraph/core/include/ngraph/op/hsigmoid.hpp | 22 +- ngraph/core/include/ngraph/op/hswish.hpp | 22 +- ngraph/core/include/ngraph/op/idft.hpp | 25 +- ngraph/core/include/ngraph/op/if.hpp | 80 +--- ngraph/core/include/ngraph/op/interpolate.hpp | 348 +-------------- ngraph/core/include/ngraph/op/less.hpp | 21 +- ngraph/core/include/ngraph/op/less_eq.hpp | 22 +- ngraph/core/include/ngraph/op/log.hpp | 18 +- ngraph/core/include/ngraph/op/log_softmax.hpp | 31 +- ngraph/core/include/ngraph/op/loop.hpp | 76 +--- ngraph/core/include/ngraph/op/lrn.hpp | 66 +-- ngraph/core/include/ngraph/op/lstm_cell.hpp | 381 +---------------- .../core/include/ngraph/op/lstm_sequence.hpp | 174 +------- ngraph/core/include/ngraph/op/parameter.hpp | 68 +-- ngraph/core/include/ngraph/op/result.hpp | 52 +-- .../include/ngraph/op/tensor_iterator.hpp | 27 +- ngraph/core/include/openvino/core/node.hpp | 14 +- ngraph/core/include/openvino/op/gather.hpp | 80 ++++ .../include/openvino/op/gather_elements.hpp | 39 ++ ngraph/core/include/openvino/op/gather_nd.hpp | 40 ++ .../core/include/openvino/op/gather_tree.hpp | 38 ++ ngraph/core/include/openvino/op/gelu.hpp | 81 ++++ ngraph/core/include/openvino/op/greater.hpp | 33 ++ .../core/include/openvino/op/greater_eq.hpp | 33 ++ ngraph/core/include/openvino/op/grn.hpp | 41 ++ .../core/include/openvino/op/group_conv.hpp | 273 ++++++++++++ ngraph/core/include/openvino/op/gru_cell.hpp | 160 
+++++++ .../core/include/openvino/op/gru_sequence.hpp | 54 +++ .../core/include/openvino/op/hard_sigmoid.hpp | 35 ++ ngraph/core/include/openvino/op/hsigmoid.hpp | 35 ++ ngraph/core/include/openvino/op/hswish.hpp | 35 ++ ngraph/core/include/openvino/op/idft.hpp | 41 ++ ngraph/core/include/openvino/op/if.hpp | 94 +++++ .../core/include/openvino/op/interpolate.hpp | 360 ++++++++++++++++ ngraph/core/include/openvino/op/less.hpp | 33 ++ ngraph/core/include/openvino/op/less_eq.hpp | 34 ++ ngraph/core/include/openvino/op/log.hpp | 30 ++ .../core/include/openvino/op/log_softmax.hpp | 43 ++ ngraph/core/include/openvino/op/loop.hpp | 90 ++++ ngraph/core/include/openvino/op/lrn.hpp | 78 ++++ ngraph/core/include/openvino/op/lstm_cell.hpp | 397 ++++++++++++++++++ .../include/openvino/op/lstm_sequence.hpp | 196 +++++++++ ngraph/core/include/openvino/op/parameter.hpp | 78 ++++ ngraph/core/include/openvino/op/result.hpp | 61 +++ .../include/openvino/op/tensor_iterator.hpp | 43 ++ ngraph/core/src/op/gather.cpp | 6 +- ngraph/core/src/op/gather_elements.cpp | 2 +- ngraph/core/src/op/gather_nd.cpp | 2 +- ngraph/core/src/op/gather_tree.cpp | 2 +- ngraph/core/src/op/gelu.cpp | 8 +- ngraph/core/src/op/greater.cpp | 2 +- ngraph/core/src/op/greater_eq.cpp | 2 +- ngraph/core/src/op/grn.cpp | 2 +- ngraph/core/src/op/group_conv.cpp | 10 +- ngraph/core/src/op/gru_cell.cpp | 8 +- ngraph/core/src/op/gru_sequence.cpp | 2 +- ngraph/core/src/op/hard_sigmoid.cpp | 6 +- ngraph/core/src/op/hsigmoid.cpp | 2 +- ngraph/core/src/op/hswish.cpp | 2 +- ngraph/core/src/op/idft.cpp | 2 +- ngraph/core/src/op/if.cpp | 18 +- ngraph/core/src/op/interpolate.cpp | 42 +- ngraph/core/src/op/less.cpp | 2 +- ngraph/core/src/op/less_eq.cpp | 2 +- ngraph/core/src/op/log.cpp | 2 +- ngraph/core/src/op/log_softmax.cpp | 2 +- ngraph/core/src/op/loop.cpp | 8 +- ngraph/core/src/op/lrn.cpp | 6 +- ngraph/core/src/op/lstm_cell.cpp | 20 +- ngraph/core/src/op/lstm_sequence.cpp | 4 +- ngraph/core/src/op/parameter.cpp | 2 +- 
ngraph/core/src/op/result.cpp | 2 +- ngraph/core/src/op/tensor_iterator.cpp | 2 +- 87 files changed, 2728 insertions(+), 2216 deletions(-) create mode 100644 ngraph/core/include/openvino/op/gather.hpp create mode 100644 ngraph/core/include/openvino/op/gather_elements.hpp create mode 100644 ngraph/core/include/openvino/op/gather_nd.hpp create mode 100644 ngraph/core/include/openvino/op/gather_tree.hpp create mode 100644 ngraph/core/include/openvino/op/gelu.hpp create mode 100644 ngraph/core/include/openvino/op/greater.hpp create mode 100644 ngraph/core/include/openvino/op/greater_eq.hpp create mode 100644 ngraph/core/include/openvino/op/grn.hpp create mode 100644 ngraph/core/include/openvino/op/group_conv.hpp create mode 100644 ngraph/core/include/openvino/op/gru_cell.hpp create mode 100644 ngraph/core/include/openvino/op/gru_sequence.hpp create mode 100644 ngraph/core/include/openvino/op/hard_sigmoid.hpp create mode 100644 ngraph/core/include/openvino/op/hsigmoid.hpp create mode 100644 ngraph/core/include/openvino/op/hswish.hpp create mode 100644 ngraph/core/include/openvino/op/idft.hpp create mode 100644 ngraph/core/include/openvino/op/if.hpp create mode 100644 ngraph/core/include/openvino/op/interpolate.hpp create mode 100644 ngraph/core/include/openvino/op/less.hpp create mode 100644 ngraph/core/include/openvino/op/less_eq.hpp create mode 100644 ngraph/core/include/openvino/op/log.hpp create mode 100644 ngraph/core/include/openvino/op/log_softmax.hpp create mode 100644 ngraph/core/include/openvino/op/loop.hpp create mode 100644 ngraph/core/include/openvino/op/lrn.hpp create mode 100644 ngraph/core/include/openvino/op/lstm_cell.hpp create mode 100644 ngraph/core/include/openvino/op/lstm_sequence.hpp create mode 100644 ngraph/core/include/openvino/op/parameter.hpp create mode 100644 ngraph/core/include/openvino/op/result.hpp create mode 100644 ngraph/core/include/openvino/op/tensor_iterator.hpp diff --git a/ngraph/core/include/ngraph/graph_util.hpp 
b/ngraph/core/include/ngraph/graph_util.hpp index 0fc8ae0221b..7a3900dd10f 100644 --- a/ngraph/core/include/ngraph/graph_util.hpp +++ b/ngraph/core/include/ngraph/graph_util.hpp @@ -18,11 +18,18 @@ #include "ngraph/function.hpp" #include "ngraph/node.hpp" +namespace ov { +namespace op { +namespace v0 { +class Parameter; +} +} // namespace op +} // namespace ov namespace ngraph { namespace op { namespace v0 { -class Parameter; +using ov::op::v0::Parameter; } } // namespace op diff --git a/ngraph/core/include/ngraph/node.hpp b/ngraph/core/include/ngraph/node.hpp index 8d4259f8c34..5910b2b90f5 100644 --- a/ngraph/core/include/ngraph/node.hpp +++ b/ngraph/core/include/ngraph/node.hpp @@ -38,6 +38,13 @@ #include "ngraph/variant.hpp" #include "openvino/core/node.hpp" +namespace ov { +namespace op { +namespace v0 { +class Result; +} +} // namespace op +} // namespace ov namespace ngraph { using ov::Node; @@ -52,7 +59,7 @@ using HostTensorVector = std::vector; namespace op { namespace v0 { -class Result; +using ov::op::v0::Result; } } // namespace op diff --git a/ngraph/core/include/ngraph/op/gather.hpp b/ngraph/core/include/ngraph/op/gather.hpp index c81397fcf12..31f2e1d3ee7 100644 --- a/ngraph/core/include/ngraph/op/gather.hpp +++ b/ngraph/core/include/ngraph/op/gather.hpp @@ -5,76 +5,18 @@ #pragma once #include "ngraph/op/util/gather_base.hpp" +#include "openvino/op/gather.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Gather slices from axis of data according to indices -class NGRAPH_API Gather : public op::util::GatherBase { -public: - NGRAPH_RTTI_DECLARATION; - static const int64_t AXIS_NOT_SET_VALUE = std::numeric_limits::max(); - Gather() = default; - /// \param data The tensor from which slices are gathered - /// \param indices Tensor with indexes to gather - /// \param axis The tensor is a dimension index to gather data from - Gather(const Output& params, const Output& indices, const Output& axis); - - bool visit_attributes(AttributeVisitor& 
visitor) override; - int64_t get_axis() const override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v1::Gather; } // namespace v1 - namespace v7 { -/// \brief Gather slices from axis of data according to indices -class NGRAPH_API Gather : public op::util::GatherBase { -public: - NGRAPH_RTTI_DECLARATION; - Gather() = default; - - /// \param data The tensor from which slices are gathered - /// \param indices Tensor with indexes to gather - /// \param axis The tensor is a dimension index to gather data from - /// \param batch_dims The number of batch dimension in data and indices tensors. - /// If batch_dims = 0 Gather v7 is identical to Gather v1. - Gather(const Output& data, - const Output& indices, - const Output& axis, - const int64_t batch_dims = 0); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - int64_t get_batch_dims() const; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v7::Gather; } // namespace v7 - namespace v8 { -/// \brief Gather slices from axis of data according to indices. Negative indices -/// are supported and indicate reverse indexing from the end -class NGRAPH_API Gather : public op::util::GatherBase { -public: - NGRAPH_RTTI_DECLARATION; - Gather() = default; - - /// \param data The tensor from which slices are gathered - /// \param indices Tensor with indexes to gather - /// \param axis The tensor is a dimension index to gather data from - /// \param batch_dims The number of batch dimension in data and indices tensors. 
- Gather(const Output& data, - const Output& indices, - const Output& axis, - const int64_t batch_dims = 0); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - int64_t get_batch_dims() const; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v8::Gather; } // namespace v8 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/gather_elements.hpp b/ngraph/core/include/ngraph/op/gather_elements.hpp index 36c36caeec4..9dbfa1c1644 100644 --- a/ngraph/core/include/ngraph/op/gather_elements.hpp +++ b/ngraph/core/include/ngraph/op/gather_elements.hpp @@ -5,35 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/gather_elements.hpp" namespace ngraph { namespace op { namespace v6 { -/// \brief GatherElements operation -/// -class NGRAPH_API GatherElements : public Op { -public: - NGRAPH_RTTI_DECLARATION; - GatherElements() = default; - - /// \brief Constructs a GatherElements operation. 
- /// - /// \param data Node producing data that are gathered - /// \param indices Node producing indices by which the operation gathers elements - /// \param axis specifies axis along which indices are specified - GatherElements(const Output& data, const Output& indices, const int64_t axis); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - int64_t get_axis() const { - return m_axis; - } - -private: - int64_t m_axis; -}; +using ov::op::v6::GatherElements; } // namespace v6 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/gather_nd.hpp b/ngraph/core/include/ngraph/op/gather_nd.hpp index fffcc96e653..9689be8b854 100644 --- a/ngraph/core/include/ngraph/op/gather_nd.hpp +++ b/ngraph/core/include/ngraph/op/gather_nd.hpp @@ -5,36 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/gather_nd.hpp" namespace ngraph { namespace op { namespace v5 { -/// \brief GatherND operation -/// -class NGRAPH_API GatherND : public Op { -public: - NGRAPH_RTTI_DECLARATION; - GatherND() = default; - - /// \brief Constructs a GatherND operation. 
- /// - /// \param data Node producing data that are gathered - /// \param indices Node producing indices by which the operation gathers elements - /// or slices from data - /// \param batch_dims Specifies a number of batch dimensions - GatherND(const Output& data, const Output& indices, const size_t batch_dims = 0); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - size_t get_batch_dims() const { - return m_batch_dims; - } - -private: - size_t m_batch_dims; -}; +using ov::op::v5::GatherND; } // namespace v5 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/gather_tree.hpp b/ngraph/core/include/ngraph/op/gather_tree.hpp index 792531f03e2..9f015b9bcd1 100644 --- a/ngraph/core/include/ngraph/op/gather_tree.hpp +++ b/ngraph/core/include/ngraph/op/gather_tree.hpp @@ -5,34 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/gather_tree.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Generates the complete beams from the ids per each step and the parent beam -/// ids. 
-class NGRAPH_API GatherTree : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - GatherTree() = default; - /// \param step_ids Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with - /// indices from per each step - /// \param parent_idx Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with - /// parent beam indices - /// \param max_seq_len Tensor of shape [BATCH_SIZE] with maximum lengths for each - /// sequence in the batch - /// \param end_token Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] - GatherTree(const Output& step_ids, - const Output& parent_idx, - const Output& max_seq_len, - const Output& end_token); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v1::GatherTree; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/gelu.hpp b/ngraph/core/include/ngraph/op/gelu.hpp index e662bada82f..08b0e813205 100644 --- a/ngraph/core/include/ngraph/op/gelu.hpp +++ b/ngraph/core/include/ngraph/op/gelu.hpp @@ -7,81 +7,19 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/gelu.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Gaussian Error Linear Unit -/// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) ) -class NGRAPH_API Gelu : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - Gelu(); - /// \brief Constructs a Gelu operation. 
- /// - /// \param data Input tensor - Gelu(const Output& data); - - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v0::Gelu; } // namespace v0 using v0::Gelu; -/// \brief Specifies the approximation to calculate Gelu -enum class GeluApproximationMode { TANH, ERF }; -NGRAPH_API std::ostream& operator<<(std::ostream& s, const GeluApproximationMode& type); +using ov::op::GeluApproximationMode; namespace v7 { -/// \brief Gaussian Error Linear Unit -/// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) ) for "approximation" = "erf" -/// f(x) = 0.5 * x * (1 + tanh([sqrt(2 / pi)] * [x + 0.044715^3]) for "approximation" = -/// "tanh" -class NGRAPH_API Gelu : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - Gelu() = default; - /// \brief Constructs a Gelu operation. - /// - /// \param data Input tensor - /// \param mode Approximation mode - Gelu(const Output& data, GeluApproximationMode mode = GeluApproximationMode::ERF); - - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - GeluApproximationMode get_approximation_mode() const; - -private: - GeluApproximationMode m_approximation_mode = GeluApproximationMode::ERF; -}; +using ov::op::v7::Gelu; } // namespace v7 } // namespace op } // namespace ngraph - -namespace ov { -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::GeluApproximationMode& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; - const DiscreteTypeInfo& 
get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/greater.hpp b/ngraph/core/include/ngraph/op/greater.hpp index b89089fbb5f..1302e88d867 100644 --- a/ngraph/core/include/ngraph/op/greater.hpp +++ b/ngraph/core/include/ngraph/op/greater.hpp @@ -5,29 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_comparison.hpp" +#include "openvino/op/greater.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise greater-than operation. -class NGRAPH_API Greater : public util::BinaryElementwiseComparison { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a greater-than operation. - Greater() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} - /// \brief Constructs a greater-than operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. - /// \param auto_broadcast Auto broadcast specification - Greater(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Greater; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/greater_eq.hpp b/ngraph/core/include/ngraph/op/greater_eq.hpp index f9e9fc61a58..061628e004f 100644 --- a/ngraph/core/include/ngraph/op/greater_eq.hpp +++ b/ngraph/core/include/ngraph/op/greater_eq.hpp @@ -5,29 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_comparison.hpp" +#include "openvino/op/greater_eq.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise greater-than-or-equal operation. 
-class NGRAPH_API GreaterEqual : public util::BinaryElementwiseComparison { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a greater-than-or-equal operation. - GreaterEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} - /// \brief Constructs a greater-than-or-equal operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. - /// \param auto_broadcast Auto broadcast specification - GreaterEqual(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::GreaterEqual; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/grn.hpp b/ngraph/core/include/ngraph/op/grn.hpp index 58471d0b882..03133d31f05 100644 --- a/ngraph/core/include/ngraph/op/grn.hpp +++ b/ngraph/core/include/ngraph/op/grn.hpp @@ -7,35 +7,12 @@ #include #include "ngraph/op/op.hpp" +#include "openvino/op/grn.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Global Response Normalization with L2 norm (across channels only). -/// -class NGRAPH_API GRN : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - GRN() = default; - /// \brief Constructs a GRN operation. - /// - /// \param data - Node producing the input tensor - /// \param bias - The bias added to the variance. 
- /// - GRN(const Output& data, float bias); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - float get_bias() const { - return m_bias; - } - -protected: - float m_bias = 1.0f; -}; +using ov::op::v0::GRN; } // namespace v0 using v0::GRN; } // namespace op diff --git a/ngraph/core/include/ngraph/op/group_conv.hpp b/ngraph/core/include/ngraph/op/group_conv.hpp index 42d79149c87..352093535e8 100644 --- a/ngraph/core/include/ngraph/op/group_conv.hpp +++ b/ngraph/core/include/ngraph/op/group_conv.hpp @@ -7,268 +7,13 @@ #include "ngraph/op/convolution.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/group_conv.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Batched convolution operation, with optional window dilation and stride. -class NGRAPH_API GroupConvolution : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a batched convolution operation. - GroupConvolution() = default; - /// \brief Constructs a batched convolution operation. - /// - /// \param data_batch The node producing the input data batch tensor.
- /// `[N, C_IN, D1, ... Df]` - /// \param filters The node producing the filters tensor.
- /// `[GROUPS, FC_OUT, FC_IN, F1, ... Ff]` - /// \param strides The strides.
- /// `[f]` - /// \param dilations The dilations.
- /// `[f]` - /// \param pads_begin The beginning of padding shape.
- /// `[f]` - /// \param pads_end The end of padding shape.
- /// `[f]` - /// \param auto_pad The pad type for automatically computing padding sizes.
- /// `[f]` - /// - /// Output `[N, FC_OUT * GROUPS, R1, ... Rf]` - /// - GroupConvolution(const Output& data_batch, - const Output& filters, - const Strides& strides, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const Strides& dilations, - const PadType& auto_pad = PadType::EXPLICIT); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - /// \return The strides. - const Strides& get_strides() const { - return m_strides; - } - void set_strides(const Strides& strides) { - m_strides = strides; - } - /// \return The dilations. - const Strides& get_dilations() const { - return m_dilations; - } - void set_dilations(const Strides& dilations) { - m_dilations = dilations; - } - /// \return The padding-below sizes (possibly negative). - const CoordinateDiff& get_pads_begin() const { - return m_pads_begin; - } - void set_pads_begin(const CoordinateDiff& pads_begin) { - m_pads_begin = pads_begin; - } - /// \return The padding-above sizes (possibly negative). - const CoordinateDiff& get_pads_end() const { - return m_pads_end; - } - void set_adding_above(const CoordinateDiff& pads_end) { - m_pads_end = pads_end; - } - /// \return The pad type for convolution. - const PadType& get_auto_pad() const { - return m_auto_pad; - } - void set_auto_pad(const PadType& auto_pad) { - m_auto_pad = auto_pad; - } - /// \return The default value for Convolution. - NGRAPH_SUPPRESS_DEPRECATED_START - virtual std::shared_ptr get_default_value() const override; - NGRAPH_SUPPRESS_DEPRECATED_END - -protected: - Strides m_strides; - Strides m_dilations; - CoordinateDiff m_pads_begin; - CoordinateDiff m_pads_end; - PadType m_auto_pad; -}; - -/// \brief Data batch backprop for batched convolution operation. 
-class NGRAPH_API GroupConvolutionBackpropData : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a batched-convolution data batch-backprop operation. - GroupConvolutionBackpropData(); - // clang-format off - // - // \brief Constructs a batched-convolution data batch-backprop operation. - // - // \param data The node producing data from forward-prop. Shape: [N, - // C_INPUT * GROUPS, X1, ..., XD]. - // \param filter The node producing the filter from forward-prop. Shape: - // [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1] - // \param output_shape The shape of the data batch from forward-prop. It's size - // should be equal to number of data spatial dimensions. - // \param strides The strides from forward-prop. - // \param pads_begin The padding-below sizes from forward-prop. - // \param pads_end The padding-above sizes from forward-prop. - // \param dilations The dilations from forward-prop. - // \param auto_pad The pad type for automatically computing padding sizes. - // \param output_padding The output padding adds additional amount of paddings per - // each spatial axis in the output tensor. - // - // clang-format on - // - GroupConvolutionBackpropData(const Output& data, - const Output& filter, - const Output& output_shape, - const Strides& strides, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const Strides& dilations, - const PadType& auto_pad = PadType::EXPLICIT, - const CoordinateDiff& output_padding = {}); - - // clang-format off - // - // \brief Constructs a batched-convolution data batch-backprop operation. - // - // \param data The node producing data from forward-prop. Shape: [N, - // C_INPUT * GROUPS, X1, ..., XD]. - // \param filter The node producing the filter from forward-prop. Shape: - // [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1] - // \param output_shape The shape of the data batch from forward-prop. It's size - // should be equal to number of data spatial dimensions. 
- // \param strides The strides from forward-prop. - // \param dilations The dilations from forward-prop. - // \param auto_pad The pad type for automatically computing padding sizes. - // \param output_padding The output padding adds additional amount of paddings per - // each spatial axis in the output tensor. - // - // clang-format on - // - GroupConvolutionBackpropData(const Output& data, - const Output& filter, - const Output& output_shape, - const Strides& strides, - const Strides& dilations, - const PadType& auto_pad, - const CoordinateDiff& output_padding = {}); - - // clang-format off - // - // \brief Constructs a batched-convolution data batch-backprop operation. - // - // \param data The node producing data from forward-prop. Shape: - // [N, C_INPUT * GROUPS, X1, ..., XD]. - // \param filter The node producing the filter from forward-prop. Shape: - // [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1] - // \param strides The strides from forward-prop. - // \param pads_begin The padding-below sizes from forward-prop. - // \param pads_end The padding-above sizes from forward-prop. - // \param dilations The dilations from forward-prop. - // \param auto_pad The pad type for automatically computing padding sizes. - // \param output_padding The output padding adds additional amount of paddings per - // each spatial axis in the output tensor. - // - // clang-format on - GroupConvolutionBackpropData(const Output& data, - const Output& filter, - const Strides& strides, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const Strides& dilations, - const PadType& auto_pad = PadType::EXPLICIT, - const CoordinateDiff& output_padding = {}); - /// - /// \brief Calculates output spatial features size. - /// - /// \param[in] input_data_shape The input data partial shape - /// \param[in] filters_shape The filters partial shape - /// \param[in] strides The strides values. - /// \param[in] dilations The dilations values. 
- /// \param[in] pads_begin The paddings at the beginning of axis. - /// \param[in] pads_end The paddings at the end of axis. - /// \param[in] output_padding The output padding values. - /// \param output_spatial_shape The placeholder for computed output spatial - /// partial - /// shape. - /// - void infer_conv_backprop_output_spatial_shape(const std::vector& input_data_shape, - const std::vector& filters_shape, - const Strides& strides, - const Strides& dilations, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const CoordinateDiff& output_padding, - std::vector& output_spatial_shape); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual bool is_dynamic() const override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The spatial shape of the output. - const PartialShape get_convolution_output_shape() const; - void set_output_shape(const Shape& output_shape); - /// \return The strides from the forward prop. - const Strides& get_strides() const { - return m_strides; - } - void set_strides(const Strides& strides) { - m_strides = strides; - } - /// \return The dilations from the forward prop. - const Strides& get_dilations() const { - return m_dilations; - } - void set_dilations(const Strides& dilations) { - m_dilations = dilations; - } - /// \return The number of pixels to add to the beginning along each axis. - const CoordinateDiff& get_pads_begin() const { - return m_pads_begin; - } - void set_pads_begin(const CoordinateDiff& pads_begin) { - m_pads_begin = pads_begin; - } - /// \return The number of pixels to add to the ending along each axis. - const CoordinateDiff& get_pads_end() const { - return m_pads_end; - } - void set_pads_end(const CoordinateDiff& pads_end) { - m_pads_end = pads_end; - } - /// \return The auto pad. 
- const PadType& get_auto_pad() const { - return m_auto_pad; - } - void set_auto_pad(const PadType& auto_pad) { - m_auto_pad = auto_pad; - } - /// \return The output padding. - const CoordinateDiff& get_output_padding() const { - return m_output_padding; - } - void set_output_padding(const CoordinateDiff& output_padding) { - m_output_padding = output_padding; - } - -protected: - Strides m_strides; - Strides m_dilations; - CoordinateDiff m_pads_begin; - CoordinateDiff m_pads_end; - PadType m_auto_pad; - CoordinateDiff m_output_padding; -}; - +using ov::op::v1::GroupConvolution; +using ov::op::v1::GroupConvolutionBackpropData; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/gru_cell.hpp b/ngraph/core/include/ngraph/op/gru_cell.hpp index 3fa8e4d6c40..ed5c7e532a2 100644 --- a/ngraph/core/include/ngraph/op/gru_cell.hpp +++ b/ngraph/core/include/ngraph/op/gru_cell.hpp @@ -13,155 +13,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/activation_functions.hpp" #include "ngraph/op/util/rnn_cell_base.hpp" +#include "openvino/op/gru_cell.hpp" namespace ngraph { namespace op { namespace v3 { -/// -/// \brief Class for GRU cell node. -/// -/// \note It follows notation and equations defined as in ONNX standard: -/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#GRU -/// -/// Note this class represents only single *cell* and not whole GRU *layer*. -/// -class NGRAPH_API GRUCell : public util::RNNCellBase { -public: - static constexpr NodeTypeInfo type_info{"GRUCell", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - GRUCell(); - /// - /// \brief Constructs GRUCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. 
- /// \param[in] W The weight tensor with shape: - /// [gates_count * hidden_size, input_size]. - /// \param[in] R The recurrence weight tensor with shape: - /// [gates_count * hidden_size, hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// - GRUCell(const Output& X, - const Output& initial_hidden_state, - const Output& W, - const Output& R, - std::size_t hidden_size); - - /// - /// \brief Constructs GRUCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The weight tensor with shape: - /// [gates_count * hidden_size, input_size]. - /// \param[in] R The recurrence weight tensor with shape: - /// [gates_count * hidden_size, hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. - /// - GRUCell(const Output& X, - const Output& initial_hidden_state, - const Output& W, - const Output& R, - std::size_t hidden_size, - const std::vector& activations, - const std::vector& activations_alpha, - const std::vector& activations_beta, - float clip, - bool linear_before_reset); - - /// - /// \brief Constructs GRUCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. 
- /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The weight tensor with shape: [gates_count * - /// hidden_size, input_size]. - /// \param[in] R The recurrence weight tensor with shape: - /// [gates_count * hidden_size, hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] B The sum of biases (weight and recurrence) for - /// update, reset and hidden gates. - /// If linear_before_reset := true then biases for - /// hidden gates are - /// placed separately (weight and recurrence). - /// Shape: [gates_count * hidden_size] if - /// linear_before_reset := false - /// Shape: [(gates_count + 1) * hidden_size] if - /// linear_before_reset := true - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. - /// \param[in] linear_before_reset Whether or not to apply the linear - /// transformation before multiplying by the - /// output of the reset gate. 
- /// - GRUCell(const Output& X, - const Output& initial_hidden_state, - const Output& W, - const Output& R, - const Output& B, - std::size_t hidden_size, - const std::vector& activations = std::vector{"sigmoid", "tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f, - bool linear_before_reset = false); - - virtual void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool get_linear_before_reset() const { - return m_linear_before_reset; - } - -private: - /// brief Add and initialize bias input to all zeros. - void add_default_bias_input(); - - /// - /// \brief The Activation function f. - /// - util::ActivationFunction m_activation_f; - /// - /// \brief The Activation function g. - /// - util::ActivationFunction m_activation_g; - - static constexpr std::size_t s_gates_count{3}; - /// - /// \brief Control whether or not apply the linear transformation. - /// - /// \note The linear transformation may be applied when computing the output of - /// hidden gate. It's done before multiplying by the output of the reset gate. 
- /// - bool m_linear_before_reset; -}; +using ov::op::v3::GRUCell; } // namespace v3 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/gru_sequence.hpp b/ngraph/core/include/ngraph/op/gru_sequence.hpp index f6e80740fcc..f5b54873316 100644 --- a/ngraph/core/include/ngraph/op/gru_sequence.hpp +++ b/ngraph/core/include/ngraph/op/gru_sequence.hpp @@ -10,45 +10,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/rnn_cell_base.hpp" +#include "openvino/op/gru_sequence.hpp" namespace ngraph { namespace op { namespace v5 { -class NGRAPH_API GRUSequence : public util::RNNCellBase { -public: - NGRAPH_RTTI_DECLARATION; - GRUSequence(); - - GRUSequence(const Output& X, - const Output& H_t, - const Output& sequence_lengths, - const Output& W, - const Output& R, - const Output& B, - size_t hidden_size, - op::RecurrentSequenceDirection direction, - const std::vector& activations = std::vector{"sigmoid", "tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f, - bool linear_before_reset = false); - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - void validate_and_infer_types() override; - - bool visit_attributes(AttributeVisitor& visitor) override; - bool get_linear_before_reset() const { - return m_linear_before_reset; - } - op::RecurrentSequenceDirection get_direction() const { - return m_direction; - } - -protected: - op::RecurrentSequenceDirection m_direction; - bool m_linear_before_reset; -}; +using ov::op::v5::GRUSequence; } // namespace v5 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/hard_sigmoid.hpp b/ngraph/core/include/ngraph/op/hard_sigmoid.hpp index 1a6c56d2fe1..03b8a0e72da 100644 --- a/ngraph/core/include/ngraph/op/hard_sigmoid.hpp +++ b/ngraph/core/include/ngraph/op/hard_sigmoid.hpp @@ -6,31 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include 
"openvino/op/hard_sigmoid.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Parameterized, bounded sigmoid-like, piecewise linear -/// function. min(max(alpha*x + beta, 0), 1) -/// -class NGRAPH_API HardSigmoid : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - HardSigmoid(); - - /// \brief Constructs a HardSigmoid operation. - /// - /// \param data Input tensor. - /// \param[in] alpha A scalar value representing the alpha parameter. - /// \param[in] beta A scalar value representing the beta parameter. - /// - HardSigmoid(const Output& data, const Output& alpha, const Output& beta); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v0::HardSigmoid; } // namespace v0 using v0::HardSigmoid; } // namespace op diff --git a/ngraph/core/include/ngraph/op/hsigmoid.hpp b/ngraph/core/include/ngraph/op/hsigmoid.hpp index 8913c6e809d..8bd8dddb4b5 100644 --- a/ngraph/core/include/ngraph/op/hsigmoid.hpp +++ b/ngraph/core/include/ngraph/op/hsigmoid.hpp @@ -7,30 +7,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/hsigmoid.hpp" namespace ngraph { namespace op { namespace v5 { -/// \brief A HSigmoid Activation Function -/// f(x) = min(max(x + 3, 0), 6) / 6 or -/// f(x) = min(ReLU(x + 3), 6) / 6 -/// -class NGRAPH_API HSigmoid : public ngraph::op::util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - HSigmoid() = default; - - /// \brief Constructs a HSigmoid operation. 
- /// - /// \param data Input tensor - HSigmoid(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v5::HSigmoid; } // namespace v5 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/hswish.hpp b/ngraph/core/include/ngraph/op/hswish.hpp index ce469d5508c..60aa9fb4dbf 100644 --- a/ngraph/core/include/ngraph/op/hswish.hpp +++ b/ngraph/core/include/ngraph/op/hswish.hpp @@ -7,30 +7,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/hswish.hpp" namespace ngraph { namespace op { namespace v4 { -/// \brief A HSwish Activation Function -/// f(x) = x * min(max(x + 3, 0), 6) / 6 or -/// f(x) = x * min(ReLU(x + 3), 6) / 6 -/// -class NGRAPH_API HSwish : public ngraph::op::util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - HSwish() = default; - - /// \brief Constructs a HSwish (hard version of Swish) operation. 
- /// - /// \param data Input tensor - HSwish(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v4::HSwish; } // namespace v4 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/idft.hpp b/ngraph/core/include/ngraph/op/idft.hpp index 7955cbcad82..bddd5873814 100644 --- a/ngraph/core/include/ngraph/op/idft.hpp +++ b/ngraph/core/include/ngraph/op/idft.hpp @@ -11,33 +11,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" #include "ngraph/op/util/fft_base.hpp" +#include "openvino/op/idft.hpp" namespace ngraph { namespace op { namespace v7 { -/// \brief An operation IDFT that computes the inverse discrete Fourier transformation. -class NGRAPH_API IDFT : public util::FFTBase { -public: - NGRAPH_RTTI_DECLARATION; - IDFT() = default; - - /// \brief Constructs a IDFT operation. IDFT is performed for full size axes. - /// - /// \param data Input data - /// \param axes Axes to perform IDFT - IDFT(const Output& data, const Output& axes); - - /// \brief Constructs a IDFT operation. 
- /// - /// \param data Input data - /// \param axes Axes to perform IDFT - /// \param signal_size Signal sizes for 'axes' - IDFT(const Output& data, const Output& axes, const Output& signal_size); - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v7::IDFT; } // namespace v7 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/if.hpp b/ngraph/core/include/ngraph/op/if.hpp index 32ed1d5b846..f0cea6373ef 100644 --- a/ngraph/core/include/ngraph/op/if.hpp +++ b/ngraph/core/include/ngraph/op/if.hpp @@ -9,86 +9,12 @@ #include "ngraph/function.hpp" #include "ngraph/op/parameter.hpp" #include "ngraph/op/util/multi_subgraph_base.hpp" +#include "openvino/op/if.hpp" namespace ngraph { namespace op { namespace v8 { -/// \brief If operation. -class NGRAPH_API If : public util::MultiSubGraphOp { -public: - enum BodyIndexes { THEN_BODY_INDEX = 0, ELSE_BODY_INDEX = 1 }; - - NGRAPH_RTTI_DECLARATION; - bool visit_attributes(AttributeVisitor& visitor) override; - - /// \brief Constructs If with condition - /// - /// \param execution_condition condition node. - If(const Output& execution_condition); - If(); - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \brief gets then_body as ngraph::Function. - /// - /// \return then_body as ngraph::Function. - const std::shared_ptr& get_then_body() const { - return m_bodies[THEN_BODY_INDEX]; - } - - /// \brief gets else_body as ngraph::Function. - /// - /// \return else_body as ngraph::Function. - const std::shared_ptr& get_else_body() const { - return m_bodies[ELSE_BODY_INDEX]; - } - - /// \brief sets new ngraph::Function as new then_body. - /// - /// \param body new body for 'then' branch. - void set_then_body(const std::shared_ptr& body) { - m_bodies[THEN_BODY_INDEX] = body; - } - - /// \brief sets new ngraph::Function as new else_body. 
- /// - /// \param body new body for 'else' branch. - void set_else_body(const std::shared_ptr& body) { - m_bodies[ELSE_BODY_INDEX] = body; - } - - /// \brief sets new input to the operation associated with parameters - /// of each sub-graphs - /// - /// \param value input to operation - /// \param then_parameter parameter for then_body or nullptr - /// \param else_parameter parameter for else_body or nullpt - void set_input(const Output& value, - const std::shared_ptr& then_parameter, - const std::shared_ptr& else_parameter); - - /// \brief sets new output from the operation associated with results - /// of each sub-graphs - /// - /// \param then_result result from then_body - /// \param else_parameter result from else_body - /// \return output from operation - Output set_output(const std::shared_ptr& then_result, const std::shared_ptr& else_result); - - void validate_and_infer_types() override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - - bool has_evaluate() const override; - -private: - using OutputMap = std::map>; - - void validate_and_infer_type_body(const std::shared_ptr& body, - const ngraph::op::util::MultiSubgraphInputDescriptionVector& input_descriptors); - - OutputMap get_mapping_outputs_on_body_description( - const ngraph::op::util::MultiSubgraphOutputDescriptionVector& output_descriptors); -}; +using ov::op::v8::If; } // namespace v8 } // namespace op -} // namespace ngraph \ No newline at end of file +} // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/interpolate.hpp b/ngraph/core/include/ngraph/op/interpolate.hpp index e2104a8dda7..0c860817d75 100644 --- a/ngraph/core/include/ngraph/op/interpolate.hpp +++ b/ngraph/core/include/ngraph/op/interpolate.hpp @@ -10,358 +10,18 @@ #include "ngraph/attribute_adapter.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/interpolate.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief 
Structure that specifies attributes for interpolation -struct InterpolateAttrs { - // specify dimension indices where interpolation is applied, and `axes` is any - // unordered list of indeces of different dimensions of input tensor. Required. - AxisSet axes; - // specifies type of interpolation - // one of `nearest`, `linear`, `cubic`, `area`. Required. - std::string mode; - // a flag that specifies whether to align corners or not. - // `true` (default) means the alignment is applied, - // `false` means the alignment isn't applied. - bool align_corners = true; - // a flag that specifies whether to perform anti-aliasing. default is `false` - bool antialias = false; - // specify the number of pixels to add to the beginning of the image being - // interpolated. This addition of pixels is done before interpolation calculation. - std::vector pads_begin; - // specify the number of pixels to add to the end of the image being interpolated. - // This addition of pixels is done before interpolation calculation. 
- std::vector pads_end; -}; - -/// \brief Layer which performs bilinear interpolation -class NGRAPH_API Interpolate : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - enum class InterpolateMode { - NEAREST, - LINEAR, - CUBIC, - AREA, - nearest NGRAPH_ENUM_DEPRECATED("Please use NEAREST instead") = NEAREST, - linear NGRAPH_ENUM_DEPRECATED("Please use LINEAR instead") = LINEAR, - cubic NGRAPH_ENUM_DEPRECATED("Please use CUBIC instead") = CUBIC, - area NGRAPH_ENUM_DEPRECATED("Please use AREA instead") = AREA - }; - - Interpolate() = default; - /// \brief Constructs a Interpolate operation - /// - /// \param image Input image - /// \param output_shape Output shape of spatial axes - /// \param attrs Interpolation attributes - Interpolate(const Output& image, const Output& output_shape, const InterpolateAttrs& attrs); - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - const InterpolateAttrs& get_attrs() const { - return m_attrs; - } - -private: - InterpolateAttrs m_attrs; -}; +using InterpolateAttrs = ov::op::v0::Interpolate::Attributes; +using ov::op::v0::Interpolate; } // namespace v0 - namespace v4 { -class NGRAPH_API Interpolate : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Shape calculation mode - /// - /// sizes - output shape for interpolated axes is calculated using input `sizes` - /// scales - output shape for interpolated axes is calculated using input `scales` - enum class ShapeCalcMode { - SIZES, - SCALES, - sizes NGRAPH_ENUM_DEPRECATED("Please use SIZES instead") = SIZES, - scales NGRAPH_ENUM_DEPRECATED("Please use SCALES instead") = SCALES - }; - - /// \brief Interpolation mode - /// - /// nearest - nearest interpolation - /// linear - linear interpolation as in TensorFlow - /// linear_onnx - linear interpolation as in ONNX - /// cubic - cubic interpolation - enum class InterpolateMode { - 
NEAREST, - LINEAR, - LINEAR_ONNX, - CUBIC, - nearest NGRAPH_ENUM_DEPRECATED("Please use NEAREST instead") = NEAREST, - linear NGRAPH_ENUM_DEPRECATED("Please use LINEAR instead") = LINEAR, - linear_onnx NGRAPH_ENUM_DEPRECATED("Please use LINEAR_ONNX instead") = LINEAR_ONNX, - cubic NGRAPH_ENUM_DEPRECATED("Please use CUBIC instead") = CUBIC - }; - - /// \brief Mode of the calculation of the source coordinate from resized one - /// - /// These modes are modes from ONNX runtime. - enum class CoordinateTransformMode { - HALF_PIXEL, - PYTORCH_HALF_PIXEL, - ASYMMETRIC, - TF_HALF_PIXEL_FOR_NN, - ALIGN_CORNERS, - half_pixel NGRAPH_ENUM_DEPRECATED("Please use HALF_PIXEL instead") = HALF_PIXEL, - pytorch_half_pixel NGRAPH_ENUM_DEPRECATED("Please use PYTORCH_HALF_PIXEL instead") = PYTORCH_HALF_PIXEL, - asymmetric NGRAPH_ENUM_DEPRECATED("Please use ASYMMETRIC instead") = ASYMMETRIC, - tf_half_pixel_for_nn NGRAPH_ENUM_DEPRECATED("Please use TF_HALF_PIXEL_FOR_NN instead") = TF_HALF_PIXEL_FOR_NN, - align_corners NGRAPH_ENUM_DEPRECATED("Please use ALIGN_CORNERS instead") = ALIGN_CORNERS - }; - - /// \brief Round modes for the nearest interpolation. - enum class NearestMode { - ROUND_PREFER_FLOOR, - ROUND_PREFER_CEIL, - FLOOR, - CEIL, - SIMPLE, - round_prefer_floor NGRAPH_ENUM_DEPRECATED("Please use ROUND_PREFER_FLOOR instead") = ROUND_PREFER_FLOOR, - round_prefer_ceil NGRAPH_ENUM_DEPRECATED("Please use ROUND_PREFER_CEIL instead") = ROUND_PREFER_CEIL, - floor NGRAPH_ENUM_DEPRECATED("Please use FLOOR instead") = FLOOR, - ceil NGRAPH_ENUM_DEPRECATED("Please use CEIL instead") = CEIL, - simple NGRAPH_ENUM_DEPRECATED("Please use SIMPLE instead") = SIMPLE - }; - - struct InterpolateAttrs { - // specifies type of interpolation - // one of `nearest`, `linear`, `linear_onnx`, `cubic` Required. 
- InterpolateMode mode = InterpolateMode::NEAREST; - // specifies shape calculation mode - // one of `sizes`, `scales` Required - ShapeCalcMode shape_calculation_mode = ShapeCalcMode::SIZES; - // specify the number of pixels to add to the beginning of the image being - // interpolated. This addition of pixels is done before interpolation - // calculation. - std::vector pads_begin; - // specify the number of pixels to add to the end of the image being - // interpolated. This addition of pixels is done before interpolation - // calculation. - std::vector pads_end; - // specifies how to transform the coordinate in the resized tensor to the - // coordinate in the original tensor. one of `half_pixel`, `pytorch_half_pixel`, - // `asymmetric`, `tf_half_pixel_for_nn`, `align_corners` - CoordinateTransformMode coordinate_transformation_mode = CoordinateTransformMode::HALF_PIXEL; - // specifies round mode when `mode == nearest` and is used only when `mode == - // nearest`. one of `round_prefer_floor`, `round_prefer_ceil`, `floor`, `ceil`, - // `simple` - NearestMode nearest_mode = NearestMode::ROUND_PREFER_FLOOR; - // a flag that specifies whether to perform anti-aliasing. default is `false` - bool antialias = false; - // specifies the parameter *a* for cubic interpolation (see, e.g. - // [article](https://ieeexplore.ieee.org/document/1163711/)). 
*cube_coeff* is - // used only when `mode == cubic` - double cube_coeff = -0.75f; - - InterpolateAttrs() = default; - - InterpolateAttrs(InterpolateMode mode, - ShapeCalcMode shape_calculation_mode, - const std::vector& pads_begin, - const std::vector& pads_end, - CoordinateTransformMode coordinate_transformation_mode = CoordinateTransformMode::HALF_PIXEL, - NearestMode nearest_mode = NearestMode::ROUND_PREFER_FLOOR, - bool antialias = false, - double cube_coeff = -0.75) - : mode(mode), - shape_calculation_mode(shape_calculation_mode), - pads_begin(pads_begin), - pads_end(pads_end), - coordinate_transformation_mode(coordinate_transformation_mode), - nearest_mode(nearest_mode), - antialias(antialias), - cube_coeff(cube_coeff) {} - }; - - Interpolate() = default; - /// \brief Constructs a Interpolate operation without 'axes' input. - /// - /// \param image Input image - /// \param output_shape Output shape of spatial axes - /// \param scales Scales of spatial axes, i.e. output_shape / input_shape - /// \param attrs Interpolation attributes - Interpolate(const Output& image, - const Output& output_shape, - const Output& scales, - const InterpolateAttrs& attrs); - - /// \brief Constructs a Interpolate operation with 'axes' input. - /// - /// \param image Input image - /// \param output_shape Output shape of spatial axes - /// \param scales Scales of spatial axes, i.e. 
output_shape / input_shape - /// \param axes Interpolation axes - /// \param attrs Interpolation attributes - Interpolate(const Output& image, - const Output& output_shape, - const Output& scales, - const Output& axes, - const InterpolateAttrs& attrs); - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - - const InterpolateAttrs& get_attrs() const { - return m_attrs; - } - -protected: - /// \return The interpolation axes. - std::vector get_axes() const; - -private: - bool evaluate_interpolate(const HostTensorVector& outputs, const HostTensorVector& inputs) const; - InterpolateAttrs m_attrs; - - /// \brief Corrects pads_begin and pads_end attributes. - /// - /// \details When Interpolate-4 is a result of some transformation, it is possible - /// that pads_begin.size() != pads_end.size() or - /// pads_begin.size() != input_rank. In such case, we should correct - /// pads_begin and pads_end, using padding of pads_begin and pads_end by - /// zeros or using pads_begin[0 : input_rank], pads_end[0 : input_rank]. - /// - /// Padding of pads_begin is performed when pads_begin.size() < input_rank, - /// and pads_begin[0 : input_rank] is used when - /// pads_begin.size() < input_rank. - /// - /// Similarly for pads_end. - void correct_pads(); - - /// \brief Calculates input shape after padding. - /// - /// \param input_shape Shape of input data. - /// - /// \return Padded input shape, i.e. input_shape + pads_begin + pads_end - PartialShape get_padded_input_shape(const PartialShape& input_shape) const; - - /// \brief Infers output shape using scales. 
- /// - /// \param output_shape[in,out] output shape - /// \param axes Interpolation axes - /// \param scales Scales for interpolated axes - /// \param padded_input_shape input shape after padding - void infer_using_scales(PartialShape& output_shape, - const std::vector& axes, - const std::vector& scales, - const PartialShape& padded_input_shape) const; - - /// \brief Infers output shape using sizes. - /// - /// \param output_shape[in,out] output shape - /// \param axes Interpolation axes - /// \param sizes sizes for interpolated axes - void infer_using_shapes(PartialShape& output_shape, - const std::vector& axes, - const std::vector& sizes) const; -}; +using ov::op::v4::Interpolate; } // namespace v4 - using v0::Interpolate; using v0::InterpolateAttrs; } // namespace op - -//---------------------------------------- v0 -------------------------------------------------- -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v0::Interpolate::InterpolateMode& type); - -//---------------------------------------- v4 -------------------------------------------------- - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::InterpolateMode& type); - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::CoordinateTransformMode& type); - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::NearestMode& type); - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::ShapeCalcMode& type); - } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v0::Interpolate::InterpolateMode& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; -template <> -class NGRAPH_API AttributeAdapter - : public 
EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v4::Interpolate::InterpolateMode& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 4}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v4::Interpolate::CoordinateTransformMode& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 4}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v4::Interpolate::NearestMode& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 4}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v4::Interpolate::ShapeCalcMode& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 4}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/less.hpp b/ngraph/core/include/ngraph/op/less.hpp index 9df5e153aa3..340395cc1da 100644 --- a/ngraph/core/include/ngraph/op/less.hpp +++ b/ngraph/core/include/ngraph/op/less.hpp @@ -5,29 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_comparison.hpp" +#include "openvino/op/less.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise less-than operation. 
-class NGRAPH_API Less : public util::BinaryElementwiseComparison { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a less-than operation. - Less() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} - /// \brief Constructs a less-than operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. - /// \param auto_broadcast Auto broadcast specification - Less(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Less; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/less_eq.hpp b/ngraph/core/include/ngraph/op/less_eq.hpp index f45fb9614c3..fe8bfdff9fa 100644 --- a/ngraph/core/include/ngraph/op/less_eq.hpp +++ b/ngraph/core/include/ngraph/op/less_eq.hpp @@ -5,30 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_comparison.hpp" +#include "openvino/op/less_eq.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise less-than-or-equal operation. -class NGRAPH_API LessEqual : public util::BinaryElementwiseComparison { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a less-than-or-equal operation. - LessEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs a less-than-or-equal operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. 
- /// \param auto_broadcast Auto broadcast specification - LessEqual(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::LessEqual; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/log.hpp b/ngraph/core/include/ngraph/op/log.hpp index 6fee3de27d5..51c9c232f05 100644 --- a/ngraph/core/include/ngraph/op/log.hpp +++ b/ngraph/core/include/ngraph/op/log.hpp @@ -5,26 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/log.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise natural log operation. -class NGRAPH_API Log : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a natural log operation. - Log() = default; - /// \brief Constructs a natural log operation. - /// - /// \param arg Node that produces the input tensor. 
- Log(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Log; } // namespace v0 using v0::Log; } // namespace op diff --git a/ngraph/core/include/ngraph/op/log_softmax.hpp b/ngraph/core/include/ngraph/op/log_softmax.hpp index 5a17ab4e6e6..c0d3a2a020d 100644 --- a/ngraph/core/include/ngraph/op/log_softmax.hpp +++ b/ngraph/core/include/ngraph/op/log_softmax.hpp @@ -5,39 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/log_softmax.hpp" namespace ngraph { namespace op { namespace v5 { -class NGRAPH_API LogSoftmax : public Op { -public: - NGRAPH_RTTI_DECLARATION; - LogSoftmax() = default; - /// \brief Constructs a LogSoftmax operation. - /// - /// \param arg Node that produces the first input tensor.
- /// `[d0, ...]` - /// \param axis The axis position (0-based) on which to calculate the LogSoftmax. - /// - /// Output `[d0, ...]` - /// - LogSoftmax(const Output& arg, const int64_t axis); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - int64_t get_axis() const { - return m_axis; - } - void set_axis(const int64_t axis) { - m_axis = axis; - } - -private: - int64_t m_axis = 1; -}; +using ov::op::v5::LogSoftmax; } // namespace v5 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/loop.hpp b/ngraph/core/include/ngraph/op/loop.hpp index 8d037c9bd37..33be140ee70 100644 --- a/ngraph/core/include/ngraph/op/loop.hpp +++ b/ngraph/core/include/ngraph/op/loop.hpp @@ -12,84 +12,12 @@ #include "ngraph/op/parameter.hpp" #include "ngraph/op/tensor_iterator.hpp" #include "ngraph/op/util/sub_graph_base.hpp" +#include "openvino/op/loop.hpp" namespace ngraph { namespace op { namespace v5 { -/// \brief Iterate a body over tensors, accumulating into tensors. -class NGRAPH_API Loop : public op::util::SubGraphOp { -public: - /// \brief Allows to define the purpose of inputs/outputs in the body - struct SpecialBodyPorts { - SpecialBodyPorts() = default; - SpecialBodyPorts(int64_t in_current_iteration_input_idx, int64_t in_body_condition_output_idx) - : current_iteration_input_idx(in_current_iteration_input_idx), - body_condition_output_idx(in_body_condition_output_idx) {} - // -1 means the input is not provided, this input is optional - int64_t current_iteration_input_idx = -1; - // -1 means the output is not provided, - // this output is required, throw an exception if not provided - int64_t body_condition_output_idx = -1; - }; - - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a Loop operation. - Loop() = default; - - /// \brief Constructs a Loop operation. 
- /// - /// \param trip_count Node specifies the maximum number of iterations. - /// \param execution_condition Node determines whether to execute the first - /// iteration or not. - Loop(const Output& trip_count, const Output& execution_condition); - - Output get_concatenated_slices(const Output& value, - int64_t start, - int64_t stride, - int64_t part_size, - int64_t end, - int64_t axis) override; - - void set_special_body_ports(const SpecialBodyPorts& special_body_ports) { - m_special_body_ports = special_body_ports; - } - - SpecialBodyPorts get_special_body_ports() const { - return m_special_body_ports; - } - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - Loop(const Loop&); - -private: - void clone_to(Loop& dst, const OutputVector& new_args) const; - - SpecialBodyPorts m_special_body_ports; -}; +using ov::op::v5::Loop; } // namespace v5 } // namespace op } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public DirectValueAccessor { -public: - AttributeAdapter(ngraph::op::v5::Loop::SpecialBodyPorts& value) - : DirectValueAccessor(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/lrn.hpp b/ngraph/core/include/ngraph/op/lrn.hpp index a229f9600a1..908505b9124 100644 --- a/ngraph/core/include/ngraph/op/lrn.hpp +++ b/ngraph/core/include/ngraph/op/lrn.hpp @@ -5,74 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/lrn.hpp" namespace ngraph { namespace op { namespace v0 { -// clang-format off - /// \brief Elementwise Local Response 
Normalization (LRN) operation. - /// - /// ## Inputs - /// - /// | | Type | Description | - /// | ----- | --------------------------------------- | ----------------------------------------------- | - /// | `arg` | \f$N[n, c, d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. | - /// - /// ## Output - /// - /// | Type | Description | - /// | ---------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | - /// | \f$N[n, c, d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[n, c, d_1,\dots,d_n] = \frac{N[n,i,d_1,\dots,d_n]}{ (bias + alpha * (\sum_{i=max(0,(nsize-1)/2)}^{min(C, (nsize-1)/2)+1} N[n,i,d_1,\dots,d_n]^{2}) ^ {2})}\f$ | -// clang-format on -class NGRAPH_API LRN : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a LRN operation. - LRN() = default; - /// \brief Constructs a LRN operation. - /// - /// \param arg Node that produces the input tensor. 
- LRN(const Output& arg, double alpha, double beta, double bias, size_t size); - - LRN(const Output& arg, const Output& axes, double alpha, double beta, double bias, size_t size); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - void validate_and_infer_types() override; - - double get_alpha() const { - return m_alpha; - } - void set_alpha(double alpha) { - m_alpha = alpha; - } - double get_beta() const { - return m_beta; - } - void set_beta(double beta) { - m_beta = beta; - } - double get_bias() const { - return m_bias; - } - void set_bias(double bias) { - m_bias = bias; - } - size_t get_nsize() const { - return m_size; - } - void set_nsize(size_t size) { - m_size = size; - } - AxisSet get_reduction_axes() const; - -protected: - double m_alpha; - double m_beta; - double m_bias; - size_t m_size; -}; +using ov::op::v0::LRN; } // namespace v0 using v0::LRN; } // namespace op diff --git a/ngraph/core/include/ngraph/op/lstm_cell.hpp b/ngraph/core/include/ngraph/op/lstm_cell.hpp index 17c81965e37..d6a78beac7a 100644 --- a/ngraph/core/include/ngraph/op/lstm_cell.hpp +++ b/ngraph/core/include/ngraph/op/lstm_cell.hpp @@ -13,391 +13,18 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/activation_functions.hpp" #include "ngraph/op/util/rnn_cell_base.hpp" +#include "openvino/op/lstm_cell.hpp" namespace ngraph { namespace op { -enum class LSTMWeightsFormat { - FICO, // IE - ICOF, // PyTorch - IFCO, // DNNL, TF, MxNet - IFOC, // Caffe - IOFC, // ONNX -}; +using ov::op::LSTMWeightsFormat; namespace v0 { -/// -/// \brief Class for single lstm cell node. -/// -/// \note Following implementation supports: -/// \li \c peepholes Gers & Schmidhuber (2000) -/// https://ieeexplore.ieee.org/document/861302 -/// \li Coupling input and forget gates. -/// -/// \note It calculates following equations: -/// -/// it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) 
Ct-1 + Wbi + Rbi) -/// ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) -/// ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) -/// Ct = ft (.) Ct-1 + it (.) ct -/// ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) -/// Ht = ot (.) h(Ct) -/// -/// * - Is a dot product, -/// (.) - is a Hadamard product (element-wise), -/// f, g, h - are activation functions. -/// -/// \note This class represents only single *cell* (for current time step) and not -/// the whole LSTM Sequence layer -/// -/// \sa LSTMSequence, RNNCell, GRUCell -/// -class NGRAPH_API LSTMCell : public util::RNNCellBase { -public: - NGRAPH_RTTI_DECLARATION; - - LSTMCell(); - /// - /// \brief Constructs LSTMCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] initial_cell_state The cell state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The gate weights tensor with shape: - /// [4*hidden_size, input_size]. - /// \param[in] R The recurrence weights tensor with shape: - /// [4*hidden_size, hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] weights_format The order of gates in weights tensors. The - /// default format is IFCO since it is used by - /// DNNL. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. - /// \param[in] input_forget Controls coupling input and forget gates. 
- /// - LSTMCell(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& W, - const Output& R, - std::size_t hidden_size, - LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, - const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f, - bool input_forget = false); - - /// - /// \brief Constructs LSTMCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] initial_cell_state The cell state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The weight tensor with shape: [4*hidden_size, - /// input_size]. - /// \param[in] R The recurrence weight tensor with shape: - /// [4*hidden_size, hidden_size]. - /// \param[in] B The bias tensor for gates with shape: - /// [4*hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] weights_format The order of gates in weights tensors. The - /// default format is IFCO since it is used by - /// DNNL. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. - /// \param[in] input_forget Controls coupling input and forget gates. 
- /// - LSTMCell(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& W, - const Output& R, - const Output& B, - std::size_t hidden_size, - LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, - const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f, - bool input_forget = false); - - /// - /// \brief Constructs LSTMCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] initial_cell_state The cell state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The weight tensor with shape: [4*hidden_size, - /// input_size]. - /// \param[in] R The recurrence weight tensor with shape: - /// [4*hidden_size, hidden_size]. - /// \param[in] B The bias tensor for gates with shape: - /// [4*hidden_size]. - /// \param[in] P The weight tensor for peepholes with shape: - /// [3*hidden_size] - 3 equals to only iof gates. - /// The order is: input, output, forget gates. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] weights_format The order of gates in weights tensors. The - /// default format is IFCO since it is used by - /// DNNL. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. 
- /// \param[in] input_forget Controls coupling input and forget gates. - /// - LSTMCell(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& W, - const Output& R, - const Output& B, - const Output& P, - std::size_t hidden_size, - LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, - const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f, - bool input_forget = false); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool get_input_forget() const { - return m_input_forget; - } - LSTMWeightsFormat get_weights_format() const { - return m_weights_format; - } - -private: - /// - /// \brief Creates the default bias input initialized with zeros. - /// - /// \return The object of Output class. - /// - Output get_default_bias_input() const; - - /// - /// \brief Creates the default peepholes input initialized with zeros. - /// - /// \return The object of Output class. - /// - Output get_default_peepholes_input() const; - /// - /// \brief The Activation function f. - /// - util::ActivationFunction m_activation_f; - /// - /// \brief The Activation function g. - /// - util::ActivationFunction m_activation_g; - /// - /// \brief The Activation function h. - /// - util::ActivationFunction m_activation_h; - /// - /// \brief Controls whether to couple input and forget gates. - /// - bool m_input_forget = false; - - /// - /// \brief The order of gates in weights tensors. - /// - LSTMWeightsFormat m_weights_format; - - static constexpr std::size_t s_gates_count{4}; - static constexpr std::size_t s_peepholes_count{3}; -}; +using ov::op::v0::LSTMCell; } // namespace v0 namespace v4 { -/// -/// \brief Class for single lstm cell node. 
-/// -/// \note Following implementation supports: -/// \li \c peepholes Gers & Schmidhuber (2000) -/// https://ieeexplore.ieee.org/document/861302 -/// \li Coupling input and forget gates. -/// -/// \note It calculates following equations: -/// -/// it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) -/// ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Wbf + Rbf) -/// ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) -/// Ct = ft (.) Ct-1 + it (.) ct -/// ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Wbo + Rbo) -/// Ht = ot (.) h(Ct) -/// -/// * - Is a dot product, -/// (.) - is a Hadamard product (element-wise), -/// f, g, h - are activation functions. -/// -/// \note This class represents only single *cell* (for current time step) and not -/// the whole LSTM Sequence layer -/// -/// \sa LSTMSequence, RNNCell, GRUCell -/// -class NGRAPH_API LSTMCell : public util::RNNCellBase { -public: - NGRAPH_RTTI_DECLARATION; - - LSTMCell(); - /// - /// \brief Constructs LSTMCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] initial_cell_state The cell state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The gate weights tensor with shape: - /// [4*hidden_size, input_size]. - /// \param[in] R The recurrence weights tensor with shape: - /// [4*hidden_size, hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. 
- /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. - LSTMCell(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& W, - const Output& R, - std::size_t hidden_size, - const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f); - - /// - /// \brief Constructs LSTMCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] initial_cell_state The cell state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The weight tensor with shape: [4*hidden_size, - /// input_size]. - /// \param[in] R The recurrence weight tensor with shape: - /// [4*hidden_size, hidden_size]. - /// \param[in] B The bias tensor for gates with shape: - /// [4*hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. 
- /// - LSTMCell(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& W, - const Output& R, - const Output& B, - std::size_t hidden_size, - const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f); - - void validate_and_infer_types() override; - - bool visit_attributes(AttributeVisitor& visitor) override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - -private: - /// - /// \brief Creates the default bias input initialized with zeros. - /// - /// \return The object of Output class. - /// - Output get_default_bias_input() const; - - /// - /// \brief The Activation function f. - /// - util::ActivationFunction m_activation_f; - /// - /// \brief The Activation function g. - /// - util::ActivationFunction m_activation_g; - /// - /// \brief The Activation function h. - /// - util::ActivationFunction m_activation_h; - - static constexpr std::size_t s_gates_count{4}; -}; +using ov::op::v4::LSTMCell; } // namespace v4 } // namespace op - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::LSTMWeightsFormat& type); } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::LSTMWeightsFormat& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/lstm_sequence.hpp b/ngraph/core/include/ngraph/op/lstm_sequence.hpp index 75638c6c411..4898b08666d 100644 --- a/ngraph/core/include/ngraph/op/lstm_sequence.hpp +++ b/ngraph/core/include/ngraph/op/lstm_sequence.hpp @@ -15,184 +15,16 @@ #include "ngraph/op/lstm_cell.hpp" #include 
"ngraph/op/util/attr_types.hpp" #include "ngraph/op/util/rnn_cell_base.hpp" +#include "openvino/op/lstm_sequence.hpp" namespace ngraph { namespace op { namespace v0 { - -/// -/// \brief Class for lstm sequence node. -/// -/// \note It follows notation and equations defined as in ONNX standard: -/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM -/// -/// \sa LSTMCell, RNNCell, GRUCell -/// -/// -class NGRAPH_API LSTMSequence : public Op { -public: - NGRAPH_RTTI_DECLARATION; - LSTMSequence(); - - using direction = RecurrentSequenceDirection; - - size_t get_default_output_index() const override { - return no_default_index(); - } - explicit LSTMSequence(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& sequence_lengths, - const Output& W, - const Output& R, - const Output& B, - const Output& P, - const std::int64_t hidden_size, - const direction lstm_direction, - LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, - const std::vector activations_alpha = {}, - const std::vector activations_beta = {}, - const std::vector activations = {"sigmoid", "tanh", "tanh"}, - const float clip_threshold = 0, - const bool input_forget = false); - - explicit LSTMSequence(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& sequence_lengths, - const Output& W, - const Output& R, - const Output& B, - const std::int64_t hidden_size, - const direction lstm_direction, - LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - const std::vector& activations = {"sigmoid", "tanh", "tanh"}, - const float clip_threshold = 0, - const bool input_forget = false); - - virtual void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; 
- - std::vector get_activations_alpha() const { - return m_activations_alpha; - } - std::vector get_activations_beta() const { - return m_activations_beta; - } - std::vector get_activations() const { - return m_activations; - } - float get_clip_threshold() const { - return m_clip_threshold; - } - direction get_direction() const { - return m_direction; - } - std::int64_t get_hidden_size() const { - return m_hidden_size; - } - bool get_input_forget() const { - return m_input_forget; - } - LSTMWeightsFormat get_weights_format() const { - return m_weights_format; - } - -private: - /// - /// \brief Gets the masked value according to sequence length in a batch. - /// - /// \note Zeros out values or sets them to default value for inputs with - /// sequence length shorter than currently procssed time step. - /// - /// \param[in] data The input value. - /// \param[in] time_step The current time step denoting sequence length. - /// \param[in] batch_axis The batch axis index of data tensor. - /// \param[in] default_value The default value for masked elements. - /// - /// \return The masked value. - /// - std::shared_ptr get_masked_node(const Output& data, - std::int32_t time_step, - std::size_t batch_axis = 0, - const Output& default_value = Output()) const; - - OutputVector lstm_pass(bool is_reverse = false) const; - - // Split(bi-directional) and squeeze input data to remove 'num_direction' dimension. - std::shared_ptr prepare_input(Output node, bool is_reverse, size_t num_direction_axis = 0) const; - - std::vector m_activations_alpha; - std::vector m_activations_beta; - std::vector m_activations; - float m_clip_threshold; - direction m_direction; - std::int64_t m_hidden_size; - bool m_input_forget; - LSTMWeightsFormat m_weights_format; -}; +using ov::op::v0::LSTMSequence; } // namespace v0 namespace v5 { -/// -/// \brief Class for lstm sequence node. 
-/// -/// \note It follows notation and equations defined as in ONNX standard: -/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM -/// -/// \sa LSTMCell, RNNCell, GRUCell -/// -/// -class NGRAPH_API LSTMSequence : public util::RNNCellBase { -public: - NGRAPH_RTTI_DECLARATION; - LSTMSequence() = default; - - using direction = RecurrentSequenceDirection; - - size_t get_default_output_index() const override { - return no_default_index(); - } - explicit LSTMSequence(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& sequence_lengths, - const Output& W, - const Output& R, - const Output& B, - const std::int64_t hidden_size, - const direction lstm_direction, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - const std::vector& activations = {"sigmoid", "tanh", "tanh"}, - const float clip = 0.f) - : RNNCellBase({X, initial_hidden_state, initial_cell_state, sequence_lengths, W, R, B}, - hidden_size, - clip, - activations, - activations_alpha, - activations_beta), - m_direction(lstm_direction) { - constructor_validate_and_infer_types(); - } - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - direction get_direction() const { - return m_direction; - } - -private: - direction m_direction; -}; +using ov::op::v5::LSTMSequence; } // namespace v5 } // namespace op - } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/parameter.hpp b/ngraph/core/include/ngraph/op/parameter.hpp index 9ac3feb7b8f..26a7bcbf08c 100644 --- a/ngraph/core/include/ngraph/op/parameter.hpp +++ b/ngraph/core/include/ngraph/op/parameter.hpp @@ -5,78 +5,14 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/parameter.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief A function parameter. 
-/// -/// Parameters are nodes that represent the arguments that will be passed to -/// user-defined functions. Function creation requires a sequence of parameters. -/// Basic graph operations do not need parameters attached to a function. -class NGRAPH_API Parameter : public op::Op { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructions a tensor-typed parameter node. - Parameter() = default; - /// \brief Constructions a tensor-typed parameter node. - /// - /// \param element_type The element type of the parameter. - /// \param pshape The partial shape of the parameter. - Parameter(const ngraph::element::Type& element_type, const PartialShape& pshape); - - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool is_relevant_to_shapes() const; - void set_is_relevant_to_shapes(bool is_relevant); - - const PartialShape& get_partial_shape() const { - return m_partial_shape; - } - PartialShape& get_partial_shape() { - return m_partial_shape; - } - void set_partial_shape(const PartialShape& partial_shape) { - m_partial_shape = partial_shape; - } - const element::Type& get_element_type() const { - return m_element_type; - } - void set_element_type(const element::Type& element_type) { - m_element_type = element_type; - } - -protected: - PartialShape m_partial_shape; - element::Type m_element_type; - bool m_is_relevant_to_shapes{false}; -}; +using ov::op::v0::Parameter; } // namespace v0 using v0::Parameter; } // namespace op using ParameterVector = std::vector>; } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter : public VisitorAdapter { -public: - AttributeAdapter(ngraph::ParameterVector& ref); - - bool visit_attributes(AttributeVisitor& visitor) override; - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; - const DiscreteTypeInfo& get_type_info() const override 
{ - return type_info; - } - -protected: - ngraph::ParameterVector& m_ref; -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/result.hpp b/ngraph/core/include/ngraph/op/result.hpp index da47ff4b83f..4eb82338023 100644 --- a/ngraph/core/include/ngraph/op/result.hpp +++ b/ngraph/core/include/ngraph/op/result.hpp @@ -7,62 +7,14 @@ #include #include "ngraph/op/op.hpp" +#include "openvino/op/result.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API Result : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Allows a value to be used as a function result. - Result() = default; - /// \brief Allows a value to be used as a function result. - /// - /// \param arg Node that produces the input tensor. - Result(const Output& arg, bool needs_default_layout = false); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - void set_needs_default_layout(bool val) { - m_needs_default_layout = val; - } - bool needs_default_layout() const { - return m_needs_default_layout; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; - -private: - bool m_needs_default_layout{false}; -}; +using ov::op::v0::Result; } // namespace v0 - using v0::Result; } // namespace op using ResultVector = std::vector>; } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter : public VisitorAdapter { -public: - AttributeAdapter(ngraph::ResultVector& ref); - - bool visit_attributes(AttributeVisitor& visitor) override; - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } - -protected: - ngraph::ResultVector& 
m_ref; -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/tensor_iterator.hpp b/ngraph/core/include/ngraph/op/tensor_iterator.hpp index 405710363d4..528d2394bcf 100644 --- a/ngraph/core/include/ngraph/op/tensor_iterator.hpp +++ b/ngraph/core/include/ngraph/op/tensor_iterator.hpp @@ -9,35 +9,12 @@ #include "ngraph/function.hpp" #include "ngraph/op/parameter.hpp" #include "ngraph/op/util/sub_graph_base.hpp" +#include "openvino/op/tensor_iterator.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Iterate a body over tensors, accumulating into tensors. -class NGRAPH_API TensorIterator : public op::util::SubGraphOp { -public: - NGRAPH_RTTI_DECLARATION; - - bool visit_attributes(AttributeVisitor& visitor) override; - - TensorIterator() = default; - explicit TensorIterator(const OutputVector& values); - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - /// \return the body of the iteration - std::shared_ptr get_body() const { - return m_bodies[0]; - } - /// \param body set the body of the iteration - void set_body(const std::shared_ptr& body) { - set_function(body); - } - void validate_and_infer_types() override; - void revalidate_and_infer_types_for_body_ops(); - -private: - void try_to_set_num_iterations_if_no_slice_inputs(); -}; +using ov::op::v0::TensorIterator; } // namespace v0 using v0::TensorIterator; } // namespace op diff --git a/ngraph/core/include/openvino/core/node.hpp b/ngraph/core/include/openvino/core/node.hpp index 641d774ba62..aab675dc62f 100644 --- a/ngraph/core/include/openvino/core/node.hpp +++ b/ngraph/core/include/openvino/core/node.hpp @@ -42,19 +42,15 @@ namespace runtime { class HostTensor; } // namespace runtime -namespace op { - -namespace v0 { -class Result; -} // namespace v0 -} // namespace op - } // namespace ngraph namespace ov { namespace op { +namespace v0 { +class Result; +} // namespace v0 struct AutoBroadcastSpec; -} +} // namespace op namespace pass { namespace 
pattern { class Matcher; @@ -76,7 +72,7 @@ class Node; /// environment) for evaluating ngraph::function. using EvaluationContext = std::map>; -using ResultVector = std::vector>; +using ResultVector = std::vector>; OPENVINO_API std::string node_validation_failure_loc_string(const Node* node); diff --git a/ngraph/core/include/openvino/op/gather.hpp b/ngraph/core/include/openvino/op/gather.hpp new file mode 100644 index 00000000000..f9546ffc08b --- /dev/null +++ b/ngraph/core/include/openvino/op/gather.hpp @@ -0,0 +1,80 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/gather_base.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Gather slices from axis of data according to indices +class OPENVINO_API Gather : public op::util::GatherBase { +public: + OPENVINO_RTTI_DECLARATION; + static const int64_t AXIS_NOT_SET_VALUE = std::numeric_limits::max(); + Gather() = default; + /// \param data The tensor from which slices are gathered + /// \param indices Tensor with indexes to gather + /// \param axis The tensor is a dimension index to gather data from + Gather(const Output& params, const Output& indices, const Output& axis); + + bool visit_attributes(AttributeVisitor& visitor) override; + int64_t get_axis() const override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v1 + +namespace v7 { +/// \brief Gather slices from axis of data according to indices +class OPENVINO_API Gather : public op::util::GatherBase { +public: + OPENVINO_RTTI_DECLARATION; + Gather() = default; + + /// \param data The tensor from which slices are gathered + /// \param indices Tensor with indexes to gather + /// \param axis The tensor is a dimension index to gather data from + /// \param batch_dims The number of batch dimension in data and indices tensors. + /// If batch_dims = 0 Gather v7 is identical to Gather v1. 
+ Gather(const Output& data, + const Output& indices, + const Output& axis, + const int64_t batch_dims = 0); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + int64_t get_batch_dims() const; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v7 + +namespace v8 { +/// \brief Gather slices from axis of data according to indices. Negative indices +/// are supported and indicate reverse indexing from the end +class OPENVINO_API Gather : public op::util::GatherBase { +public: + OPENVINO_RTTI_DECLARATION; + Gather() = default; + + /// \param data The tensor from which slices are gathered + /// \param indices Tensor with indexes to gather + /// \param axis The tensor is a dimension index to gather data from + /// \param batch_dims The number of batch dimension in data and indices tensors. + Gather(const Output& data, + const Output& indices, + const Output& axis, + const int64_t batch_dims = 0); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + int64_t get_batch_dims() const; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/gather_elements.hpp b/ngraph/core/include/openvino/op/gather_elements.hpp new file mode 100644 index 00000000000..ae00119bbed --- /dev/null +++ b/ngraph/core/include/openvino/op/gather_elements.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v6 { +/// \brief GatherElements operation +/// +class OPENVINO_API GatherElements : public Op { +public: + OPENVINO_RTTI_DECLARATION; + GatherElements() = default; + + /// \brief Constructs a GatherElements operation. 
+ /// + /// \param data Node producing data that are gathered + /// \param indices Node producing indices by which the operation gathers elements + /// \param axis specifies axis along which indices are specified + GatherElements(const Output& data, const Output& indices, const int64_t axis); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + int64_t get_axis() const { + return m_axis; + } + +private: + int64_t m_axis; +}; +} // namespace v6 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/gather_nd.hpp b/ngraph/core/include/openvino/op/gather_nd.hpp new file mode 100644 index 00000000000..82fcd2b7ac9 --- /dev/null +++ b/ngraph/core/include/openvino/op/gather_nd.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v5 { +/// \brief GatherND operation +/// +class OPENVINO_API GatherND : public Op { +public: + OPENVINO_RTTI_DECLARATION; + GatherND() = default; + + /// \brief Constructs a GatherND operation. 
+ /// + /// \param data Node producing data that are gathered + /// \param indices Node producing indices by which the operation gathers elements + /// or slices from data + /// \param batch_dims Specifies a number of batch dimensions + GatherND(const Output& data, const Output& indices, const size_t batch_dims = 0); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + size_t get_batch_dims() const { + return m_batch_dims; + } + +private: + size_t m_batch_dims; +}; +} // namespace v5 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/gather_tree.hpp b/ngraph/core/include/openvino/op/gather_tree.hpp new file mode 100644 index 00000000000..e6f2828d9cd --- /dev/null +++ b/ngraph/core/include/openvino/op/gather_tree.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Generates the complete beams from the ids per each step and the parent beam +/// ids. 
+class OPENVINO_API GatherTree : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + GatherTree() = default; + /// \param step_ids Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with + /// indices from per each step + /// \param parent_idx Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with + /// parent beam indices + /// \param max_seq_len Tensor of shape [BATCH_SIZE] with maximum lengths for each + /// sequence in the batch + /// \param end_token Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] + GatherTree(const Output& step_ids, + const Output& parent_idx, + const Output& max_seq_len, + const Output& end_token); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/gelu.hpp b/ngraph/core/include/openvino/op/gelu.hpp new file mode 100644 index 00000000000..f8b4ed66a4a --- /dev/null +++ b/ngraph/core/include/openvino/op/gelu.hpp @@ -0,0 +1,81 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Gaussian Error Linear Unit +/// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) ) +class OPENVINO_API Gelu : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Gelu(); + /// \brief Constructs a Gelu operation. 
+ /// + /// \param data Input tensor + Gelu(const Output& data); + + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v0 + +/// \brief Specifies the approximation to calculate Gelu +enum class GeluApproximationMode { TANH, ERF }; +OPENVINO_API std::ostream& operator<<(std::ostream& s, const GeluApproximationMode& type); + +namespace v7 { +/// \brief Gaussian Error Linear Unit +/// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) ) for "approximation" = "erf" +/// f(x) = 0.5 * x * (1 + tanh([sqrt(2 / pi)] * [x + 0.044715^3]) for "approximation" = +/// "tanh" +class OPENVINO_API Gelu : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + Gelu() = default; + /// \brief Constructs a Gelu operation. + /// + /// \param data Input tensor + /// \param mode Approximation mode + Gelu(const Output& data, GeluApproximationMode mode = GeluApproximationMode::ERF); + + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + GeluApproximationMode get_approximation_mode() const; + +private: + GeluApproximationMode m_approximation_mode = GeluApproximationMode::ERF; +}; +} // namespace v7 +} // namespace op + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::GeluApproximationMode& value) : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; +} // namespace ov diff --git a/ngraph/core/include/openvino/op/greater.hpp 
b/ngraph/core/include/openvino/op/greater.hpp new file mode 100644 index 00000000000..1edfcafa34f --- /dev/null +++ b/ngraph/core/include/openvino/op/greater.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_comparison.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise greater-than operation. +class OPENVINO_API Greater : public util::BinaryElementwiseComparison { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a greater-than operation. + Greater() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} + /// \brief Constructs a greater-than operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. + /// \param auto_broadcast Auto broadcast specification + Greater(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/greater_eq.hpp b/ngraph/core/include/openvino/op/greater_eq.hpp new file mode 100644 index 00000000000..7ce10d2f70d --- /dev/null +++ b/ngraph/core/include/openvino/op/greater_eq.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_comparison.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise greater-than-or-equal operation. 
+class OPENVINO_API GreaterEqual : public util::BinaryElementwiseComparison { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a greater-than-or-equal operation. + GreaterEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} + /// \brief Constructs a greater-than-or-equal operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. + /// \param auto_broadcast Auto broadcast specification + GreaterEqual(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/grn.hpp b/ngraph/core/include/openvino/op/grn.hpp new file mode 100644 index 00000000000..d151f908aa9 --- /dev/null +++ b/ngraph/core/include/openvino/op/grn.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Global Response Normalization with L2 norm (across channels only). +/// +class OPENVINO_API GRN : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + GRN() = default; + /// \brief Constructs a GRN operation. + /// + /// \param data - Node producing the input tensor + /// \param bias - The bias added to the variance. 
+ /// + GRN(const Output& data, float bias); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + float get_bias() const { + return m_bias; + } + +protected: + float m_bias = 1.0f; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/group_conv.hpp b/ngraph/core/include/openvino/op/group_conv.hpp new file mode 100644 index 00000000000..b2001468eab --- /dev/null +++ b/ngraph/core/include/openvino/op/group_conv.hpp @@ -0,0 +1,273 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/convolution.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Batched convolution operation, with optional window dilation and stride. +class OPENVINO_API GroupConvolution : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a batched convolution operation. + GroupConvolution() = default; + /// \brief Constructs a batched convolution operation. + /// + /// \param data_batch The node producing the input data batch tensor.
+ /// `[N, C_IN, D1, ... Df]` + /// \param filters The node producing the filters tensor.
+ /// `[GROUPS, FC_OUT, FC_IN, F1, ... Ff]` + /// \param strides The strides.
+ /// `[f]` + /// \param dilations The dilations.
+ /// `[f]` + /// \param pads_begin The beginning of padding shape.
+ /// `[f]` + /// \param pads_end The end of padding shape.
+ /// `[f]` + /// \param auto_pad The pad type for automatically computing padding sizes.
+ /// `[f]` + /// + /// Output `[N, FC_OUT * GROUPS, R1, ... Rf]` + /// + GroupConvolution(const Output& data_batch, + const Output& filters, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + const PadType& auto_pad = PadType::EXPLICIT); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + /// \return The strides. + const Strides& get_strides() const { + return m_strides; + } + void set_strides(const Strides& strides) { + m_strides = strides; + } + /// \return The dilations. + const Strides& get_dilations() const { + return m_dilations; + } + void set_dilations(const Strides& dilations) { + m_dilations = dilations; + } + /// \return The padding-below sizes (possibly negative). + const CoordinateDiff& get_pads_begin() const { + return m_pads_begin; + } + void set_pads_begin(const CoordinateDiff& pads_begin) { + m_pads_begin = pads_begin; + } + /// \return The padding-above sizes (possibly negative). + const CoordinateDiff& get_pads_end() const { + return m_pads_end; + } + void set_adding_above(const CoordinateDiff& pads_end) { + m_pads_end = pads_end; + } + /// \return The pad type for convolution. + const PadType& get_auto_pad() const { + return m_auto_pad; + } + void set_auto_pad(const PadType& auto_pad) { + m_auto_pad = auto_pad; + } + /// \return The default value for Convolution. + OPENVINO_SUPPRESS_DEPRECATED_START + std::shared_ptr get_default_value() const override; + OPENVINO_SUPPRESS_DEPRECATED_END + +protected: + Strides m_strides; + Strides m_dilations; + CoordinateDiff m_pads_begin; + CoordinateDiff m_pads_end; + PadType m_auto_pad; +}; + +/// \brief Data batch backprop for batched convolution operation. 
+class OPENVINO_API GroupConvolutionBackpropData : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a batched-convolution data batch-backprop operation. + GroupConvolutionBackpropData(); + // clang-format off + // + // \brief Constructs a batched-convolution data batch-backprop operation. + // + // \param data The node producing data from forward-prop. Shape: [N, + // C_INPUT * GROUPS, X1, ..., XD]. + // \param filter The node producing the filter from forward-prop. Shape: + // [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1] + // \param output_shape The shape of the data batch from forward-prop. It's size + // should be equal to number of data spatial dimensions. + // \param strides The strides from forward-prop. + // \param pads_begin The padding-below sizes from forward-prop. + // \param pads_end The padding-above sizes from forward-prop. + // \param dilations The dilations from forward-prop. + // \param auto_pad The pad type for automatically computing padding sizes. + // \param output_padding The output padding adds additional amount of paddings per + // each spatial axis in the output tensor. + // + // clang-format on + // + GroupConvolutionBackpropData(const Output& data, + const Output& filter, + const Output& output_shape, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + const PadType& auto_pad = PadType::EXPLICIT, + const CoordinateDiff& output_padding = {}); + + // clang-format off + // + // \brief Constructs a batched-convolution data batch-backprop operation. + // + // \param data The node producing data from forward-prop. Shape: [N, + // C_INPUT * GROUPS, X1, ..., XD]. + // \param filter The node producing the filter from forward-prop. Shape: + // [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1] + // \param output_shape The shape of the data batch from forward-prop. It's size + // should be equal to number of data spatial dimensions. 
+ // \param strides The strides from forward-prop. + // \param dilations The dilations from forward-prop. + // \param auto_pad The pad type for automatically computing padding sizes. + // \param output_padding The output padding adds additional amount of paddings per + // each spatial axis in the output tensor. + // + // clang-format on + // + GroupConvolutionBackpropData(const Output& data, + const Output& filter, + const Output& output_shape, + const Strides& strides, + const Strides& dilations, + const PadType& auto_pad, + const CoordinateDiff& output_padding = {}); + + // clang-format off + // + // \brief Constructs a batched-convolution data batch-backprop operation. + // + // \param data The node producing data from forward-prop. Shape: + // [N, C_INPUT * GROUPS, X1, ..., XD]. + // \param filter The node producing the filter from forward-prop. Shape: + // [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1] + // \param strides The strides from forward-prop. + // \param pads_begin The padding-below sizes from forward-prop. + // \param pads_end The padding-above sizes from forward-prop. + // \param dilations The dilations from forward-prop. + // \param auto_pad The pad type for automatically computing padding sizes. + // \param output_padding The output padding adds additional amount of paddings per + // each spatial axis in the output tensor. + // + // clang-format on + GroupConvolutionBackpropData(const Output& data, + const Output& filter, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + const PadType& auto_pad = PadType::EXPLICIT, + const CoordinateDiff& output_padding = {}); + /// + /// \brief Calculates output spatial features size. + /// + /// \param[in] input_data_shape The input data partial shape + /// \param[in] filters_shape The filters partial shape + /// \param[in] strides The strides values. + /// \param[in] dilations The dilations values. 
+ /// \param[in] pads_begin The paddings at the beginning of axis. + /// \param[in] pads_end The paddings at the end of axis. + /// \param[in] output_padding The output padding values. + /// \param output_spatial_shape The placeholder for computed output spatial + /// partial + /// shape. + /// + void infer_conv_backprop_output_spatial_shape(const std::vector& input_data_shape, + const std::vector& filters_shape, + const Strides& strides, + const Strides& dilations, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const CoordinateDiff& output_padding, + std::vector& output_spatial_shape); + + bool visit_attributes(AttributeVisitor& visitor) override; + bool is_dynamic() const override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The spatial shape of the output. + const PartialShape get_convolution_output_shape() const; + void set_output_shape(const ngraph::Shape& output_shape); + /// \return The strides from the forward prop. + const Strides& get_strides() const { + return m_strides; + } + void set_strides(const Strides& strides) { + m_strides = strides; + } + /// \return The dilations from the forward prop. + const Strides& get_dilations() const { + return m_dilations; + } + void set_dilations(const Strides& dilations) { + m_dilations = dilations; + } + /// \return The number of pixels to add to the beginning along each axis. + const CoordinateDiff& get_pads_begin() const { + return m_pads_begin; + } + void set_pads_begin(const CoordinateDiff& pads_begin) { + m_pads_begin = pads_begin; + } + /// \return The number of pixels to add to the ending along each axis. + const CoordinateDiff& get_pads_end() const { + return m_pads_end; + } + void set_pads_end(const CoordinateDiff& pads_end) { + m_pads_end = pads_end; + } + /// \return The auto pad. 
+ const PadType& get_auto_pad() const { + return m_auto_pad; + } + void set_auto_pad(const PadType& auto_pad) { + m_auto_pad = auto_pad; + } + /// \return The output padding. + const CoordinateDiff& get_output_padding() const { + return m_output_padding; + } + void set_output_padding(const CoordinateDiff& output_padding) { + m_output_padding = output_padding; + } + +protected: + Strides m_strides; + Strides m_dilations; + CoordinateDiff m_pads_begin; + CoordinateDiff m_pads_end; + PadType m_auto_pad; + CoordinateDiff m_output_padding; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/gru_cell.hpp b/ngraph/core/include/openvino/op/gru_cell.hpp new file mode 100644 index 00000000000..f417b3bb05c --- /dev/null +++ b/ngraph/core/include/openvino/op/gru_cell.hpp @@ -0,0 +1,160 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "openvino/op/op.hpp" +#include "openvino/op/util/activation_functions.hpp" +#include "openvino/op/util/rnn_cell_base.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// +/// \brief Class for GRU cell node. +/// +/// \note Note this class represents only single *cell* and not whole GRU *layer*. +/// +class OPENVINO_API GRUCell : public util::RNNCellBase { +public: + OPENVINO_RTTI_DECLARATION; + GRUCell(); + /// + /// \brief Constructs GRUCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: + /// [gates_count * hidden_size, input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [gates_count * hidden_size, hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. 
+ /// + GRUCell(const Output& X, + const Output& initial_hidden_state, + const Output& W, + const Output& R, + std::size_t hidden_size); + + /// + /// \brief Constructs GRUCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: + /// [gates_count * hidden_size, input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [gates_count * hidden_size, hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// + GRUCell(const Output& X, + const Output& initial_hidden_state, + const Output& W, + const Output& R, + std::size_t hidden_size, + const std::vector& activations, + const std::vector& activations_alpha, + const std::vector& activations_beta, + float clip, + bool linear_before_reset); + + /// + /// \brief Constructs GRUCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: [gates_count * + /// hidden_size, input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [gates_count * hidden_size, hidden_size]. 
+ /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] B The sum of biases (weight and recurrence) for + /// update, reset and hidden gates. + /// If linear_before_reset := true then biases for + /// hidden gates are + /// placed separately (weight and recurrence). + /// Shape: [gates_count * hidden_size] if + /// linear_before_reset := false + /// Shape: [(gates_count + 1) * hidden_size] if + /// linear_before_reset := true + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// \param[in] linear_before_reset Whether or not to apply the linear + /// transformation before multiplying by the + /// output of the reset gate. + /// + GRUCell(const Output& X, + const Output& initial_hidden_state, + const Output& W, + const Output& R, + const Output& B, + std::size_t hidden_size, + const std::vector& activations = std::vector{"sigmoid", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f, + bool linear_before_reset = false); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool get_linear_before_reset() const { + return m_linear_before_reset; + } + +private: + /// brief Add and initialize bias input to all zeros. + void add_default_bias_input(); + + /// + /// \brief The Activation function f. + /// + util::ActivationFunction m_activation_f; + /// + /// \brief The Activation function g. 
+ /// + util::ActivationFunction m_activation_g; + + static constexpr std::size_t s_gates_count{3}; + /// + /// \brief Control whether or not apply the linear transformation. + /// + /// \note The linear transformation may be applied when computing the output of + /// hidden gate. It's done before multiplying by the output of the reset gate. + /// + bool m_linear_before_reset; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/gru_sequence.hpp b/ngraph/core/include/openvino/op/gru_sequence.hpp new file mode 100644 index 00000000000..3421d5d31a4 --- /dev/null +++ b/ngraph/core/include/openvino/op/gru_sequence.hpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "openvino/op/op.hpp" +#include "openvino/op/util/rnn_cell_base.hpp" + +namespace ov { +namespace op { +namespace v5 { +class OPENVINO_API GRUSequence : public util::RNNCellBase { +public: + OPENVINO_RTTI_DECLARATION; + GRUSequence(); + + GRUSequence(const Output& X, + const Output& H_t, + const Output& sequence_lengths, + const Output& W, + const Output& R, + const Output& B, + size_t hidden_size, + op::RecurrentSequenceDirection direction, + const std::vector& activations = std::vector{"sigmoid", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f, + bool linear_before_reset = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + bool get_linear_before_reset() const { + return m_linear_before_reset; + } + op::RecurrentSequenceDirection get_direction() const { + return m_direction; + } + +protected: + op::RecurrentSequenceDirection m_direction; + bool m_linear_before_reset; +}; +} // namespace v5 +} // namespace op +} // 
namespace ov diff --git a/ngraph/core/include/openvino/op/hard_sigmoid.hpp b/ngraph/core/include/openvino/op/hard_sigmoid.hpp new file mode 100644 index 00000000000..bea9b15b0b1 --- /dev/null +++ b/ngraph/core/include/openvino/op/hard_sigmoid.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Parameterized, bounded sigmoid-like, piecewise linear +/// function. min(max(alpha*x + beta, 0), 1) +/// +class OPENVINO_API HardSigmoid : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + HardSigmoid(); + + /// \brief Constructs a HardSigmoid operation. + /// + /// \param data Input tensor. + /// \param[in] alpha A scalar value representing the alpha parameter. + /// \param[in] beta A scalar value representing the beta parameter. + /// + HardSigmoid(const Output& data, const Output& alpha, const Output& beta); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/hsigmoid.hpp b/ngraph/core/include/openvino/op/hsigmoid.hpp new file mode 100644 index 00000000000..4b7c891724d --- /dev/null +++ b/ngraph/core/include/openvino/op/hsigmoid.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v5 { +/// \brief A HSigmoid Activation Function +/// f(x) = min(max(x + 3, 0), 6) / 6 or +/// f(x) = min(ReLU(x + 3), 6) / 6 +/// +class OPENVINO_API HSigmoid : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + HSigmoid() = default; + + /// 
\brief Constructs a HSigmoid operation. + /// + /// \param data Input tensor + HSigmoid(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v5 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/hswish.hpp b/ngraph/core/include/openvino/op/hswish.hpp new file mode 100644 index 00000000000..7c64232eadd --- /dev/null +++ b/ngraph/core/include/openvino/op/hswish.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v4 { +/// \brief A HSwish Activation Function +/// f(x) = x * min(max(x + 3, 0), 6) / 6 or +/// f(x) = x * min(ReLU(x + 3), 6) / 6 +/// +class OPENVINO_API HSwish : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + HSwish() = default; + + /// \brief Constructs a HSwish (hard version of Swish) operation. 
+ /// + /// \param data Input tensor + HSwish(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v4 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/idft.hpp b/ngraph/core/include/openvino/op/idft.hpp new file mode 100644 index 00000000000..cf0352c679b --- /dev/null +++ b/ngraph/core/include/openvino/op/idft.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/op/op.hpp" +#include "openvino/op/util/fft_base.hpp" + +namespace ov { +namespace op { +namespace v7 { +/// \brief An operation IDFT that computes the inverse discrete Fourier transformation. +class OPENVINO_API IDFT : public util::FFTBase { +public: + OPENVINO_RTTI_DECLARATION; + IDFT() = default; + + /// \brief Constructs a IDFT operation. IDFT is performed for full size axes. + /// + /// \param data Input data + /// \param axes Axes to perform IDFT + IDFT(const Output& data, const Output& axes); + + /// \brief Constructs a IDFT operation. 
+ /// + /// \param data Input data + /// \param axes Axes to perform IDFT + /// \param signal_size Signal sizes for 'axes' + IDFT(const Output& data, const Output& axes, const Output& signal_size); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v7 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/if.hpp b/ngraph/core/include/openvino/op/if.hpp new file mode 100644 index 00000000000..f262a0e7179 --- /dev/null +++ b/ngraph/core/include/openvino/op/if.hpp @@ -0,0 +1,94 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/core/function.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/util/multi_subgraph_base.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief If operation. +class OPENVINO_API If : public util::MultiSubGraphOp { +public: + enum BodyIndexes { THEN_BODY_INDEX = 0, ELSE_BODY_INDEX = 1 }; + + OPENVINO_RTTI_DECLARATION; + bool visit_attributes(AttributeVisitor& visitor) override; + + /// \brief Constructs If with condition + /// + /// \param execution_condition condition node. + If(const Output& execution_condition); + If(); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \brief gets then_body as ngraph::Function. + /// + /// \return then_body as ngraph::Function. + const std::shared_ptr& get_then_body() const { + return m_bodies[THEN_BODY_INDEX]; + } + + /// \brief gets else_body as ngraph::Function. + /// + /// \return else_body as ngraph::Function. + const std::shared_ptr& get_else_body() const { + return m_bodies[ELSE_BODY_INDEX]; + } + + /// \brief sets new ngraph::Function as new then_body. + /// + /// \param body new body for 'then' branch. 
+ void set_then_body(const std::shared_ptr& body) { + m_bodies[THEN_BODY_INDEX] = body; + } + + /// \brief sets new ngraph::Function as new else_body. + /// + /// \param body new body for 'else' branch. + void set_else_body(const std::shared_ptr& body) { + m_bodies[ELSE_BODY_INDEX] = body; + } + + /// \brief sets new input to the operation associated with parameters + /// of each sub-graphs + /// + /// \param value input to operation + /// \param then_parameter parameter for then_body or nullptr + /// \param else_parameter parameter for else_body or nullptr + void set_input(const Output& value, + const std::shared_ptr& then_parameter, + const std::shared_ptr& else_parameter); + + /// \brief sets new output from the operation associated with results + /// of each sub-graphs + /// + /// \param then_result result from then_body + /// \param else_result result from else_body + /// \return output from operation + Output set_output(const std::shared_ptr& then_result, + const std::shared_ptr& else_result); + + void validate_and_infer_types() override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + + bool has_evaluate() const override; + +private: + using OutputMap = std::map>; + + void validate_and_infer_type_body(const std::shared_ptr& body, + const MultiSubgraphInputDescriptionVector& input_descriptors); + + OutputMap get_mapping_outputs_on_body_description(const MultiSubgraphOutputDescriptionVector& output_descriptors); +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/interpolate.hpp b/ngraph/core/include/openvino/op/interpolate.hpp new file mode 100644 index 00000000000..f66f1c226f1 --- /dev/null +++ b/ngraph/core/include/openvino/op/interpolate.hpp @@ -0,0 +1,360 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/core/attribute_adapter.hpp" +#include
"openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v0 { + +/// \brief Layer which performs bilinear interpolation +class OPENVINO_API Interpolate : public Op { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Structure that specifies attributes for interpolation + struct Attributes { + // specify dimension indices where interpolation is applied, and `axes` is any + // unordered list of indices of different dimensions of input tensor. Required. + AxisSet axes; + // specifies type of interpolation + // one of `nearest`, `linear`, `cubic`, `area`. Required. + std::string mode; + // a flag that specifies whether to align corners or not. + // `true` (default) means the alignment is applied, + // `false` means the alignment isn't applied. + bool align_corners = true; + // a flag that specifies whether to perform anti-aliasing. default is `false` + bool antialias = false; + // specify the number of pixels to add to the beginning of the image being + // interpolated. This addition of pixels is done before interpolation calculation. + std::vector pads_begin; + // specify the number of pixels to add to the end of the image being interpolated. + // This addition of pixels is done before interpolation calculation.
+ std::vector pads_end; + }; + + enum class InterpolateMode { + NEAREST, + LINEAR, + CUBIC, + AREA, + nearest OPENVINO_ENUM_DEPRECATED("Please use NEAREST instead") = NEAREST, + linear OPENVINO_ENUM_DEPRECATED("Please use LINEAR instead") = LINEAR, + cubic OPENVINO_ENUM_DEPRECATED("Please use CUBIC instead") = CUBIC, + area OPENVINO_ENUM_DEPRECATED("Please use AREA instead") = AREA + }; + + Interpolate() = default; + /// \brief Constructs a Interpolate operation + /// + /// \param image Input image + /// \param output_shape Output shape of spatial axes + /// \param attrs Interpolation attributes + Interpolate(const Output& image, const Output& output_shape, const Attributes& attrs); + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + const Attributes& get_attrs() const { + return m_attrs; + } + +private: + Attributes m_attrs; +}; +} // namespace v0 + +namespace v4 { +class OPENVINO_API Interpolate : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Shape calculation mode + /// + /// sizes - output shape for interpolated axes is calculated using input `sizes` + /// scales - output shape for interpolated axes is calculated using input `scales` + enum class ShapeCalcMode { + SIZES, + SCALES, + sizes OPENVINO_ENUM_DEPRECATED("Please use SIZES instead") = SIZES, + scales OPENVINO_ENUM_DEPRECATED("Please use SCALES instead") = SCALES + }; + + /// \brief Interpolation mode + /// + /// nearest - nearest interpolation + /// linear - linear interpolation as in TensorFlow + /// linear_onnx - linear interpolation as in ONNX + /// cubic - cubic interpolation + enum class InterpolateMode { + NEAREST, + LINEAR, + LINEAR_ONNX, + CUBIC, + nearest OPENVINO_ENUM_DEPRECATED("Please use NEAREST instead") = NEAREST, + linear OPENVINO_ENUM_DEPRECATED("Please use LINEAR instead") = LINEAR, + linear_onnx 
OPENVINO_ENUM_DEPRECATED("Please use LINEAR_ONNX instead") = LINEAR_ONNX, + cubic OPENVINO_ENUM_DEPRECATED("Please use CUBIC instead") = CUBIC + }; + + /// \brief Mode of the calculation of the source coordinate from resized one + /// + /// These modes are modes from ONNX runtime. + enum class CoordinateTransformMode { + HALF_PIXEL, + PYTORCH_HALF_PIXEL, + ASYMMETRIC, + TF_HALF_PIXEL_FOR_NN, + ALIGN_CORNERS, + half_pixel OPENVINO_ENUM_DEPRECATED("Please use HALF_PIXEL instead") = HALF_PIXEL, + pytorch_half_pixel OPENVINO_ENUM_DEPRECATED("Please use PYTORCH_HALF_PIXEL instead") = PYTORCH_HALF_PIXEL, + asymmetric OPENVINO_ENUM_DEPRECATED("Please use ASYMMETRIC instead") = ASYMMETRIC, + tf_half_pixel_for_nn OPENVINO_ENUM_DEPRECATED("Please use TF_HALF_PIXEL_FOR_NN instead") = TF_HALF_PIXEL_FOR_NN, + align_corners OPENVINO_ENUM_DEPRECATED("Please use ALIGN_CORNERS instead") = ALIGN_CORNERS + }; + + /// \brief Round modes for the nearest interpolation. + enum class NearestMode { + ROUND_PREFER_FLOOR, + ROUND_PREFER_CEIL, + FLOOR, + CEIL, + SIMPLE, + round_prefer_floor OPENVINO_ENUM_DEPRECATED("Please use ROUND_PREFER_FLOOR instead") = ROUND_PREFER_FLOOR, + round_prefer_ceil OPENVINO_ENUM_DEPRECATED("Please use ROUND_PREFER_CEIL instead") = ROUND_PREFER_CEIL, + floor OPENVINO_ENUM_DEPRECATED("Please use FLOOR instead") = FLOOR, + ceil OPENVINO_ENUM_DEPRECATED("Please use CEIL instead") = CEIL, + simple OPENVINO_ENUM_DEPRECATED("Please use SIMPLE instead") = SIMPLE + }; + + struct InterpolateAttrs { + // specifies type of interpolation + // one of `nearest`, `linear`, `linear_onnx`, `cubic` Required. + InterpolateMode mode = InterpolateMode::NEAREST; + // specifies shape calculation mode + // one of `sizes`, `scales` Required + ShapeCalcMode shape_calculation_mode = ShapeCalcMode::SIZES; + // specify the number of pixels to add to the beginning of the image being + // interpolated. This addition of pixels is done before interpolation + // calculation. 
+ std::vector pads_begin; + // specify the number of pixels to add to the end of the image being + // interpolated. This addition of pixels is done before interpolation + // calculation. + std::vector pads_end; + // specifies how to transform the coordinate in the resized tensor to the + // coordinate in the original tensor. one of `half_pixel`, `pytorch_half_pixel`, + // `asymmetric`, `tf_half_pixel_for_nn`, `align_corners` + CoordinateTransformMode coordinate_transformation_mode = CoordinateTransformMode::HALF_PIXEL; + // specifies round mode when `mode == nearest` and is used only when `mode == + // nearest`. one of `round_prefer_floor`, `round_prefer_ceil`, `floor`, `ceil`, + // `simple` + NearestMode nearest_mode = NearestMode::ROUND_PREFER_FLOOR; + // a flag that specifies whether to perform anti-aliasing. default is `false` + bool antialias = false; + // specifies the parameter *a* for cubic interpolation (see, e.g. + // [article](https://ieeexplore.ieee.org/document/1163711/)). *cube_coeff* is + // used only when `mode == cubic` + double cube_coeff = -0.75f; + + InterpolateAttrs() = default; + + InterpolateAttrs(InterpolateMode mode, + ShapeCalcMode shape_calculation_mode, + const std::vector& pads_begin, + const std::vector& pads_end, + CoordinateTransformMode coordinate_transformation_mode = CoordinateTransformMode::HALF_PIXEL, + NearestMode nearest_mode = NearestMode::ROUND_PREFER_FLOOR, + bool antialias = false, + double cube_coeff = -0.75) + : mode(mode), + shape_calculation_mode(shape_calculation_mode), + pads_begin(pads_begin), + pads_end(pads_end), + coordinate_transformation_mode(coordinate_transformation_mode), + nearest_mode(nearest_mode), + antialias(antialias), + cube_coeff(cube_coeff) {} + }; + + Interpolate() = default; + /// \brief Constructs a Interpolate operation without 'axes' input. + /// + /// \param image Input image + /// \param output_shape Output shape of spatial axes + /// \param scales Scales of spatial axes, i.e. 
output_shape / input_shape + /// \param attrs Interpolation attributes + Interpolate(const Output& image, + const Output& output_shape, + const Output& scales, + const InterpolateAttrs& attrs); + + /// \brief Constructs a Interpolate operation with 'axes' input. + /// + /// \param image Input image + /// \param output_shape Output shape of spatial axes + /// \param scales Scales of spatial axes, i.e. output_shape / input_shape + /// \param axes Interpolation axes + /// \param attrs Interpolation attributes + Interpolate(const Output& image, + const Output& output_shape, + const Output& scales, + const Output& axes, + const InterpolateAttrs& attrs); + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + + const InterpolateAttrs& get_attrs() const { + return m_attrs; + } + +protected: + /// \return The interpolation axes. + std::vector get_axes() const; + +private: + bool evaluate_interpolate(const HostTensorVector& outputs, const HostTensorVector& inputs) const; + InterpolateAttrs m_attrs; + + /// \brief Corrects pads_begin and pads_end attributes. + /// + /// \details When Interpolate-4 is a result of some transformation, it is possible + /// that pads_begin.size() != pads_end.size() or + /// pads_begin.size() != input_rank. In such case, we should correct + /// pads_begin and pads_end, using padding of pads_begin and pads_end by + /// zeros or using pads_begin[0 : input_rank], pads_end[0 : input_rank]. + /// + /// Padding of pads_begin is performed when pads_begin.size() < input_rank, + /// and pads_begin[0 : input_rank] is used when + /// pads_begin.size() < input_rank. + /// + /// Similarly for pads_end. + void correct_pads(); + + /// \brief Calculates input shape after padding. 
+ /// + /// \param input_shape Shape of input data. + /// + /// \return Padded input shape, i.e. input_shape + pads_begin + pads_end + PartialShape get_padded_input_shape(const PartialShape& input_shape) const; + + /// \brief Infers output shape using scales. + /// + /// \param output_shape[in,out] output shape + /// \param axes Interpolation axes + /// \param scales Scales for interpolated axes + /// \param padded_input_shape input shape after padding + void infer_using_scales(PartialShape& output_shape, + const std::vector& axes, + const std::vector& scales, + const PartialShape& padded_input_shape) const; + + /// \brief Infers output shape using sizes. + /// + /// \param output_shape[in,out] output shape + /// \param axes Interpolation axes + /// \param sizes sizes for interpolated axes + void infer_using_shapes(PartialShape& output_shape, + const std::vector& axes, + const std::vector& sizes) const; +}; +} // namespace v4 +} // namespace op + +//---------------------------------------- v0 -------------------------------------------------- +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v0::Interpolate::InterpolateMode& type); + +//---------------------------------------- v4 -------------------------------------------------- + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::InterpolateMode& type); + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::CoordinateTransformMode& type); + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::NearestMode& type); + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::ShapeCalcMode& type); + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v0::Interpolate::InterpolateMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + const 
DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v4::Interpolate::InterpolateMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 4}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v4::Interpolate::CoordinateTransformMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 4}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v4::Interpolate::NearestMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 4}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v4::Interpolate::ShapeCalcMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 4}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; +} // namespace ov diff --git a/ngraph/core/include/openvino/op/less.hpp b/ngraph/core/include/openvino/op/less.hpp new file mode 100644 index 00000000000..19f8919216a --- /dev/null +++ b/ngraph/core/include/openvino/op/less.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_comparison.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// 
\brief Elementwise less-than operation. +class OPENVINO_API Less : public util::BinaryElementwiseComparison { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a less-than operation. + Less() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} + /// \brief Constructs a less-than operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. + /// \param auto_broadcast Auto broadcast specification + Less(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/less_eq.hpp b/ngraph/core/include/openvino/op/less_eq.hpp new file mode 100644 index 00000000000..b18c84bef4f --- /dev/null +++ b/ngraph/core/include/openvino/op/less_eq.hpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_comparison.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise less-than-or-equal operation. +class OPENVINO_API LessEqual : public util::BinaryElementwiseComparison { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a less-than-or-equal operation. + LessEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} + + /// \brief Constructs a less-than-or-equal operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. 
+ /// \param auto_broadcast Auto broadcast specification + LessEqual(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/log.hpp b/ngraph/core/include/openvino/op/log.hpp new file mode 100644 index 00000000000..da3ff95949b --- /dev/null +++ b/ngraph/core/include/openvino/op/log.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise natural log operation. +class OPENVINO_API Log : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a natural log operation. + Log() = default; + /// \brief Constructs a natural log operation. + /// + /// \param arg Node that produces the input tensor. 
+ Log(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/log_softmax.hpp b/ngraph/core/include/openvino/op/log_softmax.hpp new file mode 100644 index 00000000000..b737d081541 --- /dev/null +++ b/ngraph/core/include/openvino/op/log_softmax.hpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/op.hpp" + +namespace ov { +namespace op { +namespace v5 { +class OPENVINO_API LogSoftmax : public Op { +public: + OPENVINO_RTTI_DECLARATION; + LogSoftmax() = default; + /// \brief Constructs a LogSoftmax operation. + /// + /// \param arg Node that produces the first input tensor.
+ /// `[d0, ...]` + /// \param axis The axis position (0-based) on which to calculate the LogSoftmax. + /// + /// Output `[d0, ...]` + /// + LogSoftmax(const Output& arg, const int64_t axis); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + int64_t get_axis() const { + return m_axis; + } + void set_axis(const int64_t axis) { + m_axis = axis; + } + +private: + int64_t m_axis = 1; +}; +} // namespace v5 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/loop.hpp b/ngraph/core/include/openvino/op/loop.hpp new file mode 100644 index 00000000000..c19c53a7b2c --- /dev/null +++ b/ngraph/core/include/openvino/op/loop.hpp @@ -0,0 +1,90 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/core/function.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/util/sub_graph_base.hpp" + +namespace ov { +namespace op { +namespace v5 { +/// \brief Iterate a body over tensors, accumulating into tensors. +class NGRAPH_API Loop : public op::util::SubGraphOp { +public: + /// \brief Allows to define the purpose of inputs/outputs in the body + struct SpecialBodyPorts { + SpecialBodyPorts() = default; + SpecialBodyPorts(int64_t in_current_iteration_input_idx, int64_t in_body_condition_output_idx) + : current_iteration_input_idx(in_current_iteration_input_idx), + body_condition_output_idx(in_body_condition_output_idx) {} + // -1 means the input is not provided, this input is optional + int64_t current_iteration_input_idx = -1; + // -1 means the output is not provided, + // this output is required, throw an exception if not provided + int64_t body_condition_output_idx = -1; + }; + + NGRAPH_RTTI_DECLARATION; + + /// \brief Constructs a Loop operation. 
+ Loop() = default; + + /// \brief Constructs a Loop operation. + /// + /// \param trip_count Node specifies the maximum number of iterations. + /// \param execution_condition Node determines whether to execute the first + /// iteration or not. + Loop(const Output& trip_count, const Output& execution_condition); + + Output get_concatenated_slices(const Output& value, + int64_t start, + int64_t stride, + int64_t part_size, + int64_t end, + int64_t axis) override; + + void set_special_body_ports(const SpecialBodyPorts& special_body_ports) { + m_special_body_ports = special_body_ports; + } + + SpecialBodyPorts get_special_body_ports() const { + return m_special_body_ports; + } + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + Loop(const Loop&); + +private: + void clone_to(Loop& dst, const OutputVector& new_args) const; + + SpecialBodyPorts m_special_body_ports; +}; +} // namespace v5 +} // namespace op + +template <> +class NGRAPH_API AttributeAdapter + : public DirectValueAccessor { +public: + AttributeAdapter(op::v5::Loop::SpecialBodyPorts& value) + : DirectValueAccessor(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/lrn.hpp b/ngraph/core/include/openvino/op/lrn.hpp new file mode 100644 index 00000000000..adf837cbd49 --- /dev/null +++ b/ngraph/core/include/openvino/op/lrn.hpp @@ -0,0 +1,78 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +// clang-format 
off +/// \brief Elementwise Local Response Normalization (LRN) operation. +/// +/// ## Inputs +/// +/// | | Type | Description | +/// | ----- | --------------------------------------- | ----------------------------------------------- | +/// | `arg` | \f$N[n, c, d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. | +/// +/// ## Output +/// +/// | Type | Description | +/// | ---------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +/// | \f$N[n, c, d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[n, c, d_1,\dots,d_n] = \frac{N[n,i,d_1,\dots,d_n]}{ (bias + alpha * (\sum_{i=max(0,(nsize-1)/2)}^{min(C, (nsize-1)/2)+1} N[n,i,d_1,\dots,d_n]^{2}) ^ {2})}\f$ | +// clang-format on +class NGRAPH_API LRN : public Op { +public: + NGRAPH_RTTI_DECLARATION; + + /// \brief Constructs a LRN operation. + LRN() = default; + /// \brief Constructs a LRN operation. + /// + /// \param arg Node that produces the input tensor. 
+ LRN(const Output& arg, double alpha, double beta, double bias, size_t size); + + LRN(const Output& arg, const Output& axes, double alpha, double beta, double bias, size_t size); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + void validate_and_infer_types() override; + + double get_alpha() const { + return m_alpha; + } + void set_alpha(double alpha) { + m_alpha = alpha; + } + double get_beta() const { + return m_beta; + } + void set_beta(double beta) { + m_beta = beta; + } + double get_bias() const { + return m_bias; + } + void set_bias(double bias) { + m_bias = bias; + } + size_t get_nsize() const { + return m_size; + } + void set_nsize(size_t size) { + m_size = size; + } + AxisSet get_reduction_axes() const; + +protected: + double m_alpha; + double m_beta; + double m_bias; + size_t m_size; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/lstm_cell.hpp b/ngraph/core/include/openvino/op/lstm_cell.hpp new file mode 100644 index 00000000000..77946d2f0fc --- /dev/null +++ b/ngraph/core/include/openvino/op/lstm_cell.hpp @@ -0,0 +1,397 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "openvino/op/op.hpp" +#include "openvino/op/util/activation_functions.hpp" +#include "openvino/op/util/rnn_cell_base.hpp" + +namespace ov { +namespace op { +enum class LSTMWeightsFormat { + FICO, // IE + ICOF, // PyTorch + IFCO, // DNNL, TF, MxNet + IFOC, // Caffe + IOFC, // ONNX +}; + +namespace v0 { +/// +/// \brief Class for single lstm cell node. +/// +/// \note Following implementation supports: +/// \li \c peepholes Gers & Schmidhuber (2000) +/// https://ieeexplore.ieee.org/document/861302 +/// \li Coupling input and forget gates. 
+/// +/// \note It calculates following equations: +/// +/// it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) +/// ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) +/// ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) +/// Ct = ft (.) Ct-1 + it (.) ct +/// ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) +/// Ht = ot (.) h(Ct) +/// +/// * - Is a dot product, +/// (.) - is a Hadamard product (element-wise), +/// f, g, h - are activation functions. +/// +/// \note This class represents only single *cell* (for current time step) and not +/// the whole LSTM Sequence layer +/// +/// \sa LSTMSequence, RNNCell, GRUCell +/// +class OPENVINO_API LSTMCell : public util::RNNCellBase { +public: + OPENVINO_RTTI_DECLARATION; + + LSTMCell(); + /// + /// \brief Constructs LSTMCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] initial_cell_state The cell state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The gate weights tensor with shape: + /// [4*hidden_size, input_size]. + /// \param[in] R The recurrence weights tensor with shape: + /// [4*hidden_size, hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] weights_format The order of gates in weights tensors. The + /// default format is IFCO since it is used by + /// DNNL. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. 
+ /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// \param[in] input_forget Controls coupling input and forget gates. + /// + LSTMCell(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& W, + const Output& R, + std::size_t hidden_size, + LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, + const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f, + bool input_forget = false); + + /// + /// \brief Constructs LSTMCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] initial_cell_state The cell state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: [4*hidden_size, + /// input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [4*hidden_size, hidden_size]. + /// \param[in] B The bias tensor for gates with shape: + /// [4*hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] weights_format The order of gates in weights tensors. The + /// default format is IFCO since it is used by + /// DNNL. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. 
+ /// \param[in] input_forget Controls coupling input and forget gates. + /// + LSTMCell(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& W, + const Output& R, + const Output& B, + std::size_t hidden_size, + LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, + const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f, + bool input_forget = false); + + /// + /// \brief Constructs LSTMCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] initial_cell_state The cell state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: [4*hidden_size, + /// input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [4*hidden_size, hidden_size]. + /// \param[in] B The bias tensor for gates with shape: + /// [4*hidden_size]. + /// \param[in] P The weight tensor for peepholes with shape: + /// [3*hidden_size] - 3 equals to only iof gates. + /// The order is: input, output, forget gates. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] weights_format The order of gates in weights tensors. The + /// default format is IFCO since it is used by + /// DNNL. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. 
+ /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// \param[in] input_forget Controls coupling input and forget gates. + /// + LSTMCell(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& W, + const Output& R, + const Output& B, + const Output& P, + std::size_t hidden_size, + LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, + const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f, + bool input_forget = false); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool get_input_forget() const { + return m_input_forget; + } + LSTMWeightsFormat get_weights_format() const { + return m_weights_format; + } + +private: + /// + /// \brief Creates the default bias input initialized with zeros. + /// + /// \return The object of Output class. + /// + Output get_default_bias_input() const; + + /// + /// \brief Creates the default peepholes input initialized with zeros. + /// + /// \return The object of Output class. + /// + Output get_default_peepholes_input() const; + /// + /// \brief The Activation function f. + /// + util::ActivationFunction m_activation_f; + /// + /// \brief The Activation function g. + /// + util::ActivationFunction m_activation_g; + /// + /// \brief The Activation function h. + /// + util::ActivationFunction m_activation_h; + /// + /// \brief Controls whether to couple input and forget gates. + /// + bool m_input_forget = false; + + /// + /// \brief The order of gates in weights tensors. 
+ /// + LSTMWeightsFormat m_weights_format; + + static constexpr std::size_t s_gates_count{4}; + static constexpr std::size_t s_peepholes_count{3}; +}; +} // namespace v0 + +namespace v4 { +/// +/// \brief Class for single lstm cell node. +/// +/// \note Following implementation supports: +/// \li \c peepholes Gers & Schmidhuber (2000) +/// https://ieeexplore.ieee.org/document/861302 +/// \li Coupling input and forget gates. +/// +/// \note It calculates following equations: +/// +/// it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) +/// ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Wbf + Rbf) +/// ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) +/// Ct = ft (.) Ct-1 + it (.) ct +/// ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Wbo + Rbo) +/// Ht = ot (.) h(Ct) +/// +/// * - Is a dot product, +/// (.) - is a Hadamard product (element-wise), +/// f, g, h - are activation functions. +/// +/// \note This class represents only single *cell* (for current time step) and not +/// the whole LSTM Sequence layer +/// +/// \sa LSTMSequence, RNNCell, GRUCell +/// +class OPENVINO_API LSTMCell : public util::RNNCellBase { +public: + OPENVINO_RTTI_DECLARATION; + + LSTMCell(); + /// + /// \brief Constructs LSTMCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] initial_cell_state The cell state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The gate weights tensor with shape: + /// [4*hidden_size, input_size]. + /// \param[in] R The recurrence weights tensor with shape: + /// [4*hidden_size, hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. 
+ /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + LSTMCell(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& W, + const Output& R, + std::size_t hidden_size, + const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f); + + /// + /// \brief Constructs LSTMCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] initial_cell_state The cell state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: [4*hidden_size, + /// input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [4*hidden_size, hidden_size]. + /// \param[in] B The bias tensor for gates with shape: + /// [4*hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. 
+ /// + LSTMCell(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& W, + const Output& R, + const Output& B, + std::size_t hidden_size, + const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + +private: + /// + /// \brief Creates the default bias input initialized with zeros. + /// + /// \return The object of Output class. + /// + Output get_default_bias_input() const; + + /// + /// \brief The Activation function f. + /// + util::ActivationFunction m_activation_f; + /// + /// \brief The Activation function g. + /// + util::ActivationFunction m_activation_g; + /// + /// \brief The Activation function h. + /// + util::ActivationFunction m_activation_h; + + static constexpr std::size_t s_gates_count{4}; +}; +} // namespace v4 +} // namespace op + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::LSTMWeightsFormat& type); + +template <> +class OPENVINO_API AttributeAdapter : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::LSTMWeightsFormat& value) : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/lstm_sequence.hpp b/ngraph/core/include/openvino/op/lstm_sequence.hpp new file mode 100644 index 00000000000..a89f425a052 --- /dev/null +++ b/ngraph/core/include/openvino/op/lstm_sequence.hpp @@ -0,0 +1,196 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include 
+#include + +#include "openvino/op/constant.hpp" +#include "openvino/op/lstm_cell.hpp" +#include "openvino/op/util/attr_types.hpp" +#include "openvino/op/util/rnn_cell_base.hpp" + +namespace ov { +namespace op { +namespace v0 { + +/// +/// \brief Class for lstm sequence node. +/// +/// \note It follows notation and equations defined as in ONNX standard: +/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM +/// +/// \sa LSTMCell, RNNCell, GRUCell +/// +/// +class NGRAPH_API LSTMSequence : public Op { +public: + NGRAPH_RTTI_DECLARATION; + LSTMSequence(); + + using direction = RecurrentSequenceDirection; + + size_t get_default_output_index() const override { + return no_default_index(); + } + explicit LSTMSequence(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& sequence_lengths, + const Output& W, + const Output& R, + const Output& B, + const Output& P, + const std::int64_t hidden_size, + const direction lstm_direction, + LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, + const std::vector activations_alpha = {}, + const std::vector activations_beta = {}, + const std::vector activations = {"sigmoid", "tanh", "tanh"}, + const float clip_threshold = 0, + const bool input_forget = false); + + explicit LSTMSequence(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& sequence_lengths, + const Output& W, + const Output& R, + const Output& B, + const std::int64_t hidden_size, + const direction lstm_direction, + LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + const std::vector& activations = {"sigmoid", "tanh", "tanh"}, + const float clip_threshold = 0, + const bool input_forget = false); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr 
clone_with_new_inputs(const OutputVector& new_args) const override; + + std::vector get_activations_alpha() const { + return m_activations_alpha; + } + std::vector get_activations_beta() const { + return m_activations_beta; + } + std::vector get_activations() const { + return m_activations; + } + float get_clip_threshold() const { + return m_clip_threshold; + } + direction get_direction() const { + return m_direction; + } + std::int64_t get_hidden_size() const { + return m_hidden_size; + } + bool get_input_forget() const { + return m_input_forget; + } + LSTMWeightsFormat get_weights_format() const { + return m_weights_format; + } + +private: + /// + /// \brief Gets the masked value according to sequence length in a batch. + /// + /// \note Zeros out values or sets them to default value for inputs with + /// sequence length shorter than currently procssed time step. + /// + /// \param[in] data The input value. + /// \param[in] time_step The current time step denoting sequence length. + /// \param[in] batch_axis The batch axis index of data tensor. + /// \param[in] default_value The default value for masked elements. + /// + /// \return The masked value. + /// + std::shared_ptr get_masked_node(const Output& data, + std::int32_t time_step, + std::size_t batch_axis = 0, + const Output& default_value = Output()) const; + + OutputVector lstm_pass(bool is_reverse = false) const; + + // Split(bi-directional) and squeeze input data to remove 'num_direction' dimension. + std::shared_ptr prepare_input(Output node, bool is_reverse, size_t num_direction_axis = 0) const; + + std::vector m_activations_alpha; + std::vector m_activations_beta; + std::vector m_activations; + float m_clip_threshold; + direction m_direction; + std::int64_t m_hidden_size; + bool m_input_forget; + LSTMWeightsFormat m_weights_format; +}; +} // namespace v0 + +namespace v5 { +/// +/// \brief Class for lstm sequence node. 
+/// +/// \note It follows notation and equations defined as in ONNX standard: +/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM +/// +/// \sa LSTMCell, RNNCell, GRUCell +/// +/// +class NGRAPH_API LSTMSequence : public util::RNNCellBase { +public: + NGRAPH_RTTI_DECLARATION; + LSTMSequence() = default; + + using direction = RecurrentSequenceDirection; + + size_t get_default_output_index() const override { + return no_default_index(); + } + explicit LSTMSequence(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& sequence_lengths, + const Output& W, + const Output& R, + const Output& B, + const std::int64_t hidden_size, + const direction lstm_direction, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + const std::vector& activations = {"sigmoid", "tanh", "tanh"}, + const float clip = 0.f) + : RNNCellBase({X, initial_hidden_state, initial_cell_state, sequence_lengths, W, R, B}, + hidden_size, + clip, + activations, + activations_alpha, + activations_beta), + m_direction(lstm_direction) { + constructor_validate_and_infer_types(); + } + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + direction get_direction() const { + return m_direction; + } + +private: + direction m_direction; +}; +} // namespace v5 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/parameter.hpp b/ngraph/core/include/openvino/op/parameter.hpp new file mode 100644 index 00000000000..7878f582927 --- /dev/null +++ b/ngraph/core/include/openvino/op/parameter.hpp @@ -0,0 +1,78 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief A function parameter. 
+/// +/// Parameters are nodes that represent the arguments that will be passed to +/// user-defined functions. Function creation requires a sequence of parameters. +/// Basic graph operations do not need parameters attached to a function. +class OPENVINO_API Parameter : public op::Op { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructions a tensor-typed parameter node. + Parameter() = default; + /// \brief Constructions a tensor-typed parameter node. + /// + /// \param element_type The element type of the parameter. + /// \param pshape The partial shape of the parameter. + Parameter(const ngraph::element::Type& element_type, const PartialShape& pshape); + + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool is_relevant_to_shapes() const; + void set_is_relevant_to_shapes(bool is_relevant); + + const PartialShape& get_partial_shape() const { + return m_partial_shape; + } + PartialShape& get_partial_shape() { + return m_partial_shape; + } + void set_partial_shape(const PartialShape& partial_shape) { + m_partial_shape = partial_shape; + } + const element::Type& get_element_type() const { + return m_element_type; + } + void set_element_type(const element::Type& element_type) { + m_element_type = element_type; + } + +protected: + PartialShape m_partial_shape; + element::Type m_element_type; + bool m_is_relevant_to_shapes{false}; +}; +} // namespace v0 +} // namespace op +using ParameterVector = std::vector>; + +template <> +class OPENVINO_API AttributeAdapter : public VisitorAdapter { +public: + AttributeAdapter(ParameterVector& ref); + + bool visit_attributes(AttributeVisitor& visitor) override; + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } + +protected: + ParameterVector& m_ref; +}; + +} // namespace 
ov diff --git a/ngraph/core/include/openvino/op/result.hpp b/ngraph/core/include/openvino/op/result.hpp new file mode 100644 index 00000000000..12f6d92510a --- /dev/null +++ b/ngraph/core/include/openvino/op/result.hpp @@ -0,0 +1,61 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API Result : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Allows a value to be used as a function result. + Result() = default; + /// \brief Allows a value to be used as a function result. + /// + /// \param arg Node that produces the input tensor. + Result(const Output& arg, bool needs_default_layout = false); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + void set_needs_default_layout(bool val) { + m_needs_default_layout = val; + } + bool needs_default_layout() const { + return m_needs_default_layout; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; + +private: + bool m_needs_default_layout{false}; +}; +} // namespace v0 +} // namespace op +using ResultVector = std::vector>; + +template <> +class OPENVINO_API AttributeAdapter : public VisitorAdapter { +public: + AttributeAdapter(ResultVector& ref); + + bool visit_attributes(AttributeVisitor& visitor) override; + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } + +protected: + ResultVector& m_ref; +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/tensor_iterator.hpp 
b/ngraph/core/include/openvino/op/tensor_iterator.hpp new file mode 100644 index 00000000000..9d8a2e5a838 --- /dev/null +++ b/ngraph/core/include/openvino/op/tensor_iterator.hpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/core/function.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/util/sub_graph_base.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Iterate a body over tensors, accumulating into tensors. +class NGRAPH_API TensorIterator : public op::util::SubGraphOp { +public: + NGRAPH_RTTI_DECLARATION; + + bool visit_attributes(AttributeVisitor& visitor) override; + + TensorIterator() = default; + explicit TensorIterator(const OutputVector& values); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + /// \return the body of the iteration + std::shared_ptr get_body() const { + return m_bodies[0]; + } + /// \param body set the body of the iteration + void set_body(const std::shared_ptr& body) { + set_function(body); + } + void validate_and_infer_types() override; + void revalidate_and_infer_types_for_body_ops(); + +private: + void try_to_set_num_iterations_if_no_slice_inputs(); +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/src/op/gather.cpp b/ngraph/core/src/op/gather.cpp index e72163839dc..a52eaff000d 100644 --- a/ngraph/core/src/op/gather.cpp +++ b/ngraph/core/src/op/gather.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::Gather, "Gather", 1, op::util::GatherBase); +OPENVINO_RTTI_DEFINITION(op::v1::Gather, "Gather", 1, op::util::GatherBase); op::v1::Gather::Gather(const Output& params, const Output& indices, const Output& axes) : GatherBase(params, indices, axes) { @@ -36,7 +36,7 @@ shared_ptr op::v1::Gather::clone_with_new_inputs(const OutputVector& new_a return make_shared(new_args.at(0), 
new_args.at(1), new_args.at(2)); } -NGRAPH_RTTI_DEFINITION(op::v7::Gather, "Gather", 7, op::util::GatherBase); +OPENVINO_RTTI_DEFINITION(op::v7::Gather, "Gather", 7, op::util::GatherBase); op::v7::Gather::Gather(const Output& data, const Output& indices, @@ -78,7 +78,7 @@ shared_ptr op::v7::Gather::clone_with_new_inputs(const OutputVector& new_a return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_batch_dims); } -NGRAPH_RTTI_DEFINITION(op::v8::Gather, "Gather", 8, op::util::GatherBase); +OPENVINO_RTTI_DEFINITION(op::v8::Gather, "Gather", 8, op::util::GatherBase); op::v8::Gather::Gather(const Output& data, const Output& indices, diff --git a/ngraph/core/src/op/gather_elements.cpp b/ngraph/core/src/op/gather_elements.cpp index 71f341dd690..be92de695cd 100644 --- a/ngraph/core/src/op/gather_elements.cpp +++ b/ngraph/core/src/op/gather_elements.cpp @@ -12,7 +12,7 @@ using namespace ngraph; // ------------------------------ V6 ------------------------------ -NGRAPH_RTTI_DEFINITION(op::v6::GatherElements, "GatherElements", 6); +OPENVINO_RTTI_DEFINITION(op::v6::GatherElements, "GatherElements", 6); op::v6::GatherElements::GatherElements(const Output& data, const Output& indices, const int64_t axis) : Op({data, indices}), diff --git a/ngraph/core/src/op/gather_nd.cpp b/ngraph/core/src/op/gather_nd.cpp index d94cefa5b24..61e1c2708f7 100644 --- a/ngraph/core/src/op/gather_nd.cpp +++ b/ngraph/core/src/op/gather_nd.cpp @@ -12,7 +12,7 @@ using namespace ngraph; // ------------------------------ V5 ------------------------------ -NGRAPH_RTTI_DEFINITION(op::v5::GatherND, "GatherND", 5); +OPENVINO_RTTI_DEFINITION(op::v5::GatherND, "GatherND", 5); op::v5::GatherND::GatherND(const Output& data, const Output& indices, const size_t batch_dims) : Op({data, indices}), diff --git a/ngraph/core/src/op/gather_tree.cpp b/ngraph/core/src/op/gather_tree.cpp index 88b4b3cab71..994e27be700 100644 --- a/ngraph/core/src/op/gather_tree.cpp +++ b/ngraph/core/src/op/gather_tree.cpp 
@@ -10,7 +10,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::GatherTree, "GatherTree", 1); +OPENVINO_RTTI_DEFINITION(op::v1::GatherTree, "GatherTree", 1); op::v1::GatherTree::GatherTree(const Output& step_ids, const Output& parent_idx, diff --git a/ngraph/core/src/op/gelu.cpp b/ngraph/core/src/op/gelu.cpp index 76555869a89..0fc27a3373e 100644 --- a/ngraph/core/src/op/gelu.cpp +++ b/ngraph/core/src/op/gelu.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; // ------------------------------ V0 ------------------------------ -NGRAPH_RTTI_DEFINITION(op::v0::Gelu, "Gelu", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Gelu, "Gelu", 0); op::v0::Gelu::Gelu() : Op() {} @@ -60,13 +60,13 @@ NGRAPH_API EnumNames& EnumNames::type_info; -} // namespace ov std::ostream& op::operator<<(std::ostream& s, const op::GeluApproximationMode& type) { return s << as_string(type); } -NGRAPH_RTTI_DEFINITION(op::v7::Gelu, "Gelu", 7); +constexpr DiscreteTypeInfo AttributeAdapter::type_info; +} // namespace ov +OPENVINO_RTTI_DEFINITION(op::v7::Gelu, "Gelu", 7); op::v7::Gelu::Gelu(const Output& data, GeluApproximationMode mode) : UnaryElementwiseArithmetic(data), diff --git a/ngraph/core/src/op/greater.cpp b/ngraph/core/src/op/greater.cpp index 10d89bcf581..ead2f846b93 100644 --- a/ngraph/core/src/op/greater.cpp +++ b/ngraph/core/src/op/greater.cpp @@ -50,7 +50,7 @@ bool evaluate_greater(const HostTensorPtr& arg0, //-------------------------------------- v1 ------------------------------------ -NGRAPH_RTTI_DEFINITION(op::v1::Greater, "Greater", 1, op::util::BinaryElementwiseComparison); +OPENVINO_RTTI_DEFINITION(op::v1::Greater, "Greater", 1, op::util::BinaryElementwiseComparison); op::v1::Greater::Greater(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseComparison(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/greater_eq.cpp b/ngraph/core/src/op/greater_eq.cpp index 
c42720e5489..f4d6e7ab297 100644 --- a/ngraph/core/src/op/greater_eq.cpp +++ b/ngraph/core/src/op/greater_eq.cpp @@ -51,7 +51,7 @@ bool evaluate_greater_equal(const HostTensorPtr& arg0, //---------------------------------- v1 ---------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::GreaterEqual, "GreaterEqual", 1, op::util::BinaryElementwiseComparison); +OPENVINO_RTTI_DEFINITION(op::v1::GreaterEqual, "GreaterEqual", 1, op::util::BinaryElementwiseComparison); op::v1::GreaterEqual::GreaterEqual(const Output& arg0, const Output& arg1, diff --git a/ngraph/core/src/op/grn.cpp b/ngraph/core/src/op/grn.cpp index 98cfb19e465..960ea235e3a 100644 --- a/ngraph/core/src/op/grn.cpp +++ b/ngraph/core/src/op/grn.cpp @@ -16,7 +16,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::GRN, "GRN", 0); +OPENVINO_RTTI_DEFINITION(op::v0::GRN, "GRN", 0); op::v0::GRN::GRN(const Output& data, float bias) : Op({data}), m_bias(bias) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/group_conv.cpp b/ngraph/core/src/op/group_conv.cpp index f43c7aa0d0b..dba5f693adc 100644 --- a/ngraph/core/src/op/group_conv.cpp +++ b/ngraph/core/src/op/group_conv.cpp @@ -22,10 +22,10 @@ using namespace ngraph; // v1::GroupConvolution //------------------------------------------------------------------------------ -NGRAPH_RTTI_DEFINITION(op::v1::GroupConvolution, "GroupConvolution", 1); +OPENVINO_RTTI_DEFINITION(op::v1::GroupConvolution, "GroupConvolution", 1); shared_ptr op::v1::GroupConvolution::get_default_value() const { - return op::Constant::create(get_element_type(), get_shape(), {0}); + return op::v0::Constant::create(get_element_type(), get_shape(), {0}); } op::v1::GroupConvolution::GroupConvolution(const Output& data_batch, @@ -249,7 +249,7 @@ shared_ptr op::v1::GroupConvolution::clone_with_new_inputs(const OutputVec // v1::GroupConvolutionBackpropData //------------------------------------------------------------------------------ 
-NGRAPH_RTTI_DEFINITION(op::v1::GroupConvolutionBackpropData, "GroupConvolutionBackpropData", 1); +OPENVINO_RTTI_DEFINITION(op::v1::GroupConvolutionBackpropData, "GroupConvolutionBackpropData", 1); op::v1::GroupConvolutionBackpropData::GroupConvolutionBackpropData() : Op(), @@ -371,7 +371,7 @@ const PartialShape op::v1::GroupConvolutionBackpropData::get_convolution_output_ void op::v1::GroupConvolutionBackpropData::set_output_shape(const Shape& shape) { this->input(2).replace_source_output( - op::Constant::create(this->get_input_element_type(2), Shape{shape.size()}, shape)->output(0)); + op::v0::Constant::create(this->get_input_element_type(2), Shape{shape.size()}, shape)->output(0)); } void op::v1::GroupConvolutionBackpropData::infer_conv_backprop_output_spatial_shape( @@ -393,7 +393,7 @@ void op::v1::GroupConvolutionBackpropData::infer_conv_backprop_output_spatial_sh int64_t val = strides[i] * (input_data_shape[i].get_length() - 1) + dilations[i] * (filters_shape[i].get_length() - 1) + 1 - pads_begin[i] - pads_end[i] + output_padding[i]; - output_spatial_shape.push_back(val); + output_spatial_shape.emplace_back(val); } else { output_spatial_shape.push_back(Dimension::dynamic()); } diff --git a/ngraph/core/src/op/gru_cell.cpp b/ngraph/core/src/op/gru_cell.cpp index a17ba0f834a..aef6257695d 100644 --- a/ngraph/core/src/op/gru_cell.cpp +++ b/ngraph/core/src/op/gru_cell.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::v3::GRUCell::type_info; +OPENVINO_RTTI_DEFINITION(op::v3::GRUCell, "GRUCell", 1, op::util::RNNCellBase); op::v3::GRUCell::GRUCell() : m_linear_before_reset(false) { m_activations = {"sigmoid", "tanh"}; @@ -172,9 +172,9 @@ void op::v3::GRUCell::validate_and_infer_types() { void op::v3::GRUCell::add_default_bias_input() { Output B = - op::Constant::create(get_input_element_type(0), - Shape{(s_gates_count + m_linear_before_reset) * get_hidden_size()}, - vector((s_gates_count + m_linear_before_reset) * 
get_hidden_size(), 0.f)); + op::v0::Constant::create(get_input_element_type(0), + Shape{(s_gates_count + m_linear_before_reset) * get_hidden_size()}, + vector((s_gates_count + m_linear_before_reset) * get_hidden_size(), 0.f)); set_argument(4, B); } diff --git a/ngraph/core/src/op/gru_sequence.cpp b/ngraph/core/src/op/gru_sequence.cpp index cbbae4895bb..1f980134ebe 100644 --- a/ngraph/core/src/op/gru_sequence.cpp +++ b/ngraph/core/src/op/gru_sequence.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v5::GRUSequence, "GRUSequence", 5); +OPENVINO_RTTI_DEFINITION(op::v5::GRUSequence, "GRUSequence", 5); op::v5::GRUSequence::GRUSequence() : m_direction(op::RecurrentSequenceDirection::FORWARD), diff --git a/ngraph/core/src/op/hard_sigmoid.cpp b/ngraph/core/src/op/hard_sigmoid.cpp index d397831c962..5b5c9cda6e4 100644 --- a/ngraph/core/src/op/hard_sigmoid.cpp +++ b/ngraph/core/src/op/hard_sigmoid.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::HardSigmoid, "HardSigmoid", 0); +OPENVINO_RTTI_DEFINITION(op::v0::HardSigmoid, "HardSigmoid", 0); op::v0::HardSigmoid::HardSigmoid() : Op() {} @@ -34,7 +34,7 @@ void op::v0::HardSigmoid::validate_and_infer_types() { if (alpha_pshape.is_static()) { const auto alpha_shape = alpha_pshape.to_shape(); NODE_VALIDATION_CHECK(this, - is_scalar(alpha_shape), + ngraph::is_scalar(alpha_shape), "A scalar is expected for the 'alpha' input. Got: ", alpha_shape); } @@ -42,7 +42,7 @@ void op::v0::HardSigmoid::validate_and_infer_types() { if (beta_pshape.is_static()) { const auto beta_shape = beta_pshape.to_shape(); NODE_VALIDATION_CHECK(this, - is_scalar(beta_shape), + ngraph::is_scalar(beta_shape), "A scalar is expected for the 'beta' input. 
Got: ", beta_shape); } diff --git a/ngraph/core/src/op/hsigmoid.cpp b/ngraph/core/src/op/hsigmoid.cpp index 32bc9e39b9b..3757544b873 100644 --- a/ngraph/core/src/op/hsigmoid.cpp +++ b/ngraph/core/src/op/hsigmoid.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v5::HSigmoid, "HSigmoid", 5); +OPENVINO_RTTI_DEFINITION(op::v5::HSigmoid, "HSigmoid", 5, op::util::UnaryElementwiseArithmetic); op::v5::HSigmoid::HSigmoid(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/hswish.cpp b/ngraph/core/src/op/hswish.cpp index ffb67eebd76..8dda829655e 100644 --- a/ngraph/core/src/op/hswish.cpp +++ b/ngraph/core/src/op/hswish.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v4::HSwish, "HSwish", 4); +OPENVINO_RTTI_DEFINITION(op::v4::HSwish, "HSwish", 4, op::util::UnaryElementwiseArithmetic); op::v4::HSwish::HSwish(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/idft.cpp b/ngraph/core/src/op/idft.cpp index 72b6ec077e4..8c498fc22cc 100644 --- a/ngraph/core/src/op/idft.cpp +++ b/ngraph/core/src/op/idft.cpp @@ -18,7 +18,7 @@ using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v7::IDFT, "IDFT", 7, util::FFTBase); +OPENVINO_RTTI_DEFINITION(op::v7::IDFT, "IDFT", 7, util::FFTBase); op::v7::IDFT::IDFT(const Output& data, const Output& axes) : FFTBase(data, axes) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/if.cpp b/ngraph/core/src/op/if.cpp index ea8c76d0c8e..fe4a49c7778 100644 --- a/ngraph/core/src/op/if.cpp +++ b/ngraph/core/src/op/if.cpp @@ -18,7 +18,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(ngraph::op::v8::If, "If", 8, MultiSubGraphOp); +OPENVINO_RTTI_DEFINITION(ngraph::op::v8::If, "If", 8, MultiSubGraphOp); op::v8::If::If() : MultiSubGraphOp(2) {} @@ -49,11 +49,11 @@ static 
ngraph::PartialShape resolve_shape(const ngraph::PartialShape& then_pshap if ((*then_it).is_dynamic() || (*else_it).is_dynamic()) { new_dims.push_back(Dimension::dynamic()); } else if (*then_it == *else_it) { - new_dims.push_back(Dimension(*then_it)); + new_dims.emplace_back(*then_it); } else { auto dim_min = std::min((*then_it).get_min_length(), (*else_it).get_min_length()); auto dim_max = std::max((*then_it).get_min_length(), (*else_it).get_min_length()); - new_dims.push_back(Dimension(dim_min, dim_max)); + new_dims.emplace_back(dim_min, dim_max); } } @@ -125,7 +125,7 @@ void op::v8::If::validate_and_infer_types() { // shape and type inference for outputs from If operations for (const auto& output_descr : m_output_descriptions[cond_index]) { auto body_value = body->get_results().at(output_descr->m_body_value_index)->input_value(0); - auto body_value_partial_shape = body_value.get_partial_shape(); + const auto& body_value_partial_shape = body_value.get_partial_shape(); set_output_type(output_descr->m_output_index, body_value.get_element_type(), body_value_partial_shape); } } else // condition is non constant @@ -236,8 +236,8 @@ bool op::v8::If::has_evaluate() const { } void op::v8::If::set_input(const Output& value, - const std::shared_ptr& then_parameter, - const std::shared_ptr& else_parameter) { + const std::shared_ptr& then_parameter, + const std::shared_ptr& else_parameter) { NGRAPH_CHECK(then_parameter != nullptr || else_parameter != nullptr, "Missing parameters! 
Both parameters are nullptr!"); auto then_param_index = m_bodies[THEN_BODY_INDEX]->get_parameter_index(then_parameter); @@ -253,8 +253,8 @@ void op::v8::If::set_input(const Output& value, set_invariant_inputs(value, {then_parameter, else_parameter}); } -Output op::v8::If::set_output(const std::shared_ptr& then_result, - const std::shared_ptr& else_result) { +Output op::v8::If::set_output(const std::shared_ptr& then_result, + const std::shared_ptr& else_result) { NGRAPH_CHECK(then_result != nullptr, "Incorrect result in \"then_body\"! Result cant be \'nullptr\'"); NGRAPH_CHECK(else_result != nullptr, "Incorrect result in \"else_body\"! Result cant be \'nullptr\'"); auto then_result_id = m_bodies[THEN_BODY_INDEX]->get_result_index(then_result); @@ -264,4 +264,4 @@ Output op::v8::If::set_output(const std::shared_ptr& then_result, NGRAPH_CHECK(else_result_id != -1, "Missing result ", else_result->get_friendly_name(), "in \'then_body\'!"); return set_body_outputs({then_result, else_result}); -} \ No newline at end of file +} diff --git a/ngraph/core/src/op/interpolate.cpp b/ngraph/core/src/op/interpolate.cpp index 35a71268dd9..6c725c2b012 100644 --- a/ngraph/core/src/op/interpolate.cpp +++ b/ngraph/core/src/op/interpolate.cpp @@ -17,11 +17,9 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Interpolate, "Interpolate", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Interpolate, "Interpolate", 0); -op::v0::Interpolate::Interpolate(const Output& image, - const Output& output_shape, - const op::v0::InterpolateAttrs& attrs) +op::v0::Interpolate::Interpolate(const Output& image, const Output& output_shape, const Attributes& attrs) : Op({image, output_shape}), m_attrs(attrs) { constructor_validate_and_infer_types(); @@ -69,7 +67,7 @@ shared_ptr op::v0::Interpolate::clone_with_new_inputs(const OutputVector& return make_shared(new_args.at(0), new_args.at(1), m_attrs); } -std::ostream& ngraph::operator<<(std::ostream& s, const 
op::v0::Interpolate::InterpolateMode& type) { +std::ostream& ov::operator<<(std::ostream& s, const op::v0::Interpolate::InterpolateMode& type) { return s << as_string(type); } @@ -92,7 +90,7 @@ constexpr DiscreteTypeInfo AttributeAdapter& image, const Output& output_shape, @@ -481,22 +479,6 @@ bool op::v4::Interpolate::has_evaluate() const { return false; } -std::ostream& ngraph::operator<<(std::ostream& s, const op::v4::Interpolate::InterpolateMode& type) { - return s << as_string(type); -} - -std::ostream& ngraph::operator<<(std::ostream& s, const op::v4::Interpolate::ShapeCalcMode& type) { - return s << as_string(type); -} - -std::ostream& ngraph::operator<<(std::ostream& s, const op::v4::Interpolate::CoordinateTransformMode& type) { - return s << as_string(type); -} - -std::ostream& ngraph::operator<<(std::ostream& s, const op::v4::Interpolate::NearestMode& type) { - return s << as_string(type); -} - namespace ov { template <> NGRAPH_API EnumNames& @@ -553,4 +535,20 @@ EnumNames::get() { } constexpr DiscreteTypeInfo AttributeAdapter::type_info; + +std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::InterpolateMode& type) { + return s << as_string(type); +} + +std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::ShapeCalcMode& type) { + return s << as_string(type); +} + +std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::CoordinateTransformMode& type) { + return s << as_string(type); +} + +std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::NearestMode& type) { + return s << as_string(type); +} } // namespace ov diff --git a/ngraph/core/src/op/less.cpp b/ngraph/core/src/op/less.cpp index 5aaebdd3e54..f50b2f44468 100644 --- a/ngraph/core/src/op/less.cpp +++ b/ngraph/core/src/op/less.cpp @@ -50,7 +50,7 @@ bool evaluate_less(const HostTensorPtr& arg0, // ----------------------------- v1 -------------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::Less, "Less", 1, 
op::util::BinaryElementwiseComparison); +OPENVINO_RTTI_DEFINITION(op::v1::Less, "Less", 1, op::util::BinaryElementwiseComparison); op::v1::Less::Less(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseComparison(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/less_eq.cpp b/ngraph/core/src/op/less_eq.cpp index 02c2f1c069e..864045e7f6e 100644 --- a/ngraph/core/src/op/less_eq.cpp +++ b/ngraph/core/src/op/less_eq.cpp @@ -13,7 +13,7 @@ using namespace ngraph; // ---------------------------------- v1 --------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::LessEqual, "LessEqual", 1, op::util::BinaryElementwiseComparison); +OPENVINO_RTTI_DEFINITION(op::v1::LessEqual, "LessEqual", 1, op::util::BinaryElementwiseComparison); op::v1::LessEqual::LessEqual(const Output& arg0, const Output& arg1, diff --git a/ngraph/core/src/op/log.cpp b/ngraph/core/src/op/log.cpp index 81cba930c0f..e88f414159a 100644 --- a/ngraph/core/src/op/log.cpp +++ b/ngraph/core/src/op/log.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Log, "Log", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Log, "Log", 0, op::util::UnaryElementwiseArithmetic); op::Log::Log(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/log_softmax.cpp b/ngraph/core/src/op/log_softmax.cpp index 1ccf20418b7..4561cf0b4de 100644 --- a/ngraph/core/src/op/log_softmax.cpp +++ b/ngraph/core/src/op/log_softmax.cpp @@ -10,7 +10,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v5::LogSoftmax, "LogSoftmax", 5); +OPENVINO_RTTI_DEFINITION(op::v5::LogSoftmax, "LogSoftmax", 5); op::v5::LogSoftmax::LogSoftmax(const Output& arg, const int64_t axis) : Op({arg}), m_axis(axis) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/loop.cpp b/ngraph/core/src/op/loop.cpp index 8ab9675b86b..0bd101dd2c4 100644 --- 
a/ngraph/core/src/op/loop.cpp +++ b/ngraph/core/src/op/loop.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v5::Loop, "Loop", 5); +OPENVINO_RTTI_DEFINITION(op::v5::Loop, "Loop", 5, op::util::SubGraphOp); op::v5::Loop::Loop(const Output& trip_count, const Output& execution_condition) : SubGraphOp() { set_argument(0, trip_count); @@ -178,7 +178,7 @@ void op::v5::Loop::validate_and_infer_types() { body_parameter->set_partial_shape(input_partial_shape); } else if (auto invariant_input_description = - ov::as_type_ptr(input_description)) { + ov::as_type_ptr(input_description)) { auto body_parameter = m_bodies[0]->get_parameters().at(invariant_input_description->m_body_parameter_index); auto body_param_partial_shape = body_parameter->get_partial_shape(); @@ -198,7 +198,7 @@ void op::v5::Loop::validate_and_infer_types() { auto body_value = m_bodies[0]->get_results().at(output_description->m_body_value_index)->input_value(0); if (auto concat_output_description = - ov::as_type_ptr(output_description)) { + ov::as_type_ptr(output_description)) { const auto& body_value_partial_shape = body_value.get_partial_shape(); auto out_shape = body_value_partial_shape; if (zero_number_of_iter) { @@ -220,7 +220,7 @@ void op::v5::Loop::validate_and_infer_types() { } else if (auto body_output_description = - ov::as_type_ptr(output_description)) { + ov::as_type_ptr(output_description)) { const PartialShape& ps = body_value.get_partial_shape(); if (ps.is_dynamic()) { set_output_type(index, body_value.get_element_type(), ps); diff --git a/ngraph/core/src/op/lrn.cpp b/ngraph/core/src/op/lrn.cpp index a3884b726ad..828fd154bc2 100644 --- a/ngraph/core/src/op/lrn.cpp +++ b/ngraph/core/src/op/lrn.cpp @@ -14,10 +14,10 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::LRN, "LRN", 0); +OPENVINO_RTTI_DEFINITION(op::v0::LRN, "LRN", 0); op::LRN::LRN(const Output& arg, double alpha, double beta, double bias, size_t size) - : LRN(arg, 
op::Constant::create(element::i64, Shape{1}, {1}), alpha, beta, bias, size) { + : LRN(arg, op::v0::Constant::create(element::i64, Shape{1}, {1}), alpha, beta, bias, size) { add_provenance_group_member(input_value(1).get_node_shared_ptr()); } @@ -102,5 +102,5 @@ bool ngraph::op::v0::LRN::visit_attributes(AttributeVisitor& visitor) { shared_ptr op::LRN::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_LRN_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), m_alpha, m_beta, m_bias, m_size); + return make_shared(new_args.at(0), new_args.at(1), m_alpha, m_beta, m_bias, m_size); } diff --git a/ngraph/core/src/op/lstm_cell.cpp b/ngraph/core/src/op/lstm_cell.cpp index aad2148fc6d..724dd267994 100644 --- a/ngraph/core/src/op/lstm_cell.cpp +++ b/ngraph/core/src/op/lstm_cell.cpp @@ -17,8 +17,8 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::LSTMCell, "LSTMCell", 0, op::util::RNNCellBase); -NGRAPH_RTTI_DEFINITION(op::v4::LSTMCell, "LSTMCell", 4, op::util::RNNCellBase); +OPENVINO_RTTI_DEFINITION(op::v0::LSTMCell, "LSTMCell", 0, op::util::RNNCellBase); +OPENVINO_RTTI_DEFINITION(op::v4::LSTMCell, "LSTMCell", 4, op::util::RNNCellBase); op::v0::LSTMCell::LSTMCell() : m_input_forget(false), m_weights_format(LSTMWeightsFormat::IFCO) { m_activations = {"sigmoid", "tanh", "tanh"}; @@ -273,14 +273,15 @@ void op::v0::LSTMCell::validate_and_infer_types() { } Output op::v0::LSTMCell::get_default_bias_input() const { - return Output{ - op::Constant::create(get_input_element_type(0), Shape{s_gates_count * get_hidden_size()}, vector{0.f})}; + return Output{op::v0::Constant::create(get_input_element_type(0), + Shape{s_gates_count * get_hidden_size()}, + vector{0.f})}; } Output op::v0::LSTMCell::get_default_peepholes_input() const { - return Output{op::Constant::create(get_input_element_type(0), - Shape{s_peepholes_count * get_hidden_size()}, - vector{0.f})}; + return 
Output{op::v0::Constant::create(get_input_element_type(0), + Shape{s_peepholes_count * get_hidden_size()}, + vector{0.f})}; } shared_ptr op::v0::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const { @@ -511,8 +512,9 @@ void op::v4::LSTMCell::validate_and_infer_types() { } Output op::v4::LSTMCell::get_default_bias_input() const { - return Output{ - op::Constant::create(get_input_element_type(0), Shape{s_gates_count * get_hidden_size()}, vector{0.f})}; + return Output{op::v0::Constant::create(get_input_element_type(0), + Shape{s_gates_count * get_hidden_size()}, + vector{0.f})}; } shared_ptr op::v4::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const { diff --git a/ngraph/core/src/op/lstm_sequence.cpp b/ngraph/core/src/op/lstm_sequence.cpp index 5a1229986a6..0b30eddf153 100644 --- a/ngraph/core/src/op/lstm_sequence.cpp +++ b/ngraph/core/src/op/lstm_sequence.cpp @@ -16,8 +16,8 @@ using namespace ngraph; using namespace std; -NGRAPH_RTTI_DEFINITION(op::v0::LSTMSequence, "LSTMSequence", 0); -NGRAPH_RTTI_DEFINITION(op::v5::LSTMSequence, "LSTMSequence", 5); +OPENVINO_RTTI_DEFINITION(op::v0::LSTMSequence, "LSTMSequence", 0); +OPENVINO_RTTI_DEFINITION(op::v5::LSTMSequence, "LSTMSequence", 5); op::v0::LSTMSequence::LSTMSequence() : Op(), diff --git a/ngraph/core/src/op/parameter.cpp b/ngraph/core/src/op/parameter.cpp index 8a95f915fa3..415c1dbda00 100644 --- a/ngraph/core/src/op/parameter.cpp +++ b/ngraph/core/src/op/parameter.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Parameter, "Parameter", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Parameter, "Parameter", 0); op::Parameter::Parameter(const element::Type& element_type, const PartialShape& pshape) : m_partial_shape(pshape), diff --git a/ngraph/core/src/op/result.cpp b/ngraph/core/src/op/result.cpp index 237b65c7977..40ce23ceb8e 100644 --- a/ngraph/core/src/op/result.cpp +++ b/ngraph/core/src/op/result.cpp @@ -15,7 +15,7 @@ using namespace 
std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Result, "Result", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Result, "Result", 0); op::Result::Result(const Output& arg, bool needs_default_layout) : Op({arg}), diff --git a/ngraph/core/src/op/tensor_iterator.cpp b/ngraph/core/src/op/tensor_iterator.cpp index 9669769b8bc..2f1760111f6 100644 --- a/ngraph/core/src/op/tensor_iterator.cpp +++ b/ngraph/core/src/op/tensor_iterator.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::TensorIterator, "TensorIterator", 0, op::util::SubGraphOp); +OPENVINO_RTTI_DEFINITION(op::v0::TensorIterator, "TensorIterator", 0, op::util::SubGraphOp); op::v0::TensorIterator::TensorIterator(const OutputVector& values) : op::util::SubGraphOp(values) {} From 4978e8e6f866af68b63a553213b062d0ac70bc1c Mon Sep 17 00:00:00 2001 From: Sergey Lyubimtsev Date: Mon, 6 Sep 2021 11:08:26 +0300 Subject: [PATCH 17/52] Fix setup.py paths (#7345) * fix paths & setup.py install * fix linter issues * remove extra spaces * fix CI issues * remove extra lines * fix indent --- .../ie_bridges/python/wheel/setup.py | 56 +++++++++++++++---- 1 file changed, 44 insertions(+), 12 deletions(-) diff --git a/inference-engine/ie_bridges/python/wheel/setup.py b/inference-engine/ie_bridges/python/wheel/setup.py index a84280a8671..3a8827b388a 100644 --- a/inference-engine/ie_bridges/python/wheel/setup.py +++ b/inference-engine/ie_bridges/python/wheel/setup.py @@ -7,6 +7,7 @@ import sys import errno import subprocess # nosec import typing +import platform import multiprocessing from fnmatch import fnmatchcase from pathlib import Path @@ -19,18 +20,32 @@ from distutils import log from setuptools import setup, find_namespace_packages, Extension from setuptools.command.build_ext import build_ext from setuptools.command.build_clib import build_clib +from setuptools.command.install import install from decouple import config WHEEL_LIBS_INSTALL_DIR = os.path.join('openvino', 'libs') 
WHEEL_LIBS_PACKAGE = 'openvino.libs' PYTHON_VERSION = f'python{sys.version_info.major}.{sys.version_info.minor}' +LIBS_DIR = 'bin' if platform.system() == 'Windows' else 'lib' +CONFIG = 'Release' if platform.system() == 'Windows' else '' + +machine = platform.machine() +if machine == 'x86_64' or machine == 'AMD64': + ARCH = 'intel64' +elif machine == 'X86': + ARCH = 'ia32' +elif machine == 'arm': + ARCH = 'arm' +elif machine == 'aarch64': + ARCH = 'arm64' + # The following variables can be defined in environment or .env file CMAKE_BUILD_DIR = config('CMAKE_BUILD_DIR', '.') -CORE_LIBS_DIR = config('CORE_LIBS_DIR', 'deployment_tools/inference_engine/lib/intel64') -PLUGINS_LIBS_DIR = config('PLUGINS_LIBS_DIR', 'deployment_tools/inference_engine/lib/intel64') +CORE_LIBS_DIR = config('CORE_LIBS_DIR', f'deployment_tools/inference_engine/{LIBS_DIR}/{ARCH}/{CONFIG}') +PLUGINS_LIBS_DIR = config('PLUGINS_LIBS_DIR', f'deployment_tools/inference_engine/{LIBS_DIR}/{ARCH}/{CONFIG}') NGRAPH_LIBS_DIR = config('NGRAPH_LIBS_DIR', 'deployment_tools/ngraph/lib') -TBB_LIBS_DIR = config('TBB_LIBS_DIR', 'deployment_tools/inference_engine/external/tbb/lib') +TBB_LIBS_DIR = config('TBB_LIBS_DIR', f'deployment_tools/inference_engine/external/tbb/{LIBS_DIR}') PY_PACKAGES_DIR = config('PY_PACKAGES_DIR', f'python/{PYTHON_VERSION}') LIBS_RPATH = '$ORIGIN' if sys.platform == 'linux' else '@loader_path' @@ -159,10 +174,10 @@ class CustomBuild(build): # if setup.py is directly called use CMake to build product if CMAKE_BUILD_DIR == '.': - openvino_root_dir = os.path.normpath(os.path.join(CMAKE_BUILD_DIR, '../../../../')) - self.announce('Configuring cmake project', level=3) - - self.spawn(['cmake', '-H' + openvino_root_dir, '-B' + self.build_temp, + # set path to the root of OpenVINO CMakeList file + openvino_root_dir = Path(__file__).resolve().parents[4] + self.announce(f'Configuring cmake project: {openvino_root_dir}', level=3) + self.spawn(['cmake', '-H' + str(openvino_root_dir), '-B' + 
self.build_temp, '-DCMAKE_BUILD_TYPE={type}'.format(type=self.config), '-DENABLE_PYTHON=ON', '-DNGRAPH_ONNX_FRONTEND_ENABLE=ON']) @@ -171,8 +186,8 @@ class CustomBuild(build): self.spawn(['cmake', '--build', self.build_temp, '--config', self.config, '-j', str(self.jobs)]) CMAKE_BUILD_DIR = self.build_temp - self.run_command('build_clib') + build.run(self) # Copy extra package_data content filtered by find_packages dst = Path(self.build_lib) @@ -237,6 +252,10 @@ class CopyExt(build_ext): """Copy extension files to the build directory""" def run(self): + if len(self.extensions) == 1: + self.run_command('build_clib') + self.extensions = [] + self.extensions = find_prebuilt_extensions(get_dir_list(PY_INSTALL_CFG)) for extension in self.extensions: if not isinstance(extension, PrebuiltExtension): raise DistutilsSetupError(f'copy_ext can accept PrebuiltExtension only, but got {extension.name}') @@ -251,10 +270,17 @@ class CopyExt(build_ext): elif sys.platform == 'darwin': rpath = os.path.join('@loader_path', rpath, WHEEL_LIBS_INSTALL_DIR) set_rpath(rpath, os.path.realpath(src)) - copy_file(src, dst, verbose=self.verbose, dry_run=self.dry_run) +class CustomInstall(install): + """Enable build_clib during the installation""" + + def run(self): + self.run_command('build') + install.run(self) + + class CustomClean(clean): """Clean up staging directories""" @@ -350,6 +376,8 @@ def find_prebuilt_extensions(search_dirs): ext_pattern = '**/*.so' for base_dir in search_dirs: for path in Path(base_dir).glob(ext_pattern): + if path.match('openvino/libs/*'): + continue relpath = path.relative_to(base_dir) if relpath.parent != '.': package_names = str(relpath.parent).split(os.path.sep) @@ -358,6 +386,8 @@ def find_prebuilt_extensions(search_dirs): package_names.append(path.name.split('.', 1)[0]) name = '.'.join(package_names) extensions.append(PrebuiltExtension(name, sources=[str(path)])) + if not extensions: + extensions.append(PrebuiltExtension('openvino', 
sources=[str('setup.py')])) return extensions @@ -413,12 +443,13 @@ if os.path.exists(package_license): packages = find_namespace_packages(get_package_dir(PY_INSTALL_CFG)) package_data: typing.Dict[str, list] = {} - +pkg_name = config('WHEEL_PACKAGE_NAME', 'openvino') +ext_modules = find_prebuilt_extensions(get_dir_list(PY_INSTALL_CFG)) if pkg_name == 'openvino' else [] setup( version=config('WHEEL_VERSION', '0.0.0'), author_email=config('WHEEL_AUTHOR_EMAIL', 'openvino_pushbot@intel.com'), - name=config('WHEEL_PACKAGE_NAME', 'openvino'), + name=pkg_name, license=config('WHEEL_LICENCE_TYPE', 'OSI Approved :: Apache Software License'), author=config('WHEEL_AUTHOR', 'Intel Corporation'), description=config('WHEEL_DESC', 'Inference Engine Python* API'), @@ -429,11 +460,12 @@ setup( url=config('WHEEL_URL', 'https://docs.openvinotoolkit.org/latest/index.html'), cmdclass={ 'build': CustomBuild, + 'install': CustomInstall, 'build_clib': PrepareLibs, 'build_ext': CopyExt, 'clean': CustomClean, }, - ext_modules=find_prebuilt_extensions(get_dir_list(PY_INSTALL_CFG)), + ext_modules=ext_modules, packages=packages, package_dir={'': get_package_dir(PY_INSTALL_CFG)}, package_data=package_data, From d82fed9527ac8e7c84050611ca56539a6b40e766 Mon Sep 17 00:00:00 2001 From: Anastasia Popova Date: Mon, 6 Sep 2021 11:49:27 +0300 Subject: [PATCH 18/52] RandomUniform reference implementation. (#7012) * Added RandomUniform reference implementation. * Corrected comments. * Small correction. * Code style correction. * Added has_evaluate() method. * Added comments, added names to consts. * Small fix. * Replaced arrays with vectors. * Apply suggestions from code review Co-authored-by: Ilya Churaev * Code refactoring. * Corrected tests, code style. * Added comment. * Added comments. * Temporarily added debug output. * Temporarily added debug output. * Removed debug output. * Added comment. * Added comment. * Enabled state saving for RandomUniform. * Code style. 
* Used to template to convert types. * Added comments. Co-authored-by: Ilya Churaev --- .../op_reference/random_uniform.cpp | 205 +++++++++++ ...isable_random_uniform_constant_folding.hpp | 27 ++ .../common_optimizations.cpp | 2 + ...isable_random_uniform_constant_folding.cpp | 24 ++ .../core/include/ngraph/op/random_uniform.hpp | 16 +- .../runtime/reference/random_uniform.hpp | 39 ++ .../src/runtime/reference/random_uniform.cpp | 333 ++++++++++++++++++ ngraph/core/src/op/random_uniform.cpp | 94 ++++- 8 files changed, 737 insertions(+), 3 deletions(-) create mode 100644 docs/template_plugin/tests/functional/op_reference/random_uniform.cpp create mode 100644 inference-engine/src/transformations/include/transformations/common_optimizations/disable_random_uniform_constant_folding.hpp create mode 100644 inference-engine/src/transformations/src/transformations/common_optimizations/disable_random_uniform_constant_folding.cpp create mode 100644 ngraph/core/reference/include/ngraph/runtime/reference/random_uniform.hpp create mode 100644 ngraph/core/reference/src/runtime/reference/random_uniform.cpp diff --git a/docs/template_plugin/tests/functional/op_reference/random_uniform.cpp b/docs/template_plugin/tests/functional/op_reference/random_uniform.cpp new file mode 100644 index 00000000000..2e454e4a145 --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/random_uniform.cpp @@ -0,0 +1,205 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include + +#include "base_reference_test.hpp" +#include "ngraph/opsets/opset8.hpp" +#include "ngraph/util.hpp" + +using namespace ngraph; + +namespace reference_tests { +namespace { + +struct RandomUniformParams { + RandomUniformParams(const std::vector& paramOutShape, + const Tensor& paramMinValue, + const Tensor& paramMaxValue, + ngraph::element::Type paramOutType, + int64_t paramGlobalSeed, + int64_t paramOpSeed, + const Tensor& paramExpected, + const 
std::string& test_name) + : out_shape(paramOutShape), + min_val(paramMinValue), + max_val(paramMaxValue), + out_type(paramOutType), + global_seed(paramGlobalSeed), + op_seed(paramOpSeed), + expected(paramExpected), + test_case_name(test_name) {} + std::vector out_shape; + Tensor min_val; + Tensor max_val; + ngraph::element::Type out_type; + int64_t global_seed; + int64_t op_seed; + Tensor expected; + std::string test_case_name; +}; + +class ReferenceRandomUniformLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.out_shape, + params.min_val, + params.max_val, + params.out_type, + params.global_seed, + params.op_seed); + inputData = {params.min_val.data, params.max_val.data}; + refOutData = {params.expected.data}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + return param.test_case_name; + } + +private: + static std::shared_ptr CreateFunction(const std::vector& out_shape, + const Tensor& min_val, + const Tensor& max_val, + const ngraph::element::Type& out_type, + int64_t global_seed, + int64_t op_seed) { + const auto min_val_param = std::make_shared(min_val.type, min_val.shape); + const auto max_val_param = std::make_shared(max_val.type, max_val.shape); + auto out_shape_ = std::make_shared(element::i64, Shape{out_shape.size()}, out_shape); + + return std::make_shared(NodeVector{std::make_shared(out_shape_, + min_val_param, + max_val_param, + out_type, + global_seed, + op_seed)}, + ParameterVector{min_val_param, max_val_param}); + } +}; + +TEST_P(ReferenceRandomUniformLayerTest, RandomUniformWithHardcodedRefs) { + Exec(); +} + +} // namespace + +// Reference values for the following tests are obtained from single layer TensorFlow model with tf.random.uniform(). 
+INSTANTIATE_TEST_SUITE_P( + smoke_RandomUniform_With_Hardcoded_Refs, + ReferenceRandomUniformLayerTest, + ::testing::Values( + RandomUniformParams(std::vector{3, 2, 4}, + Tensor{{1}, element::f32, std::vector{0}}, + Tensor{{1}, element::f32, std::vector{1}}, + element::Type_t::f32, + 150, + 10, + Tensor{{3, 2, 4}, + element::f32, + std::vector{0.70112360, 0.30539632, 0.93931055, 0.94560349, 0.11694777, + 0.50770056, 0.51971972, 0.22727466, 0.99137402, 0.35519040, + 0.82692313, 0.59864855, 0.31364107, 0.57481313, 0.41399086, + 0.96308255, 0.37140799, 0.85253167, 0.09358585, 0.08200955, + 0.23655081, 0.81056309, 0.74226606, 0.76106691}}, + "float32_default_min_max"), + RandomUniformParams(std::vector{3, 2, 4}, + Tensor{{1}, element::f16, std::vector{0}}, + Tensor{{1}, element::f16, std::vector{1}}, + element::Type_t::f16, + 150, + 10, + Tensor{{3, 2, 4}, + element::f16, + std::vector{0.60449219, 0.80664062, 0.83203125, 0.38378906, 0.03613281, + 0.08300781, 0.54394531, 0.83398438, 0.33593750, 0.71972656, + 0.15429688, 0.12890625, 0.34765625, 0.86914062, 0.41308594, + 0.57226562, 0.57421875, 0.93945312, 0.65527344, 0.82226562, + 0.82421875, 0.13281250, 0.64355469, 0.66015625}}, + "float16_default_min_max"), + RandomUniformParams(std::vector{3, 2, 4}, + Tensor{{1}, element::f32, std::vector{-650}}, + Tensor{{1}, element::f32, std::vector{450}}, + element::Type_t::f32, + 150, + 10, + Tensor{{3, 2, 4}, + element::f32, + std::vector{121.23596191, -314.06405640, 383.24157715, 390.16381836, + -521.35742188, -91.52935791, -78.30828857, -399.99786377, + 440.51147461, -259.29055786, 259.61541748, 8.51342773, + -304.99481201, -17.70556641, -194.61004639, 409.39074707, + -241.45120239, 287.78485107, -547.05554199, -559.78948975, + -389.79409790, 241.61938477, 166.49267578, 187.17358398}}, + "float32_non_default_min_max"), + RandomUniformParams(std::vector{3, 2, 4}, + Tensor{{1}, element::f16, std::vector{-1.5}}, + Tensor{{1}, element::f16, std::vector{-1.0}}, + 
element::Type_t::f16, + 150, + 10, + Tensor{{3, 2, 4}, + element::f16, + std::vector{-1.19726562, -1.09667969, -1.08398438, -1.30859375, -1.48242188, + -1.45898438, -1.22851562, -1.08300781, -1.33203125, -1.14062500, + -1.42285156, -1.43554688, -1.32617188, -1.06542969, -1.29296875, + -1.21386719, -1.21289062, -1.03027344, -1.17187500, -1.08886719, + -1.08789062, -1.43359375, -1.17773438, -1.16992188}}, + "float16_non_default_min_max"), + RandomUniformParams(std::vector{2, 3, 4}, + Tensor{{1}, element::i32, std::vector{-100}}, + Tensor{{1}, element::i32, std::vector{50}}, + element::Type_t::i32, + 100, + 350, + Tensor{{2, 3, 4}, + element::i32, + std::vector{ + 22, -56, -33, -89, -98, -33, -3, -48, -82, 5, -66, 21, + 29, -42, -73, -37, 3, 36, -35, 20, -11, -8, -78, 47, + }}, + "int32"), + RandomUniformParams(std::vector{5, 4, 3}, + Tensor{{1}, element::i64, std::vector{-2600}}, + Tensor{{1}, element::i64, std::vector{3700}}, + element::Type_t::i64, + 755, + 951, + Tensor{{5, 4, 3}, + element::i64, + std::vector{ + 2116, -1581, 2559, -339, -1660, 519, 90, 2027, -210, 3330, 1831, -1737, + 2683, 2661, 3473, 1220, 3534, -2384, 2199, 1935, 499, 2861, 2743, 3223, + -531, -836, -65, 3435, 632, 1765, 2613, 1891, 1698, 3069, 169, -792, + -32, 2976, -1552, -2588, 3327, -1756, 2637, -1084, 3567, -778, -1465, 2967, + 1242, 2672, -1585, -2271, 3536, -1502, 400, 2241, 3126, 908, 1073, -2110}}, + "int64"), + RandomUniformParams(std::vector{7, 3}, + Tensor{{1}, element::bf16, std::vector{0}}, + Tensor{{1}, element::bf16, std::vector{1}}, + element::Type_t::bf16, + 4978, + 5164, + Tensor{{7, 3}, + element::bf16, + std::vector{0.8984375, 0.84375, 0.1640625, 0.1875, 0.46875, 0.6875, + 0.5234375, 0.3046875, 0.9140625, 0.453125, 0.953125, 0.328125, + 0.359375, 0.1875, 0.9453125, 0.390625, 0.21875, 0.9921875, + 0.8203125, 0.453125, 0.875}}, + "bfloat16_default_min_max"), + RandomUniformParams(std::vector{7, 3}, + Tensor{{1}, element::bf16, std::vector{-150}}, + Tensor{{1}, 
element::bf16, std::vector{200}}, + element::Type_t::bf16, + 4978, + 5164, + Tensor{{7, 3}, + element::bf16, + std::vector{164, 146, -92.5, -84.5, 14, 90, 33, -43.5, 170, 8, 182, + -35, -24, -84.5, 180, -14, -73.5, 198, 138, 8, 156}}, + "bfloat16_non_default_min_max")), + ReferenceRandomUniformLayerTest::getTestCaseName); +} // namespace reference_tests diff --git a/inference-engine/src/transformations/include/transformations/common_optimizations/disable_random_uniform_constant_folding.hpp b/inference-engine/src/transformations/include/transformations/common_optimizations/disable_random_uniform_constant_folding.hpp new file mode 100644 index 00000000000..e9c59587eb6 --- /dev/null +++ b/inference-engine/src/transformations/include/transformations/common_optimizations/disable_random_uniform_constant_folding.hpp @@ -0,0 +1,27 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +namespace ngraph { +namespace pass { + +class DisableRandomUniformConstantFolding; + +} // namespace pass +} // namespace ngraph + +/** + * @ingroup ie_transformation_common_api + * @brief Disables ConstantFolding for RandomUniform operation. It is required as RandomUniform + * should generate new sequence each run. 
+ */ +class ngraph::pass::DisableRandomUniformConstantFolding : public ngraph::pass::MatcherPass { +public: + NGRAPH_RTTI_DECLARATION; + DisableRandomUniformConstantFolding(); +}; diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp index 253c4f113ab..4e176543504 100644 --- a/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp +++ b/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp @@ -27,6 +27,7 @@ #include "transformations/common_optimizations/hswish_fusion.hpp" #include "transformations/common_optimizations/convert_quantize_dequantize.hpp" #include "transformations/common_optimizations/relu_fake_quantize_fusion.hpp" +#include "transformations/common_optimizations/disable_random_uniform_constant_folding.hpp" #include "transformations/common_optimizations/add_fake_quantize_fusion.hpp" #include "transformations/common_optimizations/mul_fake_quantize_fusion.hpp" #include "transformations/common_optimizations/clamp_fusion.hpp" @@ -88,6 +89,7 @@ bool ngraph::pass::CommonOptimizations::run_on_function(std::shared_ptr(); + manager.register_pass(); manager.register_pass(); manager.register_pass(); manager.register_pass(); // Resolves dynamism (replaces NonZero), CF needed diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/disable_random_uniform_constant_folding.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/disable_random_uniform_constant_folding.cpp new file mode 100644 index 00000000000..7c93745d3fa --- /dev/null +++ b/inference-engine/src/transformations/src/transformations/common_optimizations/disable_random_uniform_constant_folding.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: 
Apache-2.0 +// + +#include "transformations/common_optimizations/disable_random_uniform_constant_folding.hpp" + +#include +#include +#include +#include + +NGRAPH_RTTI_DEFINITION(ngraph::pass::DisableRandomUniformConstantFolding, "DisableRandomUniformConstantFolding", 0); + +ngraph::pass::DisableRandomUniformConstantFolding::DisableRandomUniformConstantFolding() { + auto random_uniform = pattern::wrap_type(); + + ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) { + disable_constant_folding(m.get_match_root()); + return true; + }; + + auto m = std::make_shared(random_uniform, "DisableRandomUniformConstantFolding"); + this->register_matcher(m, callback); +} diff --git a/ngraph/core/include/ngraph/op/random_uniform.hpp b/ngraph/core/include/ngraph/op/random_uniform.hpp index 242294cc748..f20ddecccab 100644 --- a/ngraph/core/include/ngraph/op/random_uniform.hpp +++ b/ngraph/core/include/ngraph/op/random_uniform.hpp @@ -30,8 +30,8 @@ public: const Output& min_val, const Output& max_val, const ngraph::element::Type& out_type, - uint64_t global_seed, - uint64_t op_seed); + uint64_t global_seed = 0, + uint64_t op_seed = 0); void validate_and_infer_types() override; @@ -39,6 +39,11 @@ public: std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + /// \return Turns off constant folding for RandomUniform operation. + bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override { + return false; + } + /// \return The output tensor type. 
const ngraph::element::Type& get_out_type() const { return m_output_type; @@ -63,10 +68,17 @@ public: m_op_seed = seed2; } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + + bool has_evaluate() const override; + protected: ngraph::element::Type m_output_type; uint64_t m_global_seed; uint64_t m_op_seed; + + mutable std::mutex m_state_mutex; + mutable std::pair m_state; }; } // namespace v8 } // namespace op diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/random_uniform.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/random_uniform.hpp new file mode 100644 index 00000000000..43df6529de3 --- /dev/null +++ b/ngraph/core/reference/include/ngraph/runtime/reference/random_uniform.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "ngraph/shape.hpp" + +namespace ngraph { +namespace runtime { +namespace reference { +std::pair random_uniform(const uint64_t* out_shape, + const char* min_val, + const char* max_val, + char* out, + const Shape& out_shape_shape, + const ngraph::element::Type& elem_type, + uint64_t seed, + uint64_t seed2, + std::pair prev_state); + +// Following const values are taken from the original paper: +// https://www.thesalmons.org/john/random123/papers/random123sc11.pdf +const uint32_t crush_resistance_const_lower_value = 0x9E3779B9; +const uint32_t crush_resistance_const_upper_value = 0xBB67AE85; +const uint64_t statistic_maximizing_multiplier_n = 0xD2511F53; +const uint64_t statistic_maximizing_multiplier_counter = 0xCD9E8D57; +const size_t rounds_number = 10; + +// Determines how many sequence elements of RNG sequence are skipped between runs. +// Can be any positive value, 256 is chosen for parity with Tensorflow. 
+const uint64_t skip_const = 256; + +} // namespace reference +} // namespace runtime +} // namespace ngraph diff --git a/ngraph/core/reference/src/runtime/reference/random_uniform.cpp b/ngraph/core/reference/src/runtime/reference/random_uniform.cpp new file mode 100644 index 00000000000..6e6f1f7c95d --- /dev/null +++ b/ngraph/core/reference/src/runtime/reference/random_uniform.cpp @@ -0,0 +1,333 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ngraph/runtime/reference/random_uniform.hpp" + +#include + +#include "ngraph/shape.hpp" + +namespace ngraph { +namespace runtime { +namespace reference { + +// Splits uint64 value into two uint32 values with right and left part of original value. +std::pair split_high_low(uint64_t value) { + uint32_t low = static_cast(value); + uint32_t high = static_cast(value >> 32); + return {low, high}; +} + +// Concatenates two uint32 values into single uint64 values. +uint64_t unite_high_low(uint32_t high, uint32_t low) { + return (static_cast(high) << 32) + low; +} + +// Runs single "round" of Philox algorithm. +void calculate_round(uint64_t key, uint64_t& counter, uint64_t& n) { + // Split key, counter and n into two uint32 values. 
+ auto counter_lr = split_high_low(counter); + auto key_lr = split_high_low(key); + auto n_lr = split_high_low(n); + + // Each round performs following updating for n and counter: + // left uint32 part = mullo(R, M) + // right uint32 part = mulhi(R, M) xor k xor L + // mulhi(a, b) = floor((a * b) / 2^32) + // mullo(a, b) = (a * b) mod 2^32, + // where M - statistic_maximizing_multiplier const + auto prod0 = split_high_low(statistic_maximizing_multiplier_n * n_lr.first); + auto prod1 = split_high_low(statistic_maximizing_multiplier_counter * counter_lr.first); + n_lr.first = prod1.second ^ n_lr.second ^ key_lr.first; + n_lr.second = prod1.first; + counter_lr.first = prod0.second ^ counter_lr.second ^ key_lr.second; + counter_lr.second = prod0.first; + + // Unite counter and n into uint64 values. + counter = unite_high_low(counter_lr.second, counter_lr.first); + n = unite_high_low(n_lr.second, n_lr.first); +} + +// Increases key value. +void raise_key(uint64_t& key) { + auto key_lr = split_high_low(key); + key_lr.first += crush_resistance_const_lower_value; + key_lr.second += crush_resistance_const_upper_value; + key = unite_high_low(key_lr.second, key_lr.first); +} + +// Helper function for converting uint32 values to float32. Sets fractional part of +// floating value with bits from uint32 value. Resulting value is in interval [0,1). +float uint32_to_float(uint32_t x) { + // float32 is formatted as follows: sign(1 bit) exponent(8 bits) mantissa(23 bits). The value is interpreted + // The value is interpreted using following formula: + // (-1)^sign * 1, mantissa * 2 ^ (exponent - 127) + // Here we set the following values: + // sign = 0 + // exponent = 127, for obtaining a zero exponent. + // mantissa = 23 right bits from generated uint32 random value. 
+ + uint32_t x_uint32 = (static_cast(127) << 23) | (x & 0x7fffffu); + + float x_float; + memcpy(&x_float, &x_uint32, sizeof(x_uint32)); + return x_float - 1.0f; +} + +// Helper function for converting uint32 values to float16.Sets fractional part of +// floating value with bits from uint32 value. Resulting value is in interval [0,1). +float16 uint32_to_float16(uint32_t x) { + // float16 is formatted as follows: sign(1 bit) exponent(5 bits) mantissa(10 bits). The value is interpreted + // The value is interpreted using following formula: + // (-1)^sign * 1, mantissa * 2 ^ (exponent - 15) + // Here we set the following values: + // sign = 0 + // exponent = 15, for obtaining a zero exponent. + // mantissa = 10 right bits from generated uint32 random value. + + uint16_t x_uint16 = static_cast(x); + x_uint16 = (static_cast(15) << 10) | (x_uint16 & 0x3ffu); + + float16 x_float16; + memcpy(&x_float16, &x_uint16, sizeof(x_uint16)); + return x_float16 - static_cast(1); +} + +// Helper function for converting uint32 values to double. Sets fractional part of +// floating double with bits from uint32 values. Resulting value is in interval [0,1). +double uint32_to_double(uint32_t x1, uint32_t x2) { + // float64 is formatted as follows: sign(1 bit) exponent(11 bits) mantissa(52 bits). The value is interpreted + // The value is interpreted using following formula: + // (-1)^sign * 1, mantissa * 2 ^ (exponent - 1023) + // Here we set the following values: + // sign = 0 + // exponent = 1023, for obtaining a zero exponent. + // mantissa = 52 right bits from two concatenated uint32 values from random integer generator. + + uint64_t significant = ((static_cast(x1) & 0xfffffu) << 32) | static_cast(x2); + uint64_t x_uint64 = ((static_cast(1023) << 52) | significant); + + double x_double; + memcpy(&x_double, &x_uint64, sizeof(x_uint64)); + return x_double - 1.0; +} + +// Helper function for converting uint32 values to bfloat16. 
Sets fractional part of +// floating value with bits from uint32 value. Resulting value is in interval [0,1). +bfloat16 uint32_to_bfloat16(uint32_t x) { + // bfloat16 is formatted as follows: sign(1 bit) exponent(8 bits) mantissa(7 bits). The value is interpreted + // The value is interpreted using following formula: + // (-1)^sign * 1, mantissa * 2 ^ (exponent - 127) + // Here we set the following values: + // sign = 0 + // exponent = 127, for obtaining a zero exponent. + // mantissa = 7 right bits from generated uint32 random value. + + uint16_t x_uint16 = static_cast(x); + x_uint16 = (static_cast(127) << 7) | (x_uint16 & 0x7fu); + + bfloat16 x_bfloat16; + memcpy(&x_bfloat16, &x_uint16, sizeof(x_uint16)); + return x_bfloat16 - static_cast(1); +} + +// Runs Philox algorithm. +void run_philox(uint64_t key, uint64_t counter, uint64_t n, size_t n_rounds, std::vector& res) { + for (size_t i = 0; i < n_rounds; i++) { + calculate_round(key, counter, n); + if (i < n_rounds - 1) + raise_key(key); + } + auto res1 = split_high_low(n); + auto res2 = split_high_low(counter); + res[0] = res1.first; + res[1] = res1.second; + res[2] = res2.first; + res[3] = res2.second; +} + +// Converts uint32 values to destination type and normalizes to required range +template +void convert_to_output_type(const std::vector& res, + size_t step, + const ngraph::element::Type& elem_type, + const char* min_val, + const char* max_val, + char* out, + size_t k, + size_t elem_count, + T (*convert_single_input)(uint32_t) = nullptr, + T (*convert_two_inputs)(uint32_t, uint32_t, T, T) = nullptr, + T (*mod_func)(uint32_t, T, T) = nullptr) { + // Get min and max values + T mn[1]; + T mx[1]; + memcpy(mn, min_val, elem_type.size()); + memcpy(mx, max_val, elem_type.size()); + + std::vector res_out_type(step); + if (elem_type.size() > 4) { + // Each element of resulting sequence is formed using two uint32 values + res_out_type[0] = convert_two_inputs(res[0], res[1], mn[0], mx[0]); + res_out_type[1] = 
convert_two_inputs(res[2], res[3], mn[0], mx[0]); + } else { + // Each element of resulting sequence is formed using single uint32 value + std::transform(res.data(), + res.data() + step, + res_out_type.data(), + [&mn, &mx, &convert_single_input, &mod_func](uint32_t elem) { + if (convert_single_input != nullptr) { + return convert_single_input(elem) * (mx[0] - mn[0]) + mn[0]; + } else { + return mod_func(elem, mn[0], mx[0]); + } + }); + } + + memcpy(out + k * elem_type.size(), res_out_type.data(), std::min(step, elem_count - k) * elem_type.size()); +} + +// Implementation of RandomUniform that uses Philox algorithm as inner random unsigned integer generator. +std::pair random_uniform(const uint64_t* out_shape, + const char* min_val, + const char* max_val, + char* out, + const Shape& out_shape_shape, + const ngraph::element::Type& elem_type, + uint64_t seed, + uint64_t seed2, + std::pair prev_state) { + // When both seed values are equal to zero RandomUniform should generate non-deterministic sequence. + // Implementation in plugins may differ for this case. + if (seed == 0 && seed2 == 0) { + std::srand(std::time(nullptr)); + seed = std::rand(); + } + + // Get previous counter state + uint64_t n_state = prev_state.first; + uint64_t counter_state = prev_state.second; + + // Initialize Philox key and counters + uint64_t key = seed; + uint64_t counter = counter_state > 0 ? counter_state : seed2; + uint64_t n = n_state; + + // Calculate total element count for generation + size_t shape_count = shape_size(out_shape_shape); + size_t elem_count = 1; + for (size_t i = 0; i < shape_count; i++) { + elem_count *= out_shape[i]; + } + + // Philox algorithm returns 4 elements of RNG sequence per each invocation + const size_t philox_output_size = 4; + + // Each run of Philox algorithm generates 4 uint32 values. + // If output_type is int32, f32, bf16, or f16 each value is converted to + // corresponding type so we have 4 result values. 
For f64 and i64 we use + // a pair of values for conversion, so we have 2 result values. + // Step indicates how many values we generate in one iteration. + const size_t step = elem_type.size() > 4 ? 2 : 4; + + for (size_t k = 0; k < elem_count; k += step) { + // generate 4 random uint32 values using Philox algorithm + std::vector res(philox_output_size); + run_philox(key, counter, n, rounds_number, res); + + // convert values to corresponding output_type + switch (elem_type) { + case ngraph::element::Type_t::f32: { + convert_to_output_type(res, step, elem_type, min_val, max_val, out, k, elem_count, uint32_to_float); + break; + } + case ngraph::element::Type_t::f16: { + convert_to_output_type(res, + step, + elem_type, + min_val, + max_val, + out, + k, + elem_count, + uint32_to_float16); + break; + } + case ngraph::element::Type_t::bf16: { + convert_to_output_type(res, + step, + elem_type, + min_val, + max_val, + out, + k, + elem_count, + uint32_to_bfloat16); + break; + } + case ngraph::element::Type_t::f64: { + convert_to_output_type(res, + step, + elem_type, + min_val, + max_val, + out, + k, + elem_count, + nullptr, + [](uint32_t a, uint32_t b, double mn, double mx) { + return uint32_to_double(a, b) * (mx - mn) + mn; + }); + break; + } + case ngraph::element::Type_t::i32: { + convert_to_output_type(res, + step, + elem_type, + min_val, + max_val, + out, + k, + elem_count, + nullptr, + nullptr, + [](uint32_t x, int mn, int mx) { + return static_cast(x % (mx - mn) + mn); + }); + break; + } + case ngraph::element::Type_t::i64: { + convert_to_output_type(res, + step, + elem_type, + min_val, + max_val, + out, + k, + elem_count, + nullptr, + [](uint32_t a, uint32_t b, int64_t mn, int64_t mx) { + return static_cast(unite_high_low(b, a) % (mx - mn) + mn); + }); + break; + } + default: + throw ngraph_error("Unsupported type of RandomUniform: " + elem_type.get_type_name()); + } + if (++n == 0) + ++counter; + } + + // Calculate counter values for next RandomUniform run + 
uint64_t skip_count = elem_count * skip_const; + n_state += skip_count; + if (n_state < skip_count) + counter_state++; + + return {n_state, counter_state}; +} + +} // namespace reference +} // namespace runtime +} // namespace ngraph diff --git a/ngraph/core/src/op/random_uniform.cpp b/ngraph/core/src/op/random_uniform.cpp index 9b0b6cdc9fc..90a356cdef3 100644 --- a/ngraph/core/src/op/random_uniform.cpp +++ b/ngraph/core/src/op/random_uniform.cpp @@ -7,6 +7,7 @@ #include #include "itt.hpp" +#include "ngraph/runtime/reference/random_uniform.hpp" using namespace std; using namespace ngraph; @@ -116,7 +117,7 @@ bool op::v8::RandomUniform::visit_attributes(AttributeVisitor& visitor) { } shared_ptr op::v8::RandomUniform::clone_with_new_inputs(const OutputVector& new_args) const { - NGRAPH_OP_SCOPE(v8_Roll_clone_with_new_inputs); + NGRAPH_OP_SCOPE(v8_RandomUniform_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args[0], new_args[1], @@ -125,3 +126,94 @@ shared_ptr op::v8::RandomUniform::clone_with_new_inputs(const OutputVector m_global_seed, m_op_seed); } + +bool op::v8::RandomUniform::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { + NGRAPH_OP_SCOPE(v8_RandomUniform_evaluate); + const uint64_t* out_shape; + std::vector out_shape_uint64(shape_size(inputs[0]->get_shape())); + + if (inputs[0]->get_element_type() == element::Type_t::u64) { + out_shape = inputs[0]->get_data_ptr(); + } else if (inputs[0]->get_element_type() == element::Type_t::i32) { + auto out_shape_i32 = inputs[0]->get_data_ptr(); + std::transform(out_shape_i32, + out_shape_i32 + shape_size(inputs[0]->get_shape()), + out_shape_uint64.begin(), + [](const int32_t& elem) { + return static_cast(elem); + }); + out_shape = out_shape_uint64.data(); + } else if (inputs[0]->get_element_type() == element::Type_t::i64) { + auto out_shape_i64 = inputs[0]->get_data_ptr(); + std::transform(out_shape_i64, + out_shape_i64 + 
shape_size(inputs[0]->get_shape()), + out_shape_uint64.begin(), + [](const int64_t& elem) { + return static_cast(elem); + }); + out_shape = out_shape_uint64.data(); + } else { + throw ngraph_error("Unsupported type of out shape in RandomUniform operation: " + + inputs[0]->get_element_type().get_type_name()); + } + + element::Type_t t_out = get_out_type(); + char* out; + switch (t_out) { + case element::Type_t::i32: + out = (char*)outputs[0]->get_data_ptr(); + break; + case element::Type_t::i64: + out = (char*)outputs[0]->get_data_ptr(); + break; + case element::Type_t::f16: + out = (char*)outputs[0]->get_data_ptr(); + break; + case element::Type_t::bf16: + out = (char*)outputs[0]->get_data_ptr(); + break; + case element::Type_t::f32: + out = (char*)outputs[0]->get_data_ptr(); + break; + case element::Type_t::f64: + out = (char*)outputs[0]->get_data_ptr(); + break; + default: + throw ngraph_error("Unsupported type of RandomUniform: " + get_out_type().get_type_name()); + } + + auto state = runtime::reference::random_uniform(out_shape, + inputs[1]->get_data_ptr(), + inputs[2]->get_data_ptr(), + out, + inputs[0]->get_shape(), + get_out_type(), + get_global_seed(), + get_op_seed(), + m_state); + + // Update RandomUniform state + std::lock_guard guard(m_state_mutex); + m_state = state; + return true; +} + +bool op::v8::RandomUniform::has_evaluate() const { + NGRAPH_OP_SCOPE(v8_RandomUniform_has_evaluate); + if (get_input_element_type(0) != ngraph::element::i32 && get_input_element_type(0) != ngraph::element::i64) { + return false; + } + + switch (get_out_type()) { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::f16: + case ngraph::element::bf16: + case ngraph::element::f32: + case ngraph::element::f64: + return true; + default: + break; + } + return false; +} From 15bef9ec70b1a56ee5a10146fbf0c437b4ff6594 Mon Sep 17 00:00:00 2001 From: Gabriele Galiero Casay Date: Mon, 6 Sep 2021 12:32:05 +0200 Subject: [PATCH 19/52] Remove deprecated mvn 
class for SLTs (#7340) --- .../shared/include/single_layer_tests/mvn.hpp | 5 --- .../shared_test_classes/single_layer/mvn.hpp | 17 ---------- .../src/single_layer/mvn.cpp | 32 ------------------- 3 files changed, 54 deletions(-) diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp index d1a9ff52a10..eda13543172 100644 --- a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp @@ -8,11 +8,6 @@ namespace LayerTestsDefinitions { -// DEPRECATED, remove MvnLayerTest when KMB and ARM plugin will switch to use Mvn1LayerTest (#60420) -TEST_P(MvnLayerTest, CompareWithRefs) { - Run(); -}; - TEST_P(Mvn1LayerTest, CompareWithRefs) { Run(); }; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/mvn.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/mvn.hpp index 747e0940da7..9876e825034 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/mvn.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/mvn.hpp @@ -11,23 +11,6 @@ namespace LayerTestsDefinitions { -// DEPRECATED, remove MvnLayerTest when KMB and ARM plugin will switch to use Mvn1LayerTest (#60420) -typedef std::tuple< - InferenceEngine::SizeVector, // Input shapes - InferenceEngine::Precision, // Input precision - bool, // Across channels - bool, // Normalize variance - double, // Epsilon - std::string> mvnParams; // Device name - -class MvnLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - typedef 
std::tuple< InferenceEngine::SizeVector, // Input shapes InferenceEngine::Precision, // Input precision diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/mvn.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/mvn.cpp index d4e2a0c0df8..5c04c6f177e 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/mvn.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/mvn.cpp @@ -7,38 +7,6 @@ namespace LayerTestsDefinitions { -// DEPRECATED, remove MvnLayerTest when KMB and ARM plugin will switch to use Mvn1LayerTest (#60420) -std::string MvnLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::SizeVector inputShapes; - InferenceEngine::Precision inputPrecision; - bool acrossChannels, normalizeVariance; - double eps; - std::string targetDevice; - std::tie(inputShapes, inputPrecision, acrossChannels, normalizeVariance, eps, targetDevice) = obj.param; - std::ostringstream result; - result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_"; - result << "Precision=" << inputPrecision.name() << "_"; - result << "AcrossChannels=" << (acrossChannels ? "TRUE" : "FALSE") << "_"; - result << "NormalizeVariance=" << (normalizeVariance ? 
"TRUE" : "FALSE") << "_"; - result << "Epsilon=" << eps << "_"; - result << "TargetDevice=" << targetDevice; - return result.str(); -} - -void MvnLayerTest::SetUp() { - InferenceEngine::SizeVector inputShapes; - InferenceEngine::Precision inputPrecision; - bool acrossChanels, normalizeVariance; - double eps; - std::tie(inputShapes, inputPrecision, acrossChanels, normalizeVariance, eps, targetDevice) = this->GetParam(); - auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - auto param = ngraph::builder::makeParams(inType, {inputShapes}); - auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(param)); - auto mvn = std::dynamic_pointer_cast(ngraph::builder::makeMVN(paramOuts[0], acrossChanels, normalizeVariance, eps)); - ngraph::ResultVector results{std::make_shared(mvn)}; - function = std::make_shared(results, param, "mvn"); -} - std::string Mvn1LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::SizeVector inputShapes; InferenceEngine::Precision inputPrecision; From 5f1ffc527d46b43efcabf557de0eb3b429413289 Mon Sep 17 00:00:00 2001 From: Katarzyna Mitrus Date: Mon, 6 Sep 2021 12:47:23 +0200 Subject: [PATCH 20/52] Propose new Slice-8 operation - update (#7257) * Propose new Slice-8 operation (cherry picked from commit a44ed32a6fd79cabf90393b7ef1847e3ff1de374) * Align category * Update detailed descripiton * Update Inputs/Outputs description * Add no attributes info * Precise range of axis values * Update descriptions * More details about negative steps and slicing backwards * Add more details * Add examples * Change step input to Required * Apply spell check comments * Add missing commas * Add separate T_AXIS type for axis input * Make example data 2D * Remove duplicated example * Apply review suggestions Co-authored-by: Michal Karzynski --- docs/ops/movement/Slice_8.md | 419 +++++++++++++++++++++++++++++++++++ 1 file changed, 419 insertions(+) create mode 100644 
docs/ops/movement/Slice_8.md diff --git a/docs/ops/movement/Slice_8.md b/docs/ops/movement/Slice_8.md new file mode 100644 index 00000000000..7ebfe6b1a9f --- /dev/null +++ b/docs/ops/movement/Slice_8.md @@ -0,0 +1,419 @@ +## Slice {#openvino_docs_ops_movement_Slice_8} + +**Versioned name**: *Slice-8* + +**Category**: *Data movement* + +**Short description**: *Slice* operation extracts a slice of the input tensor. + +**Detailed Description**: *Slice* operation selects a region of values from the `data` tensor. +Selected values start at indexes provided in the `start` input (inclusively) and end +at indexes provides in `stop` input (exclusively). + +The `step` input allows subsampling of `data`, selecting every *n*-th element, +where `n` is equal to `step` element for corresponding axis. +Negative `step` value indicates slicing backwards, so the sequence along the corresponding axis is reversed in the output tensor. +To select all values contiguously set `step` to `1` for each axis. + +The optional `axes` input allows specifying slice indexes only on selected axes. +Other axes will not be affected and will be output in full. + +The rules follow python language slicing `data[start:stop:step]`. + +**Attributes**: *Slice* operation has no attributes. + +**Inputs** + +* **1**: `data` - tensor (to be sliced) of type *T* and shape rank greater or equal to 1. **Required.** + +* **2**: `start` - 1D tensor of type *T_IND*. Indices corresponding to axes in `data`. + Defines the starting coordinate of the slice in the `data` tensor. + A negative index value represents counting elements from the end of that dimension. + A value larger than the size of a dimension is silently clamped. **Required.** + +* **3**: `stop` - 1D, type *T_IND*, similar to `start`. + Defines the coordinate of the opposite vertex of the slice, or where the slice ends. + Stop indexes are exclusive, which means values lying on the ending edge are + not included in the output slice. 
+ To slice to the end of a dimension of unknown size `INT_MAX` + may be used (or `INT_MIN` if slicing backwards). **Required.** + +* **4**: `step` - 1D tensor of type *T_IND* and the same shape as `start` and `stop`. + Integer value that specifies the increment between each index used in slicing. + Value cannot be `0`, negative value indicates slicing backwards. **Required.** + +* **5**: `axes` - 1D tensor of type *T_AXIS*. + Optional 1D tensor indicating which dimensions the values in `start` and `stop` apply to. + Negative value means counting dimensions from the end. The range is `[-r, r - 1]`, where `r` is the rank of the `data` input tensor. + Values are required to be unique. If a particular axis is unspecified, it will be output in full and not sliced. + Default value: `[0, 1, 2, ..., start.shape[0] - 1]`. **Optional.** + +Number of elements in `start`, `stop`, `step`, and `axes` inputs are required to be equal. + +**Outputs** + +* **1**: Tensor of type *T* with values of the selected slice. The shape of the output tensor has the same rank as the shape of `data` input and reduced dimensions according to the values specified by `start`, `stop`, and `step` inputs. + +**Types** + +* *T*: any arbitrary supported type. +* *T_IND*: any supported integer type. +* *T_AXIS*: any supported integer type. 
+ + +**Examples** + +*Example 1: basic slicing* + +```xml + + + + 10 + + + 1 + + + 1 + + + 1 + + + 1 + + + + + 7 + + + +``` + +*Example 2: basic slicing, `axes` default* + +```xml + + + + 10 + + + 1 + + + 1 + + + 1 + + + + + 7 + + + +``` + +*Example 3: basic slicing, `step: [2]`* + +```xml + + + + 10 + + + 1 + + + 1 + + + 1 + + + 1 + + + + + 4 + + + +``` + +*Example 4: `start` and `stop` out of the dimension size, `step: [1]`* + +```xml + + + + 10 + + + 1 + + + 1 + + + 1 + + + 1 + + + + + 10 + + + +``` + +*Example 5: slicing backward all elements, `step: [-1]`, `stop: [-11]`* + +```xml + + + + 10 + + + 1 + + + 1 + + + 1 + + + 1 + + + + + 10 + + + +``` + +*Example 6: slicing backward, `step: [-1]`, `stop: [0]`* + +```xml + + + + 10 + + + 1 + + + 1 + + + 1 + + + 1 + + + + + 9 + + + +``` + +*Example 7: slicing backward, `step: [-1]`, `stop: [-10]`* + +```xml + + + + 10 + + + 1 + + + 1 + + + 1 + + + 1 + + + + + 9 + + + +``` + +*Example 8: slicing backward, `step: [-2]`* + +```xml + + + + 10 + + + 1 + + + 1 + + + 1 + + + 1 + + + + + 5 + + + +``` + +*Example 9: `start` and `stop` out of the dimension size, slicing backward* + +```xml + + + + 10 + + + 1 + + + 1 + + + 1 + + + 1 + + + + + 10 + + + +``` + +*Example 10: slicing 2D tensor, all axes specified* + +```xml + + + + 2 + 5 + + + 2 + + + 2 + + + 2 + + + 2 + + + + + 2 + 2 + + + +``` + +*Example 11: slicing 3D tensor, all axes specified* + +```xml + + + + 20 + 10 + 5 + + + 2 + + + 2 + + + 2 + + + 2 + + + + + 4 + 10 + 5 + + + +``` + +*Example 12: slicing 3D tensor, last axes default* + +```xml + + + + 20 + 10 + 5 + + + 2 + + + 2 + + + 2 + + + 2 + + + + + 4 + 10 + 5 + + + +``` From f99bf64397079570c5f471a3fc97ad0b6717f1ff Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Mon, 6 Sep 2021 17:40:18 +0300 Subject: [PATCH 21/52] Moved operations M-P to ov namespace (#7354) * Moved operations M-P to ov namespace * Fixed code style * Fixed build * Fixed comments --- ngraph/core/include/ngraph/op/matmul.hpp | 40 +- 
ngraph/core/include/ngraph/op/matrix_nms.hpp | 83 +--- ngraph/core/include/ngraph/op/max.hpp | 19 +- ngraph/core/include/ngraph/op/max_pool.hpp | 117 +---- ngraph/core/include/ngraph/op/maximum.hpp | 24 +- ngraph/core/include/ngraph/op/min.hpp | 21 +- ngraph/core/include/ngraph/op/minimum.hpp | 24 +- ngraph/core/include/ngraph/op/mish.hpp | 22 +- ngraph/core/include/ngraph/op/mod.hpp | 20 +- .../core/include/ngraph/op/multiclass_nms.hpp | 57 +-- ngraph/core/include/ngraph/op/multiply.hpp | 24 +- ngraph/core/include/ngraph/op/mvn.hpp | 131 +----- ngraph/core/include/ngraph/op/negative.hpp | 18 +- .../include/ngraph/op/non_max_suppression.hpp | 400 +---------------- ngraph/core/include/ngraph/op/non_zero.hpp | 55 +-- .../core/include/ngraph/op/normalize_l2.hpp | 39 +- ngraph/core/include/ngraph/op/not.hpp | 20 +- ngraph/core/include/ngraph/op/not_equal.hpp | 23 +- ngraph/core/include/ngraph/op/one_hot.hpp | 42 +- ngraph/core/include/ngraph/op/or.hpp | 27 +- ngraph/core/include/ngraph/op/pad.hpp | 65 +-- ngraph/core/include/ngraph/op/power.hpp | 37 +- ngraph/core/include/ngraph/op/prelu.hpp | 25 +- ngraph/core/include/ngraph/op/prior_box.hpp | 57 +-- .../include/ngraph/op/prior_box_clustered.hpp | 49 +-- ngraph/core/include/ngraph/op/proposal.hpp | 82 +--- .../core/include/ngraph/op/psroi_pooling.hpp | 60 +-- .../core/include/openvino/op/logical_not.hpp | 32 ++ .../core/include/openvino/op/logical_or.hpp | 41 ++ .../include/openvino/op/lstm_sequence.hpp | 8 +- ngraph/core/include/openvino/op/matmul.hpp | 55 +++ .../core/include/openvino/op/matrix_nms.hpp | 91 ++++ ngraph/core/include/openvino/op/max.hpp | 31 ++ ngraph/core/include/openvino/op/max_pool.hpp | 133 ++++++ ngraph/core/include/openvino/op/maximum.hpp | 36 ++ ngraph/core/include/openvino/op/minimum.hpp | 36 ++ ngraph/core/include/openvino/op/mish.hpp | 34 ++ ngraph/core/include/openvino/op/mod.hpp | 32 ++ .../include/openvino/op/multiclass_nms.hpp | 69 +++ ngraph/core/include/openvino/op/multiply.hpp | 36 ++ 
ngraph/core/include/openvino/op/mvn.hpp | 144 +++++++ ngraph/core/include/openvino/op/negative.hpp | 30 ++ .../openvino/op/non_max_suppression.hpp | 408 ++++++++++++++++++ ngraph/core/include/openvino/op/non_zero.hpp | 67 +++ .../core/include/openvino/op/normalize_l2.hpp | 53 +++ ngraph/core/include/openvino/op/not_equal.hpp | 35 ++ ngraph/core/include/openvino/op/one_hot.hpp | 54 +++ ngraph/core/include/openvino/op/pad.hpp | 79 ++++ ngraph/core/include/openvino/op/power.hpp | 49 +++ ngraph/core/include/openvino/op/prelu.hpp | 37 ++ ngraph/core/include/openvino/op/prior_box.hpp | 67 +++ .../openvino/op/prior_box_clustered.hpp | 59 +++ ngraph/core/include/openvino/op/proposal.hpp | 95 ++++ .../include/openvino/op/psroi_pooling.hpp | 72 ++++ .../core/include/openvino/op/reduce_min.hpp | 33 ++ .../include/openvino/op/tensor_iterator.hpp | 4 +- .../core/src/op/{not.cpp => logical_not.cpp} | 5 +- ngraph/core/src/op/{or.cpp => logical_or.cpp} | 5 +- ngraph/core/src/op/matmul.cpp | 2 +- ngraph/core/src/op/matrix_nms.cpp | 4 +- ngraph/core/src/op/max.cpp | 2 +- ngraph/core/src/op/max_pool.cpp | 6 +- ngraph/core/src/op/maximum.cpp | 2 +- ngraph/core/src/op/minimum.cpp | 2 +- ngraph/core/src/op/mish.cpp | 2 +- ngraph/core/src/op/mod.cpp | 4 +- ngraph/core/src/op/multiclass_nms.cpp | 2 +- ngraph/core/src/op/multiply.cpp | 2 +- ngraph/core/src/op/mvn.cpp | 6 +- ngraph/core/src/op/negative.cpp | 2 +- ngraph/core/src/op/non_max_suppression.cpp | 69 +-- ngraph/core/src/op/non_zero.cpp | 2 +- ngraph/core/src/op/normalize_l2.cpp | 2 +- ngraph/core/src/op/not_equal.cpp | 2 +- ngraph/core/src/op/one_hot.cpp | 10 +- ngraph/core/src/op/pad.cpp | 4 +- ngraph/core/src/op/power.cpp | 2 +- ngraph/core/src/op/prelu.cpp | 35 +- ngraph/core/src/op/prior_box.cpp | 15 +- ngraph/core/src/op/prior_box_clustered.cpp | 18 +- ngraph/core/src/op/proposal.cpp | 8 +- ngraph/core/src/op/psroi_pooling.cpp | 22 +- .../core/src/op/{min.cpp => reduce_min.cpp} | 5 +- 83 files changed, 2102 insertions(+), 
1639 deletions(-) create mode 100644 ngraph/core/include/openvino/op/logical_not.hpp create mode 100644 ngraph/core/include/openvino/op/logical_or.hpp create mode 100644 ngraph/core/include/openvino/op/matmul.hpp create mode 100644 ngraph/core/include/openvino/op/matrix_nms.hpp create mode 100644 ngraph/core/include/openvino/op/max.hpp create mode 100644 ngraph/core/include/openvino/op/max_pool.hpp create mode 100644 ngraph/core/include/openvino/op/maximum.hpp create mode 100644 ngraph/core/include/openvino/op/minimum.hpp create mode 100644 ngraph/core/include/openvino/op/mish.hpp create mode 100644 ngraph/core/include/openvino/op/mod.hpp create mode 100644 ngraph/core/include/openvino/op/multiclass_nms.hpp create mode 100644 ngraph/core/include/openvino/op/multiply.hpp create mode 100644 ngraph/core/include/openvino/op/mvn.hpp create mode 100644 ngraph/core/include/openvino/op/negative.hpp create mode 100644 ngraph/core/include/openvino/op/non_max_suppression.hpp create mode 100644 ngraph/core/include/openvino/op/non_zero.hpp create mode 100644 ngraph/core/include/openvino/op/normalize_l2.hpp create mode 100644 ngraph/core/include/openvino/op/not_equal.hpp create mode 100644 ngraph/core/include/openvino/op/one_hot.hpp create mode 100644 ngraph/core/include/openvino/op/pad.hpp create mode 100644 ngraph/core/include/openvino/op/power.hpp create mode 100644 ngraph/core/include/openvino/op/prelu.hpp create mode 100644 ngraph/core/include/openvino/op/prior_box.hpp create mode 100644 ngraph/core/include/openvino/op/prior_box_clustered.hpp create mode 100644 ngraph/core/include/openvino/op/proposal.hpp create mode 100644 ngraph/core/include/openvino/op/psroi_pooling.hpp create mode 100644 ngraph/core/include/openvino/op/reduce_min.hpp rename ngraph/core/src/op/{not.cpp => logical_not.cpp} (98%) rename ngraph/core/src/op/{or.cpp => logical_or.cpp} (96%) rename ngraph/core/src/op/{min.cpp => reduce_min.cpp} (97%) diff --git a/ngraph/core/include/ngraph/op/matmul.hpp 
b/ngraph/core/include/ngraph/op/matmul.hpp index 2ace4905c0a..92a1701f12d 100644 --- a/ngraph/core/include/ngraph/op/matmul.hpp +++ b/ngraph/core/include/ngraph/op/matmul.hpp @@ -6,48 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/matmul.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Operator performing Matrix Multiplication. -class NGRAPH_API MatMul : public Op { -public: - NGRAPH_RTTI_DECLARATION; - MatMul() = default; - /// \brief Constructs an Matrix Multiplication operation. - /// - /// \param A Matrix A - /// \param B Matrix B - /// \param transpose_a If matrix A should be transposed. - /// \param transpose_b If matrix B should be transposed. - MatMul(const Output& A, const Output& B, const bool& transpose_a = 0, const bool& transpose_b = 0); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - - bool get_transpose_a() const { - return m_transpose_a; - } - bool get_transpose_b() const { - return m_transpose_b; - } - void set_transpose_a(bool transpose_a) { - m_transpose_a = transpose_a; - } - void set_transpose_b(bool transpose_b) { - m_transpose_b = transpose_b; - } - -private: - bool m_transpose_a; - bool m_transpose_b; -}; +using ov::op::v0::MatMul; } // namespace v0 using v0::MatMul; } // namespace op diff --git a/ngraph/core/include/ngraph/op/matrix_nms.hpp b/ngraph/core/include/ngraph/op/matrix_nms.hpp index e77b7b439c3..b7e2b3d730a 100644 --- a/ngraph/core/include/ngraph/op/matrix_nms.hpp +++ b/ngraph/core/include/ngraph/op/matrix_nms.hpp @@ -5,90 +5,13 @@ #pragma once #include "ngraph/op/util/nms_base.hpp" +#include "openvino/op/matrix_nms.hpp" namespace ngraph { namespace op { namespace v8 { -/// \brief 
MatrixNms operation -/// -class NGRAPH_API MatrixNms : public util::NmsBase { -public: - NGRAPH_RTTI_DECLARATION; - - enum class DecayFunction { GAUSSIAN, LINEAR }; - - /// \brief Structure that specifies attributes of the operation - struct Attributes { - // specifies order of output elements - SortResultType sort_result_type = SortResultType::NONE; - // specifies whenever it is necessary to sort selected boxes across batches or - // not - bool sort_result_across_batch = false; - // specifies the output tensor type - ngraph::element::Type output_type = ngraph::element::i64; - // specifies minimum score to consider box for the processing - float score_threshold = 0.0f; - // specifies maximum number of boxes to be selected per class, -1 meaning to - // keep all boxes - int nms_top_k = -1; - // specifies maximum number of boxes to be selected per batch element, -1 - // meaning to keep all boxes - int keep_top_k = -1; - // specifies the background class id, -1 meaning to keep all classes - int background_class = -1; - // specifies decay function used to decay scores - DecayFunction decay_function = DecayFunction::LINEAR; - // specifies gaussian_sigma parameter for gaussian decay_function - float gaussian_sigma = 2.0f; - // specifies threshold to filter out boxes with low confidence score after - // decaying - float post_threshold = 0.0f; - // specifies whether boxes are normalized or not - bool normalized = true; - }; - - MatrixNms(); - - /// \brief Constructs a MatrixNms operation - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param attrs Attributes of the operation - MatrixNms(const Output& boxes, const Output& scores, const Attributes& attrs); - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \brief Returns attributes of the operation MatrixNms - const Attributes& get_attrs() const { - 
return m_attrs; - } - -protected: - Attributes m_attrs; - - void validate() override; -}; +using ov::op::v8::MatrixNms; } // namespace v8 } // namespace op -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v8::MatrixNms::DecayFunction& type); +using ov::operator<<; } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v8::MatrixNms::DecayFunction& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/max.hpp b/ngraph/core/include/ngraph/op/max.hpp index 7d0a2e825b4..5d59bf8567d 100644 --- a/ngraph/core/include/ngraph/op/max.hpp +++ b/ngraph/core/include/ngraph/op/max.hpp @@ -6,27 +6,12 @@ #include "ngraph/op/util/arithmetic_reduction.hpp" #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" +#include "openvino/op/max.hpp" namespace ngraph { namespace op { namespace v1 { -class NGRAPH_API ReduceMax : public util::ArithmeticReductionKeepDims { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a summation operation. - ReduceMax() = default; - /// \brief Constructs a summation operation. - /// - /// \param arg The tensor to be summed. - /// \param reduction_axes The axis positions (0-based) to be eliminated. - /// \param keep_dims If set to 1 it holds axes that are used for reduction. 
- ReduceMax(const Output& arg, const Output& reduction_axes, bool keep_dims = false); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::ReduceMax; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/max_pool.hpp b/ngraph/core/include/ngraph/op/max_pool.hpp index e870a1e0303..25c4cb50f40 100644 --- a/ngraph/core/include/ngraph/op/max_pool.hpp +++ b/ngraph/core/include/ngraph/op/max_pool.hpp @@ -7,127 +7,16 @@ #include #include "ngraph/op/util/max_pool_base.hpp" +#include "openvino/op/max_pool.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Batched max pooling operation. -class NGRAPH_API MaxPool : public op::util::MaxPoolBase { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a batched max pooling operation. - MaxPool() = default; - - /// \brief Constructs a batched max pooling operation. - /// - /// \param arg The node producing the input data batch tensor. - /// \param strides The strides. - /// \param pads_begin The beginning of padding shape. - /// \param pads_end The end of padding shape. - /// \param kernel The kernel shape. - /// \param rounding_type Whether to use ceiling or floor rounding type while - /// computing output shape. - /// \param auto_pad The pad type for automatically computing padding sizes. 
- MaxPool(const Output& arg, - const Strides& strides, - const Shape& pads_begin, - const Shape& pads_end, - const Shape& kernel, - const op::RoundingType rounding_type = op::RoundingType::FLOOR, - const PadType auto_pad = op::PadType::EXPLICIT); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The default value for MaxPool. - NGRAPH_SUPPRESS_DEPRECATED_START - virtual std::shared_ptr get_default_value() const override; - NGRAPH_SUPPRESS_DEPRECATED_END - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - bool evaluate_maxpool(const HostTensorVector& outputs, const HostTensorVector& inputs) const; -}; +using ov::op::v1::MaxPool; } // namespace v1 namespace v8 { -/// \brief MaxPooling operation with values and indices calculated as individual outputs -class NGRAPH_API MaxPool : public op::util::MaxPoolBase { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs an empty MaxPool operation. - MaxPool() = default; - - /// \brief Constructs a parametrized MaxPool operation. - /// - /// \param arg Output of a node producing the feature tensor to be pooled. - /// \param strides The strides of the pooling filter. - /// \param dilations The dilations of the pooling filter. - /// \param pads_begin Paddings at the beginning of each spatial axis. - /// \param pads_end Paddings at the end of each spatial axis. - /// \param kernel The kernel shape. - /// \param rounding_type Whether to use ceiling or floor rounding type while - /// computing the output shape. - /// \param auto_pad The pad type for automatic calculation of the padding sizes. - /// \param index_element_type The data type used by the second output tensor - /// containing the selected indices. 
- /// \param axis Indicates a dimension in the input data shape which should be used - /// as a starting point for calculation of the upper bound of allowed - /// values of the indices output. - MaxPool(const Output& arg, - const Strides& strides, - const Strides& dilations, - const Shape& pads_begin, - const Shape& pads_end, - const Shape& kernel, - const op::RoundingType rounding_type = op::RoundingType::FLOOR, - const PadType auto_pad = op::PadType::EXPLICIT, - const element::Type index_element_type = element::i64, - const int64_t axis = 0); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The pooling filter's dilations. - const Strides& get_dilations() const noexcept { - return m_dilations; - } - void set_dilations(const Strides& dilations) { - m_dilations = dilations; - } - - /// \return The data type of the second output tensor (indices). - element::Type get_index_element_type() const noexcept { - return m_index_element_type; - } - void set_index_element_type(const element::Type index_element_type) { - m_index_element_type = index_element_type; - } - - // \return The 'axis' attribute value. 
- int64_t get_axis() const { - return m_axis; - } - void set_axis(const int64_t axis) { - m_axis = axis; - } - - bool has_evaluate() const override; - bool evaluate(const HostTensorVector&, const HostTensorVector&) const override; - -private: - Strides m_dilations; - element::Type m_index_element_type{element::i64}; - int64_t m_axis{0}; -}; +using ov::op::v8::MaxPool; } // namespace v8 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/maximum.hpp b/ngraph/core/include/ngraph/op/maximum.hpp index 02bfd109379..11f63f2f532 100644 --- a/ngraph/core/include/ngraph/op/maximum.hpp +++ b/ngraph/core/include/ngraph/op/maximum.hpp @@ -5,32 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/maximum.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise maximum operation. -class NGRAPH_API Maximum : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a maximum operation. - Maximum() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs a maximum operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. 
- /// \param auto_broadcast Auto broadcast specification - Maximum(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Maximum; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/min.hpp b/ngraph/core/include/ngraph/op/min.hpp index 21c3b8c710c..e3b6a610b6c 100644 --- a/ngraph/core/include/ngraph/op/min.hpp +++ b/ngraph/core/include/ngraph/op/min.hpp @@ -6,29 +6,12 @@ #include "ngraph/op/util/arithmetic_reduction.hpp" #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" +#include "openvino/op/reduce_min.hpp" namespace ngraph { namespace op { namespace v1 { -class NGRAPH_API ReduceMin : public util::ArithmeticReductionKeepDims { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a summation operation. - ReduceMin() = default; - /// \brief Constructs a summation operation. - /// - /// \param arg The tensor to be summed. - /// \param reduction_axes The axis positions (0-based) to be eliminated. - /// \param keep_dims If set to 1 it holds axes that are used for reduction. 
- ReduceMin(const Output& arg, const Output& reduction_axes, bool keep_dims = false); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& outputs) const override; - bool evaluate_upper(const HostTensorVector& outputs) const override; -}; +using ov::op::v1::ReduceMin; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/minimum.hpp b/ngraph/core/include/ngraph/op/minimum.hpp index 38d10ea1e10..201fc8a3238 100644 --- a/ngraph/core/include/ngraph/op/minimum.hpp +++ b/ngraph/core/include/ngraph/op/minimum.hpp @@ -5,32 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/minimum.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise minimum operation. -class NGRAPH_API Minimum : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a minimum operation. - Minimum() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs a minimum operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. 
- /// \param auto_broadcast Auto broadcast specification - Minimum(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Minimum; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/mish.hpp b/ngraph/core/include/ngraph/op/mish.hpp index 943ef5699d8..43884f6f318 100644 --- a/ngraph/core/include/ngraph/op/mish.hpp +++ b/ngraph/core/include/ngraph/op/mish.hpp @@ -6,30 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/mish.hpp" namespace ngraph { namespace op { namespace v4 { -/// \brief A Self Regularized Non-Monotonic Neural Activation Function -/// f(x) = x * tanh(log(exp(x) + 1.)) -/// -class NGRAPH_API Mish : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - - Mish() = default; - /// \brief Constructs an Mish operation. 
- /// - /// \param data Input tensor - Mish(const Output& arg); - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v4::Mish; } // namespace v4 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/mod.hpp b/ngraph/core/include/ngraph/op/mod.hpp index 4726c24945c..7a86e04230d 100644 --- a/ngraph/core/include/ngraph/op/mod.hpp +++ b/ngraph/core/include/ngraph/op/mod.hpp @@ -5,28 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/mod.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Mod returns an element-wise division reminder with two given tensors applying -/// multi-directional broadcast rules. -class NGRAPH_API Mod : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a Mod node. 
- Mod() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - /// - /// \param A - Dividend tensor - /// \param B - Divisor tensor - /// \param auto_broadcast Auto broadcast specification - Mod(const Output& A, - const Output& B, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v1::Mod; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/multiclass_nms.hpp b/ngraph/core/include/ngraph/op/multiclass_nms.hpp index 5ecde65cae4..d88e4d4b6e9 100644 --- a/ngraph/core/include/ngraph/op/multiclass_nms.hpp +++ b/ngraph/core/include/ngraph/op/multiclass_nms.hpp @@ -5,65 +5,12 @@ #pragma once #include "ngraph/op/util/nms_base.hpp" +#include "openvino/op/multiclass_nms.hpp" namespace ngraph { namespace op { namespace v8 { -/// \brief MulticlassNms operation -/// -class NGRAPH_API MulticlassNms : public util::NmsBase { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Structure that specifies attributes of the operation - struct Attributes { - // specifies order of output elements - SortResultType sort_result_type = SortResultType::NONE; - // specifies whenever it is necessary to sort selected boxes across batches or - // not - bool sort_result_across_batch = false; - // specifies the output tensor type - ngraph::element::Type output_type = ngraph::element::i64; - // specifies intersection over union threshold - float iou_threshold = 0.0f; - // specifies minimum score to consider box for the processing - float score_threshold = 0.0f; - // specifies maximum number of boxes to be selected per class, -1 meaning to - // keep all boxes - int nms_top_k = -1; - // specifies maximum number of boxes to be selected per batch element, -1 - // meaning to keep all boxes - int keep_top_k = -1; - // specifies the background class id, -1 meaning to keep all classes - int 
background_class = -1; - // specifies eta parameter for adpative NMS, in close range [0, 1.0] - float nms_eta = 1.0f; - // specifies whether boxes are normalized or not - bool normalized = true; - }; - - MulticlassNms(); - - /// \brief Constructs a MulticlassNms operation - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param attrs Attributes of the operation - MulticlassNms(const Output& boxes, const Output& scores, const Attributes& attrs); - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \brief Returns attributes of the operation MulticlassNms - const Attributes& get_attrs() const { - return m_attrs; - } - -protected: - Attributes m_attrs; - void validate() override; -}; +using ov::op::v8::MulticlassNms; } // namespace v8 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/multiply.hpp b/ngraph/core/include/ngraph/op/multiply.hpp index 0826854ebf4..2af4196858d 100644 --- a/ngraph/core/include/ngraph/op/multiply.hpp +++ b/ngraph/core/include/ngraph/op/multiply.hpp @@ -5,32 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/multiply.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise multiplication operation. -class NGRAPH_API Multiply : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a multiplication operation. - Multiply() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs a multiplication operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. 
- /// \param auto_broadcast Auto broadcast specification - Multiply(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Multiply; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/mvn.hpp b/ngraph/core/include/ngraph/op/mvn.hpp index 8795a7cbb8f..e4d887ab070 100644 --- a/ngraph/core/include/ngraph/op/mvn.hpp +++ b/ngraph/core/include/ngraph/op/mvn.hpp @@ -6,142 +6,19 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/mvn.hpp" namespace ngraph { namespace op { - namespace v0 { -/// \brief Operator performing Mean Variance Normalization -/// -class NGRAPH_API MVN : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - MVN() = default; - /// \brief Constructs an MVN operation. - /// - /// \param data Input tensor with data - /// \param normalize_variance flag that denotes whether to perform variance - /// normalization. - /// \param across_channels flag that denotes if mean values are shared across - /// channels. - /// \param eps the number to be added to the variance to avoid division by zero when - /// normalizing the value - /// - MVN(const Output& data, bool across_channels = true, bool normalize_variance = true, double eps = 1e-9); - - /// \brief Constructs an MVN operation. - /// - /// \param data Input tensor with data - /// \param reduction_axes A list of axes, along which to reduce. - /// \param normalize_variance flag that denotes whether to perform variance - /// normalization. 
- /// \param eps the number to be added to the variance to avoid division by zero when - /// normalizing the value - /// - MVN(const Output& data, AxisSet reduction_axes, bool normalize_variance = true, double eps = 1e-9); - - void validate_and_infer_types() override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - double get_eps() const { - return m_eps; - } - bool get_across_channels() const { - return m_across_channels; - } - bool get_normalize_variance() const { - return m_normalize_variance; - } - AxisSet get_reduction_axes() const { - return m_reduction_axes; - } - void set_reduction_axes(AxisSet axes) { - m_reduction_axes = axes; - } - -private: - double m_eps; - bool m_across_channels; - bool m_normalize_variance; - AxisSet m_reduction_axes; -}; +using ov::op::v0::MVN; } // namespace v0 using v0::MVN; -/// \brief Specifies how eps is applied in MVN -enum class MVNEpsMode { - // Apply eps inside sqrt - INSIDE_SQRT, - // Apply eps outside sqrt - OUTSIDE_SQRT -}; - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const MVNEpsMode& type); +using ov::op::MVNEpsMode; namespace v6 { -/// \brief Operator performing Mean Variance Normalization -/// -class NGRAPH_API MVN : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - - MVN() = default; - /// \brief Constructs an MVN operation. - /// - /// \param data Input tensor with data - /// \param reduction_axes A list of axes, along which to reduce. - /// \param normalize_variance flag that denotes whether to perform variance - /// normalization. 
- /// \param eps the number to be added to the variance to avoid division by zero when - /// normalizing the value - /// \param eps_mode the mode of applying epsilon - /// - MVN(const Output& data, - const Output& reduction_axes, - bool normalize_variance, - float eps, - MVNEpsMode eps_mode); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - float get_eps() const { - return m_eps; - } - bool get_normalize_variance() const { - return m_normalize_variance; - } - MVNEpsMode get_eps_mode() const { - return m_eps_mode; - } - -private: - bool m_normalize_variance; - float m_eps; - MVNEpsMode m_eps_mode; -}; +using ov::op::v6::MVN; } // namespace v6 } // namespace op } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::MVNEpsMode& value) : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/negative.hpp b/ngraph/core/include/ngraph/op/negative.hpp index e3c385dd72c..b7c078e01f4 100644 --- a/ngraph/core/include/ngraph/op/negative.hpp +++ b/ngraph/core/include/ngraph/op/negative.hpp @@ -5,26 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/negative.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise negative operation. -class NGRAPH_API Negative : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a negative operation. - Negative() = default; - /// \brief Constructs a negative operation. - /// - /// \param arg Node that produces the input tensor. 
- Negative(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Negative; } // namespace v0 using v0::Negative; } // namespace op diff --git a/ngraph/core/include/ngraph/op/non_max_suppression.hpp b/ngraph/core/include/ngraph/op/non_max_suppression.hpp index ef6e6178139..3b83d9fbc41 100644 --- a/ngraph/core/include/ngraph/op/non_max_suppression.hpp +++ b/ngraph/core/include/ngraph/op/non_max_suppression.hpp @@ -5,413 +5,25 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/non_max_suppression.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise addition operation. -/// -class NGRAPH_API NonMaxSuppression : public Op { -public: - enum class BoxEncodingType { CORNER, CENTER }; - - NGRAPH_RTTI_DECLARATION; - - NonMaxSuppression() = default; - - /// \brief Constructs a NonMaxSuppression operation. 
- /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param max_output_boxes_per_class Node producing maximum number of boxes to be - /// selected per class - /// \param iou_threshold Node producing intersection over union threshold - /// \param score_threshold Node producing minimum score threshold - /// \param box_encoding Specifies the format of boxes data encoding - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true); - - /// \brief Constructs a NonMaxSuppression operation with default values for the last - /// 3 inputs - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box coordinates - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - BoxEncodingType get_box_encoding() const { - return m_box_encoding; - } - void set_box_encoding(const BoxEncodingType box_encoding) { - m_box_encoding = box_encoding; - } - bool get_sort_result_descending() const { - return m_sort_result_descending; - } - void set_sort_result_descending(const bool sort_result_descending) { - m_sort_result_descending = sort_result_descending; - } - -protected: - BoxEncodingType m_box_encoding = 
BoxEncodingType::CORNER; - bool m_sort_result_descending = true; - -private: - int64_t max_boxes_output_from_input() const; -}; +using ov::op::v1::NonMaxSuppression; } // namespace v1 namespace v3 { -/// \brief NonMaxSuppression operation -/// -class NGRAPH_API NonMaxSuppression : public Op { -public: - enum class BoxEncodingType { CORNER, CENTER }; - - static constexpr NodeTypeInfo type_info{"NonMaxSuppression", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - NonMaxSuppression() = default; - - /// \brief Constructs a NonMaxSuppression operation. - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param max_output_boxes_per_class Node producing maximum number of boxes to be - /// selected per class - /// \param iou_threshold Node producing intersection over union threshold - /// \param score_threshold Node producing minimum score threshold - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - /// \brief Constructs a NonMaxSuppression operation with default values for the last - /// 3 inputs - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box coordinates - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output 
tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - BoxEncodingType get_box_encoding() const { - return m_box_encoding; - } - void set_box_encoding(const BoxEncodingType box_encoding) { - m_box_encoding = box_encoding; - } - bool get_sort_result_descending() const { - return m_sort_result_descending; - } - void set_sort_result_descending(const bool sort_result_descending) { - m_sort_result_descending = sort_result_descending; - } - - element::Type get_output_type() const { - return m_output_type; - } - void set_output_type(const element::Type& output_type) { - m_output_type = output_type; - } - using Node::set_output_type; - -protected: - BoxEncodingType m_box_encoding = BoxEncodingType::CORNER; - bool m_sort_result_descending = true; - ngraph::element::Type m_output_type = ngraph::element::i64; - void validate(); - int64_t max_boxes_output_from_input() const; -}; +using ov::op::v3::NonMaxSuppression; } // namespace v3 namespace v4 { -/// \brief NonMaxSuppression operation -/// -class NGRAPH_API NonMaxSuppression : public op::v3::NonMaxSuppression { -public: - static constexpr NodeTypeInfo type_info{"NonMaxSuppression", 4}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - NonMaxSuppression() = default; - - /// \brief Constructs a NonMaxSuppression operation. 
- /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param max_output_boxes_per_class Node producing maximum number of boxes to be - /// selected per class - /// \param iou_threshold Node producing intersection over union threshold - /// \param score_threshold Node producing minimum score threshold - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - /// \brief Constructs a NonMaxSuppression operation with default values for the last - /// 3 inputs - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box coordinates - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v4::NonMaxSuppression; } // namespace v4 namespace v5 { -/// \brief NonMaxSuppression operation -/// -class NGRAPH_API NonMaxSuppression : public Op { -public: - NGRAPH_RTTI_DECLARATION; - 
enum class BoxEncodingType { CORNER, CENTER }; - - NonMaxSuppression() = default; - - /// \brief Constructs a NonMaxSuppression operation with default values in the last - /// 4 inputs. - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - /// \brief Constructs a NonMaxSuppression operation with default values in the last. - /// 3 inputs. - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param max_output_boxes_per_class Node producing maximum number of boxes to be - /// selected per class - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - /// \brief Constructs a NonMaxSuppression operation with default values in the last. - /// 2 inputs. 
- /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param max_output_boxes_per_class Node producing maximum number of boxes to be - /// selected per class - /// \param iou_threshold Node producing intersection over union threshold - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - /// \brief Constructs a NonMaxSuppression operation with default value in the last. - /// input. - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param max_output_boxes_per_class Node producing maximum number of boxes to be - /// selected per class - /// \param iou_threshold Node producing intersection over union threshold - /// \param score_threshold Node producing minimum score threshold - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - /// \brief Constructs a NonMaxSuppression operation. 
- /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param max_output_boxes_per_class Node producing maximum number of boxes to be - /// selected per class - /// \param iou_threshold Node producing intersection over union threshold - /// \param score_threshold Node producing minimum score threshold - /// \param soft_nms_sigma Node specifying the sigma parameter for Soft-NMS - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const Output& soft_nms_sigma, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - BoxEncodingType get_box_encoding() const { - return m_box_encoding; - } - void set_box_encoding(const BoxEncodingType box_encoding) { - m_box_encoding = box_encoding; - } - bool get_sort_result_descending() const { - return m_sort_result_descending; - } - void set_sort_result_descending(const bool sort_result_descending) { - m_sort_result_descending = sort_result_descending; - } - - element::Type get_output_type() const { - return m_output_type; - } - void set_output_type(const element::Type& output_type) { - m_output_type = output_type; - } - using Node::set_output_type; - - int64_t max_boxes_output_from_input() const; - float iou_threshold_from_input() const; - float score_threshold_from_input() const; - float 
soft_nms_sigma_from_input() const; - bool is_soft_nms_sigma_constant_and_default() const; - -protected: - BoxEncodingType m_box_encoding = BoxEncodingType::CORNER; - bool m_sort_result_descending = true; - ngraph::element::Type m_output_type = ngraph::element::i64; - void validate(); -}; +using ov::op::v5::NonMaxSuppression; } // namespace v5 } // namespace op - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v1::NonMaxSuppression::BoxEncodingType& type); - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v3::NonMaxSuppression::BoxEncodingType& type); - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v5::NonMaxSuppression::BoxEncodingType& type); +using ov::operator<<; } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v1::NonMaxSuppression::BoxEncodingType& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v3::NonMaxSuppression::BoxEncodingType& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v5::NonMaxSuppression::BoxEncodingType& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/non_zero.hpp 
b/ngraph/core/include/ngraph/op/non_zero.hpp index 84259e2dd48..a7c5d71cacd 100644 --- a/ngraph/core/include/ngraph/op/non_zero.hpp +++ b/ngraph/core/include/ngraph/op/non_zero.hpp @@ -5,63 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/non_zero.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief NonZero operation returning indices of non-zero elements in the input tensor. -/// -/// \note The indices are returned by-dimension in row-major order. For example -/// the following output contains 3 indices of a 3D input tensor elements: -/// [[0, 0, 2], -/// [0, 1, 1], -/// [0, 1, 2]] -/// The values point to input elements at [0,0,0], [0,1,1] and [2,1,2] -class NGRAPH_API NonZero : public Op { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a NonZero operation. - NonZero() = default; - /// \brief Constructs a NonZero operation. - /// - /// \note The output type is int64. - /// - /// \param arg Node that produces the input tensor. - NonZero(const Output& arg); - /// \brief Constructs a NonZero operation. - /// - /// \param arg Node that produces the input tensor. - /// \param output_type produce indices. Currently, only 'int64' or 'int32' - /// are - /// supported - NonZero(const Output& arg, const std::string& output_type); - /// \brief Constructs a NonZero operation. - /// - /// \param arg Node that produces the input tensor. - /// \param output_type produce indices. 
Currently, only int64 or int32 are - /// supported - NonZero(const Output& arg, const element::Type& output_type); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - element::Type get_output_type() const { - return m_output_type; - } - void set_output_type(element::Type output_type) { - m_output_type = output_type; - } - // Overload collision with method on Node - using Node::set_output_type; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - element::Type m_output_type = element::i64; -}; +using ov::op::v3::NonZero; } // namespace v3 using v3::NonZero; } // namespace op diff --git a/ngraph/core/include/ngraph/op/normalize_l2.hpp b/ngraph/core/include/ngraph/op/normalize_l2.hpp index 3979e953ea1..cad21c0f75a 100644 --- a/ngraph/core/include/ngraph/op/normalize_l2.hpp +++ b/ngraph/core/include/ngraph/op/normalize_l2.hpp @@ -9,47 +9,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/normalize_l2.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Normalization with L2 norm. -/// -class NGRAPH_API NormalizeL2 : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - NormalizeL2() = default; - /// - /// \brief Constructs a NormalizeL2 operation. - /// - /// \param data - Node producing the input tensor - /// \param axes - Node indicating axes along which reduction is - /// calculated - /// \param eps - The epsilon added to L2 norm. 
- /// \param eps_mode - Specifies how eps is combined with L2 value - /// calculated before division - /// - NormalizeL2(const Output& data, const Output& axes, float eps, EpsMode eps_mode); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - float get_eps() const { - return m_eps; - } - EpsMode get_eps_mode() const { - return m_eps_mode; - } - AxisSet get_reduction_axes() const; - -protected: - float m_eps; - EpsMode m_eps_mode; -}; +using ov::op::v0::NormalizeL2; } // namespace v0 -using v0::NormalizeL2; } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/not.hpp b/ngraph/core/include/ngraph/op/not.hpp index e441e39f903..8973a13c42c 100644 --- a/ngraph/core/include/ngraph/op/not.hpp +++ b/ngraph/core/include/ngraph/op/not.hpp @@ -5,28 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/logical_not.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise logical negation operation. -class NGRAPH_API LogicalNot : public Op { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a logical negation operation. - LogicalNot() = default; - /// \brief Constructs a logical negation operation. - /// - /// \param arg Node that produces the input tensor. 
- LogicalNot(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::LogicalNot; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/not_equal.hpp b/ngraph/core/include/ngraph/op/not_equal.hpp index a2d37109abc..4c0aa702b7c 100644 --- a/ngraph/core/include/ngraph/op/not_equal.hpp +++ b/ngraph/core/include/ngraph/op/not_equal.hpp @@ -5,31 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_comparison.hpp" +#include "openvino/op/not_equal.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise not-equal operation. -class NGRAPH_API NotEqual : public util::BinaryElementwiseComparison { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a not-equal operation. - NotEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} - /// \brief Constructs a not-equal operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. 
- /// \param auto_broadcast Auto broadcast specification - NotEqual(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool visit_attributes(AttributeVisitor& visitor) override; -}; +using ov::op::v1::NotEqual; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/one_hot.hpp b/ngraph/core/include/ngraph/op/one_hot.hpp index 5f203cfd057..ef96a185eb3 100644 --- a/ngraph/core/include/ngraph/op/one_hot.hpp +++ b/ngraph/core/include/ngraph/op/one_hot.hpp @@ -5,50 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/one_hot.hpp" namespace ngraph { namespace op { namespace v1 { -class NGRAPH_API OneHot : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a one-hot operation. - OneHot() = default; - /// \brief Constructs a one-hot operation. - /// - /// \param indices Input tensor containing indices. - /// \param depth Specifies number of classes and the size of one-hot dimension. - /// \param on_value Specifies value that the locations in output tensor represented - /// by indices in input take. - /// \param off_value Specifies value that the locations in output tensor not - /// represented - /// by indices in input take. - /// \param axis Axis along which one-hot representation in added. 
- OneHot(const Output& indices, - const Output& depth, - const Output& on_value, - const Output& off_value, - int64_t axis); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - void validate_and_infer_types() override; - - virtual bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; - bool has_evaluate() const override; - - /// \return The index of the one-hot axis. - int64_t get_axis() const { - return m_axis; - } - void set_axis(int64_t axis) { - m_axis = axis; - } - -protected: - int64_t m_axis; -}; +using ov::op::v1::OneHot; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/or.hpp b/ngraph/core/include/ngraph/op/or.hpp index 7f3d5391226..9eccc75caa2 100644 --- a/ngraph/core/include/ngraph/op/or.hpp +++ b/ngraph/core/include/ngraph/op/or.hpp @@ -7,35 +7,12 @@ #include #include "ngraph/op/util/binary_elementwise_logical.hpp" +#include "openvino/op/logical_or.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise logical-or operation. -/// -class NGRAPH_API LogicalOr : public util::BinaryElementwiseLogical { -public: - NGRAPH_RTTI_DECLARATION; - LogicalOr() = default; - /// \brief Constructs a logical-or operation. - /// - /// \param arg0 Node that produces the first input tensor.
- /// `[d0, ...]` - /// \param arg1 Node that produces the second input tensor.
- /// `[d0, ...]` - /// \param auto_broadcast Auto broadcast specification - /// - /// Output `[d0, ...]` - /// - LogicalOr(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::LogicalOr; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/pad.hpp b/ngraph/core/include/ngraph/op/pad.hpp index 4570b0d65f2..c1cbf03c29f 100644 --- a/ngraph/core/include/ngraph/op/pad.hpp +++ b/ngraph/core/include/ngraph/op/pad.hpp @@ -7,73 +7,12 @@ #include "ngraph/coordinate_diff.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/pad.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Generic padding operation. -class NGRAPH_API Pad : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a generic padding operation. - /// - /// \param arg The output producing input tensor to be padded. - /// \param pads_begin The output which specifies the number of padding elements - /// added - /// before position 0 on each axis of arg. - /// \param pads_end The output which specifies the number of padding elements - /// after the last element on each axis. - /// \param arg_pad_value The scalar output with the value used for padding - /// if pad_mode is CONSTANT - /// \param pad_mode The padding mode: CONSTANT, EDGE, REFLECT or SYMMETRIC. - /// CONSTANT initializes new elements with arg_pad_value, EDGE uses the nearest - /// value from arg. REFLECT and SYMMETRIC tile the background by flipping arg - /// at the edge (SYMMETRIC) or on the last row/column/etc. (REFLECT). 
- Pad(const Output& arg, - const Output& pads_begin, - const Output& pads_end, - const Output& arg_pad_value, - PadMode pad_mode); - - /// \brief Constructs a generic padding operation. - /// - /// \param arg The output producing input tensor to be padded. - /// \param pads_begin The output which specifies the number of padding elements - /// added - /// \param pads_end The output which specifies the number of padding elements - /// after the last element on each axis. - /// \param pad_mode The padding mode: CONSTANT, EDGE, REFLECT or SYMMETRIC. - Pad(const Output& arg, const Output& pads_begin, const Output& pads_end, PadMode pad_mode); - - /// \brief Constructs a generic padding operation. - Pad() = default; - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// return The node which specifies the number of padding elements - /// added at the beginning of each axis - CoordinateDiff get_pads_begin() const; - /// return The node which specifies the number of padding elements - /// added at the end of each axis - CoordinateDiff get_pads_end() const; - - /// \return The padding mode. 
- PadMode get_pad_mode() const { - return m_pad_mode; - } - void set_pad_mode(PadMode pad_mode) { - m_pad_mode = pad_mode; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - PadMode m_pad_mode; - bool evaluate_pad(const HostTensorVector& outputs, const HostTensorVector& inputs) const; -}; +using ov::op::v1::Pad; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/power.hpp b/ngraph/core/include/ngraph/op/power.hpp index b56f3b12371..2ce0962868d 100644 --- a/ngraph/core/include/ngraph/op/power.hpp +++ b/ngraph/core/include/ngraph/op/power.hpp @@ -5,45 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/power.hpp" namespace ngraph { namespace op { namespace v1 { -// clang-format off - /// \brief Elementwise exponentiation operation. - /// - /// ## Inputs - /// - /// | | Type | Description | - /// | ------ | --------------------------------- | ------------------------------------------------------ | - /// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. | - /// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. | - /// - /// ## Output - /// - /// | Type | Description | - /// | ---------------------- | -------------------------------------------------------------------------------------------------------------- | - /// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n]^{\texttt{arg1}[i_1,\dots,i_n]}\f$ | -// clang-format on -class NGRAPH_API Power : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - Power() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs an exponentiation operation. - /// - /// \param arg0 Node that produces the first input tensor. 
- /// \param arg1 Node that produces the second input tensor. - /// \param auto_broadcast Auto broadcast specification - Power(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Power; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/prelu.hpp b/ngraph/core/include/ngraph/op/prelu.hpp index cc615c958bf..bf69e1c8438 100644 --- a/ngraph/core/include/ngraph/op/prelu.hpp +++ b/ngraph/core/include/ngraph/op/prelu.hpp @@ -6,33 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/prelu.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Parametrized Relu -/// x < 0 => f(x) = x * slope -/// x >= 0 => f(x) = x -/// -class NGRAPH_API PRelu : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - PRelu(); - /// \brief Constructs a PRelu operation. 
- /// - /// \param data Input tensor - /// \param slope Multipliers for negative values - PRelu(const Output& data, const Output& slope); - - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - void validate_and_infer_types() override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::PRelu; } // namespace v0 using v0::PRelu; } // namespace op diff --git a/ngraph/core/include/ngraph/op/prior_box.hpp b/ngraph/core/include/ngraph/op/prior_box.hpp index 12d5b814ac4..0f0b760ef3c 100644 --- a/ngraph/core/include/ngraph/op/prior_box.hpp +++ b/ngraph/core/include/ngraph/op/prior_box.hpp @@ -5,64 +5,13 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/prior_box.hpp" namespace ngraph { namespace op { -struct PriorBoxAttrs { - // min_size Desired min_size of prior boxes - // max_size Desired max_size of prior boxes - // aspect_ratio Aspect ratios of prior boxes - // clip Clip output to [0,1] - // flip Flip aspect ratios - // step Distance between prior box centers - // offset Box offset relative to top center of image - // variance Values to adjust prior boxes with - // scale_all_sizes Scale all sizes - std::vector min_size; - std::vector max_size; - std::vector aspect_ratio; - std::vector density; - std::vector fixed_ratio; - std::vector fixed_size; - bool clip = false; - bool flip = false; - float step = 0.0f; - float offset = 0.0f; - std::vector variance; - bool scale_all_sizes = true; -}; - +using PriorBoxAttrs = ov::op::v0::PriorBox::Attributes; namespace v0 { -/// \brief Layer which generates prior boxes of specified sizes -/// normalized to input image size -class NGRAPH_API PriorBox : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - PriorBox() = default; - /// \brief Constructs a PriorBox operation - /// - /// \param layer_shape Shape 
of layer for which prior boxes are computed - /// \param image_shape Shape of image to which prior boxes are scaled - /// \param attrs PriorBox attributes - PriorBox(const Output& layer_shape, const Output& image_shape, const PriorBoxAttrs& attrs); - - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - static int64_t number_of_priors(const PriorBoxAttrs& attrs); - - static std::vector normalized_aspect_ratio(const std::vector& aspect_ratio, bool flip); - const PriorBoxAttrs& get_attrs() const { - return m_attrs; - } - bool visit_attributes(AttributeVisitor& visitor) override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - PriorBoxAttrs m_attrs; -}; +using ov::op::v0::PriorBox; } // namespace v0 using v0::PriorBox; } // namespace op diff --git a/ngraph/core/include/ngraph/op/prior_box_clustered.hpp b/ngraph/core/include/ngraph/op/prior_box_clustered.hpp index 77a1d5fca42..b5646c37427 100644 --- a/ngraph/core/include/ngraph/op/prior_box_clustered.hpp +++ b/ngraph/core/include/ngraph/op/prior_box_clustered.hpp @@ -5,57 +5,14 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/prior_box_clustered.hpp" namespace ngraph { namespace op { -struct NGRAPH_API PriorBoxClusteredAttrs { - // widths Desired widths of prior boxes - // heights Desired heights of prior boxes - // clip Clip output to [0,1] - // step_widths Distance between prior box centers - // step_heights Distance between prior box centers - // step Distance between prior box centers (when step_w = step_h) - // offset Box offset relative to top center of image - // variances Values to adjust prior boxes with - std::vector widths; - std::vector heights; - bool clip = true; - float step_widths = 0.0f; - float step_heights = 0.0f; - float step = 0.0f; - float offset = 0.0f; - std::vector variances; -}; +using 
PriorBoxClusteredAttrs = ov::op::v0::PriorBoxClustered::Attributes; namespace v0 { -/// \brief Layer which generates prior boxes of specified sizes -/// normalized to input image size -class NGRAPH_API PriorBoxClustered : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - PriorBoxClustered() = default; - /// \brief Constructs a PriorBoxClustered operation - /// - /// \param layer_shape Shape of layer for which prior boxes are computed - /// \param image_shape Shape of image to which prior boxes are scaled - /// \param attrs PriorBoxClustered attributes - PriorBoxClustered(const Output& layer_shape, - const Output& image_shape, - const PriorBoxClusteredAttrs& attrs); - - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - const PriorBoxClusteredAttrs& get_attrs() const { - return m_attrs; - } - bool visit_attributes(AttributeVisitor& visitor) override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - PriorBoxClusteredAttrs m_attrs; -}; +using ov::op::v0::PriorBoxClustered; } // namespace v0 using v0::PriorBoxClustered; } // namespace op diff --git a/ngraph/core/include/ngraph/op/proposal.hpp b/ngraph/core/include/ngraph/op/proposal.hpp index 0c4e7b023d1..8d83da40671 100644 --- a/ngraph/core/include/ngraph/op/proposal.hpp +++ b/ngraph/core/include/ngraph/op/proposal.hpp @@ -5,93 +5,19 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/proposal.hpp" namespace ngraph { namespace op { -// base_size Anchor sizes -// pre_nms_topn Number of boxes before nms -// post_nms_topn Number of boxes after nms -// nms_thresh Threshold for nms -// feat_stride Feature stride -// min_size Minimum box size -// ratio Ratios for anchor generation -// scale Scales for anchor generation -// clip_before_nms Clip before NMs -// clip_after_nms Clip after NMs -// normalize Normalize boxes to 
[0,1] -// box_size_scale Scale factor for scaling box size -// box_coordinate_scale Scale factor for scaling box coordiate -// framework Calculation frameworkrithm to use -struct ProposalAttrs { - size_t base_size; - size_t pre_nms_topn; - size_t post_nms_topn; - float nms_thresh = 0.0f; - size_t feat_stride = 1; - size_t min_size = 1; - std::vector ratio; - std::vector scale; - bool clip_before_nms = true; - bool clip_after_nms = false; - bool normalize = false; - float box_size_scale = 1.0f; - float box_coordinate_scale = 1.0f; - std::string framework; - bool infer_probs = false; -}; +using ProposalAttrs = ov::op::v0::Proposal::Attributes; namespace v0 { -class NGRAPH_API Proposal : public Op { -public: - NGRAPH_RTTI_DECLARATION; - Proposal() = default; - /// \brief Constructs a Proposal operation - /// - /// \param class_probs Class probability scores - /// \param bbox_deltas Prediction of bounding box deltas - /// \param image_shape Shape of image - /// \param attrs Proposal op attributes - Proposal(const Output& class_probs, - const Output& bbox_deltas, - const Output& image_shape, - const ProposalAttrs& attrs); - - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - const ProposalAttrs& get_attrs() const { - return m_attrs; - } - bool visit_attributes(AttributeVisitor& visitor) override; - -protected: - ProposalAttrs m_attrs; -}; +using ov::op::v0::Proposal; } // namespace v0 namespace v4 { -class NGRAPH_API Proposal : public op::v0::Proposal { -public: - NGRAPH_RTTI_DECLARATION; - Proposal() = default; - /// \brief Constructs a Proposal operation - /// - /// \param class_probs Class probability scores - /// \param bbox_deltas Prediction of bounding box deltas - /// \param image_shape Shape of image - /// \param attrs Proposal op attributes - Proposal(const Output& class_probs, - const Output& bbox_deltas, - const Output& image_shape, - const ProposalAttrs& attrs); - - void 
validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - const ProposalAttrs& get_attrs() const { - return m_attrs; - } -}; +using ov::op::v4::Proposal; } // namespace v4 - using v0::Proposal; } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/psroi_pooling.hpp b/ngraph/core/include/ngraph/op/psroi_pooling.hpp index 02d7815df99..2f4ca9c5681 100644 --- a/ngraph/core/include/ngraph/op/psroi_pooling.hpp +++ b/ngraph/core/include/ngraph/op/psroi_pooling.hpp @@ -5,68 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/psroi_pooling.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API PSROIPooling : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - PSROIPooling() = default; - /// \brief Constructs a PSROIPooling operation - /// - /// \param input Input feature map {N, C, ...} - /// \param coords Coordinates of bounding boxes - /// \param output_dim Output channel number - /// \param group_size Number of groups to encode position-sensitive scores - /// \param spatial_scale Ratio of input feature map over input image size - /// \param spatial_bins_x Numbers of bins to divide the input feature maps over - /// width - /// \param spatial_bins_y Numbers of bins to divide the input feature maps over - /// height - /// \param mode Mode of pooling - Avg or Bilinear - PSROIPooling(const Output& input, - const Output& coords, - const size_t output_dim, - const size_t group_size, - const float spatial_scale, - int spatial_bins_x, - int spatial_bins_y, - const std::string& mode); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - size_t get_output_dim() const { - return m_output_dim; - } - size_t get_group_size() const { - return m_group_size; - } - float get_spatial_scale() const { 
- return m_spatial_scale; - } - int get_spatial_bins_x() const { - return m_spatial_bins_x; - } - int get_spatial_bins_y() const { - return m_spatial_bins_y; - } - const std::string& get_mode() const { - return m_mode; - } - -private: - size_t m_output_dim; - size_t m_group_size; - float m_spatial_scale; - int m_spatial_bins_x; - int m_spatial_bins_y; - std::string m_mode; -}; +using ov::op::v0::PSROIPooling; } // namespace v0 using v0::PSROIPooling; } // namespace op diff --git a/ngraph/core/include/openvino/op/logical_not.hpp b/ngraph/core/include/openvino/op/logical_not.hpp new file mode 100644 index 00000000000..2156e70166a --- /dev/null +++ b/ngraph/core/include/openvino/op/logical_not.hpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise logical negation operation. +class OPENVINO_API LogicalNot : public Op { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a logical negation operation. + LogicalNot() = default; + /// \brief Constructs a logical negation operation. + /// + /// \param arg Node that produces the input tensor. 
+ LogicalNot(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/logical_or.hpp b/ngraph/core/include/openvino/op/logical_or.hpp new file mode 100644 index 00000000000..a95eadd5ef3 --- /dev/null +++ b/ngraph/core/include/openvino/op/logical_or.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/binary_elementwise_logical.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise logical-or operation. +/// +class OPENVINO_API LogicalOr : public util::BinaryElementwiseLogical { +public: + OPENVINO_RTTI_DECLARATION; + LogicalOr() = default; + /// \brief Constructs a logical-or operation. + /// + /// \param arg0 Node that produces the first input tensor.
+ /// `[d0, ...]` + /// \param arg1 Node that produces the second input tensor.
+ /// `[d0, ...]` + /// \param auto_broadcast Auto broadcast specification + /// + /// Output `[d0, ...]` + /// + LogicalOr(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/lstm_sequence.hpp b/ngraph/core/include/openvino/op/lstm_sequence.hpp index a89f425a052..fdb733c5815 100644 --- a/ngraph/core/include/openvino/op/lstm_sequence.hpp +++ b/ngraph/core/include/openvino/op/lstm_sequence.hpp @@ -28,9 +28,9 @@ namespace v0 { /// \sa LSTMCell, RNNCell, GRUCell /// /// -class NGRAPH_API LSTMSequence : public Op { +class OPENVINO_API LSTMSequence : public Op { public: - NGRAPH_RTTI_DECLARATION; + OPENVINO_RTTI_DECLARATION; LSTMSequence(); using direction = RecurrentSequenceDirection; @@ -146,9 +146,9 @@ namespace v5 { /// \sa LSTMCell, RNNCell, GRUCell /// /// -class NGRAPH_API LSTMSequence : public util::RNNCellBase { +class OPENVINO_API LSTMSequence : public util::RNNCellBase { public: - NGRAPH_RTTI_DECLARATION; + OPENVINO_RTTI_DECLARATION; LSTMSequence() = default; using direction = RecurrentSequenceDirection; diff --git a/ngraph/core/include/openvino/op/matmul.hpp b/ngraph/core/include/openvino/op/matmul.hpp new file mode 100644 index 00000000000..c511300ada2 --- /dev/null +++ b/ngraph/core/include/openvino/op/matmul.hpp @@ -0,0 +1,55 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Operator performing Matrix Multiplication. 
+class OPENVINO_API MatMul : public Op { +public: + OPENVINO_RTTI_DECLARATION; + MatMul() = default; + /// \brief Constructs an Matrix Multiplication operation. + /// + /// \param A Matrix A + /// \param B Matrix B + /// \param transpose_a If matrix A should be transposed. + /// \param transpose_b If matrix B should be transposed. + MatMul(const Output& A, + const Output& B, + const bool& transpose_a = false, + const bool& transpose_b = false); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + + bool get_transpose_a() const { + return m_transpose_a; + } + bool get_transpose_b() const { + return m_transpose_b; + } + void set_transpose_a(bool transpose_a) { + m_transpose_a = transpose_a; + } + void set_transpose_b(bool transpose_b) { + m_transpose_b = transpose_b; + } + +private: + bool m_transpose_a; + bool m_transpose_b; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/matrix_nms.hpp b/ngraph/core/include/openvino/op/matrix_nms.hpp new file mode 100644 index 00000000000..e16b290da18 --- /dev/null +++ b/ngraph/core/include/openvino/op/matrix_nms.hpp @@ -0,0 +1,91 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/nms_base.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief MatrixNms operation +/// +class OPENVINO_API MatrixNms : public util::NmsBase { +public: + OPENVINO_RTTI_DECLARATION; + + enum class DecayFunction { GAUSSIAN, LINEAR }; + + /// \brief Structure that specifies attributes of the operation + struct Attributes { + // specifies order of output elements + SortResultType sort_result_type = SortResultType::NONE; + // 
specifies whenever it is necessary to sort selected boxes across batches or + // not + bool sort_result_across_batch = false; + // specifies the output tensor type + ngraph::element::Type output_type = ngraph::element::i64; + // specifies minimum score to consider box for the processing + float score_threshold = 0.0f; + // specifies maximum number of boxes to be selected per class, -1 meaning to + // keep all boxes + int nms_top_k = -1; + // specifies maximum number of boxes to be selected per batch element, -1 + // meaning to keep all boxes + int keep_top_k = -1; + // specifies the background class id, -1 meaning to keep all classes + int background_class = -1; + // specifies decay function used to decay scores + DecayFunction decay_function = DecayFunction::LINEAR; + // specifies gaussian_sigma parameter for gaussian decay_function + float gaussian_sigma = 2.0f; + // specifies threshold to filter out boxes with low confidence score after + // decaying + float post_threshold = 0.0f; + // specifies whether boxes are normalized or not + bool normalized = true; + }; + + MatrixNms(); + + /// \brief Constructs a MatrixNms operation + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param attrs Attributes of the operation + MatrixNms(const Output& boxes, const Output& scores, const Attributes& attrs); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \brief Returns attributes of the operation MatrixNms + const Attributes& get_attrs() const { + return m_attrs; + } + +protected: + Attributes m_attrs; + + void validate() override; +}; +} // namespace v8 +} // namespace op +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v8::MatrixNms::DecayFunction& type); + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + 
AttributeAdapter(op::v8::MatrixNms::DecayFunction& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/max.hpp b/ngraph/core/include/openvino/op/max.hpp new file mode 100644 index 00000000000..527a3af3780 --- /dev/null +++ b/ngraph/core/include/openvino/op/max.hpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v1 { +class OPENVINO_API ReduceMax : public util::ArithmeticReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a summation operation. + ReduceMax() = default; + /// \brief Constructs a summation operation. + /// + /// \param arg The tensor to be summed. + /// \param reduction_axes The axis positions (0-based) to be eliminated. + /// \param keep_dims If set to 1 it holds axes that are used for reduction. 
+ ReduceMax(const Output& arg, const Output& reduction_axes, bool keep_dims = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/max_pool.hpp b/ngraph/core/include/openvino/op/max_pool.hpp new file mode 100644 index 00000000000..d64660961d2 --- /dev/null +++ b/ngraph/core/include/openvino/op/max_pool.hpp @@ -0,0 +1,133 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/max_pool_base.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Batched max pooling operation. +class OPENVINO_API MaxPool : public op::util::MaxPoolBase { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a batched max pooling operation. + MaxPool() = default; + + /// \brief Constructs a batched max pooling operation. + /// + /// \param arg The node producing the input data batch tensor. + /// \param strides The strides. + /// \param pads_begin The beginning of padding shape. + /// \param pads_end The end of padding shape. + /// \param kernel The kernel shape. + /// \param rounding_type Whether to use ceiling or floor rounding type while + /// computing output shape. + /// \param auto_pad The pad type for automatically computing padding sizes. 
+ MaxPool(const Output& arg, + const Strides& strides, + const ngraph::Shape& pads_begin, + const ngraph::Shape& pads_end, + const ngraph::Shape& kernel, + const op::RoundingType rounding_type = op::RoundingType::FLOOR, + const PadType auto_pad = op::PadType::EXPLICIT); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The default value for MaxPool. + OPENVINO_SUPPRESS_DEPRECATED_START + std::shared_ptr get_default_value() const override; + OPENVINO_SUPPRESS_DEPRECATED_END + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool evaluate_maxpool(const HostTensorVector& outputs, const HostTensorVector& inputs) const; +}; +} // namespace v1 + +namespace v8 { +/// \brief MaxPooling operation with values and indices calculated as individual outputs +class OPENVINO_API MaxPool : public op::util::MaxPoolBase { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs an empty MaxPool operation. + MaxPool() = default; + + /// \brief Constructs a parametrized MaxPool operation. + /// + /// \param arg Output of a node producing the feature tensor to be pooled. + /// \param strides The strides of the pooling filter. + /// \param dilations The dilations of the pooling filter. + /// \param pads_begin Paddings at the beginning of each spatial axis. + /// \param pads_end Paddings at the end of each spatial axis. + /// \param kernel The kernel shape. + /// \param rounding_type Whether to use ceiling or floor rounding type while + /// computing the output shape. + /// \param auto_pad The pad type for automatic calculation of the padding sizes. + /// \param index_element_type The data type used by the second output tensor + /// containing the selected indices. 
+ /// \param axis Indicates a dimension in the input data shape which should be used + /// as a starting point for calculation of the upper bound of allowed + /// values of the indices output. + MaxPool(const Output& arg, + const Strides& strides, + const Strides& dilations, + const ngraph::Shape& pads_begin, + const ngraph::Shape& pads_end, + const ngraph::Shape& kernel, + const op::RoundingType rounding_type = op::RoundingType::FLOOR, + const PadType auto_pad = op::PadType::EXPLICIT, + const element::Type index_element_type = element::i64, + const int64_t axis = 0); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The pooling filter's dilations. + const Strides& get_dilations() const noexcept { + return m_dilations; + } + void set_dilations(const Strides& dilations) { + m_dilations = dilations; + } + + /// \return The data type of the second output tensor (indices). + element::Type get_index_element_type() const noexcept { + return m_index_element_type; + } + void set_index_element_type(const element::Type index_element_type) { + m_index_element_type = index_element_type; + } + + // \return The 'axis' attribute value. 
+ int64_t get_axis() const { + return m_axis; + } + void set_axis(const int64_t axis) { + m_axis = axis; + } + + bool has_evaluate() const override; + bool evaluate(const HostTensorVector&, const HostTensorVector&) const override; + +private: + Strides m_dilations; + element::Type m_index_element_type{element::i64}; + int64_t m_axis{0}; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/maximum.hpp b/ngraph/core/include/openvino/op/maximum.hpp new file mode 100644 index 00000000000..5218eacdce4 --- /dev/null +++ b/ngraph/core/include/openvino/op/maximum.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise maximum operation. +class OPENVINO_API Maximum : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a maximum operation. + Maximum() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + + /// \brief Constructs a maximum operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. 
+ /// \param auto_broadcast Auto broadcast specification + Maximum(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/minimum.hpp b/ngraph/core/include/openvino/op/minimum.hpp new file mode 100644 index 00000000000..7219af61379 --- /dev/null +++ b/ngraph/core/include/openvino/op/minimum.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise minimum operation. +class OPENVINO_API Minimum : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a minimum operation. + Minimum() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + + /// \brief Constructs a minimum operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. 
+ /// \param auto_broadcast Auto broadcast specification + Minimum(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/mish.hpp b/ngraph/core/include/openvino/op/mish.hpp new file mode 100644 index 00000000000..37e1bcab112 --- /dev/null +++ b/ngraph/core/include/openvino/op/mish.hpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v4 { +/// \brief A Self Regularized Non-Monotonic Neural Activation Function +/// f(x) = x * tanh(log(exp(x) + 1.)) +/// +class OPENVINO_API Mish : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Mish() = default; + /// \brief Constructs an Mish operation. 
+ /// + /// \param data Input tensor + Mish(const Output& arg); + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v4 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/mod.hpp b/ngraph/core/include/openvino/op/mod.hpp new file mode 100644 index 00000000000..679a8d591ba --- /dev/null +++ b/ngraph/core/include/openvino/op/mod.hpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Mod returns an element-wise division reminder with two given tensors applying +/// multi-directional broadcast rules. +class OPENVINO_API Mod : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a Mod node. 
+ Mod() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + /// + /// \param A - Dividend tensor + /// \param B - Divisor tensor + /// \param auto_broadcast Auto broadcast specification + Mod(const Output& A, + const Output& B, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/multiclass_nms.hpp b/ngraph/core/include/openvino/op/multiclass_nms.hpp new file mode 100644 index 00000000000..55d4a10e661 --- /dev/null +++ b/ngraph/core/include/openvino/op/multiclass_nms.hpp @@ -0,0 +1,69 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/nms_base.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief MulticlassNms operation +/// +class OPENVINO_API MulticlassNms : public util::NmsBase { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Structure that specifies attributes of the operation + struct Attributes { + // specifies order of output elements + SortResultType sort_result_type = SortResultType::NONE; + // specifies whenever it is necessary to sort selected boxes across batches or + // not + bool sort_result_across_batch = false; + // specifies the output tensor type + ngraph::element::Type output_type = ngraph::element::i64; + // specifies intersection over union threshold + float iou_threshold = 0.0f; + // specifies minimum score to consider box for the processing + float score_threshold = 0.0f; + // specifies maximum number of boxes to be selected per class, -1 meaning to + // keep all boxes + int nms_top_k = -1; + // specifies maximum number of boxes to be selected per batch element, -1 + // meaning to keep all boxes + int keep_top_k = -1; + // specifies the background class id, -1 meaning to keep all classes + int 
background_class = -1; + // specifies eta parameter for adpative NMS, in close range [0, 1.0] + float nms_eta = 1.0f; + // specifies whether boxes are normalized or not + bool normalized = true; + }; + + MulticlassNms(); + + /// \brief Constructs a MulticlassNms operation + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param attrs Attributes of the operation + MulticlassNms(const Output& boxes, const Output& scores, const Attributes& attrs); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \brief Returns attributes of the operation MulticlassNms + const Attributes& get_attrs() const { + return m_attrs; + } + +protected: + Attributes m_attrs; + void validate() override; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/multiply.hpp b/ngraph/core/include/openvino/op/multiply.hpp new file mode 100644 index 00000000000..57fb9cdfceb --- /dev/null +++ b/ngraph/core/include/openvino/op/multiply.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise multiplication operation. +class OPENVINO_API Multiply : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a multiplication operation. + Multiply() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + + /// \brief Constructs a multiplication operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. 
+ /// \param auto_broadcast Auto broadcast specification + Multiply(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/mvn.hpp b/ngraph/core/include/openvino/op/mvn.hpp new file mode 100644 index 00000000000..8e315f947a9 --- /dev/null +++ b/ngraph/core/include/openvino/op/mvn.hpp @@ -0,0 +1,144 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { + +namespace v0 { +/// \brief Operator performing Mean Variance Normalization +/// +class OPENVINO_API MVN : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + MVN() = default; + /// \brief Constructs an MVN operation. + /// + /// \param data Input tensor with data + /// \param normalize_variance flag that denotes whether to perform variance + /// normalization. + /// \param across_channels flag that denotes if mean values are shared across + /// channels. + /// \param eps the number to be added to the variance to avoid division by zero when + /// normalizing the value + /// + MVN(const Output& data, bool across_channels = true, bool normalize_variance = true, double eps = 1e-9); + + /// \brief Constructs an MVN operation. + /// + /// \param data Input tensor with data + /// \param reduction_axes A list of axes, along which to reduce. + /// \param normalize_variance flag that denotes whether to perform variance + /// normalization. 
+ /// \param eps the number to be added to the variance to avoid division by zero when + /// normalizing the value + /// + MVN(const Output& data, AxisSet reduction_axes, bool normalize_variance = true, double eps = 1e-9); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + double get_eps() const { + return m_eps; + } + bool get_across_channels() const { + return m_across_channels; + } + bool get_normalize_variance() const { + return m_normalize_variance; + } + AxisSet get_reduction_axes() const { + return m_reduction_axes; + } + void set_reduction_axes(AxisSet axes) { + m_reduction_axes = std::move(axes); + } + +private: + double m_eps; + bool m_across_channels; + bool m_normalize_variance; + AxisSet m_reduction_axes; +}; +} // namespace v0 + +/// \brief Specifies how eps is applied in MVN +enum class MVNEpsMode { + // Apply eps inside sqrt + INSIDE_SQRT, + // Apply eps outside sqrt + OUTSIDE_SQRT +}; + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const MVNEpsMode& type); + +namespace v6 { +/// \brief Operator performing Mean Variance Normalization +/// +class OPENVINO_API MVN : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + MVN() = default; + /// \brief Constructs an MVN operation. + /// + /// \param data Input tensor with data + /// \param reduction_axes A list of axes, along which to reduce. + /// \param normalize_variance flag that denotes whether to perform variance + /// normalization. 
+ /// \param eps the number to be added to the variance to avoid division by zero when + /// normalizing the value + /// \param eps_mode the mode of applying epsilon + /// + MVN(const Output& data, + const Output& reduction_axes, + bool normalize_variance, + float eps, + MVNEpsMode eps_mode); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + float get_eps() const { + return m_eps; + } + bool get_normalize_variance() const { + return m_normalize_variance; + } + MVNEpsMode get_eps_mode() const { + return m_eps_mode; + } + +private: + bool m_normalize_variance; + float m_eps; + MVNEpsMode m_eps_mode; +}; +} // namespace v6 +} // namespace op + +template <> +class OPENVINO_API AttributeAdapter : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::MVNEpsMode& value) : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/negative.hpp b/ngraph/core/include/openvino/op/negative.hpp new file mode 100644 index 00000000000..2aa2448df5b --- /dev/null +++ b/ngraph/core/include/openvino/op/negative.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise negative operation. +class OPENVINO_API Negative : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a negative operation. + Negative() = default; + /// \brief Constructs a negative operation. + /// + /// \param arg Node that produces the input tensor. 
+ Negative(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/non_max_suppression.hpp b/ngraph/core/include/openvino/op/non_max_suppression.hpp new file mode 100644 index 00000000000..b5111909005 --- /dev/null +++ b/ngraph/core/include/openvino/op/non_max_suppression.hpp @@ -0,0 +1,408 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise addition operation. +/// +class OPENVINO_API NonMaxSuppression : public Op { +public: + enum class BoxEncodingType { CORNER, CENTER }; + + OPENVINO_RTTI_DECLARATION; + + NonMaxSuppression() = default; + + /// \brief Constructs a NonMaxSuppression operation. 
+ /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param max_output_boxes_per_class Node producing maximum number of boxes to be + /// selected per class + /// \param iou_threshold Node producing intersection over union threshold + /// \param score_threshold Node producing minimum score threshold + /// \param box_encoding Specifies the format of boxes data encoding + NonMaxSuppression(const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true); + + /// \brief Constructs a NonMaxSuppression operation with default values for the last + /// 3 inputs + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box coordinates + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + BoxEncodingType get_box_encoding() const { + return m_box_encoding; + } + void set_box_encoding(const BoxEncodingType box_encoding) { + m_box_encoding = box_encoding; + } + bool get_sort_result_descending() const { + return m_sort_result_descending; + } + void set_sort_result_descending(const bool sort_result_descending) { + m_sort_result_descending = sort_result_descending; + } + +protected: + BoxEncodingType m_box_encoding = 
BoxEncodingType::CORNER; + bool m_sort_result_descending = true; + +private: + int64_t max_boxes_output_from_input() const; +}; +} // namespace v1 + +namespace v3 { +/// \brief NonMaxSuppression operation +/// +class OPENVINO_API NonMaxSuppression : public Op { +public: + enum class BoxEncodingType { CORNER, CENTER }; + + OPENVINO_RTTI_DECLARATION; + NonMaxSuppression() = default; + + /// \brief Constructs a NonMaxSuppression operation. + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param max_output_boxes_per_class Node producing maximum number of boxes to be + /// selected per class + /// \param iou_threshold Node producing intersection over union threshold + /// \param score_threshold Node producing minimum score threshold + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + /// \brief Constructs a NonMaxSuppression operation with default values for the last + /// 3 inputs + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box coordinates + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool 
sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + BoxEncodingType get_box_encoding() const { + return m_box_encoding; + } + void set_box_encoding(const BoxEncodingType box_encoding) { + m_box_encoding = box_encoding; + } + bool get_sort_result_descending() const { + return m_sort_result_descending; + } + void set_sort_result_descending(const bool sort_result_descending) { + m_sort_result_descending = sort_result_descending; + } + + element::Type get_output_type() const { + return m_output_type; + } + void set_output_type(const element::Type& output_type) { + m_output_type = output_type; + } + using Node::set_output_type; + +protected: + BoxEncodingType m_box_encoding = BoxEncodingType::CORNER; + bool m_sort_result_descending = true; + ngraph::element::Type m_output_type = ngraph::element::i64; + void validate(); + int64_t max_boxes_output_from_input() const; +}; +} // namespace v3 + +namespace v4 { +/// \brief NonMaxSuppression operation +/// +class OPENVINO_API NonMaxSuppression : public op::v3::NonMaxSuppression { +public: + OPENVINO_RTTI_DECLARATION; + NonMaxSuppression() = default; + + /// \brief Constructs a NonMaxSuppression operation. 
+ /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param max_output_boxes_per_class Node producing maximum number of boxes to be + /// selected per class + /// \param iou_threshold Node producing intersection over union threshold + /// \param score_threshold Node producing minimum score threshold + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + /// \brief Constructs a NonMaxSuppression operation with default values for the last + /// 3 inputs + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box coordinates + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v4 + +namespace v5 { +/// \brief NonMaxSuppression operation +/// +class OPENVINO_API NonMaxSuppression : public Op { +public: + OPENVINO_RTTI_DECLARATION; + enum class BoxEncodingType { 
CORNER, CENTER }; + + NonMaxSuppression() = default; + + /// \brief Constructs a NonMaxSuppression operation with default values in the last + /// 4 inputs. + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + /// \brief Constructs a NonMaxSuppression operation with default values in the last. + /// 3 inputs. + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param max_output_boxes_per_class Node producing maximum number of boxes to be + /// selected per class + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + /// \brief Constructs a NonMaxSuppression operation with default values in the last. + /// 2 inputs. 
+ /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param max_output_boxes_per_class Node producing maximum number of boxes to be + /// selected per class + /// \param iou_threshold Node producing intersection over union threshold + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + /// \brief Constructs a NonMaxSuppression operation with default value in the last. + /// input. + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param max_output_boxes_per_class Node producing maximum number of boxes to be + /// selected per class + /// \param iou_threshold Node producing intersection over union threshold + /// \param score_threshold Node producing minimum score threshold + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + /// \brief Constructs a NonMaxSuppression operation. 
+ /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param max_output_boxes_per_class Node producing maximum number of boxes to be + /// selected per class + /// \param iou_threshold Node producing intersection over union threshold + /// \param score_threshold Node producing minimum score threshold + /// \param soft_nms_sigma Node specifying the sigma parameter for Soft-NMS + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const Output& soft_nms_sigma, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + BoxEncodingType get_box_encoding() const { + return m_box_encoding; + } + void set_box_encoding(const BoxEncodingType box_encoding) { + m_box_encoding = box_encoding; + } + bool get_sort_result_descending() const { + return m_sort_result_descending; + } + void set_sort_result_descending(const bool sort_result_descending) { + m_sort_result_descending = sort_result_descending; + } + + element::Type get_output_type() const { + return m_output_type; + } + void set_output_type(const element::Type& output_type) { + m_output_type = output_type; + } + using Node::set_output_type; + + int64_t max_boxes_output_from_input() const; + float iou_threshold_from_input() const; + float score_threshold_from_input() const; + float 
soft_nms_sigma_from_input() const; + bool is_soft_nms_sigma_constant_and_default() const; + +protected: + BoxEncodingType m_box_encoding = BoxEncodingType::CORNER; + bool m_sort_result_descending = true; + ngraph::element::Type m_output_type = ngraph::element::i64; + void validate(); +}; +} // namespace v5 +} // namespace op + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v1::NonMaxSuppression::BoxEncodingType& type); + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v3::NonMaxSuppression::BoxEncodingType& type); + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v5::NonMaxSuppression::BoxEncodingType& type); + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v1::NonMaxSuppression::BoxEncodingType& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v3::NonMaxSuppression::BoxEncodingType& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v5::NonMaxSuppression::BoxEncodingType& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/non_zero.hpp b/ngraph/core/include/openvino/op/non_zero.hpp new file mode 100644 index 00000000000..13fc7728243 --- /dev/null +++ 
b/ngraph/core/include/openvino/op/non_zero.hpp @@ -0,0 +1,67 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief NonZero operation returning indices of non-zero elements in the input tensor. +/// +/// \note The indices are returned by-dimension in row-major order. For example +/// the following output contains 3 indices of a 3D input tensor elements: +/// [[0, 0, 2], +/// [0, 1, 1], +/// [0, 1, 2]] +/// The values point to input elements at [0,0,0], [0,1,1] and [2,1,2] +class OPENVINO_API NonZero : public Op { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a NonZero operation. + NonZero() = default; + /// \brief Constructs a NonZero operation. + /// + /// \note The output type is int64. + /// + /// \param arg Node that produces the input tensor. + NonZero(const Output& arg); + /// \brief Constructs a NonZero operation. + /// + /// \param arg Node that produces the input tensor. + /// \param output_type produce indices. Currently, only 'int64' or 'int32' + /// are + /// supported + NonZero(const Output& arg, const std::string& output_type); + /// \brief Constructs a NonZero operation. + /// + /// \param arg Node that produces the input tensor. + /// \param output_type produce indices. 
Currently, only int64 or int32 are + /// supported + NonZero(const Output& arg, const element::Type& output_type); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + element::Type get_output_type() const { + return m_output_type; + } + void set_output_type(element::Type output_type) { + m_output_type = output_type; + } + // Overload collision with method on Node + using Node::set_output_type; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + element::Type m_output_type = element::i64; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/normalize_l2.hpp b/ngraph/core/include/openvino/op/normalize_l2.hpp new file mode 100644 index 00000000000..dee0ef24eeb --- /dev/null +++ b/ngraph/core/include/openvino/op/normalize_l2.hpp @@ -0,0 +1,53 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Normalization with L2 norm. +/// +class OPENVINO_API NormalizeL2 : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + NormalizeL2() = default; + /// + /// \brief Constructs a NormalizeL2 operation. + /// + /// \param data - Node producing the input tensor + /// \param axes - Node indicating axes along which reduction is + /// calculated + /// \param eps - The epsilon added to L2 norm. 
+ /// \param eps_mode - Specifies how eps is combined with L2 value + /// calculated before division + /// + NormalizeL2(const Output& data, const Output& axes, float eps, EpsMode eps_mode); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + float get_eps() const { + return m_eps; + } + EpsMode get_eps_mode() const { + return m_eps_mode; + } + AxisSet get_reduction_axes() const; + +protected: + float m_eps; + EpsMode m_eps_mode; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/not_equal.hpp b/ngraph/core/include/openvino/op/not_equal.hpp new file mode 100644 index 00000000000..b31944e9939 --- /dev/null +++ b/ngraph/core/include/openvino/op/not_equal.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_comparison.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise not-equal operation. +class OPENVINO_API NotEqual : public util::BinaryElementwiseComparison { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a not-equal operation. + NotEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} + /// \brief Constructs a not-equal operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. 
+ /// \param auto_broadcast Auto broadcast specification + NotEqual(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool visit_attributes(AttributeVisitor& visitor) override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/one_hot.hpp b/ngraph/core/include/openvino/op/one_hot.hpp new file mode 100644 index 00000000000..6680ff1d099 --- /dev/null +++ b/ngraph/core/include/openvino/op/one_hot.hpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +class OPENVINO_API OneHot : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a one-hot operation. + OneHot() = default; + /// \brief Constructs a one-hot operation. + /// + /// \param indices Input tensor containing indices. + /// \param depth Specifies number of classes and the size of one-hot dimension. + /// \param on_value Specifies value that the locations in output tensor represented + /// by indices in input take. + /// \param off_value Specifies value that the locations in output tensor not + /// represented + /// by indices in input take. + /// \param axis Axis along which one-hot representation in added. 
+ OneHot(const Output& indices, + const Output& depth, + const Output& on_value, + const Output& off_value, + int64_t axis); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + void validate_and_infer_types() override; + + bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; + bool has_evaluate() const override; + + /// \return The index of the one-hot axis. + int64_t get_axis() const { + return m_axis; + } + void set_axis(int64_t axis) { + m_axis = axis; + } + +protected: + int64_t m_axis; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/pad.hpp b/ngraph/core/include/openvino/op/pad.hpp new file mode 100644 index 00000000000..2f42122e408 --- /dev/null +++ b/ngraph/core/include/openvino/op/pad.hpp @@ -0,0 +1,79 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Generic padding operation. +class OPENVINO_API Pad : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a generic padding operation. + /// + /// \param arg The output producing input tensor to be padded. + /// \param pads_begin The output which specifies the number of padding elements + /// added + /// before position 0 on each axis of arg. + /// \param pads_end The output which specifies the number of padding elements + /// after the last element on each axis. + /// \param arg_pad_value The scalar output with the value used for padding + /// if pad_mode is CONSTANT + /// \param pad_mode The padding mode: CONSTANT, EDGE, REFLECT or SYMMETRIC. 
+ /// CONSTANT initializes new elements with arg_pad_value, EDGE uses the nearest + /// value from arg. REFLECT and SYMMETRIC tile the background by flipping arg + /// at the edge (SYMMETRIC) or on the last row/column/etc. (REFLECT). + Pad(const Output& arg, + const Output& pads_begin, + const Output& pads_end, + const Output& arg_pad_value, + PadMode pad_mode); + + /// \brief Constructs a generic padding operation. + /// + /// \param arg The output producing input tensor to be padded. + /// \param pads_begin The output which specifies the number of padding elements + /// added + /// \param pads_end The output which specifies the number of padding elements + /// after the last element on each axis. + /// \param pad_mode The padding mode: CONSTANT, EDGE, REFLECT or SYMMETRIC. + Pad(const Output& arg, const Output& pads_begin, const Output& pads_end, PadMode pad_mode); + + /// \brief Constructs a generic padding operation. + Pad() = default; + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// return The node which specifies the number of padding elements + /// added at the beginning of each axis + CoordinateDiff get_pads_begin() const; + /// return The node which specifies the number of padding elements + /// added at the end of each axis + CoordinateDiff get_pads_end() const; + + /// \return The padding mode. 
+ PadMode get_pad_mode() const { + return m_pad_mode; + } + void set_pad_mode(PadMode pad_mode) { + m_pad_mode = pad_mode; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + PadMode m_pad_mode; + bool evaluate_pad(const HostTensorVector& outputs, const HostTensorVector& inputs) const; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/power.hpp b/ngraph/core/include/openvino/op/power.hpp new file mode 100644 index 00000000000..0c074760eed --- /dev/null +++ b/ngraph/core/include/openvino/op/power.hpp @@ -0,0 +1,49 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +// clang-format off +/// \brief Elementwise exponentiation operation. +/// +/// ## Inputs +/// +/// | | Type | Description | +/// | ------ | --------------------------------- | ------------------------------------------------------ | +/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. | +/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. | +/// +/// ## Output +/// +/// | Type | Description | +/// | ---------------------- | -------------------------------------------------------------------------------------------------------------- | +/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n]^{\texttt{arg1}[i_1,\dots,i_n]}\f$ | +// clang-format on +class OPENVINO_API Power : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + Power() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + + /// \brief Constructs an exponentiation operation. 
+ /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. + /// \param auto_broadcast Auto broadcast specification + Power(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/prelu.hpp b/ngraph/core/include/openvino/op/prelu.hpp new file mode 100644 index 00000000000..53394810103 --- /dev/null +++ b/ngraph/core/include/openvino/op/prelu.hpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Parametrized Relu +/// x < 0 => f(x) = x * slope +/// x >= 0 => f(x) = x +/// +class OPENVINO_API PRelu : public Op { +public: + OPENVINO_RTTI_DECLARATION; + PRelu(); + /// \brief Constructs a PRelu operation. 
+ /// + /// \param data Input tensor + /// \param slope Multipliers for negative values + PRelu(const Output& data, const Output& slope); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + void validate_and_infer_types() override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/prior_box.hpp b/ngraph/core/include/openvino/op/prior_box.hpp new file mode 100644 index 00000000000..2acf9150b1c --- /dev/null +++ b/ngraph/core/include/openvino/op/prior_box.hpp @@ -0,0 +1,67 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Layer which generates prior boxes of specified sizes +/// normalized to input image size +class OPENVINO_API PriorBox : public Op { +public: + OPENVINO_RTTI_DECLARATION; + struct Attributes { + // min_size Desired min_size of prior boxes + // max_size Desired max_size of prior boxes + // aspect_ratio Aspect ratios of prior boxes + // clip Clip output to [0,1] + // flip Flip aspect ratios + // step Distance between prior box centers + // offset Box offset relative to top center of image + // variance Values to adjust prior boxes with + // scale_all_sizes Scale all sizes + std::vector min_size; + std::vector max_size; + std::vector aspect_ratio; + std::vector density; + std::vector fixed_ratio; + std::vector fixed_size; + bool clip = false; + bool flip = false; + float step = 0.0f; + float offset = 0.0f; + std::vector variance; + bool scale_all_sizes = true; + }; + + PriorBox() = default; + /// \brief Constructs a PriorBox operation + /// + /// \param layer_shape Shape of layer for which prior boxes are 
computed + /// \param image_shape Shape of image to which prior boxes are scaled + /// \param attrs PriorBox attributes + PriorBox(const Output& layer_shape, const Output& image_shape, const Attributes& attrs); + + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + static int64_t number_of_priors(const Attributes& attrs); + + static std::vector normalized_aspect_ratio(const std::vector& aspect_ratio, bool flip); + const Attributes& get_attrs() const { + return m_attrs; + } + bool visit_attributes(AttributeVisitor& visitor) override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + Attributes m_attrs; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/prior_box_clustered.hpp b/ngraph/core/include/openvino/op/prior_box_clustered.hpp new file mode 100644 index 00000000000..9c4342def22 --- /dev/null +++ b/ngraph/core/include/openvino/op/prior_box_clustered.hpp @@ -0,0 +1,59 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { + +namespace v0 { +/// \brief Layer which generates prior boxes of specified sizes +/// normalized to input image size +class OPENVINO_API PriorBoxClustered : public Op { +public: + OPENVINO_RTTI_DECLARATION; + struct Attributes { + // widths Desired widths of prior boxes + // heights Desired heights of prior boxes + // clip Clip output to [0,1] + // step_widths Distance between prior box centers + // step_heights Distance between prior box centers + // step Distance between prior box centers (when step_w = step_h) + // offset Box offset relative to top center of image + // variances Values to adjust prior boxes with + std::vector widths; + std::vector heights; + bool clip = true; + float 
step_widths = 0.0f; + float step_heights = 0.0f; + float step = 0.0f; + float offset = 0.0f; + std::vector variances; + }; + + PriorBoxClustered() = default; + /// \brief Constructs a PriorBoxClustered operation + /// + /// \param layer_shape Shape of layer for which prior boxes are computed + /// \param image_shape Shape of image to which prior boxes are scaled + /// \param attrs PriorBoxClustered attributes + PriorBoxClustered(const Output& layer_shape, const Output& image_shape, const Attributes& attrs); + + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + const Attributes& get_attrs() const { + return m_attrs; + } + bool visit_attributes(AttributeVisitor& visitor) override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + Attributes m_attrs; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/proposal.hpp b/ngraph/core/include/openvino/op/proposal.hpp new file mode 100644 index 00000000000..0daf18d5744 --- /dev/null +++ b/ngraph/core/include/openvino/op/proposal.hpp @@ -0,0 +1,95 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { + +namespace v0 { +class OPENVINO_API Proposal : public Op { +public: + OPENVINO_RTTI_DECLARATION; + // base_size Anchor sizes + // pre_nms_topn Number of boxes before nms + // post_nms_topn Number of boxes after nms + // nms_thresh Threshold for nms + // feat_stride Feature stride + // min_size Minimum box size + // ratio Ratios for anchor generation + // scale Scales for anchor generation + // clip_before_nms Clip before NMs + // clip_after_nms Clip after NMs + // normalize Normalize boxes to [0,1] + // box_size_scale Scale factor for scaling box size + // box_coordinate_scale 
Scale factor for scaling box coordiate + // framework Calculation frameworkrithm to use + struct Attributes { + size_t base_size; + size_t pre_nms_topn; + size_t post_nms_topn; + float nms_thresh = 0.0f; + size_t feat_stride = 1; + size_t min_size = 1; + std::vector ratio; + std::vector scale; + bool clip_before_nms = true; + bool clip_after_nms = false; + bool normalize = false; + float box_size_scale = 1.0f; + float box_coordinate_scale = 1.0f; + std::string framework; + bool infer_probs = false; + }; + Proposal() = default; + /// \brief Constructs a Proposal operation + /// + /// \param class_probs Class probability scores + /// \param bbox_deltas Prediction of bounding box deltas + /// \param image_shape Shape of image + /// \param attrs Proposal op attributes + Proposal(const Output& class_probs, + const Output& bbox_deltas, + const Output& image_shape, + const Attributes& attrs); + + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + const Attributes& get_attrs() const { + return m_attrs; + } + bool visit_attributes(AttributeVisitor& visitor) override; + +protected: + Attributes m_attrs; +}; +} // namespace v0 + +namespace v4 { +class OPENVINO_API Proposal : public op::v0::Proposal { +public: + OPENVINO_RTTI_DECLARATION; + Proposal() = default; + /// \brief Constructs a Proposal operation + /// + /// \param class_probs Class probability scores + /// \param bbox_deltas Prediction of bounding box deltas + /// \param image_shape Shape of image + /// \param attrs Proposal op attributes + Proposal(const Output& class_probs, + const Output& bbox_deltas, + const Output& image_shape, + const Attributes& attrs); + + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + const Attributes& get_attrs() const { + return m_attrs; + } +}; +} // namespace v4 +} // namespace op +} // namespace ov diff --git 
a/ngraph/core/include/openvino/op/psroi_pooling.hpp b/ngraph/core/include/openvino/op/psroi_pooling.hpp new file mode 100644 index 00000000000..6574a1dbb44 --- /dev/null +++ b/ngraph/core/include/openvino/op/psroi_pooling.hpp @@ -0,0 +1,72 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API PSROIPooling : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + PSROIPooling() = default; + /// \brief Constructs a PSROIPooling operation + /// + /// \param input Input feature map {N, C, ...} + /// \param coords Coordinates of bounding boxes + /// \param output_dim Output channel number + /// \param group_size Number of groups to encode position-sensitive scores + /// \param spatial_scale Ratio of input feature map over input image size + /// \param spatial_bins_x Numbers of bins to divide the input feature maps over + /// width + /// \param spatial_bins_y Numbers of bins to divide the input feature maps over + /// height + /// \param mode Mode of pooling - Avg or Bilinear + PSROIPooling(const Output& input, + const Output& coords, + const size_t output_dim, + const size_t group_size, + const float spatial_scale, + int spatial_bins_x, + int spatial_bins_y, + const std::string& mode); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + size_t get_output_dim() const { + return m_output_dim; + } + size_t get_group_size() const { + return m_group_size; + } + float get_spatial_scale() const { + return m_spatial_scale; + } + int get_spatial_bins_x() const { + return m_spatial_bins_x; + } + int get_spatial_bins_y() const { + return m_spatial_bins_y; + } + const std::string& get_mode() const { + return m_mode; + } + +private: + size_t m_output_dim; + size_t m_group_size; + float 
m_spatial_scale; + int m_spatial_bins_x; + int m_spatial_bins_y; + std::string m_mode; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reduce_min.hpp b/ngraph/core/include/openvino/op/reduce_min.hpp new file mode 100644 index 00000000000..8509979fc37 --- /dev/null +++ b/ngraph/core/include/openvino/op/reduce_min.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v1 { +class OPENVINO_API ReduceMin : public util::ArithmeticReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a summation operation. + ReduceMin() = default; + /// \brief Constructs a summation operation. + /// + /// \param arg The tensor to be summed. + /// \param reduction_axes The axis positions (0-based) to be eliminated. + /// \param keep_dims If set to 1 it holds axes that are used for reduction. + ReduceMin(const Output& arg, const Output& reduction_axes, bool keep_dims = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& outputs) const override; + bool evaluate_upper(const HostTensorVector& outputs) const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/tensor_iterator.hpp b/ngraph/core/include/openvino/op/tensor_iterator.hpp index 9d8a2e5a838..232ef4c13db 100644 --- a/ngraph/core/include/openvino/op/tensor_iterator.hpp +++ b/ngraph/core/include/openvino/op/tensor_iterator.hpp @@ -14,9 +14,9 @@ namespace ov { namespace op { namespace v0 { /// \brief Iterate a body over tensors, accumulating into tensors. 
-class NGRAPH_API TensorIterator : public op::util::SubGraphOp { +class OPENVINO_API TensorIterator : public op::util::SubGraphOp { public: - NGRAPH_RTTI_DECLARATION; + OPENVINO_RTTI_DECLARATION; bool visit_attributes(AttributeVisitor& visitor) override; diff --git a/ngraph/core/src/op/not.cpp b/ngraph/core/src/op/logical_not.cpp similarity index 98% rename from ngraph/core/src/op/not.cpp rename to ngraph/core/src/op/logical_not.cpp index 26dcc0d7345..c96a4834ffb 100644 --- a/ngraph/core/src/op/not.cpp +++ b/ngraph/core/src/op/logical_not.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/not.hpp" - #include "itt.hpp" +#include "ngraph/op/not.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/elementwise_args.hpp" #include "ngraph/runtime/host_tensor.hpp" @@ -14,7 +13,7 @@ using namespace ngraph; using namespace std; -NGRAPH_RTTI_DEFINITION(op::v1::LogicalNot, "LogicalNot", 1); +OPENVINO_RTTI_DEFINITION(op::v1::LogicalNot, "LogicalNot", 1); op::v1::LogicalNot::LogicalNot(const Output& arg) : Op({arg}) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/or.cpp b/ngraph/core/src/op/logical_or.cpp similarity index 96% rename from ngraph/core/src/op/or.cpp rename to ngraph/core/src/op/logical_or.cpp index 5c1518e5c28..c19eb3d8ab5 100644 --- a/ngraph/core/src/op/or.cpp +++ b/ngraph/core/src/op/logical_or.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/or.hpp" - #include "itt.hpp" +#include "ngraph/op/or.hpp" #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/or.hpp" #include "ngraph/validation_util.hpp" @@ -12,7 +11,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::LogicalOr, "LogicalOr", 1, util::BinaryElementwiseLogical); +OPENVINO_RTTI_DEFINITION(op::v1::LogicalOr, "LogicalOr", 1, util::BinaryElementwiseLogical); op::v1::LogicalOr::LogicalOr(const Output& arg0, const Output& arg1, diff --git 
a/ngraph/core/src/op/matmul.cpp b/ngraph/core/src/op/matmul.cpp index 4175fafcd57..432d5c73099 100644 --- a/ngraph/core/src/op/matmul.cpp +++ b/ngraph/core/src/op/matmul.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::MatMul, "MatMul", 0); +OPENVINO_RTTI_DEFINITION(op::v0::MatMul, "MatMul", 0); op::MatMul::MatMul(const Output& A, const Output& B, const bool& transpose_a, const bool& transpose_b) : Op(OutputVector{A, B}), diff --git a/ngraph/core/src/op/matrix_nms.cpp b/ngraph/core/src/op/matrix_nms.cpp index a4f0879a1db..5df137505ae 100644 --- a/ngraph/core/src/op/matrix_nms.cpp +++ b/ngraph/core/src/op/matrix_nms.cpp @@ -18,7 +18,7 @@ using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v8::MatrixNms, "MatrixNms", 8, op::util::NmsBase); +OPENVINO_RTTI_DEFINITION(op::v8::MatrixNms, "MatrixNms", 8, op::util::NmsBase); op::v8::MatrixNms::MatrixNms() : NmsBase(m_attrs.output_type, m_attrs.nms_top_k, m_attrs.keep_top_k) {} @@ -64,7 +64,7 @@ bool ngraph::op::v8::MatrixNms::visit_attributes(AttributeVisitor& visitor) { return true; } -std::ostream& ngraph::operator<<(std::ostream& s, const op::v8::MatrixNms::DecayFunction& type) { +std::ostream& ov::operator<<(std::ostream& s, const op::v8::MatrixNms::DecayFunction& type) { return s << as_string(type); } diff --git a/ngraph/core/src/op/max.cpp b/ngraph/core/src/op/max.cpp index fe929584dc1..66def527bf9 100644 --- a/ngraph/core/src/op/max.cpp +++ b/ngraph/core/src/op/max.cpp @@ -41,7 +41,7 @@ bool evaluate_max(const HostTensorPtr& arg, const HostTensorPtr& out, const Axis } } // namespace maxop -NGRAPH_RTTI_DEFINITION(op::v1::ReduceMax, "ReduceMax", 1, util::ArithmeticReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v1::ReduceMax, "ReduceMax", 1, util::ArithmeticReductionKeepDims); op::v1::ReduceMax::ReduceMax(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { diff --git 
a/ngraph/core/src/op/max_pool.cpp b/ngraph/core/src/op/max_pool.cpp index 6398db7fc48..675d0bc8e6f 100644 --- a/ngraph/core/src/op/max_pool.cpp +++ b/ngraph/core/src/op/max_pool.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::MaxPool, "MaxPool", 1, op::util::MaxPoolBase); +OPENVINO_RTTI_DEFINITION(op::v1::MaxPool, "MaxPool", 1, op::util::MaxPoolBase); op::v1::MaxPool::MaxPool(const Output& arg, const Strides& strides, @@ -62,7 +62,7 @@ shared_ptr op::v1::MaxPool::clone_with_new_inputs(const OutputVector& new_ } shared_ptr op::v1::MaxPool::get_default_value() const { - return op::Constant::create(get_element_type(), get_shape(), {0}); + return op::v0::Constant::create(get_element_type(), get_shape(), {0}); } namespace maxpool { @@ -250,7 +250,7 @@ bool evaluate_maxpool(const HostTensorPtr& data, } } // namespace maxpool_v8 -NGRAPH_RTTI_DEFINITION(op::v8::MaxPool, "MaxPool", 8, op::util::MaxPoolBase); +OPENVINO_RTTI_DEFINITION(op::v8::MaxPool, "MaxPool", 8, op::util::MaxPoolBase); op::v8::MaxPool::MaxPool(const Output& arg, const Strides& strides, diff --git a/ngraph/core/src/op/maximum.cpp b/ngraph/core/src/op/maximum.cpp index 20788312bd6..b88b7751527 100644 --- a/ngraph/core/src/op/maximum.cpp +++ b/ngraph/core/src/op/maximum.cpp @@ -57,7 +57,7 @@ bool evaluate_maximum(const HostTensorPtr& arg0, // ------------------------------------ v1 ------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::Maximum, "Maximum", 1, op::util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v1::Maximum, "Maximum", 1, op::util::BinaryElementwiseArithmetic); op::v1::Maximum::Maximum(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/minimum.cpp b/ngraph/core/src/op/minimum.cpp index 5e233eabd9a..4a6f6d77c2c 100644 --- a/ngraph/core/src/op/minimum.cpp +++ b/ngraph/core/src/op/minimum.cpp @@ 
-55,7 +55,7 @@ bool evaluate_minimum(const HostTensorPtr& arg0, // ------------------------------ v1 ------------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::Minimum, "Minimum", 1, op::util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v1::Minimum, "Minimum", 1, op::util::BinaryElementwiseArithmetic); op::v1::Minimum::Minimum(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/mish.cpp b/ngraph/core/src/op/mish.cpp index 970b78d9105..f09f23e81ea 100644 --- a/ngraph/core/src/op/mish.cpp +++ b/ngraph/core/src/op/mish.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v4::Mish, "Mish", 4); +OPENVINO_RTTI_DEFINITION(op::v4::Mish, "Mish", 4); op::v4::Mish::Mish(const Output& arg) : Op({arg}) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/mod.cpp b/ngraph/core/src/op/mod.cpp index d1a1e54b96f..d64b8cd9eca 100644 --- a/ngraph/core/src/op/mod.cpp +++ b/ngraph/core/src/op/mod.cpp @@ -11,7 +11,7 @@ using namespace ngraph; // ------------------------------ v1 ------------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::Mod, "Mod", 1, op::util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v1::Mod, "Mod", 1, op::util::BinaryElementwiseArithmetic); op::v1::Mod::Mod(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { @@ -22,4 +22,4 @@ shared_ptr op::v1::Mod::clone_with_new_inputs(const OutputVector& new_args NGRAPH_OP_SCOPE(v1_Mod_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); -} \ No newline at end of file +} diff --git a/ngraph/core/src/op/multiclass_nms.cpp b/ngraph/core/src/op/multiclass_nms.cpp index 20c6174bd29..84078ad25e5 100644 --- 
a/ngraph/core/src/op/multiclass_nms.cpp +++ b/ngraph/core/src/op/multiclass_nms.cpp @@ -18,7 +18,7 @@ using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v8::MulticlassNms, "MulticlassNms", 8, op::util::NmsBase); +OPENVINO_RTTI_DEFINITION(op::v8::MulticlassNms, "MulticlassNms", 8, op::util::NmsBase); op::v8::MulticlassNms::MulticlassNms() : NmsBase(m_attrs.output_type, m_attrs.nms_top_k, m_attrs.keep_top_k) {} diff --git a/ngraph/core/src/op/multiply.cpp b/ngraph/core/src/op/multiply.cpp index 5af58e62052..7fc75453ce9 100644 --- a/ngraph/core/src/op/multiply.cpp +++ b/ngraph/core/src/op/multiply.cpp @@ -50,7 +50,7 @@ bool evaluate_multiply(const HostTensorPtr& arg0, // ------------------------------------ v1 ------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::Multiply, "Multiply", 1, util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v1::Multiply, "Multiply", 1, util::BinaryElementwiseArithmetic); op::v1::Multiply::Multiply(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/mvn.cpp b/ngraph/core/src/op/mvn.cpp index 0a6ae9a78d7..10cd0e74ab4 100644 --- a/ngraph/core/src/op/mvn.cpp +++ b/ngraph/core/src/op/mvn.cpp @@ -13,7 +13,7 @@ using namespace ngraph; // ------------------------------ V0 ------------------------------ -NGRAPH_RTTI_DEFINITION(op::v0::MVN, "MVN", 0); +OPENVINO_RTTI_DEFINITION(op::v0::MVN, "MVN", 0); op::v0::MVN::MVN(const Output& data, bool across_channels, bool normalize_variance, double eps) : Op({data}), @@ -83,11 +83,11 @@ constexpr DiscreteTypeInfo AttributeAdapter::type_info; } // namespace ov -std::ostream& op::operator<<(std::ostream& s, const ngraph::op::MVNEpsMode& type) { +std::ostream& ov::op::operator<<(std::ostream& s, const ngraph::op::MVNEpsMode& type) { return s << as_string(type); } -NGRAPH_RTTI_DEFINITION(op::v6::MVN, "MVN", 6); +OPENVINO_RTTI_DEFINITION(op::v6::MVN, "MVN", 6); 
op::v6::MVN::MVN(const Output& data, const Output& reduction_axes, diff --git a/ngraph/core/src/op/negative.cpp b/ngraph/core/src/op/negative.cpp index 10454d7a3a1..2f9360226b8 100644 --- a/ngraph/core/src/op/negative.cpp +++ b/ngraph/core/src/op/negative.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Negative, "Negative", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Negative, "Negative", 0, util::UnaryElementwiseArithmetic); op::Negative::Negative(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/non_max_suppression.cpp b/ngraph/core/src/op/non_max_suppression.cpp index 9cdf6615ce3..143b89d60ef 100644 --- a/ngraph/core/src/op/non_max_suppression.cpp +++ b/ngraph/core/src/op/non_max_suppression.cpp @@ -20,7 +20,7 @@ using namespace ngraph; // ------------------------------ V1 ------------------------------ -NGRAPH_RTTI_DEFINITION(op::v1::NonMaxSuppression, "NonMaxSuppression", 1); +OPENVINO_RTTI_DEFINITION(op::v1::NonMaxSuppression, "NonMaxSuppression", 1); op::v1::NonMaxSuppression::NonMaxSuppression(const Output& boxes, const Output& scores, @@ -41,9 +41,9 @@ op::v1::NonMaxSuppression::NonMaxSuppression(const Output& boxes, const bool sort_result_descending) : Op({boxes, scores, - op::Constant::create(element::i64, Shape{}, {0}), - op::Constant::create(element::f32, Shape{}, {.0f}), - op::Constant::create(element::f32, Shape{}, {.0f})}), + op::v0::Constant::create(element::i64, Shape{}, {0}), + op::v0::Constant::create(element::f32, Shape{}, {.0f}), + op::v0::Constant::create(element::f32, Shape{}, {.0f})}), m_box_encoding{box_encoding}, m_sort_result_descending{sort_result_descending} { constructor_validate_and_infer_types(); @@ -54,11 +54,12 @@ std::shared_ptr op::v1::NonMaxSuppression::clone_with_new_inputs(const Out check_new_args_count(this, new_args); NODE_VALIDATION_CHECK(this, new_args.size() >= 2 && 
new_args.size() <= 5, "Number of inputs must be 2, 3, 4 or 5"); - const auto& arg2 = new_args.size() > 2 ? new_args.at(2) : ngraph::op::Constant::create(element::i32, Shape{}, {0}); + const auto& arg2 = + new_args.size() > 2 ? new_args.at(2) : ngraph::op::v0::Constant::create(element::i32, Shape{}, {0}); const auto& arg3 = - new_args.size() > 3 ? new_args.at(3) : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 3 ? new_args.at(3) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); const auto& arg4 = - new_args.size() > 4 ? new_args.at(4) : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 4 ? new_args.at(4) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); return std::make_shared(new_args.at(0), new_args.at(1), @@ -109,7 +110,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types() { if (inputs().size() >= 3) { const auto max_boxes_ps = get_input_partial_shape(2); NODE_VALIDATION_CHECK(this, - max_boxes_ps.is_dynamic() || is_scalar(max_boxes_ps.to_shape()), + max_boxes_ps.is_dynamic() || ngraph::is_scalar(max_boxes_ps.to_shape()), "Expected a scalar for the 'max_output_boxes_per_class' input. Got: ", max_boxes_ps); } @@ -117,7 +118,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types() { if (inputs().size() >= 4) { const auto iou_threshold_ps = get_input_partial_shape(3); NODE_VALIDATION_CHECK(this, - iou_threshold_ps.is_dynamic() || is_scalar(iou_threshold_ps.to_shape()), + iou_threshold_ps.is_dynamic() || ngraph::is_scalar(iou_threshold_ps.to_shape()), "Expected a scalar for the 'iou_threshold' input. 
Got: ", iou_threshold_ps); } @@ -125,7 +126,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types() { if (inputs().size() >= 5) { const auto score_threshold_ps = get_input_partial_shape(4); NODE_VALIDATION_CHECK(this, - score_threshold_ps.is_dynamic() || is_scalar(score_threshold_ps.to_shape()), + score_threshold_ps.is_dynamic() || ngraph::is_scalar(score_threshold_ps.to_shape()), "Expected a scalar for the 'score_threshold' input. Got: ", score_threshold_ps); } @@ -189,13 +190,12 @@ constexpr DiscreteTypeInfo AttributeAdapter& boxes, const Output& scores, @@ -219,9 +219,9 @@ op::v3::NonMaxSuppression::NonMaxSuppression(const Output& boxes, const element::Type& output_type) : Op({boxes, scores, - op::Constant::create(element::i64, Shape{}, {0}), - op::Constant::create(element::f32, Shape{}, {.0f}), - op::Constant::create(element::f32, Shape{}, {.0f})}), + op::v0::Constant::create(element::i64, Shape{}, {0}), + op::v0::Constant::create(element::f32, Shape{}, {.0f}), + op::v0::Constant::create(element::f32, Shape{}, {.0f})}), m_box_encoding{box_encoding}, m_sort_result_descending{sort_result_descending}, m_output_type{output_type} { @@ -233,11 +233,12 @@ std::shared_ptr op::v3::NonMaxSuppression::clone_with_new_inputs(const Out check_new_args_count(this, new_args); NODE_VALIDATION_CHECK(this, new_args.size() >= 2 && new_args.size() <= 5, "Number of inputs must be 2, 3, 4 or 5"); - const auto& arg2 = new_args.size() > 2 ? new_args.at(2) : ngraph::op::Constant::create(element::i32, Shape{}, {0}); + const auto& arg2 = + new_args.size() > 2 ? new_args.at(2) : ngraph::op::v0::Constant::create(element::i32, Shape{}, {0}); const auto& arg3 = - new_args.size() > 3 ? new_args.at(3) : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 3 ? new_args.at(3) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); const auto& arg4 = - new_args.size() > 4 ? 
new_args.at(4) : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 4 ? new_args.at(4) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); return std::make_shared(new_args.at(0), new_args.at(1), @@ -282,7 +283,7 @@ void op::v3::NonMaxSuppression::validate() { if (inputs().size() >= 3) { const auto max_boxes_ps = get_input_partial_shape(2); NODE_VALIDATION_CHECK(this, - max_boxes_ps.is_dynamic() || is_scalar(max_boxes_ps.to_shape()), + max_boxes_ps.is_dynamic() || ngraph::is_scalar(max_boxes_ps.to_shape()), "Expected a scalar for the 'max_output_boxes_per_class' input. Got: ", max_boxes_ps); } @@ -290,7 +291,7 @@ void op::v3::NonMaxSuppression::validate() { if (inputs().size() >= 4) { const auto iou_threshold_ps = get_input_partial_shape(3); NODE_VALIDATION_CHECK(this, - iou_threshold_ps.is_dynamic() || is_scalar(iou_threshold_ps.to_shape()), + iou_threshold_ps.is_dynamic() || ngraph::is_scalar(iou_threshold_ps.to_shape()), "Expected a scalar for the 'iou_threshold' input. Got: ", iou_threshold_ps); } @@ -298,7 +299,7 @@ void op::v3::NonMaxSuppression::validate() { if (inputs().size() >= 5) { const auto score_threshold_ps = get_input_partial_shape(4); NODE_VALIDATION_CHECK(this, - score_threshold_ps.is_dynamic() || is_scalar(score_threshold_ps.to_shape()), + score_threshold_ps.is_dynamic() || ngraph::is_scalar(score_threshold_ps.to_shape()), "Expected a scalar for the 'score_threshold' input. 
Got: ", score_threshold_ps); } @@ -377,13 +378,13 @@ constexpr DiscreteTypeInfo AttributeAdapter& boxes, const Output& scores, @@ -411,9 +412,9 @@ op::v4::NonMaxSuppression::NonMaxSuppression(const Output& boxes, const element::Type& output_type) : op::v3::NonMaxSuppression(boxes, scores, - op::Constant::create(element::i64, Shape{}, {0}), - op::Constant::create(element::f32, Shape{}, {.0f}), - op::Constant::create(element::f32, Shape{}, {.0f}), + op::v0::Constant::create(element::i64, Shape{}, {0}), + op::v0::Constant::create(element::f32, Shape{}, {.0f}), + op::v0::Constant::create(element::f32, Shape{}, {.0f}), box_encoding, sort_result_descending, output_type) { @@ -425,11 +426,12 @@ std::shared_ptr op::v4::NonMaxSuppression::clone_with_new_inputs(const Out check_new_args_count(this, new_args); NODE_VALIDATION_CHECK(this, new_args.size() >= 2 && new_args.size() <= 5, "Number of inputs must be 2, 3, 4 or 5"); - const auto& arg2 = new_args.size() > 2 ? new_args.at(2) : ngraph::op::Constant::create(element::i32, Shape{}, {0}); + const auto& arg2 = + new_args.size() > 2 ? new_args.at(2) : ngraph::op::v0::Constant::create(element::i32, Shape{}, {0}); const auto& arg3 = - new_args.size() > 3 ? new_args.at(3) : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 3 ? new_args.at(3) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); const auto& arg4 = - new_args.size() > 4 ? new_args.at(4) : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 4 ? 
new_args.at(4) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); return std::make_shared(new_args.at(0), new_args.at(1), @@ -469,7 +471,7 @@ void op::v4::NonMaxSuppression::validate_and_infer_types() { // ------------------------------ V5 ------------------------------ -NGRAPH_RTTI_DEFINITION(op::v5::NonMaxSuppression, "NonMaxSuppression", 5); +OPENVINO_RTTI_DEFINITION(op::v5::NonMaxSuppression, "NonMaxSuppression", 5); op::v5::NonMaxSuppression::NonMaxSuppression(const Output& boxes, const Output& scores, @@ -776,7 +778,7 @@ bool op::v5::NonMaxSuppression::is_soft_nms_sigma_constant_and_default() const { if (inputs().size() < 6 || !ngraph::op::is_constant(soft_nms_sigma_node)) { return false; } - const auto soft_nms_sigma_input = ov::as_type_ptr(soft_nms_sigma_node); + const auto soft_nms_sigma_input = ov::as_type_ptr(soft_nms_sigma_node); return soft_nms_sigma_input->cast_vector().at(0) == 0.0f; } @@ -817,7 +819,7 @@ void op::v5::NonMaxSuppression::validate_and_infer_types() { set_output_type(2, m_output_type, Shape{1}); } -std::ostream& ngraph::operator<<(std::ostream& s, const op::v5::NonMaxSuppression::BoxEncodingType& type) { +std::ostream& ov::operator<<(std::ostream& s, const op::v5::NonMaxSuppression::BoxEncodingType& type) { return s << as_string(type); } @@ -833,5 +835,4 @@ EnumNames::get() { } constexpr DiscreteTypeInfo AttributeAdapter::type_info; - } // namespace ov diff --git a/ngraph/core/src/op/non_zero.cpp b/ngraph/core/src/op/non_zero.cpp index dce70de461d..e16d134a556 100644 --- a/ngraph/core/src/op/non_zero.cpp +++ b/ngraph/core/src/op/non_zero.cpp @@ -16,7 +16,7 @@ using namespace ngraph; using namespace std; -NGRAPH_RTTI_DEFINITION(op::v3::NonZero, "NonZero", 3); +OPENVINO_RTTI_DEFINITION(op::v3::NonZero, "NonZero", 3); op::v3::NonZero::NonZero(const Output& arg) : Op({arg}) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/normalize_l2.cpp b/ngraph/core/src/op/normalize_l2.cpp index 
ed36f34fdbc..3ea426b27ca 100644 --- a/ngraph/core/src/op/normalize_l2.cpp +++ b/ngraph/core/src/op/normalize_l2.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::NormalizeL2, "NormalizeL2", 0); +OPENVINO_RTTI_DEFINITION(op::v0::NormalizeL2, "NormalizeL2", 0); op::v0::NormalizeL2::NormalizeL2(const Output& data, const Output& axes, float eps, EpsMode eps_mode) : Op({data, axes}), diff --git a/ngraph/core/src/op/not_equal.cpp b/ngraph/core/src/op/not_equal.cpp index 56f969b214c..a97751381a6 100644 --- a/ngraph/core/src/op/not_equal.cpp +++ b/ngraph/core/src/op/not_equal.cpp @@ -50,7 +50,7 @@ bool evaluate_not_equal(const HostTensorPtr& arg0, // ----------------------------------- v1 -------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::NotEqual, "NotEqual", 1, op::util::BinaryElementwiseComparison); +OPENVINO_RTTI_DEFINITION(op::v1::NotEqual, "NotEqual", 1, op::util::BinaryElementwiseComparison); op::v1::NotEqual::NotEqual(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseComparison(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/one_hot.cpp b/ngraph/core/src/op/one_hot.cpp index 2ca430fca06..7e3443ec0fe 100644 --- a/ngraph/core/src/op/one_hot.cpp +++ b/ngraph/core/src/op/one_hot.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::OneHot, "OneHot", 1); +OPENVINO_RTTI_DEFINITION(op::v1::OneHot, "OneHot", 1); op::v1::OneHot::OneHot(const Output& indices, const Output& depth, @@ -50,15 +50,15 @@ void op::v1::OneHot::validate_and_infer_types() { const auto& off_value_shape = get_input_partial_shape(3); NODE_VALIDATION_CHECK(this, - depth_shape.is_dynamic() || is_scalar(depth_shape.to_shape()), + depth_shape.is_dynamic() || ngraph::is_scalar(depth_shape.to_shape()), "depth input must be scalar."); NODE_VALIDATION_CHECK(this, - on_value_shape.is_dynamic() || is_scalar(on_value_shape.to_shape()), + 
on_value_shape.is_dynamic() || ngraph::is_scalar(on_value_shape.to_shape()), "on_value input must be scalar."); NODE_VALIDATION_CHECK(this, - off_value_shape.is_dynamic() || is_scalar(off_value_shape.to_shape()), + off_value_shape.is_dynamic() || ngraph::is_scalar(off_value_shape.to_shape()), "off_value input must be scalar."); PartialShape result_shape{PartialShape::dynamic()}; @@ -77,7 +77,7 @@ void op::v1::OneHot::validate_and_infer_types() { ")."); NODE_VALIDATION_CHECK(this, - is_scalar(depth->get_shape()), + ngraph::is_scalar(depth->get_shape()), "A scalar input should be provided as 'depth' to OneHot", " (got ", depth->get_shape(), diff --git a/ngraph/core/src/op/pad.cpp b/ngraph/core/src/op/pad.cpp index d9353bd0324..4e2126524c3 100644 --- a/ngraph/core/src/op/pad.cpp +++ b/ngraph/core/src/op/pad.cpp @@ -17,7 +17,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::Pad, "Pad", 1); +OPENVINO_RTTI_DEFINITION(op::v1::Pad, "Pad", 1); op::v1::Pad::Pad(const Output& arg, const Output& pads_begin, @@ -33,7 +33,7 @@ op::v1::Pad::Pad(const Output& arg, const Output& pads_begin, const Output& pads_end, PadMode pad_mode) - : Op({arg, pads_begin, pads_end, op::Constant::create(arg.get_element_type(), Shape{}, {0})}), + : Op({arg, pads_begin, pads_end, op::v0::Constant::create(arg.get_element_type(), Shape{}, {0})}), m_pad_mode{pad_mode} { constructor_validate_and_infer_types(); } diff --git a/ngraph/core/src/op/power.cpp b/ngraph/core/src/op/power.cpp index edfb815c104..ff3bf65aebd 100644 --- a/ngraph/core/src/op/power.cpp +++ b/ngraph/core/src/op/power.cpp @@ -53,7 +53,7 @@ bool evaluate_power(const HostTensorPtr& arg0, // ------------------------------ v1 ------------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::Power, "Power", 1, op::util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v1::Power, "Power", 1, op::util::BinaryElementwiseArithmetic); op::v1::Power::Power(const Output& arg0, const Output& arg1, 
const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/prelu.cpp b/ngraph/core/src/op/prelu.cpp index afd178ca4be..20a69dd4954 100644 --- a/ngraph/core/src/op/prelu.cpp +++ b/ngraph/core/src/op/prelu.cpp @@ -10,13 +10,12 @@ #include "itt.hpp" using namespace std; -using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::PRelu, "PRelu", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::PRelu, "PRelu", 0); -op::PRelu::PRelu() : Op() {} +ov::op::v0::PRelu::PRelu() : Op() {} -op::PRelu::PRelu(const Output& data, const Output& slope) : Op({data, slope}) { +ov::op::v0::PRelu::PRelu(const Output& data, const Output& slope) : Op({data, slope}) { constructor_validate_and_infer_types(); } @@ -29,26 +28,28 @@ void ngraph::op::v0::PRelu::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } -shared_ptr op::PRelu::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr ov::op::v0::PRelu::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_PRelu_clone_with_new_inputs); if (new_args.size() != 2) { - throw ngraph_error("Incorrect number of new arguments"); + throw ov::Exception("Incorrect number of new arguments"); } return make_shared(new_args.at(0), new_args.at(1)); } namespace prelu { -template -bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& slope, const HostTensorPtr& out) { - runtime::reference::prelu(arg->get_data_ptr(), - slope->get_data_ptr(), - out->get_data_ptr(), - arg->get_shape(), - slope->get_shape()); +template +bool evaluate(const ngraph::HostTensorPtr& arg, const ngraph::HostTensorPtr& slope, const ngraph::HostTensorPtr& out) { + ngraph::runtime::reference::prelu(arg->get_data_ptr(), + slope->get_data_ptr(), + out->get_data_ptr(), + arg->get_shape(), + slope->get_shape()); return true; } -bool evaluate_prelu(const HostTensorPtr& arg, const HostTensorPtr& slope, const HostTensorPtr& out) { 
+bool evaluate_prelu(const ngraph::HostTensorPtr& arg, + const ngraph::HostTensorPtr& slope, + const ngraph::HostTensorPtr& out) { bool rc = true; switch (arg->get_element_type()) { NGRAPH_TYPE_CASE(evaluate_prelu, i8, arg, slope, out); @@ -63,13 +64,13 @@ bool evaluate_prelu(const HostTensorPtr& arg, const HostTensorPtr& slope, const } } // namespace prelu -bool op::PRelu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::v0::PRelu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v0_PRelu_evaluate); - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2)); + NGRAPH_CHECK(ngraph::validate_host_tensor_vector(outputs, 1) && ngraph::validate_host_tensor_vector(inputs, 2)); return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]); } -bool op::PRelu::has_evaluate() const { +bool ov::op::v0::PRelu::has_evaluate() const { NGRAPH_OP_SCOPE(v0_PRelu_has_evaluate); switch (get_input_element_type(0)) { case ngraph::element::i8: diff --git a/ngraph/core/src/op/prior_box.cpp b/ngraph/core/src/op/prior_box.cpp index d622176de1a..a782cf08bdd 100644 --- a/ngraph/core/src/op/prior_box.cpp +++ b/ngraph/core/src/op/prior_box.cpp @@ -14,9 +14,11 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::PriorBox, "PriorBox", 0); +OPENVINO_RTTI_DEFINITION(op::v0::PriorBox, "PriorBox", 0); -op::PriorBox::PriorBox(const Output& layer_shape, const Output& image_shape, const PriorBoxAttrs& attrs) +op::PriorBox::PriorBox(const Output& layer_shape, + const Output& image_shape, + const PriorBox::Attributes& attrs) : Op({layer_shape, image_shape}), m_attrs(attrs) { constructor_validate_and_infer_types(); @@ -70,7 +72,7 @@ shared_ptr op::PriorBox::clone_with_new_inputs(const OutputVector& new_arg return make_shared(new_args.at(0), new_args.at(1), m_attrs); } -int64_t op::PriorBox::number_of_priors(const PriorBoxAttrs& attrs) { 
+int64_t op::PriorBox::number_of_priors(const PriorBox::Attributes& attrs) { // Starting with 0 number of prior and then various conditions on attributes will contribute // real number of prior boxes as PriorBox is a fat thing with several modes of // operation that will be checked in order in the next statements. @@ -129,7 +131,10 @@ bool op::PriorBox::visit_attributes(AttributeVisitor& visitor) { namespace prior_box { template -bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& arg1, const HostTensorPtr& out, op::PriorBoxAttrs attrs) { +bool evaluate(const HostTensorPtr& arg0, + const HostTensorPtr& arg1, + const HostTensorPtr& out, + op::PriorBox::Attributes attrs) { runtime::reference::prior_box(arg0->get_data_ptr(), arg1->get_data_ptr(), out->get_data_ptr(), @@ -141,7 +146,7 @@ bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& arg1, const HostTe bool evaluate_prior_box(const HostTensorPtr& arg0, const HostTensorPtr& arg1, const HostTensorPtr& out, - const op::PriorBoxAttrs& attrs) { + const op::PriorBox::Attributes& attrs) { bool rc = true; switch (arg0->get_element_type()) { NGRAPH_TYPE_CASE(evaluate_prior_box, i8, arg0, arg1, out, attrs); diff --git a/ngraph/core/src/op/prior_box_clustered.cpp b/ngraph/core/src/op/prior_box_clustered.cpp index cf1ce9fa23c..6cb8cc8e02c 100644 --- a/ngraph/core/src/op/prior_box_clustered.cpp +++ b/ngraph/core/src/op/prior_box_clustered.cpp @@ -14,17 +14,17 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::PriorBoxClustered, "PriorBoxClustered", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::PriorBoxClustered, "PriorBoxClustered", 0); -op::PriorBoxClustered::PriorBoxClustered(const Output& layer_shape, - const Output& image_shape, - const PriorBoxClusteredAttrs& attrs) +ov::op::v0::PriorBoxClustered::PriorBoxClustered(const Output& layer_shape, + const Output& image_shape, + const Attributes& attrs) : Op({layer_shape, image_shape}), m_attrs(attrs) { 
constructor_validate_and_infer_types(); } -void op::PriorBoxClustered::validate_and_infer_types() { +void ov::op::v0::PriorBoxClustered::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_PriorBoxClustered_validate_and_infer_types); // shape node should have integer data type. For now we only allow i64 auto layer_shape_et = get_input_element_type(0); @@ -72,13 +72,13 @@ void op::PriorBoxClustered::validate_and_infer_types() { } } -shared_ptr op::PriorBoxClustered::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr ov::op::v0::PriorBoxClustered::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_PriorBoxClustered_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_attrs); } -bool op::PriorBoxClustered::visit_attributes(AttributeVisitor& visitor) { +bool ov::op::v0::PriorBoxClustered::visit_attributes(AttributeVisitor& visitor) { NGRAPH_OP_SCOPE(v0_PriorBoxClustered_visit_attributes); float step = 0; float step_w_tmp = m_attrs.step_widths; @@ -110,7 +110,7 @@ template bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& arg1, const HostTensorPtr& out, - op::PriorBoxClusteredAttrs attrs) { + ov::op::v0::PriorBoxClustered::Attributes attrs) { runtime::reference::prior_box_clustered(arg0->get_data_ptr(), arg1->get_data_ptr(), out->get_data_ptr(), @@ -122,7 +122,7 @@ bool evaluate(const HostTensorPtr& arg0, bool evaluate_prior_box(const HostTensorPtr& arg0, const HostTensorPtr& arg1, const HostTensorPtr& out, - const op::PriorBoxClusteredAttrs& attrs) { + const ov::op::v0::PriorBoxClustered::Attributes& attrs) { bool rc = true; switch (arg0->get_element_type()) { NGRAPH_TYPE_CASE(evaluate_prior_box, i8, arg0, arg1, out, attrs); diff --git a/ngraph/core/src/op/proposal.cpp b/ngraph/core/src/op/proposal.cpp index 54813c5418f..fe4c95257c5 100644 --- a/ngraph/core/src/op/proposal.cpp +++ b/ngraph/core/src/op/proposal.cpp @@ -10,12 +10,12 @@ using namespace 
std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Proposal, "Proposal", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Proposal, "Proposal", 0); op::v0::Proposal::Proposal(const Output& class_probs, const Output& bbox_deltas, const Output& image_shape, - const ProposalAttrs& attrs) + const Attributes& attrs) : Op({class_probs, bbox_deltas, image_shape}), m_attrs(attrs) { constructor_validate_and_infer_types(); @@ -128,12 +128,12 @@ bool op::v0::Proposal::visit_attributes(AttributeVisitor& visitor) { return true; } -NGRAPH_RTTI_DEFINITION(op::v4::Proposal, "Proposal", 4); +OPENVINO_RTTI_DEFINITION(op::v4::Proposal, "Proposal", 4); op::v4::Proposal::Proposal(const Output& class_probs, const Output& class_bbox_deltas, const Output& image_shape, - const op::ProposalAttrs& attrs) + const op::v0::Proposal::Attributes& attrs) : v0::Proposal(class_probs, class_bbox_deltas, image_shape, attrs) { constructor_validate_and_infer_types(); } diff --git a/ngraph/core/src/op/psroi_pooling.cpp b/ngraph/core/src/op/psroi_pooling.cpp index 9b6fdf41c94..becc9425ada 100644 --- a/ngraph/core/src/op/psroi_pooling.cpp +++ b/ngraph/core/src/op/psroi_pooling.cpp @@ -10,16 +10,16 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::PSROIPooling, "PSROIPooling", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::PSROIPooling, "PSROIPooling", 0); -op::PSROIPooling::PSROIPooling(const Output& input, - const Output& coords, - const size_t output_dim, - const size_t group_size, - const float spatial_scale, - int spatial_bins_x, - int spatial_bins_y, - const string& mode) +ov::op::v0::PSROIPooling::PSROIPooling(const Output& input, + const Output& coords, + const size_t output_dim, + const size_t group_size, + const float spatial_scale, + int spatial_bins_x, + int spatial_bins_y, + const string& mode) : Op({input, coords}), m_output_dim(output_dim), m_group_size(group_size), @@ -41,7 +41,7 @@ bool ngraph::op::v0::PSROIPooling::visit_attributes(AttributeVisitor& visitor) { return 
true; } -void op::PSROIPooling::validate_and_infer_types() { +void ov::op::v0::PSROIPooling::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_PSROIPooling_validate_and_infer_types); auto feat_maps_et = get_input_element_type(0); auto coords_et = get_input_element_type(1); @@ -104,7 +104,7 @@ void op::PSROIPooling::validate_and_infer_types() { } } -shared_ptr op::PSROIPooling::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr ov::op::v0::PSROIPooling::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_PSROIPooling_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), diff --git a/ngraph/core/src/op/min.cpp b/ngraph/core/src/op/reduce_min.cpp similarity index 97% rename from ngraph/core/src/op/min.cpp rename to ngraph/core/src/op/reduce_min.cpp index 06c0f3d4839..c3210c1a97f 100644 --- a/ngraph/core/src/op/min.cpp +++ b/ngraph/core/src/op/reduce_min.cpp @@ -2,11 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/min.hpp" - #include #include "itt.hpp" +#include "ngraph/op/min.hpp" #include "ngraph/op/util/evaluate_helpers.hpp" #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/min.hpp" @@ -40,7 +39,7 @@ bool evaluate_min(const HostTensorPtr& arg, const HostTensorPtr& out, const Axis } } // namespace minop -NGRAPH_RTTI_DEFINITION(op::v1::ReduceMin, "ReduceMin", 1, util::ArithmeticReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v1::ReduceMin, "ReduceMin", 1, util::ArithmeticReductionKeepDims); op::v1::ReduceMin::ReduceMin(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { From 8eeee5e441929a32aeaec817932b7a5a2a875a5d Mon Sep 17 00:00:00 2001 From: cecilia peng Date: Tue, 7 Sep 2021 11:50:09 +0800 Subject: [PATCH 22/52] =?UTF-8?q?[FrontEnd][PaddlePaddle]=20fix=20fill=5Fc?= =?UTF-8?q?onstant=5Fbatch=5Fsize=5Flike=20when=20attri=E2=80=A6=20(#7214)?= 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [FrontEnd][PaddlePaddle] fix fill_constant_batch_size_like when attribute str_value be empty. This happens when export ppyolo with PaddleDetection release/2.2. * code refactor. * remove uncertain comments --- .../src/op/fill_constant_batch_size_like.cpp | 33 +++++++++++++++---- 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp b/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp index 94753f31402..5883dae573b 100644 --- a/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp @@ -55,25 +55,46 @@ static std::shared_ptr set_val(int32_t idx, std::shared_ptr val_node return std::make_shared(nodes, 0); } +template , + typename std::enable_if::type = true> static Output get_seed_node(const NodeContext& node) { - auto dtype = node.get_attribute("dtype"); Output val_node; + auto dtype = node.get_attribute("dtype"); auto str_value = node.get_attribute("str_value"); + if (str_value.empty()) { + auto float_value = node.get_attribute("value"); + val_node = ngraph::opset6::Constant::create(dtype, {1}, {static_cast(float_value)}); + } else { + std::stringstream ss(str_value); + StorageDataType tmp_value; + ss >> tmp_value; + val_node = ngraph::opset6::Constant::create(dtype, {1}, {static_cast(tmp_value)}); + } + return val_node; +} + +static Output get_seed_node(const NodeContext& node) { + Output val_node; + auto dtype = node.get_attribute("dtype"); + switch (dtype) { case element::i32: - val_node = ngraph::opset6::Constant::create(dtype, {1}, {std::stoi(str_value)}); + val_node = get_seed_node(node); break; case element::i64: - val_node = ngraph::opset6::Constant::create(dtype, {1}, {std::stoll(str_value)}); + val_node = get_seed_node(node); break; case element::f32: - val_node = 
ngraph::opset6::Constant::create(dtype, {1}, {std::stof(str_value)}); + val_node = get_seed_node(node); break; case element::f64: - val_node = ngraph::opset6::Constant::create(dtype, {1}, {std::stod(str_value)}); + val_node = get_seed_node(node); break; default: - throw std::runtime_error("fill_constant_batch_size_like: dtype value is invalid"); + throw std::runtime_error("fill_constant_batch_size_like: unsupported dtype"); } return val_node; From c568791d1e611ba6ac26fa39d1ea090f86a7d04b Mon Sep 17 00:00:00 2001 From: Bartosz Lesniewski Date: Tue, 7 Sep 2021 06:54:00 +0200 Subject: [PATCH 23/52] Deprecate passing nodes to op constructor (#7327) * Add a warning * Remove quotes * Replace outputs with output(s) --- ngraph/python/src/ngraph/utils/node_factory.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ngraph/python/src/ngraph/utils/node_factory.py b/ngraph/python/src/ngraph/utils/node_factory.py index 67e1825a4b3..0809a55b4c5 100644 --- a/ngraph/python/src/ngraph/utils/node_factory.py +++ b/ngraph/python/src/ngraph/utils/node_factory.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import logging as log + from functools import partial from typing import Any, Dict, List, Optional, Union @@ -82,6 +84,9 @@ class NodeFactory(object): if issubclass(type(argument), Output): outputs.append(argument) else: + log.warning("Op arguments were passed as Node, please avoid passing arguments in " + "this manner, and pass Output(s) instead, because accepting Nodes will " + "be deprecated in a future release.") outputs.extend(argument.outputs()) return outputs From 9e68a673e49f6d47597533630a6edf7ab49153c0 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Tue, 7 Sep 2021 09:50:46 +0300 Subject: [PATCH 24/52] Moved operations R-Z to ov namespace (#7365) * Moved operations M-P to ov namespace * Fixed code style * Fixed build * Fixed comments * Moved operations R-Z to ov namespace * Fixed build * Fixed comments 
Co-authored-by: y --- .../core/include/ngraph/op/random_uniform.hpp | 72 +------- ngraph/core/include/ngraph/op/range.hpp | 61 +------ ngraph/core/include/ngraph/op/reduce_l1.hpp | 28 +-- ngraph/core/include/ngraph/op/reduce_l2.hpp | 27 +-- .../include/ngraph/op/reduce_logical_and.hpp | 23 +-- .../include/ngraph/op/reduce_logical_or.hpp | 23 +-- ngraph/core/include/ngraph/op/reduce_mean.hpp | 17 +- ngraph/core/include/ngraph/op/reduce_prod.hpp | 29 +--- ngraph/core/include/ngraph/op/reduce_sum.hpp | 84 +-------- ngraph/core/include/ngraph/op/region_yolo.hpp | 72 +------- ngraph/core/include/ngraph/op/relu.hpp | 19 +-- ngraph/core/include/ngraph/op/reorg_yolo.hpp | 29 +--- ngraph/core/include/ngraph/op/reshape.hpp | 54 +----- ngraph/core/include/ngraph/op/reverse.hpp | 68 +------- .../include/ngraph/op/reverse_sequence.hpp | 49 +----- ngraph/core/include/ngraph/op/rnn_cell.hpp | 113 +----------- .../core/include/ngraph/op/rnn_sequence.hpp | 34 +--- ngraph/core/include/ngraph/op/roi_align.hpp | 92 +--------- ngraph/core/include/ngraph/op/roi_pooling.hpp | 43 +---- ngraph/core/include/ngraph/op/roll.hpp | 26 +-- ngraph/core/include/ngraph/op/round.hpp | 56 +----- .../ngraph/op/scatter_elements_update.hpp | 28 +-- .../include/ngraph/op/scatter_nd_update.hpp | 20 +-- .../core/include/ngraph/op/scatter_update.hpp | 33 +--- ngraph/core/include/ngraph/op/select.hpp | 58 +------ ngraph/core/include/ngraph/op/selu.hpp | 22 +-- ngraph/core/include/ngraph/op/shape_of.hpp | 56 +----- .../include/ngraph/op/shuffle_channels.hpp | 40 +---- ngraph/core/include/ngraph/op/sigmoid.hpp | 14 +- ngraph/core/include/ngraph/op/sign.hpp | 19 +-- ngraph/core/include/ngraph/op/sin.hpp | 34 +--- ngraph/core/include/ngraph/op/sinh.hpp | 17 +- ngraph/core/include/ngraph/op/softmax.hpp | 34 +--- ngraph/core/include/ngraph/op/softplus.hpp | 22 +-- .../core/include/ngraph/op/space_to_batch.hpp | 42 +---- .../core/include/ngraph/op/space_to_depth.hpp | 68 +------- 
ngraph/core/include/ngraph/op/split.hpp | 33 +--- ngraph/core/include/ngraph/op/sqrt.hpp | 32 +--- .../include/ngraph/op/squared_difference.hpp | 22 +-- ngraph/core/include/ngraph/op/squeeze.hpp | 25 +-- .../core/include/ngraph/op/strided_slice.hpp | 94 +--------- ngraph/core/include/ngraph/op/subtract.hpp | 22 +-- ngraph/core/include/ngraph/op/swish.hpp | 26 +-- ngraph/core/include/ngraph/op/tan.hpp | 31 +--- ngraph/core/include/ngraph/op/tanh.hpp | 18 +- ngraph/core/include/ngraph/op/tile.hpp | 26 +-- ngraph/core/include/ngraph/op/topk.hpp | 147 +--------------- ngraph/core/include/ngraph/op/transpose.hpp | 27 +-- ngraph/core/include/ngraph/op/unsqueeze.hpp | 20 +-- .../core/include/ngraph/op/variadic_split.hpp | 35 +--- ngraph/core/include/ngraph/op/xor.hpp | 59 +------ .../core/include/openvino/op/logical_xor.hpp | 41 +++++ .../include/openvino/op/random_uniform.hpp | 84 +++++++++ ngraph/core/include/openvino/op/range.hpp | 74 ++++++++ ngraph/core/include/openvino/op/reduce_l1.hpp | 40 +++++ ngraph/core/include/openvino/op/reduce_l2.hpp | 39 +++++ .../openvino/op/reduce_logical_and.hpp | 35 ++++ .../include/openvino/op/reduce_logical_or.hpp | 35 ++++ .../core/include/openvino/op/reduce_mean.hpp | 29 ++++ .../core/include/openvino/op/reduce_prod.hpp | 41 +++++ .../core/include/openvino/op/reduce_sum.hpp | 85 +++++++++ .../core/include/openvino/op/region_yolo.hpp | 84 +++++++++ ngraph/core/include/openvino/op/relu.hpp | 33 ++++ .../core/include/openvino/op/reorg_yolo.hpp | 41 +++++ ngraph/core/include/openvino/op/reshape.hpp | 66 +++++++ ngraph/core/include/openvino/op/reverse.hpp | 75 ++++++++ .../include/openvino/op/reverse_sequence.hpp | 61 +++++++ ngraph/core/include/openvino/op/rnn_cell.hpp | 132 ++++++++++++++ .../core/include/openvino/op/rnn_sequence.hpp | 50 ++++++ ngraph/core/include/openvino/op/roi_align.hpp | 98 +++++++++++ .../core/include/openvino/op/roi_pooling.hpp | 52 ++++++ ngraph/core/include/openvino/op/roll.hpp | 38 +++++ 
ngraph/core/include/openvino/op/round.hpp | 63 +++++++ .../openvino/op/scatter_elements_update.hpp | 40 +++++ .../include/openvino/op/scatter_nd_update.hpp | 29 ++++ .../include/openvino/op/scatter_update.hpp | 42 +++++ ngraph/core/include/openvino/op/select.hpp | 70 ++++++++ ngraph/core/include/openvino/op/selu.hpp | 33 ++++ ngraph/core/include/openvino/op/shape_of.hpp | 67 ++++++++ .../include/openvino/op/shuffle_channels.hpp | 54 ++++++ ngraph/core/include/openvino/op/sigmoid.hpp | 23 +++ ngraph/core/include/openvino/op/sign.hpp | 31 ++++ ngraph/core/include/openvino/op/sin.hpp | 43 +++++ ngraph/core/include/openvino/op/sinh.hpp | 29 ++++ ngraph/core/include/openvino/op/softmax.hpp | 46 +++++ ngraph/core/include/openvino/op/softplus.hpp | 34 ++++ .../include/openvino/op/space_to_batch.hpp | 54 ++++++ .../include/openvino/op/space_to_depth.hpp | 77 +++++++++ ngraph/core/include/openvino/op/split.hpp | 48 ++++++ ngraph/core/include/openvino/op/sqrt.hpp | 44 +++++ .../openvino/op/squared_difference.hpp | 34 ++++ ngraph/core/include/openvino/op/squeeze.hpp | 37 ++++ .../include/openvino/op/strided_slice.hpp | 110 ++++++++++++ ngraph/core/include/openvino/op/subtract.hpp | 34 ++++ ngraph/core/include/openvino/op/swish.hpp | 38 +++++ ngraph/core/include/openvino/op/tan.hpp | 43 +++++ ngraph/core/include/openvino/op/tanh.hpp | 30 ++++ ngraph/core/include/openvino/op/tile.hpp | 38 +++++ ngraph/core/include/openvino/op/topk.hpp | 161 ++++++++++++++++++ ngraph/core/include/openvino/op/transpose.hpp | 39 +++++ ngraph/core/include/openvino/op/unsqueeze.hpp | 34 ++++ .../include/openvino/op/util/scatter_base.hpp | 5 +- .../openvino/op/util/scatter_nd_base.hpp | 5 +- .../include/openvino/op/variadic_split.hpp | 46 +++++ ngraph/core/include/openvino/op/xor.hpp | 41 +++++ ngraph/core/src/op/lstm_sequence.cpp | 2 +- ngraph/core/src/op/random_uniform.cpp | 2 +- ngraph/core/src/op/range.cpp | 4 +- ngraph/core/src/op/reduce_l1.cpp | 2 +- ngraph/core/src/op/reduce_l2.cpp | 2 +- 
ngraph/core/src/op/reduce_logical_and.cpp | 2 +- ngraph/core/src/op/reduce_logical_or.cpp | 2 +- ngraph/core/src/op/reduce_mean.cpp | 2 +- ngraph/core/src/op/reduce_prod.cpp | 2 +- ngraph/core/src/op/reduce_sum.cpp | 2 +- ngraph/core/src/op/region_yolo.cpp | 2 +- ngraph/core/src/op/relu.cpp | 2 +- ngraph/core/src/op/reorg_yolo.cpp | 2 +- ngraph/core/src/op/reshape.cpp | 10 +- ngraph/core/src/op/reverse.cpp | 4 +- ngraph/core/src/op/reverse_sequence.cpp | 2 +- ngraph/core/src/op/rnn_cell.cpp | 2 +- ngraph/core/src/op/rnn_sequence.cpp | 2 +- ngraph/core/src/op/roi_align.cpp | 2 +- ngraph/core/src/op/roi_pooling.cpp | 2 +- ngraph/core/src/op/roll.cpp | 4 +- ngraph/core/src/op/round.cpp | 4 +- .../core/src/op/scatter_elements_update.cpp | 2 +- ngraph/core/src/op/scatter_nd_update.cpp | 2 +- ngraph/core/src/op/scatter_update.cpp | 2 +- ngraph/core/src/op/select.cpp | 2 +- ngraph/core/src/op/selu.cpp | 2 +- ngraph/core/src/op/shape_of.cpp | 4 +- ngraph/core/src/op/shuffle_channels.cpp | 2 +- ngraph/core/src/op/sigmoid.cpp | 10 +- ngraph/core/src/op/sign.cpp | 9 +- ngraph/core/src/op/sin.cpp | 2 +- ngraph/core/src/op/sinh.cpp | 2 +- ngraph/core/src/op/sink.cpp | 4 +- ngraph/core/src/op/softmax.cpp | 2 +- ngraph/core/src/op/softplus.cpp | 2 +- ngraph/core/src/op/space_to_batch.cpp | 2 +- ngraph/core/src/op/space_to_depth.cpp | 12 +- ngraph/core/src/op/split.cpp | 2 +- ngraph/core/src/op/sqrt.cpp | 2 +- ngraph/core/src/op/squared_difference.cpp | 13 +- ngraph/core/src/op/squeeze.cpp | 6 +- ngraph/core/src/op/strided_slice.cpp | 2 +- ngraph/core/src/op/subtract.cpp | 2 +- ngraph/core/src/op/swish.cpp | 2 +- ngraph/core/src/op/tan.cpp | 2 +- ngraph/core/src/op/tanh.cpp | 2 +- ngraph/core/src/op/tile.cpp | 2 +- ngraph/core/src/op/topk.cpp | 22 +-- ngraph/core/src/op/transpose.cpp | 2 +- ngraph/core/src/op/type_relaxed.cpp | 3 +- ngraph/core/src/op/unsqueeze.cpp | 6 +- ngraph/core/src/op/util/scatter_base.cpp | 2 +- ngraph/core/src/op/util/scatter_nd_base.cpp | 2 +- 
ngraph/core/src/op/variadic_split.cpp | 2 +- ngraph/core/src/op/xor.cpp | 4 +- 161 files changed, 2956 insertions(+), 2141 deletions(-) create mode 100644 ngraph/core/include/openvino/op/logical_xor.hpp create mode 100644 ngraph/core/include/openvino/op/random_uniform.hpp create mode 100644 ngraph/core/include/openvino/op/range.hpp create mode 100644 ngraph/core/include/openvino/op/reduce_l1.hpp create mode 100644 ngraph/core/include/openvino/op/reduce_l2.hpp create mode 100644 ngraph/core/include/openvino/op/reduce_logical_and.hpp create mode 100644 ngraph/core/include/openvino/op/reduce_logical_or.hpp create mode 100644 ngraph/core/include/openvino/op/reduce_mean.hpp create mode 100644 ngraph/core/include/openvino/op/reduce_prod.hpp create mode 100644 ngraph/core/include/openvino/op/reduce_sum.hpp create mode 100644 ngraph/core/include/openvino/op/region_yolo.hpp create mode 100644 ngraph/core/include/openvino/op/relu.hpp create mode 100644 ngraph/core/include/openvino/op/reorg_yolo.hpp create mode 100644 ngraph/core/include/openvino/op/reshape.hpp create mode 100644 ngraph/core/include/openvino/op/reverse.hpp create mode 100644 ngraph/core/include/openvino/op/reverse_sequence.hpp create mode 100644 ngraph/core/include/openvino/op/rnn_cell.hpp create mode 100644 ngraph/core/include/openvino/op/rnn_sequence.hpp create mode 100644 ngraph/core/include/openvino/op/roi_align.hpp create mode 100644 ngraph/core/include/openvino/op/roi_pooling.hpp create mode 100644 ngraph/core/include/openvino/op/roll.hpp create mode 100644 ngraph/core/include/openvino/op/round.hpp create mode 100644 ngraph/core/include/openvino/op/scatter_elements_update.hpp create mode 100644 ngraph/core/include/openvino/op/scatter_nd_update.hpp create mode 100644 ngraph/core/include/openvino/op/scatter_update.hpp create mode 100644 ngraph/core/include/openvino/op/select.hpp create mode 100644 ngraph/core/include/openvino/op/selu.hpp create mode 100644 ngraph/core/include/openvino/op/shape_of.hpp 
create mode 100644 ngraph/core/include/openvino/op/shuffle_channels.hpp create mode 100644 ngraph/core/include/openvino/op/sigmoid.hpp create mode 100644 ngraph/core/include/openvino/op/sign.hpp create mode 100644 ngraph/core/include/openvino/op/sin.hpp create mode 100644 ngraph/core/include/openvino/op/sinh.hpp create mode 100644 ngraph/core/include/openvino/op/softmax.hpp create mode 100644 ngraph/core/include/openvino/op/softplus.hpp create mode 100644 ngraph/core/include/openvino/op/space_to_batch.hpp create mode 100644 ngraph/core/include/openvino/op/space_to_depth.hpp create mode 100644 ngraph/core/include/openvino/op/split.hpp create mode 100644 ngraph/core/include/openvino/op/sqrt.hpp create mode 100644 ngraph/core/include/openvino/op/squared_difference.hpp create mode 100644 ngraph/core/include/openvino/op/squeeze.hpp create mode 100644 ngraph/core/include/openvino/op/strided_slice.hpp create mode 100644 ngraph/core/include/openvino/op/subtract.hpp create mode 100644 ngraph/core/include/openvino/op/swish.hpp create mode 100644 ngraph/core/include/openvino/op/tan.hpp create mode 100644 ngraph/core/include/openvino/op/tanh.hpp create mode 100644 ngraph/core/include/openvino/op/tile.hpp create mode 100644 ngraph/core/include/openvino/op/topk.hpp create mode 100644 ngraph/core/include/openvino/op/transpose.hpp create mode 100644 ngraph/core/include/openvino/op/unsqueeze.hpp create mode 100644 ngraph/core/include/openvino/op/variadic_split.hpp create mode 100644 ngraph/core/include/openvino/op/xor.hpp diff --git a/ngraph/core/include/ngraph/op/random_uniform.hpp b/ngraph/core/include/ngraph/op/random_uniform.hpp index f20ddecccab..bf0af07c2d1 100644 --- a/ngraph/core/include/ngraph/op/random_uniform.hpp +++ b/ngraph/core/include/ngraph/op/random_uniform.hpp @@ -6,80 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/random_uniform.hpp" namespace ngraph { namespace op { namespace v8 { -/// \brief Tensor RandomUniform 
operation. -class NGRAPH_API RandomUniform : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - RandomUniform() = default; - - /// - /// \brief Constructs a RandomUniform operation. - /// - /// \param out_shape Node producing the tensor with output shape. - /// \param min_val Node producing the tensor with minimum value. - /// \param max_val Node producing the tensor with maximum value. - /// \param out_type Output type of the tensor. - /// \param global_seed Global seed value. - /// \param op_seed Operational seed value. - RandomUniform(const Output& out_shape, - const Output& min_val, - const Output& max_val, - const ngraph::element::Type& out_type, - uint64_t global_seed = 0, - uint64_t op_seed = 0); - - void validate_and_infer_types() override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return Turns off constant folding for RandomUniform operation. - bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override { - return false; - } - - /// \return The output tensor type. - const ngraph::element::Type& get_out_type() const { - return m_output_type; - } - void set_out_type(const ngraph::element::Type& output_type) { - m_output_type = output_type; - } - - /// \return The global seed value. - uint64_t get_global_seed() const { - return m_global_seed; - } - void set_global_seed(uint64_t seed) { - m_global_seed = seed; - } - - /// \return The operational seed value. 
- uint64_t get_op_seed() const { - return m_op_seed; - } - void set_op_seed(uint64_t seed2) { - m_op_seed = seed2; - } - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - - bool has_evaluate() const override; - -protected: - ngraph::element::Type m_output_type; - uint64_t m_global_seed; - uint64_t m_op_seed; - - mutable std::mutex m_state_mutex; - mutable std::pair m_state; -}; +using ov::op::v8::RandomUniform; } // namespace v8 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/range.hpp b/ngraph/core/include/ngraph/op/range.hpp index 294804baa21..72fc1984fc8 100644 --- a/ngraph/core/include/ngraph/op/range.hpp +++ b/ngraph/core/include/ngraph/op/range.hpp @@ -6,70 +6,15 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/range.hpp" namespace ngraph { namespace op { namespace v4 { -/// \brief Range operation, analogous to `arange()` in Numpy. -class NGRAPH_API Range : public Op { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs an unitialized range operation. - Range() = default; - - /// \brief Constructs a range operation. - /// - /// \param start The tensor producing the start value. Must be a scalar of numeric - /// element type. - /// \param stop The tensor producing the stop value. Must be a scalar of numeric - /// element type. - /// \param step The tensor producing the step value. Must be a scalar of numeric - /// element type. - /// \param output_type The type of the output. 
- Range(const Output& start, const Output& stop, const Output& step, element::Type output_type); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - void set_output_type(element::Type output_type) { - m_output_type = output_type; - } - // Overload collision with method on Node - using Node::set_output_type; - -private: - element::Type m_output_type; -}; +using ov::op::v4::Range; } // namespace v4 namespace v0 { -/// \brief Range operation, analogous to `range()` in Python. -class NGRAPH_API Range : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs an unitialized range operation. - Range() = default; - - /// \brief Constructs a range operation. - /// - /// \param start The tensor producing the start value. Must be a scalar of integer - /// element type, and same element type as `stop` and `step`. - /// \param stop The tensor producing the stop value. Must be a scalar of integer - /// element type, and same element type as `start` and `step`. - /// \param step The tensor producing the step value. Must be a scalar of integer - /// element type, and same element type as `start` and `stop`. 
- Range(const Output& start, const Output& stop, const Output& step); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Range; } // namespace v0 using v0::Range; } // namespace op diff --git a/ngraph/core/include/ngraph/op/reduce_l1.hpp b/ngraph/core/include/ngraph/op/reduce_l1.hpp index 991eaa19abf..aa0dd2001c7 100644 --- a/ngraph/core/include/ngraph/op/reduce_l1.hpp +++ b/ngraph/core/include/ngraph/op/reduce_l1.hpp @@ -5,36 +5,12 @@ #pragma once #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" +#include "openvino/op/reduce_l1.hpp" namespace ngraph { namespace op { namespace v4 { -/// \brief Reduction operation using L1 norm: L1(x) = sum(abs(x)) if all dimensions are -/// specified for the normalisation. -/// -/// Reduces the tensor, eliminating the specified reduction axes by taking the L1-norm. -class NGRAPH_API ReduceL1 : public util::ArithmeticReductionKeepDims { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a reducet L1-norm operation. - ReduceL1() = default; - /// \brief Constructs a reduce L1-norm operation. - /// - /// \param arg The tensor to be reduced. - /// \param reduction_axes The axis positions (0-based) to be eliminated. - /// \param keep_dims If set to true it holds axes that are used for reduction. - ReduceL1(const Output& arg, const Output& reduction_axes, bool keep_dims = false); - - /// \return The default value for Reduce. 
- NGRAPH_SUPPRESS_DEPRECATED_START - virtual std::shared_ptr get_default_value() const override; - NGRAPH_SUPPRESS_DEPRECATED_END - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v4::ReduceL1; } // namespace v4 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/reduce_l2.hpp b/ngraph/core/include/ngraph/op/reduce_l2.hpp index 2629a365396..7f9aef888ca 100644 --- a/ngraph/core/include/ngraph/op/reduce_l2.hpp +++ b/ngraph/core/include/ngraph/op/reduce_l2.hpp @@ -5,35 +5,12 @@ #pragma once #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" +#include "openvino/op/reduce_l2.hpp" namespace ngraph { namespace op { namespace v4 { -/// \brief Reduction operation using L2 norm: -/// -/// Reduces the tensor, eliminating the specified reduction axes by taking the L2-norm. -class NGRAPH_API ReduceL2 : public util::ArithmeticReductionKeepDims { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a reducet L2-norm operation. - ReduceL2() = default; - /// \brief Constructs a reduce L2-norm operation. - /// - /// \param arg The tensor to be reduced. - /// \param reduction_axes The axis positions (0-based) to be eliminated. - /// \param keep_dims If set to true it holds axes that are used for reduction. - ReduceL2(const Output& arg, const Output& reduction_axes, bool keep_dims = false); - - /// \return The default value for Reduce. 
- NGRAPH_SUPPRESS_DEPRECATED_START - virtual std::shared_ptr get_default_value() const override; - NGRAPH_SUPPRESS_DEPRECATED_END - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v4::ReduceL2; } // namespace v4 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/reduce_logical_and.hpp b/ngraph/core/include/ngraph/op/reduce_logical_and.hpp index 2d2ffcc9575..ced9f28b8bc 100644 --- a/ngraph/core/include/ngraph/op/reduce_logical_and.hpp +++ b/ngraph/core/include/ngraph/op/reduce_logical_and.hpp @@ -5,31 +5,12 @@ #pragma once #include "ngraph/op/util/logical_reduction_keep_dims.hpp" +#include "openvino/op/reduce_logical_and.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Performs a reduction using "logical and" -/// -/// The reduction is performed over slices of the first input. The slices shape depends -/// on the values passed to the second input - the axes. -class NGRAPH_API ReduceLogicalAnd : public util::LogicalReductionKeepDims { -public: - NGRAPH_RTTI_DECLARATION; - ReduceLogicalAnd() = default; - /// \brief Constructs a ReduceLogicalAnd node. 
- /// - /// \param data - The input tensor with data to be reduced - /// \param reduction_axes - The input tensor with information about axes over which - /// the first tensor should be sliced prior to the reduction operation - /// \param keep_dims - Indicates if the axes used for reduction should be held/kept - ReduceLogicalAnd(const Output& data, const Output& reduction_axes, const bool keep_dims = false); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::ReduceLogicalAnd; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/reduce_logical_or.hpp b/ngraph/core/include/ngraph/op/reduce_logical_or.hpp index 43ec7a04aa8..26111f556b5 100644 --- a/ngraph/core/include/ngraph/op/reduce_logical_or.hpp +++ b/ngraph/core/include/ngraph/op/reduce_logical_or.hpp @@ -5,31 +5,12 @@ #pragma once #include "ngraph/op/util/logical_reduction_keep_dims.hpp" +#include "openvino/op/reduce_logical_or.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Performs a reduction using "logical or" -/// -/// The reduction is performed over slices of the first input. The slices shape depends -/// on the values passed to the second input - the axes. -class NGRAPH_API ReduceLogicalOr : public util::LogicalReductionKeepDims { -public: - NGRAPH_RTTI_DECLARATION; - ReduceLogicalOr() = default; - /// \brief Constructs a ReduceLogicalOr node. 
- /// - /// \param data - The input tensor with data to be reduced - /// \param reduction_axes - The input tensor with information about axes over which - /// the first tensor should be sliced prior to the reduction operation - /// \param keep_dims - Indicates if the axes used for reduction should be held/kept - ReduceLogicalOr(const Output& data, const Output& reduction_axes, const bool keep_dims = false); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::ReduceLogicalOr; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/reduce_mean.hpp b/ngraph/core/include/ngraph/op/reduce_mean.hpp index 488f153a2aa..836c9ead84f 100644 --- a/ngraph/core/include/ngraph/op/reduce_mean.hpp +++ b/ngraph/core/include/ngraph/op/reduce_mean.hpp @@ -6,25 +6,12 @@ #include "ngraph/axis_set.hpp" #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" +#include "openvino/op/reduce_mean.hpp" namespace ngraph { namespace op { namespace v1 { -class NGRAPH_API ReduceMean : public util::ArithmeticReductionKeepDims { -public: - NGRAPH_RTTI_DECLARATION; - ReduceMean() = default; - - /// \param arg The tensor to be summed. - /// \param reduction_axes The axis positions (0-based) to be eliminated. - /// \param keep_dims If set to 1 it holds axes that are used for reduction. 
- ReduceMean(const Output& arg, const Output& reduction_axes, bool keep_dims = false); - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::ReduceMean; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/reduce_prod.hpp b/ngraph/core/include/ngraph/op/reduce_prod.hpp index 4199c8b57e8..51f85ded5d2 100644 --- a/ngraph/core/include/ngraph/op/reduce_prod.hpp +++ b/ngraph/core/include/ngraph/op/reduce_prod.hpp @@ -5,37 +5,12 @@ #pragma once #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" +#include "openvino/op/reduce_prod.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Product reduction operation. -/// -/// Reduces the tensor, eliminating the specified reduction axes by taking the product. -class NGRAPH_API ReduceProd : public util::ArithmeticReductionKeepDims { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a product reduction operation. - ReduceProd() = default; - /// \brief Constructs a product reduction operation. - /// - /// \param arg The tensor to be reduced. - /// \param reduction_axes The axis positions (0-based) to be eliminated. - /// \param keep_dims If set to true it holds axes that are used for reduction. - ReduceProd(const Output& arg, const Output& reduction_axes, bool keep_dims = false); - - /// \return The default value for Product. 
- NGRAPH_SUPPRESS_DEPRECATED_START - virtual std::shared_ptr get_default_value() const override; - NGRAPH_SUPPRESS_DEPRECATED_END - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& outputs) const override; - bool evaluate_upper(const HostTensorVector& outputs) const override; -}; +using ov::op::v1::ReduceProd; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/reduce_sum.hpp b/ngraph/core/include/ngraph/op/reduce_sum.hpp index 9ef5a433882..30d28796055 100644 --- a/ngraph/core/include/ngraph/op/reduce_sum.hpp +++ b/ngraph/core/include/ngraph/op/reduce_sum.hpp @@ -6,86 +6,12 @@ #include "ngraph/axis_set.hpp" #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" +#include "openvino/op/reduce_sum.hpp" namespace ngraph { namespace op { namespace v1 { -// clang-format off - /// \brief Tensor sum operation. - /// - /// Element-wise sums the input tensor, eliminating the specified reduction axes. 
- /// For example: - /// - /// \f[ - /// \mathit{sum}\left(\{0\}, - /// \left[ \begin{array}{ccc} - /// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) = - /// \left[ (1 + 3 + 5), (2 + 4 + 6) \right] = - /// \left[ 9, 12 \right]~~~\text{(dimension 0 (rows) is eliminated)} - /// \f] - /// - /// \f[ - /// \mathit{sum}\left(\{1\}, - /// \left[ \begin{array}{ccc} - /// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) = - /// \left[ (1 + 2), (3 + 4), (5 + 6) \right] = - /// \left[ 3, 7, 11 \right]~~~\text{(dimension 1 (columns) is eliminated)} - /// \f] - /// - /// \f[ - /// \mathit{sum}\left(\{0,1\}, - /// \left[ \begin{array}{ccc} - /// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) = - /// (1 + 2) + (3 + 4) + (5 + 6) = - /// 21~~~\text{(both dimensions (rows and columns) are eliminated)} - /// \f] - /// - /// ## Parameters - /// - /// | | Description | - /// | -------------------- | ---------------------------------------- | - /// | `reduction_axes` | The axes to eliminate through summation. | - /// | `keep_dims` | If set to 1 it holds axes that are used for reduction. | - /// - /// ## Inputs - /// - /// | | Type | Description | - /// | ----- | --------------------------------- | ------------------------------------------------------ | - /// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. | - /// - /// ## Output - /// - /// | Type | Description | - /// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | - /// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. | - // clang-format off - class NGRAPH_API ReduceSum : public util::ArithmeticReductionKeepDims - { - public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a summation operation. 
- ReduceSum() = default; - /// \brief Constructs a summation operation. - /// - /// \param arg The tensor to be summed. - /// \param reduction_axes The axis positions (0-based) to be eliminated. - /// \param keep_dims If set to 1 it holds axes that are used for reduction. - ReduceSum(const Output& arg, - const Output& reduction_axes, - bool keep_dims = false); - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The default value for Sum. - NGRAPH_SUPPRESS_DEPRECATED_START - virtual std::shared_ptr get_default_value() const override; - NGRAPH_SUPPRESS_DEPRECATED_END - - bool evaluate(const HostTensorVector& outputs, - const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - }; - } - } -} +using ov::op::v1::ReduceSum; +} // namespace v1 +} // namespace op +} // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/region_yolo.hpp b/ngraph/core/include/ngraph/op/region_yolo.hpp index f3430d3cdb5..8f3efbc0a76 100644 --- a/ngraph/core/include/ngraph/op/region_yolo.hpp +++ b/ngraph/core/include/ngraph/op/region_yolo.hpp @@ -5,80 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/region_yolo.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API RegionYolo : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - RegionYolo() = default; - /// - /// \brief Constructs a RegionYolo operation - /// - /// \param[in] input Input - /// \param[in] coords Number of coordinates for each region - /// \param[in] classes Number of classes for each region - /// \param[in] regions Number of regions - /// \param[in] do_softmax Compute softmax - /// \param[in] mask Mask - /// \param[in] axis Axis to begin softmax on - /// \param[in] end_axis Axis to end softmax on - /// \param[in] anchors A flattened list of pairs `[width, height]` that - /// describes - /// prior box sizes. 
- /// - RegionYolo(const Output& input, - const size_t coords, - const size_t classes, - const size_t regions, - const bool do_softmax, - const std::vector& mask, - const int axis, - const int end_axis, - const std::vector& anchors = std::vector{}); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - size_t get_num_coords() const { - return m_num_coords; - } - size_t get_num_classes() const { - return m_num_classes; - } - size_t get_num_regions() const { - return m_num_regions; - } - bool get_do_softmax() const { - return m_do_softmax; - } - const std::vector& get_mask() const { - return m_mask; - } - const std::vector& get_anchors() const { - return m_anchors; - } - int get_axis() const { - return m_axis; - } - int get_end_axis() const { - return m_end_axis; - } - -private: - size_t m_num_coords; - size_t m_num_classes; - size_t m_num_regions; - bool m_do_softmax; - std::vector m_mask; - std::vector m_anchors{}; - int m_axis; - int m_end_axis; -}; +using ov::op::v0::RegionYolo; } // namespace v0 using v0::RegionYolo; } // namespace op diff --git a/ngraph/core/include/ngraph/op/relu.hpp b/ngraph/core/include/ngraph/op/relu.hpp index 1b38d55bb30..6bf7253ef1f 100644 --- a/ngraph/core/include/ngraph/op/relu.hpp +++ b/ngraph/core/include/ngraph/op/relu.hpp @@ -10,27 +10,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/relu.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise Relu operation. -/// -class NGRAPH_API Relu : public ngraph::op::util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - Relu() = default; - /// \brief Constructs a Relu operation. - /// - /// \param arg Node that produces the input tensor. 
- Relu(const Output& arg); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool visit_attributes(AttributeVisitor& visitor) override; -}; +using ov::op::v0::Relu; } // namespace v0 using v0::Relu; } // namespace op diff --git a/ngraph/core/include/ngraph/op/reorg_yolo.hpp b/ngraph/core/include/ngraph/op/reorg_yolo.hpp index ee0ff0b1633..1e10622dc7a 100644 --- a/ngraph/core/include/ngraph/op/reorg_yolo.hpp +++ b/ngraph/core/include/ngraph/op/reorg_yolo.hpp @@ -5,37 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/reorg_yolo.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API ReorgYolo : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - ReorgYolo() = default; - /// \brief Constructs a ReorgYolo operation - /// - /// \param input Input - /// \param stride Stride to reorganize input by - ReorgYolo(const Output& input, const size_t stride); - - // Constructor with `strides` for backward compatibility - ReorgYolo(const Output& input, const Strides& strides); - - void validate_and_infer_types() override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - Strides get_strides() const { - return m_strides; - } - -private: - Strides m_strides; -}; +using ov::op::v0::ReorgYolo; } // namespace v0 using v0::ReorgYolo; } // namespace op diff --git a/ngraph/core/include/ngraph/op/reshape.hpp b/ngraph/core/include/ngraph/op/reshape.hpp index f5ab9e1263e..72730ee9d7e 100644 --- a/ngraph/core/include/ngraph/op/reshape.hpp +++ b/ngraph/core/include/ngraph/op/reshape.hpp @@ -8,62 +8,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" #include "ngraph/runtime/host_tensor.hpp" +#include "openvino/op/reshape.hpp" namespace ngraph { namespace 
op { namespace v1 { -/// \brief Tensor dynamic reshape operation. -/// -/// "Converts" an input tensor into a new shape with the same number of elements. -/// This op does not touch the actual data. If needed, use Transpose for that purpose. -/// -class NGRAPH_API Reshape : public Op { -public: - NGRAPH_RTTI_DECLARATION; - Reshape() = default; - /// \brief Constructs a dynamic reshape operation. This operation does not perform - /// transpose. - /// - /// \param arg The tensor to be reshaped. - /// \param shape_pattern The node that defines output shape shape_pattern. - /// If the input shape is \f$(a_0,\dots,a_{k-1})\f$ then the output shape - /// must - /// be of the form \f$(b_0,\dots,b_{j-1})\f$ where \f$\Pi(a_i) = \Pi(b_i)\f$. - /// A value of -1 is allowed for at most one dimension, in which case the - /// dimension size is inferred based on element count of input tensor. - /// \param special_zero Treats zeros in `shape_pattern` as wildcard flags indicating - /// a - /// copy from input shape at the same index. 
- /// - Reshape(const Output& arg, const Output& shape_pattern, bool special_zero); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool get_special_zero() const { - return m_special_zero; - } - void set_special_zero(bool special_zero) { - m_special_zero = special_zero; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& outputs) const override; - bool evaluate_upper(const HostTensorVector& outputs) const override; - bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; - -protected: - bool m_special_zero; - bool evaluate_reshape(const HostTensorVector& outputs, const HostTensorVector& inputs) const; - -private: - void calculate_output_shape(std::vector& reshape_pattern, - const int64_t& minus_one_idx, - const PartialShape& input_pshape, - std::vector& output_shape) const; -}; +using ov::op::v1::Reshape; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/reverse.hpp b/ngraph/core/include/ngraph/op/reverse.hpp index cbbdb750013..6874c94977a 100644 --- a/ngraph/core/include/ngraph/op/reverse.hpp +++ b/ngraph/core/include/ngraph/op/reverse.hpp @@ -5,76 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/reverse.hpp" namespace ngraph { namespace op { namespace v1 { -class NGRAPH_API Reverse : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - enum class Mode { INDEX, MASK }; - - Reverse() = default; - /// \brief Constructs a reverse operation. - /// - /// \param data The input tensor, some of whose axes are to be reversed. - /// \param reversed_axes The axes to reverse in a form of a set of indices or - /// boolean mask. 
- /// \param mode The way reversed_axes should be interpreted - a set or a mask. - Reverse(const Output& data, const Output& reversed_axes, const std::string& mode); - - Reverse(const Output& data, const Output& reversed_axes, const Mode mode); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The second input data interpretation mode. - Mode get_mode() const { - return m_mode; - } - void set_mode(const Mode mode) { - m_mode = mode; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - Mode mode_from_string(const std::string& mode) const; - - /// \brief Indicates how the values from the second input should be interpreted. - /// - /// The second input can contain a set of indices pointing to axes in the data - /// tensor shape. - /// Alternatively it can contain a boolean mask that indicates which axes should be - /// reversed. 
- Mode m_mode; - -private: - bool evaluate_reverse(const HostTensorVector& outputs, const HostTensorVector& inputs) const; -}; +using ov::op::v1::Reverse; } // namespace v1 } // namespace op - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v1::Reverse::Mode& type); } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v1::Reverse::Mode& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/reverse_sequence.hpp b/ngraph/core/include/ngraph/op/reverse_sequence.hpp index 836c89e11ea..694a4b98593 100644 --- a/ngraph/core/include/ngraph/op/reverse_sequence.hpp +++ b/ngraph/core/include/ngraph/op/reverse_sequence.hpp @@ -5,57 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/reverse_sequence.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API ReverseSequence : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - ReverseSequence() = default; - /// \brief Constructs a ReverseSequence operation. - /// - /// \param arg tensor with input data to reverse - /// \param seq_lengths 1D tensor of integers with sequence lengths in the input - /// tensor. - /// \param batch_axis index of the batch dimension. - /// \param seq_axis index of the sequence dimension. 
- ReverseSequence(const Output& arg, - const Output& seq_lengths, - int64_t batch_axis = 0, - int64_t seq_axis = 1); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - size_t get_batch_axis() const { - return m_normalized_batch_axis; - } - int64_t get_origin_batch_axis() const { - return m_batch_axis; - } - void set_batch_axis(int64_t batch_axis) { - m_batch_axis = batch_axis; - } - size_t get_sequence_axis() const { - return m_normalized_seq_axis; - } - int64_t get_origin_sequence_axis() const { - return m_seq_axis; - } - void set_sequence_axis(int64_t sequence_axis) { - m_seq_axis = sequence_axis; - } - -private: - int64_t m_batch_axis; - int64_t m_seq_axis = 1; - size_t m_normalized_batch_axis; - size_t m_normalized_seq_axis; -}; +using ov::op::v0::ReverseSequence; } // namespace v0 using v0::ReverseSequence; } // namespace op diff --git a/ngraph/core/include/ngraph/op/rnn_cell.hpp b/ngraph/core/include/ngraph/op/rnn_cell.hpp index 4a1dbad316b..a176689d25a 100644 --- a/ngraph/core/include/ngraph/op/rnn_cell.hpp +++ b/ngraph/core/include/ngraph/op/rnn_cell.hpp @@ -13,121 +13,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/activation_functions.hpp" #include "ngraph/op/util/rnn_cell_base.hpp" +#include "openvino/op/rnn_cell.hpp" namespace ngraph { namespace op { namespace v0 { -/// -/// \brief Class for single RNN cell node. -/// -/// \note It follows notation and equations defined as in ONNX standard: -/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN -/// -/// \note It calculates following equations: -/// -/// Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) -/// -/// * - Is a dot product, -/// f - is activation functions. 
-/// -/// \note This class represents only single *cell* (for current time step) -/// and not the whole RNN Sequence layer -/// -/// \sa LSTMSequence, LSTMCell, GRUCell -/// -class NGRAPH_API RNNCell : public util::RNNCellBase { -public: - NGRAPH_RTTI_DECLARATION; - - RNNCell(); - /// - /// \brief Constructs RNNCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The weight tensor with shape: [hidden_size, - /// input_size]. - /// \param[in] R The recurrence weight tensor with shape: - /// [hidden_size, hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. - /// - RNNCell(const Output& X, - const Output& initial_hidden_state, - const Output& W, - const Output& R, - std::size_t hidden_size, - const std::vector& activations = std::vector{"tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f); - - /// - /// \brief Constructs RNNCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The weight tensor with shape: [hidden_size, - /// input_size]. 
- /// \param[in] R The recurrence weight tensor with shape: - /// [hidden_size, hidden_size]. - /// \param[in] B The bias tensor for input gate with shape: - /// [hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. - /// - RNNCell(const Output& X, - const Output& initial_hidden_state, - const Output& W, - const Output& R, - const Output& B, - std::size_t hidden_size, - const std::vector& activations = std::vector{"tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - -private: - /// - /// \brief Creates the default bias input initialized with zeros. - /// - /// \return The object of Output class. - /// - Output get_default_bias_input() const; - - /// - /// \brief The Activation function f. 
- /// - util::ActivationFunction m_activation_f; - - static constexpr std::size_t s_gates_count{1}; -}; +using ov::op::v0::RNNCell; } // namespace v0 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/rnn_sequence.hpp b/ngraph/core/include/ngraph/op/rnn_sequence.hpp index 4aa6fe1fa75..7f982b8f434 100644 --- a/ngraph/core/include/ngraph/op/rnn_sequence.hpp +++ b/ngraph/core/include/ngraph/op/rnn_sequence.hpp @@ -10,42 +10,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/rnn_cell_base.hpp" +#include "openvino/op/rnn_sequence.hpp" namespace ngraph { namespace op { namespace v5 { -class NGRAPH_API RNNSequence : public util::RNNCellBase { -public: - NGRAPH_RTTI_DECLARATION; - - RNNSequence(); - - RNNSequence(const Output& X, - const Output& H_t, - const Output& sequence_lengths, - const Output& W, - const Output& R, - const Output& B, - size_t hidden_size, - op::RecurrentSequenceDirection direction, - const std::vector& activations = std::vector{"tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f); - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - void validate_and_infer_types() override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - op::RecurrentSequenceDirection get_direction() const { - return m_direction; - } - -protected: - op::RecurrentSequenceDirection m_direction; -}; +using ov::op::v5::RNNSequence; } // namespace v5 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/roi_align.hpp b/ngraph/core/include/ngraph/op/roi_align.hpp index 03f333c98a1..abb7297998c 100644 --- a/ngraph/core/include/ngraph/op/roi_align.hpp +++ b/ngraph/core/include/ngraph/op/roi_align.hpp @@ -5,101 +5,13 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/roi_align.hpp" namespace ngraph { namespace op { namespace v3 { -class NGRAPH_API ROIAlign : public Op { -public: - static 
constexpr NodeTypeInfo type_info{"ROIAlign", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - enum class PoolingMode { AVG, MAX }; - - ROIAlign() = default; - /// \brief Constructs a ROIAlign node matching the ONNX ROIAlign specification - /// - /// \param input Input feature map {N, C, H, W} - /// \param rois Regions of interest to pool over - /// \param batch_indices Indices of images in the batch matching - /// the number or ROIs - /// \param pooled_h Height of the ROI output features - /// \param pooled_w Width of the ROI output features - /// \param sampling_ratio Number of sampling points used to compute - /// an output element - /// \param spatial_scale Spatial scale factor used to translate ROI coordinates - /// \param mode Method of pooling - 'avg' or 'max' - ROIAlign(const Output& input, - const Output& rois, - const Output& batch_indices, - const int pooled_h, - const int pooled_w, - const int sampling_ratio, - const float spatial_scale, - const std::string& mode); - - ROIAlign(const Output& input, - const Output& rois, - const Output& batch_indices, - const int pooled_h, - const int pooled_w, - const int sampling_ratio, - const float spatial_scale, - const PoolingMode mode); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - int get_pooled_h() const { - return m_pooled_h; - } - int get_pooled_w() const { - return m_pooled_w; - } - int get_sampling_ratio() const { - return m_sampling_ratio; - } - float get_spatial_scale() const { - return m_spatial_scale; - } - PoolingMode get_mode() const { - return m_mode; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - PoolingMode mode_from_string(const std::string& mode) const; - -private: - int m_pooled_h; - int m_pooled_w; - int 
m_sampling_ratio; - float m_spatial_scale; - PoolingMode m_mode; -}; +using ov::op::v3::ROIAlign; } // namespace v3 using v3::ROIAlign; } // namespace op - -std::ostream& operator<<(std::ostream& s, const op::v3::ROIAlign::PoolingMode& mode); } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v3::ROIAlign::PoolingMode& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 3}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/roi_pooling.hpp b/ngraph/core/include/ngraph/op/roi_pooling.hpp index 9edc39f9ac0..fcf687897de 100644 --- a/ngraph/core/include/ngraph/op/roi_pooling.hpp +++ b/ngraph/core/include/ngraph/op/roi_pooling.hpp @@ -5,52 +5,13 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/roi_pooling.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API ROIPooling : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - ROIPooling() = default; - /// \brief Constructs a ROIPooling operation - /// - /// \param input Input feature map {N, C, H, W} - /// \param coords Coordinates of bounding boxes - /// \param output_size Height/Width of ROI output features - /// \param spatial_scale Ratio of input feature map over input image size - /// \param method Method of pooling - Max or Bilinear - ROIPooling(const Output& input, - const Output& coords, - const Shape& output_size, - const float spatial_scale, - const std::string& method = "max"); - - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - const Shape& get_output_size() const { - return m_output_size; - } - float get_spatial_scale() const { - return m_spatial_scale; - } - const std::string& get_method() const { - return 
m_method; - } - bool visit_attributes(AttributeVisitor& visitor) override; - -private: - Shape m_output_size{0, 0}; - float m_spatial_scale; - std::string m_method = "max"; -}; - +using ov::op::v0::ROIPooling; } // namespace v0 using v0::ROIPooling; - } // namespace op - } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/roll.hpp b/ngraph/core/include/ngraph/op/roll.hpp index 6f376a93def..76e1e4675aa 100644 --- a/ngraph/core/include/ngraph/op/roll.hpp +++ b/ngraph/core/include/ngraph/op/roll.hpp @@ -6,34 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/roll.hpp" namespace ngraph { namespace op { namespace v7 { -/// \brief Tensor roll operation. -class NGRAPH_API Roll : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - Roll() = default; - - /// - /// \brief Constructs a roll operation. - /// - /// \param data Node producing the tensor to be shifted. - /// \param shift Node producing the 0D or 1D tensor which specifies the - /// number of places by which the elements are shifted. - /// \param axes Node producing the 0D or 1D tensor which specifies axes - /// along which elements are shifted. 
- /// - Roll(const Output& data, const Output& shift, const Output& axes); - - void validate_and_infer_types() override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v7::Roll; } // namespace v7 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/round.hpp b/ngraph/core/include/ngraph/op/round.hpp index 38a462132f0..6e00c0db103 100644 --- a/ngraph/core/include/ngraph/op/round.hpp +++ b/ngraph/core/include/ngraph/op/round.hpp @@ -7,64 +7,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/round.hpp" namespace ngraph { namespace op { namespace v5 { -/// \brief Elementwise round operation. The output is round to the nearest integer -/// for each value. In case of halfs, the rule is defined in attribute 'mode': -/// 'HALF_TO_EVEN' - round halfs to the nearest even integer. -/// 'HALF_AWAY_FROM_ZERO': - round in such a way that the result heads away from -/// zero. - -class NGRAPH_API Round : public ngraph::op::Op { -public: - enum class RoundMode { HALF_TO_EVEN, HALF_AWAY_FROM_ZERO }; - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a round operation. - Round() = default; - - /// \brief Constructs a round operation. - /// - /// \param arg Node that produces the input tensor. 
- /// \param mode Rule to resolve halfs - Round(const Output& arg, const RoundMode mode); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - - RoundMode get_mode() const { - return m_mode; - } - -private: - RoundMode m_mode; -}; +using ov::op::v5::Round; } // namespace v5 } // namespace op -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v5::Round::RoundMode& type); } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v5::Round::RoundMode& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 5}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/scatter_elements_update.hpp b/ngraph/core/include/ngraph/op/scatter_elements_update.hpp index 78847c2a33e..667b84d30b3 100644 --- a/ngraph/core/include/ngraph/op/scatter_elements_update.hpp +++ b/ngraph/core/include/ngraph/op/scatter_elements_update.hpp @@ -9,36 +9,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/runtime/aligned_buffer.hpp" #include "ngraph/runtime/host_tensor.hpp" +#include "openvino/op/scatter_elements_update.hpp" namespace ngraph { namespace op { namespace v3 { -class NGRAPH_API ScatterElementsUpdate : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - ScatterElementsUpdate() = default; - /// \brief Constructs a ScatterElementsUpdate node - - /// \param data Input data - /// \param indices Data entry index that will be updated - /// \param updates Update values - /// \param axis Axis to scatter on - ScatterElementsUpdate(const Output& 
data, - const Output& indices, - const Output& updates, - const Output& axis); - - virtual void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - bool evaluate_scatter_element_update(const HostTensorVector& outputs, const HostTensorVector& inputs) const; -}; +using ov::op::v3::ScatterElementsUpdate; } // namespace v3 using v3::ScatterElementsUpdate; } // namespace op diff --git a/ngraph/core/include/ngraph/op/scatter_nd_update.hpp b/ngraph/core/include/ngraph/op/scatter_nd_update.hpp index 044025ee1a2..d43dede27b0 100644 --- a/ngraph/core/include/ngraph/op/scatter_nd_update.hpp +++ b/ngraph/core/include/ngraph/op/scatter_nd_update.hpp @@ -6,28 +6,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/scatter_nd_base.hpp" +#include "openvino/op/scatter_nd_update.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief Add updates to slices from inputs addressed by indices -class NGRAPH_API ScatterNDUpdate : public util::ScatterNDBase { -public: - static constexpr NodeTypeInfo type_info{"ScatterNDUpdate", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - ScatterNDUpdate() = default; - /// \param inputs Tensor - /// \param indices Index tensor: Data type must be `element::i32` or `element::i64` - /// \param updates Tensor: Must have same type as inputs - ScatterNDUpdate(const Output& inputs, const Output& indices, const Output& updates) - : util::ScatterNDBase(inputs, indices, updates) {} - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using 
ov::op::v3::ScatterNDUpdate; } // namespace v3 using v3::ScatterNDUpdate; } // namespace op diff --git a/ngraph/core/include/ngraph/op/scatter_update.hpp b/ngraph/core/include/ngraph/op/scatter_update.hpp index d3ec15f94b3..f89c2dc8656 100644 --- a/ngraph/core/include/ngraph/op/scatter_update.hpp +++ b/ngraph/core/include/ngraph/op/scatter_update.hpp @@ -7,41 +7,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/scatter_base.hpp" #include "ngraph/runtime/host_tensor.hpp" +#include "openvino/op/scatter_update.hpp" namespace ngraph { namespace op { namespace v3 { -/// -/// \brief Set new values to slices from data addressed by indices -/// -class NGRAPH_API ScatterUpdate : public util::ScatterBase { -public: - static constexpr NodeTypeInfo type_info{"ScatterUpdate", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - ScatterUpdate() = default; - /// - /// \brief Constructs ScatterUpdate operator object. - /// - /// \param data The input tensor to be updated. - /// \param indices The tensor with indexes which will be updated. - /// \param updates The tensor with update values. - /// \param[in] axis The axis at which elements will be updated. 
- /// - ScatterUpdate(const Output& data, - const Output& indices, - const Output& updates, - const Output& axis); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - bool evaluate_scatter_update(const HostTensorVector& outputs, const HostTensorVector& inputs) const; -}; +using ov::op::v3::ScatterUpdate; } // namespace v3 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/select.hpp b/ngraph/core/include/ngraph/op/select.hpp index 2c33bb2b8d6..e9eb2e1e17b 100644 --- a/ngraph/core/include/ngraph/op/select.hpp +++ b/ngraph/core/include/ngraph/op/select.hpp @@ -5,66 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/select.hpp" namespace ngraph { namespace op { namespace v1 { -// clang-format off - /// \brief Elementwise selection operation. - /// - /// ## Inputs - /// - /// | | Type | Description | - /// | ------ | --------------------------------------------- | ------------------------------------------------------------ | - /// | `arg0` | \f$\texttt{bool}[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape, with element `bool`. | - /// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of a shape that is broadcast-compatible with `arg0`, with any element type. | - /// | `arg2` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of a shape that is broadcast-compatible with `arg0`, and same element type as `arg1`. | - /// | `auto_broadcast`| AutoBroadcastSpec | Auto broadcast specification. 
| - /// - /// ## Output - /// - /// | Type | Description | - /// | ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | - /// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg1}[i_1,\dots,i_n]\text{ if }\texttt{arg0}[i_1,\dots,i_n] \neq 0\text{, else }\texttt{arg2}[i_1,\dots,i_n]\f$ | -// clang-format on -class NGRAPH_API Select : public Op { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a selection operation. - Select() : m_auto_broadcast(AutoBroadcastSpec(AutoBroadcastType::NUMPY)) {} - - /// \brief Constructs a selection operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. - /// \param arg2 Node that produces the third input tensor. - /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style - /// implicit broadcasting. - Select(const Output& arg0, - const Output& arg1, - const Output& arg2, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - const AutoBroadcastSpec& get_auto_broadcast() const { - return m_auto_broadcast; - } - void set_auto_broadcast(const AutoBroadcastSpec& auto_broadcast) { - m_auto_broadcast = auto_broadcast; - } - // TODO: Move all uses of get_autob to get_auto_broadcast() and remove this. 
- const AutoBroadcastSpec& get_autob() const override { - return m_auto_broadcast; - } - virtual bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; - bool has_evaluate() const override; - -private: - AutoBroadcastSpec m_auto_broadcast; -}; +using ov::op::v1::Select; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/selu.hpp b/ngraph/core/include/ngraph/op/selu.hpp index 89e0a08f69e..8236dc67904 100644 --- a/ngraph/core/include/ngraph/op/selu.hpp +++ b/ngraph/core/include/ngraph/op/selu.hpp @@ -6,28 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/selu.hpp" + namespace ngraph { namespace op { namespace v0 { -/// \brief Performs a SELU activation function on all elements of the input node -class NGRAPH_API Selu : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - - Selu() = default; - /// \brief Constructs a Selu node. - /// - /// \param data - Node producing the input tensor - /// \param alpha - Alpha coefficient of SELU operation - /// \param lambda - Lambda coefficient of SELU operation - Selu(const Output& data, const Output& alpha, const Output& lambda); - - void validate_and_infer_types() override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v0::Selu; } // namespace v0 using v0::Selu; } // namespace op diff --git a/ngraph/core/include/ngraph/op/shape_of.hpp b/ngraph/core/include/ngraph/op/shape_of.hpp index 9e1d97f05a7..00c8f80e0a9 100644 --- a/ngraph/core/include/ngraph/op/shape_of.hpp +++ b/ngraph/core/include/ngraph/op/shape_of.hpp @@ -5,66 +5,16 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/shape_of.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief Operation that returns the shape of its input argument as a tensor. 
-class NGRAPH_API ShapeOf : public Op { -public: - static constexpr NodeTypeInfo type_info{"ShapeOf", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - ShapeOf() = default; - /// \brief Constructs a shape-of operation. - ShapeOf(const Output& arg, const element::Type output_type = element::i64); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - void validate_and_infer_types() override; - - element::Type get_output_type() const { - return m_output_type; - } - void set_output_type(element::Type output_type) { - m_output_type = output_type; - } - // Overload collision with method on Node - using Node::set_output_type; - - bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& output_values) const override; - bool evaluate_upper(const HostTensorVector& output_values) const override; - bool constant_fold(OutputVector& output_values, const OutputVector& input_values) override; - -private: - element::Type m_output_type; -}; +using ov::op::v3::ShapeOf; } // namespace v3 namespace v0 { -/// \brief Operation that returns the shape of its input argument as a tensor. -class NGRAPH_API ShapeOf : public Op { -public: - NGRAPH_RTTI_DECLARATION; - ShapeOf() = default; - /// \brief Constructs a shape-of operation. 
- ShapeOf(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - void validate_and_infer_types() override; - - bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& output_values) const override; - bool evaluate_upper(const HostTensorVector& output_values) const override; - bool constant_fold(OutputVector& output_values, const OutputVector& input_values) override; -}; +using ov::op::v0::ShapeOf; } // namespace v0 using v0::ShapeOf; } // namespace op diff --git a/ngraph/core/include/ngraph/op/shuffle_channels.hpp b/ngraph/core/include/ngraph/op/shuffle_channels.hpp index f72ba64b45c..9e68fd92aa7 100644 --- a/ngraph/core/include/ngraph/op/shuffle_channels.hpp +++ b/ngraph/core/include/ngraph/op/shuffle_channels.hpp @@ -8,48 +8,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/shuffle_channels.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Permutes data in the channel dimension of the input -class NGRAPH_API ShuffleChannels : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - ShuffleChannels() = default; - /// \brief Constructs a ShuffleChannels node. - /// - /// \param data Node producing the input tensor. - /// \param axis Channel dimension index in the data tensor. - /// A negative value means that the index should be - /// calculated from the back of the input data shape. - /// \param group Number of group the channel dimension should be split into. 
- /// - ShuffleChannels(const Output& data, const int64_t axis = 1, const int64_t group = 1); - - bool visit_attributes(AttributeVisitor& visitor) override; - size_t get_zero_based_axis() const; - - virtual void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - int64_t get_axis() const { - return m_axis; - } - int64_t get_group() const { - return m_group; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - bool evaluate_shuffle_channels(const HostTensorVector& outputs, const HostTensorVector& inputs) const; - - int64_t m_axis; - int64_t m_group; -}; +using ov::op::v0::ShuffleChannels; } // namespace v0 using v0::ShuffleChannels; } // namespace op diff --git a/ngraph/core/include/ngraph/op/sigmoid.hpp b/ngraph/core/include/ngraph/op/sigmoid.hpp index 0bb61ddd780..b5c13da1593 100644 --- a/ngraph/core/include/ngraph/op/sigmoid.hpp +++ b/ngraph/core/include/ngraph/op/sigmoid.hpp @@ -8,22 +8,12 @@ #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" #include "ngraph/util.hpp" +#include "openvino/op/sigmoid.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API Sigmoid : public util::UnaryElementwiseArithmetic { -public: - static constexpr NodeTypeInfo type_info{"Sigmoid", 0}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - Sigmoid(const Output& arg); - Sigmoid() = default; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Sigmoid; } // namespace v0 using v0::Sigmoid; } // namespace op diff --git a/ngraph/core/include/ngraph/op/sign.hpp 
b/ngraph/core/include/ngraph/op/sign.hpp index c647da35c59..5bca5fe909d 100644 --- a/ngraph/core/include/ngraph/op/sign.hpp +++ b/ngraph/core/include/ngraph/op/sign.hpp @@ -5,27 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/sign.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise sign operation. -/// -class NGRAPH_API Sign : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - Sign() = default; - /// \brief Constructs an elementwise sign operation. - /// - /// \param arg Node that produces the input tensor. - Sign(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Sign; } // namespace v0 using v0::Sign; } // namespace op diff --git a/ngraph/core/include/ngraph/op/sin.hpp b/ngraph/core/include/ngraph/op/sin.hpp index edac3b9da51..4dd276f09ba 100644 --- a/ngraph/core/include/ngraph/op/sin.hpp +++ b/ngraph/core/include/ngraph/op/sin.hpp @@ -5,42 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/sin.hpp" namespace ngraph { namespace op { namespace v0 { -// clang-format off - /// \brief Elementwise sine operation. - /// - /// ## Inputs - /// - /// | | Type | Description | - /// | ----- | --------------------------------- | ----------------------------------------------- | - /// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. 
| - /// - /// ## Output - /// - /// | Type | Description | - /// | ---------------------- | ------------------------------------------------------------------------------------ | - /// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sin(\texttt{arg}[i_1,\dots,i_n])\f$ | -// clang-format on -class NGRAPH_API Sin : public util::UnaryElementwiseArithmetic { -public: - static constexpr NodeTypeInfo type_info{"Sin", 0}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - /// \brief Constructs a sine operation. - /// - /// \param arg Node that produces the input tensor. - Sin(const Output& arg); - Sin() = default; - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Sin; } // namespace v0 using v0::Sin; } // namespace op diff --git a/ngraph/core/include/ngraph/op/sinh.hpp b/ngraph/core/include/ngraph/op/sinh.hpp index 6ddb594fc22..927f33bdae3 100644 --- a/ngraph/core/include/ngraph/op/sinh.hpp +++ b/ngraph/core/include/ngraph/op/sinh.hpp @@ -5,25 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/sinh.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise hyperbolic sine (sinh) operation. -class NGRAPH_API Sinh : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a hyperbolic sine operation. - /// - /// \param arg Node that produces the input tensor. 
- Sinh(const Output& arg); - Sinh() = default; - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Sinh; } // namespace v0 using v0::Sinh; } // namespace op diff --git a/ngraph/core/include/ngraph/op/softmax.hpp b/ngraph/core/include/ngraph/op/softmax.hpp index 9eae046c216..f0fadb841a4 100644 --- a/ngraph/core/include/ngraph/op/softmax.hpp +++ b/ngraph/core/include/ngraph/op/softmax.hpp @@ -5,42 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/softmax.hpp" namespace ngraph { namespace op { namespace v1 { -class NGRAPH_API Softmax : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - Softmax() : m_axis(0) {} - /// \brief Constructs a softmax operation. - /// - /// \param arg Node that produces the first input tensor.
- /// `[d0, ...]` - /// \param axis The axis position (0-based) on which to calculate the softmax. - /// - /// Output `[d0, ...]` - /// - Softmax(const Output& arg, const size_t axis = 1); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - size_t get_axis() const { - return m_axis; - } - void set_axis(const size_t axis) { - m_axis = axis; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - size_t m_axis; -}; +using ov::op::v1::Softmax; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/softplus.hpp b/ngraph/core/include/ngraph/op/softplus.hpp index cc49918d5e9..2c627d19edd 100644 --- a/ngraph/core/include/ngraph/op/softplus.hpp +++ b/ngraph/core/include/ngraph/op/softplus.hpp @@ -6,30 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/softplus.hpp" namespace ngraph { namespace op { namespace v4 { -/// \brief A Self Regularized Non-Monotonic Neural Activation Function -/// f(x) = ln(exp(x) + 1.) -/// -class NGRAPH_API SoftPlus : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - - SoftPlus() = default; - /// \brief Constructs an SoftPlus operation. 
- /// - /// \param data Input tensor - SoftPlus(const Output& arg); - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v4::SoftPlus; } // namespace v4 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/space_to_batch.hpp b/ngraph/core/include/ngraph/op/space_to_batch.hpp index 6564947339e..6ace3da5793 100644 --- a/ngraph/core/include/ngraph/op/space_to_batch.hpp +++ b/ngraph/core/include/ngraph/op/space_to_batch.hpp @@ -5,50 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/space_to_batch.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief SpaceToBatch permutes data tensor blocks of spatial data into batch -/// dimension. -/// -/// \note Values from spatial blocks dimensions are moved in the batch dimension. -/// -/// Output node produces a tensor with shape: tensor with shape -/// `[batch * block_shape[0] * block_shape[1] * ... * block_shape[N - 1], -/// (pads_begin[1] + D_1 + pads_end[1]) / block_shape[1], -/// (pads_begin[2] + D_2 + pads_end[2]) / block_shape[2], ..., -/// (pads_begin[N - 1] + D_{N - 1} + pads_end[N - 1]) / block_shape[N - 1]` -/// of the same type as `data` input. -class NGRAPH_API SpaceToBatch : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - SpaceToBatch() = default; - - /// \brief Constructs a SpaceToBatch operation. - /// - /// \param data Node producing the data tensor - /// \param block_shape The sizes of the block of values to be moved - /// \param pads_begin Specifies the padding for the beginning along each axis of - /// `data` input - /// \param pads_end Specifies the padding for the ending along each axis of `data` - /// input. 
- SpaceToBatch(const Output& data, - const Output& block_shape, - const Output& pads_begin, - const Output& pads_end); - - void validate_and_infer_types() override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor& visitor) override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - bool evaluate_space_to_batch(const HostTensorVector& outputs, const HostTensorVector& inputs) const; -}; +using ov::op::v1::SpaceToBatch; } // namespace v1 using v1::SpaceToBatch; } // namespace op diff --git a/ngraph/core/include/ngraph/op/space_to_depth.hpp b/ngraph/core/include/ngraph/op/space_to_depth.hpp index d72c725f60e..121279414e6 100644 --- a/ngraph/core/include/ngraph/op/space_to_depth.hpp +++ b/ngraph/core/include/ngraph/op/space_to_depth.hpp @@ -5,77 +5,13 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/space_to_depth.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief SpaceToDepth permutes input tensor blocks of spatial data into depth -/// dimension. -/// -/// \note Values from the height and width dimensions are moved to the depth dimension. -/// -/// Output node produces a tensor with shape: -/// [N, C * blocksize * blocksize, H / blocksize, W / blocksize] -class NGRAPH_API SpaceToDepth : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - enum class SpaceToDepthMode { - // The output depth is gathered from [block_size, ..., block_size, C] - BLOCKS_FIRST, - // The output depth is gathered from [C, block_size, ..., block_size] - DEPTH_FIRST - }; - - SpaceToDepth() = default; - /// \brief Constructs a SpaceToDepth operation. - /// - /// \param data - Node producing the input tensor - /// \param mode Specifies how the output depth dimension is gathered - /// from block coordinates and the old depth dimension. 
- /// \param block_size - the size of the block of values to be moved - SpaceToDepth(const Output& data, const SpaceToDepthMode& mode, std::size_t block_size = 1); - - SpaceToDepth(const Output& data, const std::string& mode, std::size_t block_size = 1); - - bool visit_attributes(AttributeVisitor& visitor) override; - std::size_t get_block_size() const { - return m_blocksize; - } - SpaceToDepthMode get_mode() const { - return m_mode; - } - void validate_and_infer_types() override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - std::size_t m_blocksize; - SpaceToDepthMode m_mode; -}; +using ov::op::v0::SpaceToDepth; } // namespace v0 using v0::SpaceToDepth; } // namespace op - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v0::SpaceToDepth::SpaceToDepthMode& type); } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v0::SpaceToDepth::SpaceToDepthMode& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/split.hpp b/ngraph/core/include/ngraph/op/split.hpp index fa3023697d2..591b5d915d8 100644 --- a/ngraph/core/include/ngraph/op/split.hpp +++ b/ngraph/core/include/ngraph/op/split.hpp @@ -9,41 +9,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/split.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Splits the input tensor into a list of equal sized tensors -class NGRAPH_API Split : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a split operation. 
- Split() = default; - /// \brief Constructs a split operation. - /// \param data The tensor to be split. - /// \param axis The index of an axis in "data" along which to perform - /// the split. - /// \param num_splits The number of pieces that the data tensor should be - /// split into. - Split(const Output& data, const Output& axis, const size_t num_splits); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - size_t get_num_splits() const { - return m_num_splits; - } - void set_num_splits(const size_t num_splits) { - m_num_splits = num_splits; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - size_t m_num_splits; -}; +using ov::op::v1::Split; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/sqrt.hpp b/ngraph/core/include/ngraph/op/sqrt.hpp index f6b55158c17..bcad6a2392b 100644 --- a/ngraph/core/include/ngraph/op/sqrt.hpp +++ b/ngraph/core/include/ngraph/op/sqrt.hpp @@ -5,40 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/sqrt.hpp" namespace ngraph { namespace op { namespace v0 { -// clang-format off - /// \brief Elementwise square root operation. - /// - /// ## Inputs - /// - /// | | Type | Description | - /// | ----- | --------------------------------- | ----------------------------------------------- | - /// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. 
| - /// - /// ## Output - /// - /// | Type | Description | - /// | ---------------------- | ------------------------------------------------------------------------------------- | - /// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sqrt{\texttt{arg}[i_1,\dots,i_n]}\f$ | -// clang-format on -class NGRAPH_API Sqrt : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a square operation. - /// - /// \param arg Node that produces the input tensor. - Sqrt(const Output& arg); - Sqrt() = default; - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Sqrt; } // namespace v0 using v0::Sqrt; } // namespace op diff --git a/ngraph/core/include/ngraph/op/squared_difference.hpp b/ngraph/core/include/ngraph/op/squared_difference.hpp index 9c9888e4cf2..4b22f17f83a 100644 --- a/ngraph/core/include/ngraph/op/squared_difference.hpp +++ b/ngraph/core/include/ngraph/op/squared_difference.hpp @@ -5,30 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/squared_difference.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Calculates an element-wise squared difference between two tensors -/// -/// y[i] = (x1[i] - x2[i])^2 -class NGRAPH_API SquaredDifference : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constrcuts an uninitialized squared difference operation - SquaredDifference() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - /// \brief Constructs the squared difference operation. 
- /// - /// \param x1 First input tensor - /// \param x2 Second input tensor - /// \param auto_broadcast Auto broadcast specification - SquaredDifference(const Output& x1, - const Output& x2, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v0::SquaredDifference; } // namespace v0 using v0::SquaredDifference; } // namespace op diff --git a/ngraph/core/include/ngraph/op/squeeze.hpp b/ngraph/core/include/ngraph/op/squeeze.hpp index 6c1d78586f8..bec910010c0 100644 --- a/ngraph/core/include/ngraph/op/squeeze.hpp +++ b/ngraph/core/include/ngraph/op/squeeze.hpp @@ -9,33 +9,12 @@ #include "ngraph/axis_vector.hpp" #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/squeeze.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API Squeeze : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - - Squeeze(); - Squeeze(const Output& data, const Output& axes); - Squeeze(const Output& data); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& outputs) const override; - bool evaluate_upper(const HostTensorVector& outputs) const override; - bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool is_dynamic() const override; - -private: - Output get_default_axes_input() const; -}; +using ov::op::v0::Squeeze; } // namespace v0 using v0::Squeeze; } // namespace op diff --git a/ngraph/core/include/ngraph/op/strided_slice.hpp b/ngraph/core/include/ngraph/op/strided_slice.hpp index 6c3cff7fa81..8239d67cf6b 100644 
--- a/ngraph/core/include/ngraph/op/strided_slice.hpp +++ b/ngraph/core/include/ngraph/op/strided_slice.hpp @@ -10,102 +10,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/strided_slice.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a -/// bounding box, optionally with stride. -class NGRAPH_API StridedSlice : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - StridedSlice() = default; - - /// \brief Constructs a dynamic tensor strided slice operation. - /// - /// \param data The tensor to be sliced. - /// \param begin 1D tensor with begin indexes for input blob slicing. - /// \param end 1D tensor with end indexes for input blob slicing. - /// \param strides The slicing strides; for example, strides of `{n,m}` - /// means to take every nth row and every mth column - /// of the input matrix. - /// \param begin_mask When begin_mask[i] equal to 1 means that the - /// corresponding dimension of the begin input is ignored. - /// \param end_mask When end_mask[i] is 1, the corresponding dimension of - /// the end input is ignored. - /// \param new_axis_mask If new_axis_mask[i] is 1, a length 1 dimension - /// is inserted on the i-th position. - /// \param shrink_axis_mask If shrink_axis_mask[i] is 1, the dimension - /// on the i-th position is deleted. - /// \param ellipsis_mask It inserts missing dimensions - /// on a position of a non-zero bit. - StridedSlice(const Output& data, - const Output& begin, - const Output& end, - const Output& strides, - const std::vector& begin_mask, - const std::vector& end_mask, - const std::vector& new_axis_mask = std::vector{}, - const std::vector& shrink_axis_mask = std::vector{}, - const std::vector& ellipsis_mask = std::vector{}); - - /// \brief Constructs a dynamic tensor strided slice operation. - /// - /// \param data The tensor to be sliced. 
- /// \param begin 1D tensor with begin indexes for input blob slicing. - /// \param end 1D tensor with end indexes for input blob slicing. - /// \param begin_mask When begin_mask[i] equal to 1 means that the - /// corresponding dimension of the begin input is ignored. - /// \param end_mask When end_mask[i] is 1, the corresponding dimension of - /// the end input is ignored. - /// \param new_axis_mask If new_axis_mask[i] is 1, a length 1 dimension - /// is inserted on the i-th position. - /// \param shrink_axis_mask If shrink_axis_mask[i] is 1, the dimension - /// on the i-th position is deleted. - /// \param ellipsis_mask It inserts missing dimensions - /// on a position of a non-zero bit. - StridedSlice(const Output& data, - const Output& begin, - const Output& end, - const std::vector& begin_mask, - const std::vector& end_mask, - const std::vector& new_axis_mask = std::vector{}, - const std::vector& shrink_axis_mask = std::vector{}, - const std::vector& ellipsis_mask = std::vector{}); - - bool visit_attributes(AttributeVisitor& visitor) override; - const std::vector& get_begin_mask() const { - return m_begin_mask; - } - const std::vector& get_end_mask() const { - return m_end_mask; - } - const std::vector& get_new_axis_mask() const { - return m_new_axis_mask; - } - const std::vector& get_shrink_axis_mask() const { - return m_shrink_axis_mask; - } - const std::vector& get_ellipsis_mask() const { - return m_ellipsis_mask; - } - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - void validate_and_infer_types() override; - bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& outputs) const override; - bool evaluate_upper(const HostTensorVector& outputs) const override; - -private: - AxisSet convert_mask_to_axis_set(const std::vector& mask) const; - - std::vector m_begin_mask; - std::vector 
m_end_mask; - std::vector m_new_axis_mask; - std::vector m_shrink_axis_mask; - std::vector m_ellipsis_mask; -}; +using ov::op::v1::StridedSlice; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/subtract.hpp b/ngraph/core/include/ngraph/op/subtract.hpp index d6ba078714c..93fdf98e702 100644 --- a/ngraph/core/include/ngraph/op/subtract.hpp +++ b/ngraph/core/include/ngraph/op/subtract.hpp @@ -5,30 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/subtract.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise subtraction operation. -class NGRAPH_API Subtract : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - Subtract() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs a subtraction operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. 
- /// \param auto_broadcast Auto broadcast specification - Subtract(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Subtract; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/swish.hpp b/ngraph/core/include/ngraph/op/swish.hpp index 980e3390c1b..d125ec55797 100644 --- a/ngraph/core/include/ngraph/op/swish.hpp +++ b/ngraph/core/include/ngraph/op/swish.hpp @@ -6,34 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/swish.hpp" namespace ngraph { namespace op { namespace v4 { -/// \brief A Swish Activation Function -/// f(x) = x / (1.0 + exp(-beta * x)) or -/// f(x) = x * sigmoid(beta * x) -/// -class NGRAPH_API Swish : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - Swish() = default; - - /// \brief Constructs an Swish operation. - /// - /// \param data Input tensor - /// \param beta Scalar with beta value. 
If the argument is not specified then use - /// the default value 1.0 - Swish(const Output& arg, const Output& beta); - explicit Swish(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v4::Swish; } // namespace v4 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/tan.hpp b/ngraph/core/include/ngraph/op/tan.hpp index 33dc9e32bec..992f237e09c 100644 --- a/ngraph/core/include/ngraph/op/tan.hpp +++ b/ngraph/core/include/ngraph/op/tan.hpp @@ -5,39 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/tan.hpp" namespace ngraph { namespace op { namespace v0 { -// clang-format off - /// \brief Elementwise tangent operation. - /// - /// ## Inputs - /// - /// | | Type | Description | - /// | ----- | --------------------------------- | ----------------------------------------------- | - /// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. | - /// - /// ## Output - /// - /// | Type | Description | - /// | ---------------------- | ------------------------------------------------------------------------------------ | - /// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \tan(\texttt{arg}[i_1,\dots,i_n])\f$ | -// clang-format on -class NGRAPH_API Tan : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a tangent operation. - /// - /// \param arg Node that produces the input tensor. 
- Tan(const Output& arg); - Tan() = default; - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Tan; } // namespace v0 using v0::Tan; } // namespace op diff --git a/ngraph/core/include/ngraph/op/tanh.hpp b/ngraph/core/include/ngraph/op/tanh.hpp index a891ff691ce..0d2d9293886 100644 --- a/ngraph/core/include/ngraph/op/tanh.hpp +++ b/ngraph/core/include/ngraph/op/tanh.hpp @@ -5,26 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/tanh.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise hyperbolic tangent operation. -class NGRAPH_API Tanh : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a hyperbolic tangent operation. - /// - /// \param arg Node that produces the input tensor. 
- Tanh(const Output& arg); - Tanh() = default; - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Tanh; } // namespace v0 using v0::Tanh; } // namespace op diff --git a/ngraph/core/include/ngraph/op/tile.hpp b/ngraph/core/include/ngraph/op/tile.hpp index cc9c886472c..fb3b5436175 100644 --- a/ngraph/core/include/ngraph/op/tile.hpp +++ b/ngraph/core/include/ngraph/op/tile.hpp @@ -6,34 +6,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/runtime/host_tensor.hpp" +#include "openvino/op/tile.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Dynamic Tiling operation which repeats a tensor multiple times -/// along each dimension -class NGRAPH_API Tile : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - Tile() = default; - /// \brief Perform dynamic padding of a tensor - /// - /// \param data The node producing input tensor to be padded. 
- /// \param repeats The node producing the per-dimension replication factor - Tile(const Output& data, const Output& repeats); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - bool evaluate_tile(const HostTensorVector& outputs, const HostTensorVector& inputs) const; -}; +using ov::op::v0::Tile; } // namespace v0 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/topk.hpp b/ngraph/core/include/ngraph/op/topk.hpp index 8ce1eef6f11..c6cb2a9cf84 100644 --- a/ngraph/core/include/ngraph/op/topk.hpp +++ b/ngraph/core/include/ngraph/op/topk.hpp @@ -9,157 +9,16 @@ #include "ngraph/axis_set.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/topk.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Computes indices and values of the k maximum/minimum values -/// for each slice along specified axis. -class NGRAPH_API TopK : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - using SortType = TopKSortType; - using Mode = TopKMode; - - /// \brief Constructs a TopK operation - TopK() = default; - /// \brief Constructs a TopK operation with two outputs: values and indices. - /// By default the indices output is described by i32 data type. - /// - /// \param data The input tensor - /// \param k Specifies how many maximum/minimum elements should be computed - /// (note: scalar input tensor) - /// \param axis The axis along which to compute top k indices - /// \param mode Specifies which operation (min or max) is used to select - /// the biggest element of two. 
- /// \param sort Specifies order of output elements and/or indices - /// Accepted values: none, index, value - /// \param index_element_type Specyfies type of produced indices - TopK(const Output& data, - const Output& k, - const int64_t axis, - const std::string& mode, - const std::string& sort, - const element::Type& index_element_type = element::i32); - - TopK(const Output& data, - const Output& k, - const int64_t axis, - const Mode mode, - const SortType sort, - const element::Type& index_element_type = element::i32); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \brief Returns axis value after normalization - /// \note If input rank required to normalization is dynamic, the exception is - /// thrown - uint64_t get_axis() const; - /// \brief Returns axis value before normalization - int64_t get_provided_axis() const { - return m_axis; - } - void set_axis(const int64_t axis); - Mode get_mode() const { - return m_mode; - } - void set_mode(const Mode mode) { - m_mode = mode; - } - SortType get_sort_type() const { - return m_sort; - } - void set_sort_type(const SortType sort) { - m_sort = sort; - } - element::Type get_index_element_type() const { - return m_index_element_type; - } - void set_index_element_type(const element::Type& index_element_type) { - m_index_element_type = index_element_type; - } - /// \brief Returns the value of K, if available - /// - /// \note If the second input to this op is a constant, the value is retrieved - /// and returned. 
If the input is not constant(dynamic) this method returns 0 - size_t get_k() const; - void set_k(size_t k); - size_t get_default_output_index() const override { - return no_default_index(); - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - int64_t m_axis; - uint64_t m_normalized_axis; - Mode m_mode; - SortType m_sort; - element::Type m_index_element_type{element::i32}; - - virtual size_t read_k_from_constant_node(const std::shared_ptr& node, - const element::Type& k_element_type) const; - - template - size_t validate_and_get_k(const std::shared_ptr& k_constant) const; - Shape compute_output_shape(const std::string& node_description, - const PartialShape input_partial_shape, - const int64_t k) const; - void set_axis(const Rank input_rank, const int64_t axis); -}; +using ov::op::v1::TopK; } // namespace v1 namespace v3 { -/// \brief Computes indices and values of the k maximum/minimum values -/// for each slice along specified axis. -class NGRAPH_API TopK : public v1::TopK { -public: - static constexpr NodeTypeInfo type_info{"TopK", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - /// \brief Constructs a TopK operation - TopK() = default; - /// \brief Constructs a TopK operation with two outputs: values and indices. - /// By default the indices output is described by i32 data type. - /// - /// \param data The input tensor - /// \param k Specifies how many maximum/minimum elements should be computed - /// (note: scalar input tensor) - /// \param axis The axis along which to compute top k indices - /// \param mode Specifies which operation (min or max) is used to select - /// the biggest element of two. 
- /// \param sort Specifies order of output elements and/or indices - /// Accepted values: none, index, value - /// \param index_element_type Specyfies type of produced indices - TopK(const Output& data, - const Output& k, - const int64_t axis, - const std::string& mode, - const std::string& sort, - const element::Type& index_element_type = element::i32); - - TopK(const Output& data, - const Output& k, - const int64_t axis, - const Mode mode, - const SortType sort, - const element::Type& index_element_type = element::i32); - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - virtual size_t read_k_from_constant_node(const std::shared_ptr& node, - const element::Type& k_element_type) const override; -}; +using ov::op::v3::TopK; } // namespace v3 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/transpose.hpp b/ngraph/core/include/ngraph/op/transpose.hpp index 92127203c4d..41620e7539e 100644 --- a/ngraph/core/include/ngraph/op/transpose.hpp +++ b/ngraph/core/include/ngraph/op/transpose.hpp @@ -7,35 +7,12 @@ #include "ngraph/axis_vector.hpp" #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/transpose.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Tensor transpose operation. -class NGRAPH_API Transpose : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - Transpose() = default; - /// - /// \brief Constructs a transpose operation. - /// - /// \param arg Node producing the tensor to be transposed. - /// \param input_order Node producing the permutation to apply to the axes - /// of the input shape. Must be a vector with shape [n], - /// where n is the rank of arg. 
The tensor's value must - /// contain every integer in the range [0, n-1]. - /// - Transpose(const Output& arg, const Output& input_order); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Transpose; } // namespace v1 using v1::Transpose; } // namespace op diff --git a/ngraph/core/include/ngraph/op/unsqueeze.hpp b/ngraph/core/include/ngraph/op/unsqueeze.hpp index ffc0b5efdb4..a71ce25e7c6 100644 --- a/ngraph/core/include/ngraph/op/unsqueeze.hpp +++ b/ngraph/core/include/ngraph/op/unsqueeze.hpp @@ -9,28 +9,12 @@ #include "ngraph/axis_vector.hpp" #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/unsqueeze.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API Unsqueeze : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - Unsqueeze() = default; - Unsqueeze(const Output& data, const Output& axes); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& output_values) const override; - bool evaluate_upper(const HostTensorVector& output_values) const override; - - bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v0::Unsqueeze; } // namespace v0 using v0::Unsqueeze; } // namespace op diff --git a/ngraph/core/include/ngraph/op/variadic_split.hpp b/ngraph/core/include/ngraph/op/variadic_split.hpp index bb09f1fb486..9a6a9e3cb90 100644 --- 
a/ngraph/core/include/ngraph/op/variadic_split.hpp +++ b/ngraph/core/include/ngraph/op/variadic_split.hpp @@ -5,44 +5,13 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/variadic_split.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief VariadicSplit operation splits an input tensor into pieces along some axis. -/// The pieces may have variadic lengths depending on "split_lengths" attribute. -class NGRAPH_API VariadicSplit : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a variadic split operation. - VariadicSplit() = default; - /// \brief Constructs a variadic split operation. - /// - /// \param data The tensor to be split. - /// \param axis The index of an axis in "data" along which to perform the - /// split. - /// \param split_lengths A list containing the sizes of each output tensor - /// along the split "axis". Size of "split_lengths" should be equal to the number of - /// - /// outputs. The sum of split_lengths must match data.shape[axis] - VariadicSplit(const Output& data, const Output& axis, const Output& split_lengths); - - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - size_t get_default_output_index() const override { - return no_default_index(); - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - bool evaluate_variadic_split(const HostTensorVector& outputs, const HostTensorVector& inputs) const; -}; +using ov::op::v1::VariadicSplit; } // namespace v1 - using v1::VariadicSplit; } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/xor.hpp b/ngraph/core/include/ngraph/op/xor.hpp index 34ff6c7675c..bfd0c955209 100644 --- a/ngraph/core/include/ngraph/op/xor.hpp +++ b/ngraph/core/include/ngraph/op/xor.hpp @@ -7,68 +7,17 
@@ #include #include "ngraph/op/util/binary_elementwise_logical.hpp" +#include "openvino/op/logical_xor.hpp" +#include "openvino/op/xor.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise logical-xor operation. -/// -class NGRAPH_API LogicalXor : public util::BinaryElementwiseLogical { -public: - NGRAPH_RTTI_DECLARATION; - LogicalXor() = default; - /// \brief Constructs a logical-xor operation. - /// - /// \param arg0 Node that produces the first input tensor.
- /// `[d0, ...]` - /// \param arg1 Node that produces the second input tensor.
- /// `[d0, ...]` - /// \param auto_broadcast Auto broadcast specification - /// - /// Output `[d0, ...]` - /// - LogicalXor(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::LogicalXor; } // namespace v1 namespace v0 { -/// \brief Elementwise logical-xor operation. -/// -class NGRAPH_API Xor : public util::BinaryElementwiseLogical { -public: - static constexpr NodeTypeInfo type_info{"Xor", 0}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - Xor() = default; - /// \brief Constructs a logical-xor operation. - /// - /// \param arg0 Node that produces the first input tensor.
- /// `[d0, ...]` - /// \param arg1 Node that produces the second input tensor.
- /// `[d0, ...]` - /// \param auto_broadcast Auto broadcast specification - /// - /// Output `[d0, ...]` - /// - Xor(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec()); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Xor; } // namespace v0 - -// default opset version using v0::Xor; } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/openvino/op/logical_xor.hpp b/ngraph/core/include/openvino/op/logical_xor.hpp new file mode 100644 index 00000000000..beda8749ce2 --- /dev/null +++ b/ngraph/core/include/openvino/op/logical_xor.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/binary_elementwise_logical.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise logical-xor operation. +/// +class OPENVINO_API LogicalXor : public util::BinaryElementwiseLogical { +public: + OPENVINO_RTTI_DECLARATION; + LogicalXor() = default; + /// \brief Constructs a logical-xor operation. + /// + /// \param arg0 Node that produces the first input tensor.
+ /// `[d0, ...]` + /// \param arg1 Node that produces the second input tensor.
+ /// `[d0, ...]` + /// \param auto_broadcast Auto broadcast specification + /// + /// Output `[d0, ...]` + /// + LogicalXor(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/random_uniform.hpp b/ngraph/core/include/openvino/op/random_uniform.hpp new file mode 100644 index 00000000000..470936491dd --- /dev/null +++ b/ngraph/core/include/openvino/op/random_uniform.hpp @@ -0,0 +1,84 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief Tensor RandomUniform operation. +class OPENVINO_API RandomUniform : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + RandomUniform() = default; + + /// + /// \brief Constructs a RandomUniform operation. + /// + /// \param out_shape Node producing the tensor with output shape. + /// \param min_val Node producing the tensor with minimum value. + /// \param max_val Node producing the tensor with maximum value. + /// \param out_type Output type of the tensor. + /// \param global_seed Global seed value. + /// \param op_seed Operational seed value. 
+ RandomUniform(const Output& out_shape, + const Output& min_val, + const Output& max_val, + const ngraph::element::Type& out_type, + uint64_t global_seed = 0, + uint64_t op_seed = 0); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return Turns off constant folding for RandomUniform operation. + bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override { + return false; + } + + /// \return The output tensor type. + const ngraph::element::Type& get_out_type() const { + return m_output_type; + } + void set_out_type(const ngraph::element::Type& output_type) { + m_output_type = output_type; + } + + /// \return The global seed value. + uint64_t get_global_seed() const { + return m_global_seed; + } + void set_global_seed(uint64_t seed) { + m_global_seed = seed; + } + + /// \return The operational seed value. + uint64_t get_op_seed() const { + return m_op_seed; + } + void set_op_seed(uint64_t seed2) { + m_op_seed = seed2; + } + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + + bool has_evaluate() const override; + +protected: + ngraph::element::Type m_output_type; + uint64_t m_global_seed; + uint64_t m_op_seed; + + mutable std::mutex m_state_mutex; + mutable std::pair m_state; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/range.hpp b/ngraph/core/include/openvino/op/range.hpp new file mode 100644 index 00000000000..5dcc53e928d --- /dev/null +++ b/ngraph/core/include/openvino/op/range.hpp @@ -0,0 +1,74 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v4 { +/// \brief Range operation, analogous to `arange()` in Numpy. 
+class OPENVINO_API Range : public Op { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs an unitialized range operation. + Range() = default; + + /// \brief Constructs a range operation. + /// + /// \param start The tensor producing the start value. Must be a scalar of numeric + /// element type. + /// \param stop The tensor producing the stop value. Must be a scalar of numeric + /// element type. + /// \param step The tensor producing the step value. Must be a scalar of numeric + /// element type. + /// \param output_type The type of the output. + Range(const Output& start, const Output& stop, const Output& step, element::Type output_type); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + void set_output_type(element::Type output_type) { + m_output_type = output_type; + } + // Overload collision with method on Node + using Node::set_output_type; + +private: + element::Type m_output_type; +}; +} // namespace v4 +namespace v0 { +/// \brief Range operation, analogous to `range()` in Python. +class OPENVINO_API Range : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs an unitialized range operation. + Range() = default; + + /// \brief Constructs a range operation. + /// + /// \param start The tensor producing the start value. Must be a scalar of integer + /// element type, and same element type as `stop` and `step`. + /// \param stop The tensor producing the stop value. Must be a scalar of integer + /// element type, and same element type as `start` and `step`. + /// \param step The tensor producing the step value. Must be a scalar of integer + /// element type, and same element type as `start` and `stop`. 
+ Range(const Output& start, const Output& stop, const Output& step); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reduce_l1.hpp b/ngraph/core/include/openvino/op/reduce_l1.hpp new file mode 100644 index 00000000000..2e5f2abba84 --- /dev/null +++ b/ngraph/core/include/openvino/op/reduce_l1.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v4 { +/// \brief Reduction operation using L1 norm: L1(x) = sum(abs(x)) if all dimensions are +/// specified for the normalisation. +/// +/// Reduces the tensor, eliminating the specified reduction axes by taking the L1-norm. +class OPENVINO_API ReduceL1 : public util::ArithmeticReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a reducet L1-norm operation. + ReduceL1() = default; + /// \brief Constructs a reduce L1-norm operation. + /// + /// \param arg The tensor to be reduced. + /// \param reduction_axes The axis positions (0-based) to be eliminated. + /// \param keep_dims If set to true it holds axes that are used for reduction. + ReduceL1(const Output& arg, const Output& reduction_axes, bool keep_dims = false); + + /// \return The default value for Reduce. 
+ OPENVINO_SUPPRESS_DEPRECATED_START + std::shared_ptr get_default_value() const override; + OPENVINO_SUPPRESS_DEPRECATED_END + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v4 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reduce_l2.hpp b/ngraph/core/include/openvino/op/reduce_l2.hpp new file mode 100644 index 00000000000..e257e222b6a --- /dev/null +++ b/ngraph/core/include/openvino/op/reduce_l2.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v4 { +/// \brief Reduction operation using L2 norm: +/// +/// Reduces the tensor, eliminating the specified reduction axes by taking the L2-norm. +class OPENVINO_API ReduceL2 : public util::ArithmeticReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a reducet L2-norm operation. + ReduceL2() = default; + /// \brief Constructs a reduce L2-norm operation. + /// + /// \param arg The tensor to be reduced. + /// \param reduction_axes The axis positions (0-based) to be eliminated. + /// \param keep_dims If set to true it holds axes that are used for reduction. + ReduceL2(const Output& arg, const Output& reduction_axes, bool keep_dims = false); + + /// \return The default value for Reduce. 
+ OPENVINO_SUPPRESS_DEPRECATED_START + std::shared_ptr get_default_value() const override; + OPENVINO_SUPPRESS_DEPRECATED_END + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v4 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reduce_logical_and.hpp b/ngraph/core/include/openvino/op/reduce_logical_and.hpp new file mode 100644 index 00000000000..afbdc06440d --- /dev/null +++ b/ngraph/core/include/openvino/op/reduce_logical_and.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/logical_reduction_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Performs a reduction using "logical and" +/// +/// The reduction is performed over slices of the first input. The slices shape depends +/// on the values passed to the second input - the axes. +class OPENVINO_API ReduceLogicalAnd : public util::LogicalReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + ReduceLogicalAnd() = default; + /// \brief Constructs a ReduceLogicalAnd node. 
+ /// + /// \param data - The input tensor with data to be reduced + /// \param reduction_axes - The input tensor with information about axes over which + /// the first tensor should be sliced prior to the reduction operation + /// \param keep_dims - Indicates if the axes used for reduction should be held/kept + ReduceLogicalAnd(const Output& data, const Output& reduction_axes, const bool keep_dims = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reduce_logical_or.hpp b/ngraph/core/include/openvino/op/reduce_logical_or.hpp new file mode 100644 index 00000000000..308e11bad38 --- /dev/null +++ b/ngraph/core/include/openvino/op/reduce_logical_or.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/logical_reduction_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Performs a reduction using "logical or" +/// +/// The reduction is performed over slices of the first input. The slices shape depends +/// on the values passed to the second input - the axes. +class OPENVINO_API ReduceLogicalOr : public util::LogicalReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + ReduceLogicalOr() = default; + /// \brief Constructs a ReduceLogicalOr node. 
+ /// + /// \param data - The input tensor with data to be reduced + /// \param reduction_axes - The input tensor with information about axes over which + /// the first tensor should be sliced prior to the reduction operation + /// \param keep_dims - Indicates if the axes used for reduction should be held/kept + ReduceLogicalOr(const Output& data, const Output& reduction_axes, const bool keep_dims = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reduce_mean.hpp b/ngraph/core/include/openvino/op/reduce_mean.hpp new file mode 100644 index 00000000000..8a47de1e6fd --- /dev/null +++ b/ngraph/core/include/openvino/op/reduce_mean.hpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v1 { +class OPENVINO_API ReduceMean : public util::ArithmeticReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + ReduceMean() = default; + + /// \param arg The tensor to be summed. + /// \param reduction_axes The axis positions (0-based) to be eliminated. + /// \param keep_dims If set to 1 it holds axes that are used for reduction. 
+ ReduceMean(const Output& arg, const Output& reduction_axes, bool keep_dims = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reduce_prod.hpp b/ngraph/core/include/openvino/op/reduce_prod.hpp new file mode 100644 index 00000000000..f82bba5e354 --- /dev/null +++ b/ngraph/core/include/openvino/op/reduce_prod.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Product reduction operation. +/// +/// Reduces the tensor, eliminating the specified reduction axes by taking the product. +class OPENVINO_API ReduceProd : public util::ArithmeticReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a product reduction operation. + ReduceProd() = default; + /// \brief Constructs a product reduction operation. + /// + /// \param arg The tensor to be reduced. + /// \param reduction_axes The axis positions (0-based) to be eliminated. + /// \param keep_dims If set to true it holds axes that are used for reduction. + ReduceProd(const Output& arg, const Output& reduction_axes, bool keep_dims = false); + + /// \return The default value for Product. 
+ OPENVINO_SUPPRESS_DEPRECATED_START + std::shared_ptr get_default_value() const override; + OPENVINO_SUPPRESS_DEPRECATED_END + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& outputs) const override; + bool evaluate_upper(const HostTensorVector& outputs) const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reduce_sum.hpp b/ngraph/core/include/openvino/op/reduce_sum.hpp new file mode 100644 index 00000000000..229ed86fd8d --- /dev/null +++ b/ngraph/core/include/openvino/op/reduce_sum.hpp @@ -0,0 +1,85 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v1 { +// clang-format off +/// \brief Tensor sum operation. +/// +/// Element-wise sums the input tensor, eliminating the specified reduction axes. 
+/// For example: +/// +/// \f[ +/// \mathit{sum}\left(\{0\}, +/// \left[ \begin{array}{ccc} +/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) = +/// \left[ (1 + 3 + 5), (2 + 4 + 6) \right] = +/// \left[ 9, 12 \right]~~~\text{(dimension 0 (rows) is eliminated)} +/// \f] +/// +/// \f[ +/// \mathit{sum}\left(\{1\}, +/// \left[ \begin{array}{ccc} +/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) = +/// \left[ (1 + 2), (3 + 4), (5 + 6) \right] = +/// \left[ 3, 7, 11 \right]~~~\text{(dimension 1 (columns) is eliminated)} +/// \f] +/// +/// \f[ +/// \mathit{sum}\left(\{0,1\}, +/// \left[ \begin{array}{ccc} +/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) = +/// (1 + 2) + (3 + 4) + (5 + 6) = +/// 21~~~\text{(both dimensions (rows and columns) are eliminated)} +/// \f] +/// +/// ## Parameters +/// +/// | | Description | +/// | -------------------- | ---------------------------------------- | +/// | `reduction_axes` | The axes to eliminate through summation. | +/// | `keep_dims` | If set to 1 it holds axes that are used for reduction. | +/// +/// ## Inputs +/// +/// | | Type | Description | +/// | ----- | --------------------------------- | ------------------------------------------------------ | +/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. | +/// +/// ## Output +/// +/// | Type | Description | +/// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | +/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. | +// clang-format on +class OPENVINO_API ReduceSum : public util::ArithmeticReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a summation operation. + ReduceSum() = default; + /// \brief Constructs a summation operation. 
+ /// + /// \param arg The tensor to be summed. + /// \param reduction_axes The axis positions (0-based) to be eliminated. + /// \param keep_dims If set to 1 it holds axes that are used for reduction. + ReduceSum(const Output& arg, const Output& reduction_axes, bool keep_dims = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The default value for Sum. + OPENVINO_SUPPRESS_DEPRECATED_START + std::shared_ptr get_default_value() const override; + OPENVINO_SUPPRESS_DEPRECATED_END + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/region_yolo.hpp b/ngraph/core/include/openvino/op/region_yolo.hpp new file mode 100644 index 00000000000..17bed4408b9 --- /dev/null +++ b/ngraph/core/include/openvino/op/region_yolo.hpp @@ -0,0 +1,84 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API RegionYolo : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + RegionYolo() = default; + /// + /// \brief Constructs a RegionYolo operation + /// + /// \param[in] input Input + /// \param[in] coords Number of coordinates for each region + /// \param[in] classes Number of classes for each region + /// \param[in] regions Number of regions + /// \param[in] do_softmax Compute softmax + /// \param[in] mask Mask + /// \param[in] axis Axis to begin softmax on + /// \param[in] end_axis Axis to end softmax on + /// \param[in] anchors A flattened list of pairs `[width, height]` that + /// describes + /// prior box sizes. 
+ /// + RegionYolo(const Output& input, + const size_t coords, + const size_t classes, + const size_t regions, + const bool do_softmax, + const std::vector& mask, + const int axis, + const int end_axis, + const std::vector& anchors = std::vector{}); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + size_t get_num_coords() const { + return m_num_coords; + } + size_t get_num_classes() const { + return m_num_classes; + } + size_t get_num_regions() const { + return m_num_regions; + } + bool get_do_softmax() const { + return m_do_softmax; + } + const std::vector& get_mask() const { + return m_mask; + } + const std::vector& get_anchors() const { + return m_anchors; + } + int get_axis() const { + return m_axis; + } + int get_end_axis() const { + return m_end_axis; + } + +private: + size_t m_num_coords; + size_t m_num_classes; + size_t m_num_regions; + bool m_do_softmax; + std::vector m_mask; + std::vector m_anchors{}; + int m_axis; + int m_end_axis; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/relu.hpp b/ngraph/core/include/openvino/op/relu.hpp new file mode 100644 index 00000000000..d5e4fb50556 --- /dev/null +++ b/ngraph/core/include/openvino/op/relu.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise Relu operation. +/// +class OPENVINO_API Relu : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + Relu() = default; + /// \brief Constructs a Relu operation. + /// + /// \param arg Node that produces the input tensor. 
+ Relu(const Output& arg); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool visit_attributes(AttributeVisitor& visitor) override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reorg_yolo.hpp b/ngraph/core/include/openvino/op/reorg_yolo.hpp new file mode 100644 index 00000000000..64183776a13 --- /dev/null +++ b/ngraph/core/include/openvino/op/reorg_yolo.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API ReorgYolo : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + ReorgYolo() = default; + /// \brief Constructs a ReorgYolo operation + /// + /// \param input Input + /// \param stride Stride to reorganize input by + ReorgYolo(const Output& input, const size_t stride); + + // Constructor with `strides` for backward compatibility + ReorgYolo(const Output& input, const Strides& strides); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + Strides get_strides() const { + return m_strides; + } + +private: + Strides m_strides; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reshape.hpp b/ngraph/core/include/openvino/op/reshape.hpp new file mode 100644 index 00000000000..b1d0bbaa2ac --- /dev/null +++ b/ngraph/core/include/openvino/op/reshape.hpp @@ -0,0 +1,66 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// 
\brief Tensor dynamic reshape operation. +/// +/// "Converts" an input tensor into a new shape with the same number of elements. +/// This op does not touch the actual data. If needed, use Transpose for that purpose. +/// +class OPENVINO_API Reshape : public Op { +public: + OPENVINO_RTTI_DECLARATION; + Reshape() = default; + /// \brief Constructs a dynamic reshape operation. This operation does not perform + /// transpose. + /// + /// \param arg The tensor to be reshaped. + /// \param shape_pattern The node that defines output shape shape_pattern. + /// If the input shape is \f$(a_0,\dots,a_{k-1})\f$ then the output shape + /// must + /// be of the form \f$(b_0,\dots,b_{j-1})\f$ where \f$\Pi(a_i) = \Pi(b_i)\f$. + /// A value of -1 is allowed for at most one dimension, in which case the + /// dimension size is inferred based on element count of input tensor. + /// \param special_zero Treats zeros in `shape_pattern` as wildcard flags indicating + /// a + /// copy from input shape at the same index. 
+ /// + Reshape(const Output& arg, const Output& shape_pattern, bool special_zero); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool get_special_zero() const { + return m_special_zero; + } + void set_special_zero(bool special_zero) { + m_special_zero = special_zero; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& outputs) const override; + bool evaluate_upper(const HostTensorVector& outputs) const override; + bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; + +protected: + bool m_special_zero; + bool evaluate_reshape(const HostTensorVector& outputs, const HostTensorVector& inputs) const; + +private: + void calculate_output_shape(std::vector& reshape_pattern, + const int64_t& minus_one_idx, + const PartialShape& input_pshape, + std::vector& output_shape) const; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reverse.hpp b/ngraph/core/include/openvino/op/reverse.hpp new file mode 100644 index 00000000000..23d9e640923 --- /dev/null +++ b/ngraph/core/include/openvino/op/reverse.hpp @@ -0,0 +1,75 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +class OPENVINO_API Reverse : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + enum class Mode { INDEX, MASK }; + + Reverse() = default; + /// \brief Constructs a reverse operation. + /// + /// \param data The input tensor, some of whose axes are to be reversed. + /// \param reversed_axes The axes to reverse in a form of a set of indices or + /// boolean mask. 
+ /// \param mode The way reversed_axes should be interpreted - a set or a mask. + Reverse(const Output& data, const Output& reversed_axes, const std::string& mode); + + Reverse(const Output& data, const Output& reversed_axes, const Mode mode); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The second input data interpretation mode. + Mode get_mode() const { + return m_mode; + } + void set_mode(const Mode mode) { + m_mode = mode; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + Mode mode_from_string(const std::string& mode) const; + + /// \brief Indicates how the values from the second input should be interpreted. + /// + /// The second input can contain a set of indices pointing to axes in the data + /// tensor shape. + /// Alternatively it can contain a boolean mask that indicates which axes should be + /// reversed. 
+ Mode m_mode; + +private: + bool evaluate_reverse(const HostTensorVector& outputs, const HostTensorVector& inputs) const; +}; +} // namespace v1 +} // namespace op + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v1::Reverse::Mode& type); + +template <> +class OPENVINO_API AttributeAdapter : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v1::Reverse::Mode& value) : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reverse_sequence.hpp b/ngraph/core/include/openvino/op/reverse_sequence.hpp new file mode 100644 index 00000000000..a9212a343ad --- /dev/null +++ b/ngraph/core/include/openvino/op/reverse_sequence.hpp @@ -0,0 +1,61 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API ReverseSequence : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + ReverseSequence() = default; + /// \brief Constructs a ReverseSequence operation. + /// + /// \param arg tensor with input data to reverse + /// \param seq_lengths 1D tensor of integers with sequence lengths in the input + /// tensor. + /// \param batch_axis index of the batch dimension. + /// \param seq_axis index of the sequence dimension. 
+ ReverseSequence(const Output& arg, + const Output& seq_lengths, + int64_t batch_axis = 0, + int64_t seq_axis = 1); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + size_t get_batch_axis() const { + return m_normalized_batch_axis; + } + int64_t get_origin_batch_axis() const { + return m_batch_axis; + } + void set_batch_axis(int64_t batch_axis) { + m_batch_axis = batch_axis; + } + size_t get_sequence_axis() const { + return m_normalized_seq_axis; + } + int64_t get_origin_sequence_axis() const { + return m_seq_axis; + } + void set_sequence_axis(int64_t sequence_axis) { + m_seq_axis = sequence_axis; + } + +private: + int64_t m_batch_axis; + int64_t m_seq_axis = 1; + size_t m_normalized_batch_axis; + size_t m_normalized_seq_axis; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/rnn_cell.hpp b/ngraph/core/include/openvino/op/rnn_cell.hpp new file mode 100644 index 00000000000..3a1c00ca210 --- /dev/null +++ b/ngraph/core/include/openvino/op/rnn_cell.hpp @@ -0,0 +1,132 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "openvino/op/op.hpp" +#include "openvino/op/util/activation_functions.hpp" +#include "openvino/op/util/rnn_cell_base.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// +/// \brief Class for single RNN cell node. +/// +/// \note It follows notation and equations defined as in ONNX standard: +/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN +/// +/// \note It calculates following equations: +/// +/// Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) +/// +/// * - Is a dot product, +/// f - is activation functions. 
+/// +/// \note This class represents only single *cell* (for current time step) +/// and not the whole RNN Sequence layer +/// +/// \sa LSTMSequence, LSTMCell, GRUCell +/// +class OPENVINO_API RNNCell : public util::RNNCellBase { +public: + OPENVINO_RTTI_DECLARATION; + + RNNCell(); + /// + /// \brief Constructs RNNCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: [hidden_size, + /// input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [hidden_size, hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// + RNNCell(const Output& X, + const Output& initial_hidden_state, + const Output& W, + const Output& R, + std::size_t hidden_size, + const std::vector& activations = std::vector{"tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f); + + /// + /// \brief Constructs RNNCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: [hidden_size, + /// input_size]. 
+ /// \param[in] R The recurrence weight tensor with shape: + /// [hidden_size, hidden_size]. + /// \param[in] B The bias tensor for input gate with shape: + /// [hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// + RNNCell(const Output& X, + const Output& initial_hidden_state, + const Output& W, + const Output& R, + const Output& B, + std::size_t hidden_size, + const std::vector& activations = std::vector{"tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + +private: + /// + /// \brief Creates the default bias input initialized with zeros. + /// + /// \return The object of Output class. + /// + Output get_default_bias_input() const; + + /// + /// \brief The Activation function f. 
+ /// + util::ActivationFunction m_activation_f; + + static constexpr std::size_t s_gates_count{1}; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/rnn_sequence.hpp b/ngraph/core/include/openvino/op/rnn_sequence.hpp new file mode 100644 index 00000000000..33b1de8f995 --- /dev/null +++ b/ngraph/core/include/openvino/op/rnn_sequence.hpp @@ -0,0 +1,50 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "openvino/op/util/rnn_cell_base.hpp" + +namespace ov { +namespace op { +namespace v5 { +class OPENVINO_API RNNSequence : public util::RNNCellBase { +public: + OPENVINO_RTTI_DECLARATION; + + RNNSequence(); + + RNNSequence(const Output& X, + const Output& H_t, + const Output& sequence_lengths, + const Output& W, + const Output& R, + const Output& B, + size_t hidden_size, + op::RecurrentSequenceDirection direction, + const std::vector& activations = std::vector{"tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + op::RecurrentSequenceDirection get_direction() const { + return m_direction; + } + +protected: + op::RecurrentSequenceDirection m_direction; +}; +} // namespace v5 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/roi_align.hpp b/ngraph/core/include/openvino/op/roi_align.hpp new file mode 100644 index 00000000000..2353885e519 --- /dev/null +++ b/ngraph/core/include/openvino/op/roi_align.hpp @@ -0,0 +1,98 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v3 { +class OPENVINO_API 
ROIAlign : public Op { +public: + OPENVINO_RTTI_DECLARATION; + enum class PoolingMode { AVG, MAX }; + + ROIAlign() = default; + /// \brief Constructs a ROIAlign node matching the ONNX ROIAlign specification + /// + /// \param input Input feature map {N, C, H, W} + /// \param rois Regions of interest to pool over + /// \param batch_indices Indices of images in the batch matching + /// the number of ROIs + /// \param pooled_h Height of the ROI output features + /// \param pooled_w Width of the ROI output features + /// \param sampling_ratio Number of sampling points used to compute + /// an output element + /// \param spatial_scale Spatial scale factor used to translate ROI coordinates + /// \param mode Method of pooling - 'avg' or 'max' + ROIAlign(const Output& input, + const Output& rois, + const Output& batch_indices, + const int pooled_h, + const int pooled_w, + const int sampling_ratio, + const float spatial_scale, + const std::string& mode); + + ROIAlign(const Output& input, + const Output& rois, + const Output& batch_indices, + const int pooled_h, + const int pooled_w, + const int sampling_ratio, + const float spatial_scale, + const PoolingMode mode); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + int get_pooled_h() const { + return m_pooled_h; + } + int get_pooled_w() const { + return m_pooled_w; + } + int get_sampling_ratio() const { + return m_sampling_ratio; + } + float get_spatial_scale() const { + return m_spatial_scale; + } + PoolingMode get_mode() const { + return m_mode; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + PoolingMode mode_from_string(const std::string& mode) const; + +private: + int m_pooled_h; + int m_pooled_w; + int m_sampling_ratio; + float m_spatial_scale; + PoolingMode m_mode; +}; +}
// namespace v3 +} // namespace op + +std::ostream& operator<<(std::ostream& s, const op::v3::ROIAlign::PoolingMode& mode); + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v3::ROIAlign::PoolingMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 3}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/roi_pooling.hpp b/ngraph/core/include/openvino/op/roi_pooling.hpp new file mode 100644 index 00000000000..d81a14cf144 --- /dev/null +++ b/ngraph/core/include/openvino/op/roi_pooling.hpp @@ -0,0 +1,52 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API ROIPooling : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + ROIPooling() = default; + /// \brief Constructs a ROIPooling operation + /// + /// \param input Input feature map {N, C, H, W} + /// \param coords Coordinates of bounding boxes + /// \param output_size Height/Width of ROI output features + /// \param spatial_scale Ratio of input feature map over input image size + /// \param method Method of pooling - Max or Bilinear + ROIPooling(const Output& input, + const Output& coords, + const ngraph::Shape& output_size, + const float spatial_scale, + const std::string& method = "max"); + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + const ngraph::Shape& get_output_size() const { + return m_output_size; + } + float get_spatial_scale() const { + return m_spatial_scale; + } + const std::string& get_method() const { + return m_method; + } + bool visit_attributes(AttributeVisitor& visitor) override; + +private: + ngraph::Shape 
m_output_size{0, 0}; + float m_spatial_scale; + std::string m_method = "max"; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/roll.hpp b/ngraph/core/include/openvino/op/roll.hpp new file mode 100644 index 00000000000..ae0215b9dbc --- /dev/null +++ b/ngraph/core/include/openvino/op/roll.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v7 { +/// \brief Tensor roll operation. +class OPENVINO_API Roll : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Roll() = default; + + /// + /// \brief Constructs a roll operation. + /// + /// \param data Node producing the tensor to be shifted. + /// \param shift Node producing the 0D or 1D tensor which specifies the + /// number of places by which the elements are shifted. + /// \param axes Node producing the 0D or 1D tensor which specifies axes + /// along which elements are shifted. + /// + Roll(const Output& data, const Output& shift, const Output& axes); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v7 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/round.hpp b/ngraph/core/include/openvino/op/round.hpp new file mode 100644 index 00000000000..c066af19c41 --- /dev/null +++ b/ngraph/core/include/openvino/op/round.hpp @@ -0,0 +1,63 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v5 { +/// \brief Elementwise round operation. The output is round to the nearest integer +/// for each value. 
In case of halves, the rule is defined in attribute 'mode': +/// 'HALF_TO_EVEN' - round halves to the nearest even integer. +/// 'HALF_AWAY_FROM_ZERO': - round in such a way that the result heads away from +/// zero. +class OPENVINO_API Round : public Op { +public: + enum class RoundMode { HALF_TO_EVEN, HALF_AWAY_FROM_ZERO }; + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a round operation. + Round() = default; + + /// \brief Constructs a round operation. + /// + /// \param arg Node that produces the input tensor. + /// \param mode Rule to resolve halves + Round(const Output& arg, const RoundMode mode); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + + RoundMode get_mode() const { + return m_mode; + } + +private: + RoundMode m_mode; +}; +} // namespace v5 +} // namespace op +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v5::Round::RoundMode& type); + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v5::Round::RoundMode& value) : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 5}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/scatter_elements_update.hpp b/ngraph/core/include/openvino/op/scatter_elements_update.hpp new file mode 100644 index 00000000000..0f57b355297 --- /dev/null +++ b/ngraph/core/include/openvino/op/scatter_elements_update.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op {
+namespace v3 { +class OPENVINO_API ScatterElementsUpdate : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + ScatterElementsUpdate() = default; + /// \brief Constructs a ScatterElementsUpdate node + + /// \param data Input data + /// \param indices Data entry index that will be updated + /// \param updates Update values + /// \param axis Axis to scatter on + ScatterElementsUpdate(const Output& data, + const Output& indices, + const Output& updates, + const Output& axis); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool evaluate_scatter_element_update(const HostTensorVector& outputs, const HostTensorVector& inputs) const; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/scatter_nd_update.hpp b/ngraph/core/include/openvino/op/scatter_nd_update.hpp new file mode 100644 index 00000000000..f56eb03db86 --- /dev/null +++ b/ngraph/core/include/openvino/op/scatter_nd_update.hpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/scatter_nd_base.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Add updates to slices from inputs addressed by indices +class OPENVINO_API ScatterNDUpdate : public util::ScatterNDBase { +public: + OPENVINO_RTTI_DECLARATION; + ScatterNDUpdate() = default; + /// \param inputs Tensor + /// \param indices Index tensor: Data type must be `element::i32` or `element::i64` + /// \param updates Tensor: Must have same type as inputs + ScatterNDUpdate(const Output& inputs, const Output& indices, const Output& updates) + : util::ScatterNDBase(inputs, indices, updates) {} + + std::shared_ptr 
clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/scatter_update.hpp b/ngraph/core/include/openvino/op/scatter_update.hpp new file mode 100644 index 00000000000..78e192160b2 --- /dev/null +++ b/ngraph/core/include/openvino/op/scatter_update.hpp @@ -0,0 +1,42 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/scatter_base.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// +/// \brief Set new values to slices from data addressed by indices +/// +class OPENVINO_API ScatterUpdate : public util::ScatterBase { +public: + OPENVINO_RTTI_DECLARATION; + ScatterUpdate() = default; + /// + /// \brief Constructs ScatterUpdate operator object. + /// + /// \param data The input tensor to be updated. + /// \param indices The tensor with indexes which will be updated. + /// \param updates The tensor with update values. + /// \param[in] axis The axis at which elements will be updated. 
+ /// + ScatterUpdate(const Output& data, + const Output& indices, + const Output& updates, + const Output& axis); + + std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool evaluate_scatter_update(const HostTensorVector& outputs, const HostTensorVector& inputs) const; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/select.hpp b/ngraph/core/include/openvino/op/select.hpp new file mode 100644 index 00000000000..9dfe70d8b5d --- /dev/null +++ b/ngraph/core/include/openvino/op/select.hpp @@ -0,0 +1,70 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +// clang-format off +/// \brief Elementwise selection operation. +/// +/// ## Inputs +/// +/// | | Type | Description | +/// | ------ | --------------------------------------------- | ------------------------------------------------------------ | +/// | `arg0` | \f$\texttt{bool}[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape, with element `bool`. | +/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of a shape that is broadcast-compatible with `arg0`, with any element type. | +/// | `arg2` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of a shape that is broadcast-compatible with `arg0`, and same element type as `arg1`. | +/// | `auto_broadcast`| AutoBroadcastSpec | Auto broadcast specification. 
| +/// +/// ## Output +/// +/// | Type | Description | +/// | ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg1}[i_1,\dots,i_n]\text{ if }\texttt{arg0}[i_1,\dots,i_n] \neq 0\text{, else }\texttt{arg2}[i_1,\dots,i_n]\f$ | +// clang-format on +class OPENVINO_API Select : public Op { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a selection operation. + Select() : m_auto_broadcast(AutoBroadcastSpec(AutoBroadcastType::NUMPY)) {} + + /// \brief Constructs a selection operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. + /// \param arg2 Node that produces the third input tensor. + /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style + /// implicit broadcasting. + Select(const Output& arg0, + const Output& arg1, + const Output& arg2, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + const AutoBroadcastSpec& get_auto_broadcast() const { + return m_auto_broadcast; + } + void set_auto_broadcast(const AutoBroadcastSpec& auto_broadcast) { + m_auto_broadcast = auto_broadcast; + } + // TODO: Move all uses of get_autob to get_auto_broadcast() and remove this. 
+ const AutoBroadcastSpec& get_autob() const override { + return m_auto_broadcast; + } + bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; + bool has_evaluate() const override; + +private: + AutoBroadcastSpec m_auto_broadcast; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/selu.hpp b/ngraph/core/include/openvino/op/selu.hpp new file mode 100644 index 00000000000..bdf2ae5c966 --- /dev/null +++ b/ngraph/core/include/openvino/op/selu.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Performs a SELU activation function on all elements of the input node +class OPENVINO_API Selu : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Selu() = default; + /// \brief Constructs a Selu node. + /// + /// \param data - Node producing the input tensor + /// \param alpha - Alpha coefficient of SELU operation + /// \param lambda - Lambda coefficient of SELU operation + Selu(const Output& data, const Output& alpha, const Output& lambda); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/shape_of.hpp b/ngraph/core/include/openvino/op/shape_of.hpp new file mode 100644 index 00000000000..6ad940083c5 --- /dev/null +++ b/ngraph/core/include/openvino/op/shape_of.hpp @@ -0,0 +1,67 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Operation that returns the shape of its input argument as a tensor. 
+class OPENVINO_API ShapeOf : public Op { +public: + OPENVINO_RTTI_DECLARATION; + ShapeOf() = default; + /// \brief Constructs a shape-of operation. + ShapeOf(const Output& arg, const element::Type output_type = element::i64); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + void validate_and_infer_types() override; + + element::Type get_output_type() const { + return m_output_type; + } + void set_output_type(element::Type output_type) { + m_output_type = output_type; + } + // Overload collision with method on Node + using Node::set_output_type; + + bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& output_values) const override; + bool evaluate_upper(const HostTensorVector& output_values) const override; + bool constant_fold(OutputVector& output_values, const OutputVector& input_values) override; + +private: + element::Type m_output_type; +}; +} // namespace v3 + +namespace v0 { +/// \brief Operation that returns the shape of its input argument as a tensor. +class OPENVINO_API ShapeOf : public Op { +public: + OPENVINO_RTTI_DECLARATION; + ShapeOf() = default; + /// \brief Constructs a shape-of operation. 
+ ShapeOf(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + void validate_and_infer_types() override; + + bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& output_values) const override; + bool evaluate_upper(const HostTensorVector& output_values) const override; + bool constant_fold(OutputVector& output_values, const OutputVector& input_values) override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/shuffle_channels.hpp b/ngraph/core/include/openvino/op/shuffle_channels.hpp new file mode 100644 index 00000000000..0c32977d087 --- /dev/null +++ b/ngraph/core/include/openvino/op/shuffle_channels.hpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Permutes data in the channel dimension of the input +class OPENVINO_API ShuffleChannels : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + ShuffleChannels() = default; + /// \brief Constructs a ShuffleChannels node. + /// + /// \param data Node producing the input tensor. + /// \param axis Channel dimension index in the data tensor. + /// A negative value means that the index should be + /// calculated from the back of the input data shape. + /// \param group Number of group the channel dimension should be split into. 
+ /// + ShuffleChannels(const Output& data, const int64_t axis = 1, const int64_t group = 1); + + bool visit_attributes(AttributeVisitor& visitor) override; + size_t get_zero_based_axis() const; + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + int64_t get_axis() const { + return m_axis; + } + int64_t get_group() const { + return m_group; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool evaluate_shuffle_channels(const HostTensorVector& outputs, const HostTensorVector& inputs) const; + + int64_t m_axis; + int64_t m_group; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/sigmoid.hpp b/ngraph/core/include/openvino/op/sigmoid.hpp new file mode 100644 index 00000000000..61199793205 --- /dev/null +++ b/ngraph/core/include/openvino/op/sigmoid.hpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API Sigmoid : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + Sigmoid(const Output& arg); + Sigmoid() = default; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/sign.hpp b/ngraph/core/include/openvino/op/sign.hpp new file mode 100644 index 00000000000..428784c3e22 --- /dev/null +++ b/ngraph/core/include/openvino/op/sign.hpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 
+// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise sign operation. +/// +class OPENVINO_API Sign : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + Sign() = default; + /// \brief Constructs an elementwise sign operation. + /// + /// \param arg Node that produces the input tensor. + Sign(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/sin.hpp b/ngraph/core/include/openvino/op/sin.hpp new file mode 100644 index 00000000000..beea3cb5162 --- /dev/null +++ b/ngraph/core/include/openvino/op/sin.hpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +// clang-format off +/// \brief Elementwise sine operation. +/// +/// ## Inputs +/// +/// | | Type | Description | +/// | ----- | --------------------------------- | ----------------------------------------------- | +/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. 
| +/// +/// ## Output +/// +/// | Type | Description | +/// | ---------------------- | ------------------------------------------------------------------------------------ | +/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sin(\texttt{arg}[i_1,\dots,i_n])\f$ | +// clang-format on +class OPENVINO_API Sin : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a sine operation. + /// + /// \param arg Node that produces the input tensor. + Sin(const Output& arg); + Sin() = default; + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/sinh.hpp b/ngraph/core/include/openvino/op/sinh.hpp new file mode 100644 index 00000000000..7759db39134 --- /dev/null +++ b/ngraph/core/include/openvino/op/sinh.hpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise hyperbolic sine (sinh) operation. +class OPENVINO_API Sinh : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a hyperbolic sine operation. + /// + /// \param arg Node that produces the input tensor. 
+ Sinh(const Output& arg); + Sinh() = default; + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/softmax.hpp b/ngraph/core/include/openvino/op/softmax.hpp new file mode 100644 index 00000000000..8818953bd16 --- /dev/null +++ b/ngraph/core/include/openvino/op/softmax.hpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +class OPENVINO_API Softmax : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Softmax() = default; + /// \brief Constructs a softmax operation. + /// + /// \param arg Node that produces the first input tensor.
+ /// `[d0, ...]` + /// \param axis The axis position (0-based) on which to calculate the softmax. + /// + /// Output `[d0, ...]` + /// + Softmax(const Output& arg, const size_t axis = 1); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + size_t get_axis() const { + return m_axis; + } + void set_axis(const size_t axis) { + m_axis = axis; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + size_t m_axis{0}; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/softplus.hpp b/ngraph/core/include/openvino/op/softplus.hpp new file mode 100644 index 00000000000..ae4cea9a661 --- /dev/null +++ b/ngraph/core/include/openvino/op/softplus.hpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v4 { +/// \brief A Self Regularized Non-Monotonic Neural Activation Function +/// f(x) = ln(exp(x) + 1.) +/// +class OPENVINO_API SoftPlus : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + SoftPlus() = default; + /// \brief Constructs an SoftPlus operation. 
+ /// + /// \param data Input tensor + SoftPlus(const Output& arg); + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v4 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/space_to_batch.hpp b/ngraph/core/include/openvino/op/space_to_batch.hpp new file mode 100644 index 00000000000..44bc31fa5c1 --- /dev/null +++ b/ngraph/core/include/openvino/op/space_to_batch.hpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief SpaceToBatch permutes data tensor blocks of spatial data into batch +/// dimension. +/// +/// \note Values from spatial blocks dimensions are moved in the batch dimension. +/// +/// Output node produces a tensor with shape: tensor with shape +/// `[batch * block_shape[0] * block_shape[1] * ... * block_shape[N - 1], +/// (pads_begin[1] + D_1 + pads_end[1]) / block_shape[1], +/// (pads_begin[2] + D_2 + pads_end[2]) / block_shape[2], ..., +/// (pads_begin[N - 1] + D_{N - 1} + pads_end[N - 1]) / block_shape[N - 1]` +/// of the same type as `data` input. +class OPENVINO_API SpaceToBatch : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + SpaceToBatch() = default; + + /// \brief Constructs a SpaceToBatch operation. + /// + /// \param data Node producing the data tensor + /// \param block_shape The sizes of the block of values to be moved + /// \param pads_begin Specifies the padding for the beginning along each axis of + /// `data` input + /// \param pads_end Specifies the padding for the ending along each axis of `data` + /// input. 
+ SpaceToBatch(const Output& data, + const Output& block_shape, + const Output& pads_begin, + const Output& pads_end); + + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor& visitor) override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool evaluate_space_to_batch(const HostTensorVector& outputs, const HostTensorVector& inputs) const; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/space_to_depth.hpp b/ngraph/core/include/openvino/op/space_to_depth.hpp new file mode 100644 index 00000000000..3dee36bc3f4 --- /dev/null +++ b/ngraph/core/include/openvino/op/space_to_depth.hpp @@ -0,0 +1,77 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief SpaceToDepth permutes input tensor blocks of spatial data into depth +/// dimension. +/// +/// \note Values from the height and width dimensions are moved to the depth dimension. +/// +/// Output node produces a tensor with shape: +/// [N, C * blocksize * blocksize, H / blocksize, W / blocksize] +class OPENVINO_API SpaceToDepth : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + enum class SpaceToDepthMode { + // The output depth is gathered from [block_size, ..., block_size, C] + BLOCKS_FIRST, + // The output depth is gathered from [C, block_size, ..., block_size] + DEPTH_FIRST + }; + + SpaceToDepth() = default; + /// \brief Constructs a SpaceToDepth operation. + /// + /// \param data - Node producing the input tensor + /// \param mode Specifies how the output depth dimension is gathered + /// from block coordinates and the old depth dimension. 
+ /// \param block_size - the size of the block of values to be moved + SpaceToDepth(const Output& data, const SpaceToDepthMode& mode, std::size_t block_size = 1); + + SpaceToDepth(const Output& data, const std::string& mode, std::size_t block_size = 1); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::size_t get_block_size() const { + return m_blocksize; + } + SpaceToDepthMode get_mode() const { + return m_mode; + } + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + std::size_t m_blocksize; + SpaceToDepthMode m_mode; +}; +} // namespace v0 +} // namespace op + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v0::SpaceToDepth::SpaceToDepthMode& type); + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v0::SpaceToDepth::SpaceToDepthMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/split.hpp b/ngraph/core/include/openvino/op/split.hpp new file mode 100644 index 00000000000..e1cb09bedf6 --- /dev/null +++ b/ngraph/core/include/openvino/op/split.hpp @@ -0,0 +1,48 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Splits the input tensor into a list of equal sized tensors +class OPENVINO_API Split : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a split operation. 
+ Split() = default; + /// \brief Constructs a split operation. + /// \param data The tensor to be split. + /// \param axis The index of an axis in "data" along which to perform + /// the split. + /// \param num_splits The number of pieces that the data tensor should be + /// split into. + Split(const Output& data, const Output& axis, const size_t num_splits); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + size_t get_num_splits() const { + return m_num_splits; + } + void set_num_splits(const size_t num_splits) { + m_num_splits = num_splits; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + size_t m_num_splits; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/sqrt.hpp b/ngraph/core/include/openvino/op/sqrt.hpp new file mode 100644 index 00000000000..2f16e3bdf33 --- /dev/null +++ b/ngraph/core/include/openvino/op/sqrt.hpp @@ -0,0 +1,44 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +// clang-format off +/// \brief Elementwise square root operation. +/// +/// ## Inputs +/// +/// | | Type | Description | +/// | ----- | --------------------------------- | ----------------------------------------------- | +/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. 
| +/// +/// ## Output +/// +/// | Type | Description | +/// | ---------------------- | ------------------------------------------------------------------------------------- | +/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sqrt{\texttt{arg}[i_1,\dots,i_n]}\f$ | +// clang-format on +class OPENVINO_API Sqrt : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a square operation. + /// + /// \param arg Node that produces the input tensor. + Sqrt(const Output& arg); + Sqrt() = default; + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/squared_difference.hpp b/ngraph/core/include/openvino/op/squared_difference.hpp new file mode 100644 index 00000000000..23c23de070b --- /dev/null +++ b/ngraph/core/include/openvino/op/squared_difference.hpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Calculates an element-wise squared difference between two tensors +/// +/// y[i] = (x1[i] - x2[i])^2 +class OPENVINO_API SquaredDifference : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constrcuts an uninitialized squared difference operation + SquaredDifference() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + /// \brief Constructs the squared difference operation. 
+ /// + /// \param x1 First input tensor + /// \param x2 Second input tensor + /// \param auto_broadcast Auto broadcast specification + SquaredDifference(const Output& x1, + const Output& x2, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/squeeze.hpp b/ngraph/core/include/openvino/op/squeeze.hpp new file mode 100644 index 00000000000..ff212467127 --- /dev/null +++ b/ngraph/core/include/openvino/op/squeeze.hpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API Squeeze : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Squeeze(); + Squeeze(const Output& data, const Output& axes); + Squeeze(const Output& data); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& outputs) const override; + bool evaluate_upper(const HostTensorVector& outputs) const override; + bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool is_dynamic() const override; + +private: + Output get_default_axes_input() const; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/strided_slice.hpp b/ngraph/core/include/openvino/op/strided_slice.hpp new file mode 100644 index 00000000000..150595bc145 --- /dev/null +++ b/ngraph/core/include/openvino/op/strided_slice.hpp @@ -0,0 
+1,110 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a +/// bounding box, optionally with stride. +class OPENVINO_API StridedSlice : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + StridedSlice() = default; + + /// \brief Constructs a dynamic tensor strided slice operation. + /// + /// \param data The tensor to be sliced. + /// \param begin 1D tensor with begin indexes for input blob slicing. + /// \param end 1D tensor with end indexes for input blob slicing. + /// \param strides The slicing strides; for example, strides of `{n,m}` + /// means to take every nth row and every mth column + /// of the input matrix. + /// \param begin_mask When begin_mask[i] equal to 1 means that the + /// corresponding dimension of the begin input is ignored. + /// \param end_mask When end_mask[i] is 1, the corresponding dimension of + /// the end input is ignored. + /// \param new_axis_mask If new_axis_mask[i] is 1, a length 1 dimension + /// is inserted on the i-th position. + /// \param shrink_axis_mask If shrink_axis_mask[i] is 1, the dimension + /// on the i-th position is deleted. + /// \param ellipsis_mask It inserts missing dimensions + /// on a position of a non-zero bit. + StridedSlice(const Output& data, + const Output& begin, + const Output& end, + const Output& strides, + const std::vector& begin_mask, + const std::vector& end_mask, + const std::vector& new_axis_mask = std::vector{}, + const std::vector& shrink_axis_mask = std::vector{}, + const std::vector& ellipsis_mask = std::vector{}); + + /// \brief Constructs a dynamic tensor strided slice operation. + /// + /// \param data The tensor to be sliced. + /// \param begin 1D tensor with begin indexes for input blob slicing. 
+ /// \param end 1D tensor with end indexes for input blob slicing. + /// \param begin_mask When begin_mask[i] equal to 1 means that the + /// corresponding dimension of the begin input is ignored. + /// \param end_mask When end_mask[i] is 1, the corresponding dimension of + /// the end input is ignored. + /// \param new_axis_mask If new_axis_mask[i] is 1, a length 1 dimension + /// is inserted on the i-th position. + /// \param shrink_axis_mask If shrink_axis_mask[i] is 1, the dimension + /// on the i-th position is deleted. + /// \param ellipsis_mask It inserts missing dimensions + /// on a position of a non-zero bit. + StridedSlice(const Output& data, + const Output& begin, + const Output& end, + const std::vector& begin_mask, + const std::vector& end_mask, + const std::vector& new_axis_mask = std::vector{}, + const std::vector& shrink_axis_mask = std::vector{}, + const std::vector& ellipsis_mask = std::vector{}); + + bool visit_attributes(AttributeVisitor& visitor) override; + const std::vector& get_begin_mask() const { + return m_begin_mask; + } + const std::vector& get_end_mask() const { + return m_end_mask; + } + const std::vector& get_new_axis_mask() const { + return m_new_axis_mask; + } + const std::vector& get_shrink_axis_mask() const { + return m_shrink_axis_mask; + } + const std::vector& get_ellipsis_mask() const { + return m_ellipsis_mask; + } + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + void validate_and_infer_types() override; + bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& outputs) const override; + bool evaluate_upper(const HostTensorVector& outputs) const override; + +private: + AxisSet convert_mask_to_axis_set(const std::vector& mask) const; + + std::vector m_begin_mask; + std::vector m_end_mask; + std::vector m_new_axis_mask; + std::vector m_shrink_axis_mask; + 
std::vector m_ellipsis_mask; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/subtract.hpp b/ngraph/core/include/openvino/op/subtract.hpp new file mode 100644 index 00000000000..abb15c9305d --- /dev/null +++ b/ngraph/core/include/openvino/op/subtract.hpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise subtraction operation. +class OPENVINO_API Subtract : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + Subtract() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + + /// \brief Constructs a subtraction operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. + /// \param auto_broadcast Auto broadcast specification + Subtract(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/swish.hpp b/ngraph/core/include/openvino/op/swish.hpp new file mode 100644 index 00000000000..7ec3806a38d --- /dev/null +++ b/ngraph/core/include/openvino/op/swish.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v4 { +/// \brief A Swish Activation Function +/// f(x) = x / (1.0 + exp(-beta * x)) or +/// f(x) = x * sigmoid(beta * x) +/// +class 
OPENVINO_API Swish : public Op { +public: + OPENVINO_RTTI_DECLARATION; + Swish() = default; + + /// \brief Constructs an Swish operation. + /// + /// \param data Input tensor + /// \param beta Scalar with beta value. If the argument is not specified then use + /// the default value 1.0 + Swish(const Output& arg, const Output& beta); + explicit Swish(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v4 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/tan.hpp b/ngraph/core/include/openvino/op/tan.hpp new file mode 100644 index 00000000000..45f20a91b92 --- /dev/null +++ b/ngraph/core/include/openvino/op/tan.hpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +// clang-format off +/// \brief Elementwise tangent operation. +/// +/// ## Inputs +/// +/// | | Type | Description | +/// | ----- | --------------------------------- | ----------------------------------------------- | +/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. | +/// +/// ## Output +/// +/// | Type | Description | +/// | ---------------------- | ------------------------------------------------------------------------------------ | +/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \tan(\texttt{arg}[i_1,\dots,i_n])\f$ | +// clang-format on +class OPENVINO_API Tan : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a tangent operation. 
+ /// + /// \param arg Node that produces the input tensor. + Tan(const Output& arg); + Tan() = default; + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/tanh.hpp b/ngraph/core/include/openvino/op/tanh.hpp new file mode 100644 index 00000000000..f981d9037c5 --- /dev/null +++ b/ngraph/core/include/openvino/op/tanh.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise hyperbolic tangent operation. +class OPENVINO_API Tanh : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a hyperbolic tangent operation. + /// + /// \param arg Node that produces the input tensor. 
+ Tanh(const Output& arg); + Tanh() = default; + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/tile.hpp b/ngraph/core/include/openvino/op/tile.hpp new file mode 100644 index 00000000000..73bcd627d82 --- /dev/null +++ b/ngraph/core/include/openvino/op/tile.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Dynamic Tiling operation which repeats a tensor multiple times +/// along each dimension +class OPENVINO_API Tile : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Tile() = default; + /// \brief Perform dynamic padding of a tensor + /// + /// \param data The node producing input tensor to be padded. 
+ /// \param repeats The node producing the per-dimension replication factor + Tile(const Output& data, const Output& repeats); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool evaluate_tile(const HostTensorVector& outputs, const HostTensorVector& inputs) const; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/topk.hpp b/ngraph/core/include/openvino/op/topk.hpp new file mode 100644 index 00000000000..dc0c2ddfeb4 --- /dev/null +++ b/ngraph/core/include/openvino/op/topk.hpp @@ -0,0 +1,161 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/constant.hpp" +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Computes indices and values of the k maximum/minimum values +/// for each slice along specified axis. +class OPENVINO_API TopK : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + using SortType = TopKSortType; + using Mode = TopKMode; + + /// \brief Constructs a TopK operation + TopK() = default; + /// \brief Constructs a TopK operation with two outputs: values and indices. + /// By default the indices output is described by i32 data type. + /// + /// \param data The input tensor + /// \param k Specifies how many maximum/minimum elements should be computed + /// (note: scalar input tensor) + /// \param axis The axis along which to compute top k indices + /// \param mode Specifies which operation (min or max) is used to select + /// the biggest element of two. 
+ /// \param sort Specifies order of output elements and/or indices + /// Accepted values: none, index, value + /// \param index_element_type Specifies type of produced indices + TopK(const Output& data, + const Output& k, + const int64_t axis, + const std::string& mode, + const std::string& sort, + const element::Type& index_element_type = element::i32); + + TopK(const Output& data, + const Output& k, + const int64_t axis, + const Mode mode, + const SortType sort, + const element::Type& index_element_type = element::i32); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \brief Returns axis value after normalization + /// \note If the input rank required for normalization is dynamic, the exception is + /// thrown + uint64_t get_axis() const; + /// \brief Returns axis value before normalization + int64_t get_provided_axis() const { + return m_axis; + } + void set_axis(const int64_t axis); + Mode get_mode() const { + return m_mode; + } + void set_mode(const Mode mode) { + m_mode = mode; + } + SortType get_sort_type() const { + return m_sort; + } + void set_sort_type(const SortType sort) { + m_sort = sort; + } + element::Type get_index_element_type() const { + return m_index_element_type; + } + void set_index_element_type(const element::Type& index_element_type) { + m_index_element_type = index_element_type; + } + /// \brief Returns the value of K, if available + /// + /// \note If the second input to this op is a constant, the value is retrieved + /// and returned. 
If the input is not constant(dynamic) this method returns 0 + size_t get_k() const; + void set_k(size_t k); + size_t get_default_output_index() const override { + return no_default_index(); + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + int64_t m_axis; + uint64_t m_normalized_axis; + Mode m_mode; + SortType m_sort; + element::Type m_index_element_type{element::i32}; + + virtual size_t read_k_from_constant_node(const std::shared_ptr& node, + const element::Type& k_element_type) const; + + template + size_t validate_and_get_k(const std::shared_ptr& k_constant) const; + ngraph::Shape compute_output_shape(const std::string& node_description, + const PartialShape input_partial_shape, + const int64_t k) const; + void set_axis(const Rank input_rank, const int64_t axis); +}; +} // namespace v1 + +namespace v3 { +/// \brief Computes indices and values of the k maximum/minimum values +/// for each slice along specified axis. +class OPENVINO_API TopK : public v1::TopK { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a TopK operation + TopK() = default; + /// \brief Constructs a TopK operation with two outputs: values and indices. + /// By default the indices output is described by i32 data type. + /// + /// \param data The input tensor + /// \param k Specifies how many maximum/minimum elements should be computed + /// (note: scalar input tensor) + /// \param axis The axis along which to compute top k indices + /// \param mode Specifies which operation (min or max) is used to select + /// the biggest element of two. 
+ /// \param sort Specifies order of output elements and/or indices + /// Accepted values: none, index, value + /// \param index_element_type Specifies type of produced indices + TopK(const Output& data, + const Output& k, + const int64_t axis, + const std::string& mode, + const std::string& sort, + const element::Type& index_element_type = element::i32); + + TopK(const Output& data, + const Output& k, + const int64_t axis, + const Mode mode, + const SortType sort, + const element::Type& index_element_type = element::i32); + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + size_t read_k_from_constant_node(const std::shared_ptr& node, + const element::Type& k_element_type) const override; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/transpose.hpp b/ngraph/core/include/openvino/op/transpose.hpp new file mode 100644 index 00000000000..944b6533044 --- /dev/null +++ b/ngraph/core/include/openvino/op/transpose.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Tensor transpose operation. +class OPENVINO_API Transpose : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Transpose() = default; + /// + /// \brief Constructs a transpose operation. + /// + /// \param arg Node producing the tensor to be transposed. + /// \param input_order Node producing the permutation to apply to the axes + /// of the input shape. Must be a vector with shape [n], + /// where n is the rank of arg. The tensor's value must + /// contain every integer in the range [0, n-1]. 
+ /// + Transpose(const Output& arg, const Output& input_order); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/unsqueeze.hpp b/ngraph/core/include/openvino/op/unsqueeze.hpp new file mode 100644 index 00000000000..6eae736e9a0 --- /dev/null +++ b/ngraph/core/include/openvino/op/unsqueeze.hpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API Unsqueeze : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Unsqueeze() = default; + Unsqueeze(const Output& data, const Output& axes); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& output_values) const override; + bool evaluate_upper(const HostTensorVector& output_values) const override; + + bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/util/scatter_base.hpp b/ngraph/core/include/openvino/op/util/scatter_base.hpp index 6f12ee2b326..ffe6952d84c 100644 --- a/ngraph/core/include/openvino/op/util/scatter_base.hpp +++ b/ngraph/core/include/openvino/op/util/scatter_base.hpp @@ -14,10 
+14,7 @@ namespace util { /// class OPENVINO_API ScatterBase : public Op { public: - static constexpr NodeTypeInfo type_info{"ScatterBase", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } + OPENVINO_RTTI_DECLARATION; void validate_and_infer_types() override; bool visit_attributes(AttributeVisitor& visitor) override; diff --git a/ngraph/core/include/openvino/op/util/scatter_nd_base.hpp b/ngraph/core/include/openvino/op/util/scatter_nd_base.hpp index 84f71a91261..36fb2e22b05 100644 --- a/ngraph/core/include/openvino/op/util/scatter_nd_base.hpp +++ b/ngraph/core/include/openvino/op/util/scatter_nd_base.hpp @@ -14,10 +14,7 @@ namespace util { /// class OPENVINO_API ScatterNDBase : public Op { public: - static constexpr NodeTypeInfo type_info{"ScatterNDBase", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } + OPENVINO_RTTI_DECLARATION; // Respective input ordinal number. static constexpr int INPUTS = 0; static constexpr int INDICES = 1; diff --git a/ngraph/core/include/openvino/op/variadic_split.hpp b/ngraph/core/include/openvino/op/variadic_split.hpp new file mode 100644 index 00000000000..74c3059dac5 --- /dev/null +++ b/ngraph/core/include/openvino/op/variadic_split.hpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief VariadicSplit operation splits an input tensor into pieces along some axis. +/// The pieces may have variadic lengths depending on "split_lengths" attribute. +class OPENVINO_API VariadicSplit : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a variadic split operation. + VariadicSplit() = default; + /// \brief Constructs a variadic split operation. + /// + /// \param data The tensor to be split. + /// \param axis The index of an axis in "data" along which to perform the + /// split. 
+ /// \param split_lengths A list containing the sizes of each output tensor + /// along the split "axis". Size of "split_lengths" should be equal to the number of + /// + /// outputs. The sum of split_lengths must match data.shape[axis] + VariadicSplit(const Output& data, const Output& axis, const Output& split_lengths); + + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + size_t get_default_output_index() const override { + return no_default_index(); + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool evaluate_variadic_split(const HostTensorVector& outputs, const HostTensorVector& inputs) const; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/xor.hpp b/ngraph/core/include/openvino/op/xor.hpp new file mode 100644 index 00000000000..460117ae599 --- /dev/null +++ b/ngraph/core/include/openvino/op/xor.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/binary_elementwise_logical.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise logical-xor operation. +/// +class OPENVINO_API Xor : public util::BinaryElementwiseLogical { +public: + OPENVINO_RTTI_DECLARATION; + Xor() = default; + /// \brief Constructs a logical-xor operation. + /// + /// \param arg0 Node that produces the first input tensor.
+ /// `[d0, ...]` + /// \param arg1 Node that produces the second input tensor.
+ /// `[d0, ...]` + /// \param auto_broadcast Auto broadcast specification + /// + /// Output `[d0, ...]` + /// + Xor(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec()); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/src/op/lstm_sequence.cpp b/ngraph/core/src/op/lstm_sequence.cpp index 0b30eddf153..81a0e58f5fb 100644 --- a/ngraph/core/src/op/lstm_sequence.cpp +++ b/ngraph/core/src/op/lstm_sequence.cpp @@ -17,7 +17,7 @@ using namespace ngraph; using namespace std; OPENVINO_RTTI_DEFINITION(op::v0::LSTMSequence, "LSTMSequence", 0); -OPENVINO_RTTI_DEFINITION(op::v5::LSTMSequence, "LSTMSequence", 5); +OPENVINO_RTTI_DEFINITION(op::v5::LSTMSequence, "LSTMSequence", 5, util::RNNCellBase); op::v0::LSTMSequence::LSTMSequence() : Op(), diff --git a/ngraph/core/src/op/random_uniform.cpp b/ngraph/core/src/op/random_uniform.cpp index 90a356cdef3..c343c408c4b 100644 --- a/ngraph/core/src/op/random_uniform.cpp +++ b/ngraph/core/src/op/random_uniform.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v8::RandomUniform, "RandomUniform", 8); +OPENVINO_RTTI_DEFINITION(op::v8::RandomUniform, "RandomUniform", 8); op::v8::RandomUniform::RandomUniform(const Output& out_shape, const Output& min_val, diff --git a/ngraph/core/src/op/range.cpp b/ngraph/core/src/op/range.cpp index e3d7cd1da76..7bf31bdf81f 100644 --- a/ngraph/core/src/op/range.cpp +++ b/ngraph/core/src/op/range.cpp @@ -43,7 +43,7 @@ check_value(T value) { return value == value && value_minus_value == value_minus_value; } -NGRAPH_RTTI_DEFINITION(op::v4::Range, "Range", 4); +OPENVINO_RTTI_DEFINITION(op::v4::Range, "Range", 4); op::v4::Range::Range(const Output& start, 
const Output& stop, @@ -286,7 +286,7 @@ bool op::v4::Range::has_evaluate() const { return false; } -NGRAPH_RTTI_DEFINITION(op::v0::Range, "Range", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Range, "Range", 0); op::v0::Range::Range(const Output& start, const Output& stop, const Output& step) : Op({start, stop, step}) { diff --git a/ngraph/core/src/op/reduce_l1.cpp b/ngraph/core/src/op/reduce_l1.cpp index c23b63173fc..997b9c57af1 100644 --- a/ngraph/core/src/op/reduce_l1.cpp +++ b/ngraph/core/src/op/reduce_l1.cpp @@ -16,7 +16,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v4::ReduceL1, "ReduceL1", 4, util::ArithmeticReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v4::ReduceL1, "ReduceL1", 4, util::ArithmeticReductionKeepDims); op::v4::ReduceL1::ReduceL1(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { diff --git a/ngraph/core/src/op/reduce_l2.cpp b/ngraph/core/src/op/reduce_l2.cpp index 396c365f30e..8bdfccd80cf 100644 --- a/ngraph/core/src/op/reduce_l2.cpp +++ b/ngraph/core/src/op/reduce_l2.cpp @@ -16,7 +16,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v4::ReduceL2, "ReduceL2", 4, util::ArithmeticReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v4::ReduceL2, "ReduceL2", 4, util::ArithmeticReductionKeepDims); op::v4::ReduceL2::ReduceL2(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { diff --git a/ngraph/core/src/op/reduce_logical_and.cpp b/ngraph/core/src/op/reduce_logical_and.cpp index 081ea960e23..9c79c270a16 100644 --- a/ngraph/core/src/op/reduce_logical_and.cpp +++ b/ngraph/core/src/op/reduce_logical_and.cpp @@ -15,7 +15,7 @@ using namespace ngraph; using namespace std; -NGRAPH_RTTI_DEFINITION(op::v1::ReduceLogicalAnd, "ReduceLogicalAnd", 1, util::LogicalReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v1::ReduceLogicalAnd, "ReduceLogicalAnd", 1, 
util::LogicalReductionKeepDims); op::v1::ReduceLogicalAnd::ReduceLogicalAnd(const Output& data, const Output& reduction_axes, diff --git a/ngraph/core/src/op/reduce_logical_or.cpp b/ngraph/core/src/op/reduce_logical_or.cpp index f383cc51661..6cca3903d35 100644 --- a/ngraph/core/src/op/reduce_logical_or.cpp +++ b/ngraph/core/src/op/reduce_logical_or.cpp @@ -15,7 +15,7 @@ using namespace ngraph; using namespace std; -NGRAPH_RTTI_DEFINITION(op::v1::ReduceLogicalOr, "ReduceLogicalOr", 1, util::LogicalReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v1::ReduceLogicalOr, "ReduceLogicalOr", 1, util::LogicalReductionKeepDims); op::v1::ReduceLogicalOr::ReduceLogicalOr(const Output& data, const Output& reduction_axes, diff --git a/ngraph/core/src/op/reduce_mean.cpp b/ngraph/core/src/op/reduce_mean.cpp index b32bd7ac76e..ea92204c589 100644 --- a/ngraph/core/src/op/reduce_mean.cpp +++ b/ngraph/core/src/op/reduce_mean.cpp @@ -17,7 +17,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::ReduceMean, "ReduceMean", 1, util::ArithmeticReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v1::ReduceMean, "ReduceMean", 1, util::ArithmeticReductionKeepDims); op::v1::ReduceMean::ReduceMean(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { diff --git a/ngraph/core/src/op/reduce_prod.cpp b/ngraph/core/src/op/reduce_prod.cpp index 30bd01de272..1dcbdb09575 100644 --- a/ngraph/core/src/op/reduce_prod.cpp +++ b/ngraph/core/src/op/reduce_prod.cpp @@ -16,7 +16,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::ReduceProd, "ReduceProd", 1, util::ArithmeticReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v1::ReduceProd, "ReduceProd", 1, util::ArithmeticReductionKeepDims); op::v1::ReduceProd::ReduceProd(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { diff --git 
a/ngraph/core/src/op/reduce_sum.cpp b/ngraph/core/src/op/reduce_sum.cpp index ae0e9a60364..1055274312d 100644 --- a/ngraph/core/src/op/reduce_sum.cpp +++ b/ngraph/core/src/op/reduce_sum.cpp @@ -18,7 +18,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::ReduceSum, "ReduceSum", 1, util::ArithmeticReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v1::ReduceSum, "ReduceSum", 1, util::ArithmeticReductionKeepDims); op::v1::ReduceSum::ReduceSum(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { diff --git a/ngraph/core/src/op/region_yolo.cpp b/ngraph/core/src/op/region_yolo.cpp index a481da20de5..e8cf031ff3c 100644 --- a/ngraph/core/src/op/region_yolo.cpp +++ b/ngraph/core/src/op/region_yolo.cpp @@ -10,7 +10,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::RegionYolo, "RegionYolo", 0); +OPENVINO_RTTI_DEFINITION(op::v0::RegionYolo, "RegionYolo", 0); op::RegionYolo::RegionYolo(const Output& input, const size_t coords, diff --git a/ngraph/core/src/op/relu.cpp b/ngraph/core/src/op/relu.cpp index 4c65f2bc573..e5a053de0e9 100644 --- a/ngraph/core/src/op/relu.cpp +++ b/ngraph/core/src/op/relu.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::Relu, "Relu", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Relu, "Relu", 0, util::UnaryElementwiseArithmetic); op::Relu::Relu(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/reorg_yolo.cpp b/ngraph/core/src/op/reorg_yolo.cpp index ca6b20fa7dd..f90b07baa0e 100644 --- a/ngraph/core/src/op/reorg_yolo.cpp +++ b/ngraph/core/src/op/reorg_yolo.cpp @@ -10,7 +10,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::ReorgYolo, "ReorgYolo", 0); +OPENVINO_RTTI_DEFINITION(op::v0::ReorgYolo, "ReorgYolo", 0); op::ReorgYolo::ReorgYolo(const Output& 
input, const Strides& strides) : Op({input}), m_strides(strides) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/reshape.cpp b/ngraph/core/src/op/reshape.cpp index de161673a05..6593a7b2fd4 100644 --- a/ngraph/core/src/op/reshape.cpp +++ b/ngraph/core/src/op/reshape.cpp @@ -37,7 +37,7 @@ void compute_output_shape(const HostTensorPtr& shape_pattern, std::vector& arg, const Output& shape_pattern, bool zero_flag) : Op({arg, shape_pattern}), @@ -78,8 +78,8 @@ void op::v1::Reshape::validate_and_infer_types() { HostTensorPtr lb, ub; std::tie(lb, ub) = evaluate_both_bounds(get_input_source_output(1)); if (lb && ub) { - const auto lower_bound = std::make_shared(lb)->cast_vector(); - const auto upper_bound = std::make_shared(ub)->cast_vector(); + const auto lower_bound = std::make_shared(lb)->cast_vector(); + const auto upper_bound = std::make_shared(ub)->cast_vector(); shape_can_be_calculated = true; NGRAPH_CHECK(lower_bound.size() == upper_bound.size()); for (size_t i = 0; i < lower_bound.size(); ++i) { @@ -206,8 +206,8 @@ bool op::v1::Reshape::constant_fold(OutputVector& output_values, const OutputVec const auto& shape = get_output_shape(0); - if (auto data_const = std::dynamic_pointer_cast(inputs_values[0].get_node_shared_ptr())) { - output_values[0] = std::make_shared(*data_const, shape); + if (auto data_const = std::dynamic_pointer_cast(inputs_values[0].get_node_shared_ptr())) { + output_values[0] = std::make_shared(*data_const, shape); return true; } return false; diff --git a/ngraph/core/src/op/reverse.cpp b/ngraph/core/src/op/reverse.cpp index 9c7d72afe2c..f50ee79ab4d 100644 --- a/ngraph/core/src/op/reverse.cpp +++ b/ngraph/core/src/op/reverse.cpp @@ -19,7 +19,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::Reverse, "Reverse", 1); +OPENVINO_RTTI_DEFINITION(op::v1::Reverse, "Reverse", 1); op::v1::Reverse::Reverse(const Output& data, const Output& reversed_axes, const std::string& mode) : Op({data, 
reversed_axes}), @@ -197,7 +197,7 @@ bool op::v1::Reverse::has_evaluate() const { } } -std::ostream& ngraph::operator<<(std::ostream& s, const op::v1::Reverse::Mode& type) { +std::ostream& ov::operator<<(std::ostream& s, const op::v1::Reverse::Mode& type) { return s << as_string(type); } diff --git a/ngraph/core/src/op/reverse_sequence.cpp b/ngraph/core/src/op/reverse_sequence.cpp index 1eda1f50b1c..efc380fefaa 100644 --- a/ngraph/core/src/op/reverse_sequence.cpp +++ b/ngraph/core/src/op/reverse_sequence.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::ReverseSequence, "ReverseSequence", 0); +OPENVINO_RTTI_DEFINITION(op::v0::ReverseSequence, "ReverseSequence", 0); op::ReverseSequence::ReverseSequence(const Output& arg, const Output& seq_indices, diff --git a/ngraph/core/src/op/rnn_cell.cpp b/ngraph/core/src/op/rnn_cell.cpp index 43cb8fd5c50..c67007d483e 100644 --- a/ngraph/core/src/op/rnn_cell.cpp +++ b/ngraph/core/src/op/rnn_cell.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::RNNCell, "RNNCell", 0, util::RNNCellBase); +OPENVINO_RTTI_DEFINITION(op::v0::RNNCell, "RNNCell", 0, util::RNNCellBase); op::v0::RNNCell::RNNCell() { m_activations = {"tanh"}; diff --git a/ngraph/core/src/op/rnn_sequence.cpp b/ngraph/core/src/op/rnn_sequence.cpp index 809baa02592..10f6f382608 100644 --- a/ngraph/core/src/op/rnn_sequence.cpp +++ b/ngraph/core/src/op/rnn_sequence.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v5::RNNSequence, "RNNSequence", 4); +OPENVINO_RTTI_DEFINITION(op::v5::RNNSequence, "RNNSequence", 4, util::RNNCellBase); op::v5::RNNSequence::RNNSequence() : m_direction(op::RecurrentSequenceDirection::FORWARD) {} diff --git a/ngraph/core/src/op/roi_align.cpp b/ngraph/core/src/op/roi_align.cpp index 580b7b8ddd3..4d4faa8b45d 100644 --- a/ngraph/core/src/op/roi_align.cpp +++ b/ngraph/core/src/op/roi_align.cpp @@ -12,7 +12,7 @@ using 
namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::v3::ROIAlign::type_info; +OPENVINO_RTTI_DEFINITION(op::v3::ROIAlign, "ROIAlign", 3); op::v3::ROIAlign::ROIAlign(const Output& input, const Output& rois, diff --git a/ngraph/core/src/op/roi_pooling.cpp b/ngraph/core/src/op/roi_pooling.cpp index 433d70c5a6c..cd349783b32 100644 --- a/ngraph/core/src/op/roi_pooling.cpp +++ b/ngraph/core/src/op/roi_pooling.cpp @@ -9,7 +9,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::ROIPooling, "ROIPooling", 0); +OPENVINO_RTTI_DEFINITION(op::v0::ROIPooling, "ROIPooling", 0); op::ROIPooling::ROIPooling(const Output& input, const Output& coords, diff --git a/ngraph/core/src/op/roll.cpp b/ngraph/core/src/op/roll.cpp index b9027f12b3a..088a593852f 100644 --- a/ngraph/core/src/op/roll.cpp +++ b/ngraph/core/src/op/roll.cpp @@ -11,7 +11,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v7::Roll, "Roll", 7); +OPENVINO_RTTI_DEFINITION(op::v7::Roll, "Roll", 7); op::v7::Roll::Roll(const Output& data, const Output& shift, const Output& axes) : Op({data, shift, axes}) { @@ -47,7 +47,7 @@ void op::v7::Roll::validate_and_infer_types() { // If shift is a scalar, than axes can be arbitrary 1d tensor and we don't need // to check shift shape consistency with axes, otherwise the check is needed. 
- if (!(shift_pshape.is_static() && is_scalar(shift_pshape.to_shape()))) { + if (!(shift_pshape.is_static() && ngraph::is_scalar(shift_pshape.to_shape()))) { NODE_VALIDATION_CHECK(this, shift_pshape.compatible(axes_pshape), "If shift is a 1D vector, axes must be a 1D tensor of the same size."); diff --git a/ngraph/core/src/op/round.cpp b/ngraph/core/src/op/round.cpp index aa33f7ccc18..36f74be1bfa 100644 --- a/ngraph/core/src/op/round.cpp +++ b/ngraph/core/src/op/round.cpp @@ -61,7 +61,7 @@ bool evaluate_round(const HostTensorPtr& arg0, } } // namespace roundop -NGRAPH_RTTI_DEFINITION(op::v5::Round, "Round", 5); +OPENVINO_RTTI_DEFINITION(op::v5::Round, "Round", 5); op::v5::Round::Round(const Output& arg, RoundMode mode) : Op({arg}), m_mode(mode) { constructor_validate_and_infer_types(); @@ -113,7 +113,7 @@ bool op::v5::Round::has_evaluate() const { return false; } -std::ostream& ngraph::operator<<(std::ostream& s, const op::v5::Round::RoundMode& type) { +std::ostream& ov::operator<<(std::ostream& s, const op::v5::Round::RoundMode& type) { return s << as_string(type); } diff --git a/ngraph/core/src/op/scatter_elements_update.cpp b/ngraph/core/src/op/scatter_elements_update.cpp index 46275a703f4..b6bcb9c1a0e 100644 --- a/ngraph/core/src/op/scatter_elements_update.cpp +++ b/ngraph/core/src/op/scatter_elements_update.cpp @@ -13,7 +13,7 @@ using namespace ngraph; using namespace std; -NGRAPH_RTTI_DEFINITION(op::ScatterElementsUpdate, "ScatterElementsUpdate", 3); +OPENVINO_RTTI_DEFINITION(op::v3::ScatterElementsUpdate, "ScatterElementsUpdate", 3); op::v3::ScatterElementsUpdate::ScatterElementsUpdate(const Output& data, const Output& indices, diff --git a/ngraph/core/src/op/scatter_nd_update.cpp b/ngraph/core/src/op/scatter_nd_update.cpp index e06660d4992..a1544822a3d 100644 --- a/ngraph/core/src/op/scatter_nd_update.cpp +++ b/ngraph/core/src/op/scatter_nd_update.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo 
op::v3::ScatterNDUpdate::type_info; +OPENVINO_RTTI_DEFINITION(op::v3::ScatterNDUpdate, "ScatterNDUpdate", 3, util::ScatterNDBase); shared_ptr op::v3::ScatterNDUpdate::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v3_ScatterNDUpdate_clone_with_new_inputs); diff --git a/ngraph/core/src/op/scatter_update.cpp b/ngraph/core/src/op/scatter_update.cpp index 0fb131bf5ff..e8182e8b211 100644 --- a/ngraph/core/src/op/scatter_update.cpp +++ b/ngraph/core/src/op/scatter_update.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::v3::ScatterUpdate::type_info; +OPENVINO_RTTI_DEFINITION(op::v3::ScatterUpdate, "ScatterUpdate", 3, util::ScatterBase); op::v3::ScatterUpdate::ScatterUpdate(const Output& data, const Output& indices, diff --git a/ngraph/core/src/op/select.cpp b/ngraph/core/src/op/select.cpp index aff5c697e28..9a55c76dfd1 100644 --- a/ngraph/core/src/op/select.cpp +++ b/ngraph/core/src/op/select.cpp @@ -16,7 +16,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::Select, "Select", 1); +OPENVINO_RTTI_DEFINITION(op::v1::Select, "Select", 1); op::v1::Select::Select(const Output& arg0, const Output& arg1, diff --git a/ngraph/core/src/op/selu.cpp b/ngraph/core/src/op/selu.cpp index 08ddf2c31b8..8987823d612 100644 --- a/ngraph/core/src/op/selu.cpp +++ b/ngraph/core/src/op/selu.cpp @@ -9,7 +9,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Selu, "Selu", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Selu, "Selu", 0); op::v0::Selu::Selu(const Output& data, const Output& alpha, const Output& lambda) : Op({data, alpha, lambda}) { diff --git a/ngraph/core/src/op/shape_of.cpp b/ngraph/core/src/op/shape_of.cpp index 69090af304a..e4eb8f37021 100644 --- a/ngraph/core/src/op/shape_of.cpp +++ b/ngraph/core/src/op/shape_of.cpp @@ -20,7 +20,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::v3::ShapeOf::type_info; 
+OPENVINO_RTTI_DEFINITION(op::v3::ShapeOf, "ShapeOf", 3); op::v3::ShapeOf::ShapeOf(const Output& arg, element::Type output_type) : Op({arg}), m_output_type(output_type) { constructor_validate_and_infer_types(); @@ -180,7 +180,7 @@ bool op::v3::ShapeOf::constant_fold(OutputVector& output_values, const OutputVec } // op::v0::ShapeOf -NGRAPH_RTTI_DEFINITION(op::v0::ShapeOf, "ShapeOf", 0); +OPENVINO_RTTI_DEFINITION(op::v0::ShapeOf, "ShapeOf", 0); op::v0::ShapeOf::ShapeOf(const Output& arg) : Op({arg}) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/shuffle_channels.cpp b/ngraph/core/src/op/shuffle_channels.cpp index 1e73dac967c..89dd145b705 100644 --- a/ngraph/core/src/op/shuffle_channels.cpp +++ b/ngraph/core/src/op/shuffle_channels.cpp @@ -18,7 +18,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::ShuffleChannels, "ShuffleChannels", 0); +OPENVINO_RTTI_DEFINITION(op::v0::ShuffleChannels, "ShuffleChannels", 0); op::ShuffleChannels::ShuffleChannels(const Output& data, const int64_t axis, const int64_t group) : Op({data}), diff --git a/ngraph/core/src/op/sigmoid.cpp b/ngraph/core/src/op/sigmoid.cpp index 1ceaca5144b..0565cf62f00 100644 --- a/ngraph/core/src/op/sigmoid.cpp +++ b/ngraph/core/src/op/sigmoid.cpp @@ -15,15 +15,15 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::Sigmoid::type_info; +OPENVINO_RTTI_DEFINITION(ov::op::v0::Sigmoid, "Sigmoid", 0, util::UnaryElementwiseArithmetic); -shared_ptr op::Sigmoid::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr ov::op::v0::Sigmoid::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_Sigmoid_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } -op::Sigmoid::Sigmoid(const Output& arg) : UnaryElementwiseArithmetic(arg) { +ov::op::v0::Sigmoid::Sigmoid(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); } 
@@ -56,13 +56,13 @@ bool evaluate_sigmoid(const HostTensorPtr& arg0, const HostTensorPtr& out) { } } // namespace sigmoid -bool op::Sigmoid::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::v0::Sigmoid::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v0_Sigmoid_evaluate); NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); return sigmoid::evaluate_sigmoid(inputs[0], outputs[0]); } -bool op::Sigmoid::has_evaluate() const { +bool ov::op::v0::Sigmoid::has_evaluate() const { NGRAPH_OP_SCOPE(v0_Sigmoid_has_evaluate); switch (get_input_element_type(0)) { case ngraph::element::boolean: diff --git a/ngraph/core/src/op/sign.cpp b/ngraph/core/src/op/sign.cpp index 74b0e89acf9..50aa74539b9 100644 --- a/ngraph/core/src/op/sign.cpp +++ b/ngraph/core/src/op/sign.cpp @@ -5,15 +5,14 @@ #include "ngraph/op/sign.hpp" #include "itt.hpp" - -using namespace std; -using namespace ngraph; - #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/sign.hpp" #include "ngraph/validation_util.hpp" -NGRAPH_RTTI_DEFINITION(op::v0::Sign, "Sign", 0, util::UnaryElementwiseArithmetic); +using namespace std; +using namespace ngraph; + +OPENVINO_RTTI_DEFINITION(op::v0::Sign, "Sign", 0, util::UnaryElementwiseArithmetic); op::Sign::Sign(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/sin.cpp b/ngraph/core/src/op/sin.cpp index 07e91332df3..9afac15a716 100644 --- a/ngraph/core/src/op/sin.cpp +++ b/ngraph/core/src/op/sin.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::Sin::type_info; +OPENVINO_RTTI_DEFINITION(op::v0::Sin, "Sin", 0, util::UnaryElementwiseArithmetic); op::Sin::Sin(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/sinh.cpp 
b/ngraph/core/src/op/sinh.cpp index 24484615e3a..0fd742f575d 100644 --- a/ngraph/core/src/op/sinh.cpp +++ b/ngraph/core/src/op/sinh.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Sinh, "Sinh", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Sinh, "Sinh", 0, util::UnaryElementwiseArithmetic); op::Sinh::Sinh(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/sink.cpp b/ngraph/core/src/op/sink.cpp index a1ed861d5f6..567f7faa439 100644 --- a/ngraph/core/src/op/sink.cpp +++ b/ngraph/core/src/op/sink.cpp @@ -6,6 +6,6 @@ using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::Sink, "Sink", 0); +OPENVINO_RTTI_DEFINITION(op::Sink, "Sink", 0); -op::Sink::~Sink() {} +op::Sink::~Sink() = default; diff --git a/ngraph/core/src/op/softmax.cpp b/ngraph/core/src/op/softmax.cpp index 057725b414f..d7c2858a0de 100644 --- a/ngraph/core/src/op/softmax.cpp +++ b/ngraph/core/src/op/softmax.cpp @@ -46,7 +46,7 @@ bool evaluate_softmax(const HostTensorPtr& arg, const HostTensorPtr& out, const } // namespace // *** SOFTMAX OP SET V1 *** -NGRAPH_RTTI_DEFINITION(op::v1::Softmax, "Softmax", 1); +OPENVINO_RTTI_DEFINITION(op::v1::Softmax, "Softmax", 1); op::v1::Softmax::Softmax(const Output& arg, const size_t axis) : Op({arg}), m_axis(axis) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/softplus.cpp b/ngraph/core/src/op/softplus.cpp index 778b4380154..411a5d7b8a1 100644 --- a/ngraph/core/src/op/softplus.cpp +++ b/ngraph/core/src/op/softplus.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v4::SoftPlus, "SoftPlus", 4); +OPENVINO_RTTI_DEFINITION(op::v4::SoftPlus, "SoftPlus", 4); op::v4::SoftPlus::SoftPlus(const Output& arg) : Op({arg}) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/space_to_batch.cpp b/ngraph/core/src/op/space_to_batch.cpp index 
f380887b5cb..0bd707e689c 100644 --- a/ngraph/core/src/op/space_to_batch.cpp +++ b/ngraph/core/src/op/space_to_batch.cpp @@ -21,7 +21,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::SpaceToBatch, "SpaceToBatch", 1); +OPENVINO_RTTI_DEFINITION(op::v1::SpaceToBatch, "SpaceToBatch", 1); ngraph::op::v1::SpaceToBatch::SpaceToBatch(const ngraph::Output& data, const ngraph::Output& block_shape, diff --git a/ngraph/core/src/op/space_to_depth.cpp b/ngraph/core/src/op/space_to_depth.cpp index c9bd7a92b9e..f4a3436f4d8 100644 --- a/ngraph/core/src/op/space_to_depth.cpp +++ b/ngraph/core/src/op/space_to_depth.cpp @@ -17,16 +17,16 @@ using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::SpaceToDepth, "SpaceToDepth", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::SpaceToDepth, "SpaceToDepth", 0); -op::SpaceToDepth::SpaceToDepth(const Output& data, const SpaceToDepthMode& mode, size_t block_size) +ov::op::v0::SpaceToDepth::SpaceToDepth(const Output& data, const SpaceToDepthMode& mode, size_t block_size) : Op({data}), m_blocksize(block_size), m_mode(mode) { constructor_validate_and_infer_types(); } -op::SpaceToDepth::SpaceToDepth(const Output& data, const std::string& mode, size_t block_size) +ov::op::v0::SpaceToDepth::SpaceToDepth(const Output& data, const std::string& mode, size_t block_size) : SpaceToDepth(data, as_enum(mode), block_size) {} bool ngraph::op::v0::SpaceToDepth::visit_attributes(AttributeVisitor& visitor) { @@ -36,7 +36,7 @@ bool ngraph::op::v0::SpaceToDepth::visit_attributes(AttributeVisitor& visitor) { return true; } -std::shared_ptr op::SpaceToDepth::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v0::SpaceToDepth::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_SpaceToDepth_clone_with_new_inputs); if (new_args.size() != 1) { throw ngraph_error("Incorrect number of new arguments"); @@ -88,7 +88,7 @@ void ngraph::op::v0::SpaceToDepth::validate_and_infer_types() { bool 
evaluate_space_to_depth(const HostTensorVector& outputs, const HostTensorVector& inputs, const std::size_t block_size, - const op::SpaceToDepth::SpaceToDepthMode mode) { + const ov::op::v0::SpaceToDepth::SpaceToDepthMode mode) { const auto& in = inputs[0]; const auto& out = outputs[0]; size_t elem_size = in->get_element_type().size(); @@ -116,7 +116,7 @@ bool ngraph::op::v0::SpaceToDepth::has_evaluate() const { return !get_input_partial_shape(0).is_dynamic(); } -std::ostream& ngraph::operator<<(std::ostream& s, const op::v0::SpaceToDepth::SpaceToDepthMode& type) { +std::ostream& ov::operator<<(std::ostream& s, const op::v0::SpaceToDepth::SpaceToDepthMode& type) { return s << as_string(type); } diff --git a/ngraph/core/src/op/split.cpp b/ngraph/core/src/op/split.cpp index f1aaba10578..2d70a0be5f1 100644 --- a/ngraph/core/src/op/split.cpp +++ b/ngraph/core/src/op/split.cpp @@ -18,7 +18,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::Split, "Split", 1); +OPENVINO_RTTI_DEFINITION(op::v1::Split, "Split", 1); op::v1::Split::Split(const Output& data, const Output& axis, const size_t num_splits) : Op({data, axis}), diff --git a/ngraph/core/src/op/sqrt.cpp b/ngraph/core/src/op/sqrt.cpp index 32533220c5b..dec1c8622a9 100644 --- a/ngraph/core/src/op/sqrt.cpp +++ b/ngraph/core/src/op/sqrt.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Sqrt, "Sqrt", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Sqrt, "Sqrt", 0, util::UnaryElementwiseArithmetic); op::Sqrt::Sqrt(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/squared_difference.cpp b/ngraph/core/src/op/squared_difference.cpp index f8667e0fcb2..d9141388a99 100644 --- a/ngraph/core/src/op/squared_difference.cpp +++ b/ngraph/core/src/op/squared_difference.cpp @@ -7,21 +7,20 @@ #include "itt.hpp" using namespace std; -using namespace ngraph; // 
------------------------------ v0 ------------------------------------------- -NGRAPH_RTTI_DEFINITION(op::SquaredDifference, "SquaredDifference", 0, util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(ov::op::v0::SquaredDifference, "SquaredDifference", 0, util::BinaryElementwiseArithmetic); -op::SquaredDifference::SquaredDifference(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast) +ov::op::v0::SquaredDifference::SquaredDifference(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { constructor_validate_and_infer_types(); } -shared_ptr op::SquaredDifference::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr ov::op::v0::SquaredDifference::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_SquaredDifference_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); + return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } diff --git a/ngraph/core/src/op/squeeze.cpp b/ngraph/core/src/op/squeeze.cpp index d837b661635..0d04d6a08bd 100644 --- a/ngraph/core/src/op/squeeze.cpp +++ b/ngraph/core/src/op/squeeze.cpp @@ -19,7 +19,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Squeeze, "Squeeze", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Squeeze, "Squeeze", 0); op::Squeeze::Squeeze() : Op() {} @@ -268,8 +268,8 @@ bool op::v0::Squeeze::constant_fold(OutputVector& output_values, const OutputVec const auto& shape = get_output_shape(0); - if (auto data_const = std::dynamic_pointer_cast(inputs_values[0].get_node_shared_ptr())) { - output_values[0] = std::make_shared(*data_const, shape); + if (auto data_const = std::dynamic_pointer_cast(inputs_values[0].get_node_shared_ptr())) { + output_values[0] = std::make_shared(*data_const, shape); return true; } return false; diff --git 
a/ngraph/core/src/op/strided_slice.cpp b/ngraph/core/src/op/strided_slice.cpp index 6ae268a122d..c7ef03d52ec 100644 --- a/ngraph/core/src/op/strided_slice.cpp +++ b/ngraph/core/src/op/strided_slice.cpp @@ -23,7 +23,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::StridedSlice, "StridedSlice", 1); +OPENVINO_RTTI_DEFINITION(op::v1::StridedSlice, "StridedSlice", 1); op::v1::StridedSlice::StridedSlice(const Output& data, const Output& begin, diff --git a/ngraph/core/src/op/subtract.cpp b/ngraph/core/src/op/subtract.cpp index d7dbfd0182e..3554ab1d61e 100644 --- a/ngraph/core/src/op/subtract.cpp +++ b/ngraph/core/src/op/subtract.cpp @@ -51,7 +51,7 @@ bool evaluate_subtract(const HostTensorPtr& arg0, // ------------------------------- v1 ------------------------------------------ -NGRAPH_RTTI_DEFINITION(op::v1::Subtract, "Subtract", 1, util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v1::Subtract, "Subtract", 1, util::BinaryElementwiseArithmetic); op::v1::Subtract::Subtract(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/swish.cpp b/ngraph/core/src/op/swish.cpp index 4a990cd7609..6f08453cff9 100644 --- a/ngraph/core/src/op/swish.cpp +++ b/ngraph/core/src/op/swish.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v4::Swish, "Swish", 4); +OPENVINO_RTTI_DEFINITION(op::v4::Swish, "Swish", 4); op::v4::Swish::Swish(const Output& arg) : Op({arg}) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/tan.cpp b/ngraph/core/src/op/tan.cpp index cc59868935d..5c23321fe58 100644 --- a/ngraph/core/src/op/tan.cpp +++ b/ngraph/core/src/op/tan.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Tan, "Tan", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Tan, "Tan", 0, 
util::UnaryElementwiseArithmetic); op::Tan::Tan(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/tanh.cpp b/ngraph/core/src/op/tanh.cpp index 743dc5dfae3..e638c97f51e 100644 --- a/ngraph/core/src/op/tanh.cpp +++ b/ngraph/core/src/op/tanh.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Tanh, "Tanh", 0, op::util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Tanh, "Tanh", 0, op::util::UnaryElementwiseArithmetic); op::Tanh::Tanh(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/tile.cpp b/ngraph/core/src/op/tile.cpp index 2d55b04641d..210018d06a6 100644 --- a/ngraph/core/src/op/tile.cpp +++ b/ngraph/core/src/op/tile.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Tile, "Tile", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Tile, "Tile", 0); op::v0::Tile::Tile(const Output& data, const Output& repeats) : Op({data, repeats}) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/topk.cpp b/ngraph/core/src/op/topk.cpp index e11457eca43..ecbc42f5e43 100644 --- a/ngraph/core/src/op/topk.cpp +++ b/ngraph/core/src/op/topk.cpp @@ -136,7 +136,7 @@ size_t read_k_from_host_tensor(const HostTensorPtr& arg_k) { } // namespace topk // v1 version starts -NGRAPH_RTTI_DEFINITION(op::v1::TopK, "TopK", 1); +OPENVINO_RTTI_DEFINITION(op::v1::TopK, "TopK", 1); static const std::uint64_t UNKNOWN_NORMALIZED_AXIS = std::numeric_limits::max(); @@ -196,7 +196,7 @@ void op::v1::TopK::validate_and_infer_types() { "Index element type attribute should be either \'i32\' or \'i64\'. 
Got: ", m_index_element_type); - if (op::is_constant(input_value(1).get_node())) { + if (ov::op::util::is_constant(input_value(1).get_node())) { // Check k value read_k_from_constant_node(input_value(1).get_node_shared_ptr(), get_input_element_type(1)); } @@ -281,7 +281,7 @@ size_t op::v1::TopK::read_k_from_constant_node(const shared_ptr& node, k_element_type, ")."); - const auto k_constant = ov::as_type_ptr(node); + const auto k_constant = ov::as_type_ptr(node); size_t k = 0; @@ -303,7 +303,7 @@ size_t op::v1::TopK::read_k_from_constant_node(const shared_ptr& node, } template -size_t op::v1::TopK::validate_and_get_k(const shared_ptr& k_constant) const { +size_t op::v1::TopK::validate_and_get_k(const shared_ptr& k_constant) const { const auto k_const_contents = k_constant->get_vector(); NODE_VALIDATION_CHECK(this, @@ -334,7 +334,7 @@ shared_ptr op::v1::TopK::clone_with_new_inputs(const OutputVector& new_arg size_t op::v1::TopK::get_k() const { size_t k = 0; - if (op::is_constant(input_value(1).get_node())) { + if (op::util::is_constant(input_value(1).get_node())) { k = read_k_from_constant_node(input_value(1).get_node_shared_ptr(), get_input_element_type(1)); } @@ -345,7 +345,7 @@ size_t op::v1::TopK::get_k() const { } void op::v1::TopK::set_k(size_t k) { - this->input(1).replace_source_output(op::Constant::create(element::i64, Shape{}, {k})->output(0)); + this->input(1).replace_source_output(op::v0::Constant::create(element::i64, Shape{}, {k})->output(0)); } bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { @@ -358,7 +358,7 @@ bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVec // 2. 
get value of k - from constant node or from HT size_t k = 0; - if (op::is_constant(input_value(1).get_node())) { + if (op::util::is_constant(input_value(1).get_node())) { k = read_k_from_constant_node(input_value(1).get_node_shared_ptr(), get_input_element_type(1)); NGRAPH_CHECK(k <= arg_shape[axis], "'K' exceeds the dimension of top_k_axis"); } else { @@ -400,7 +400,7 @@ bool op::v1::TopK::has_evaluate() const { return false; } - if (op::is_constant(input_value(1).get_node())) { + if (op::util::is_constant(input_value(1).get_node())) { switch (get_input_element_type(1)) { case ngraph::element::i8: case ngraph::element::i32: @@ -429,7 +429,7 @@ bool op::v1::TopK::has_evaluate() const { } // v3 version starts -constexpr NodeTypeInfo op::v3::TopK::type_info; +OPENVINO_RTTI_DEFINITION(op::v3::TopK, "TopK", 3); op::v3::TopK::TopK(const Output& data, const Output& k, @@ -471,7 +471,7 @@ void op::v3::TopK::validate_and_infer_types() { size_t op::v3::TopK::read_k_from_constant_node(const shared_ptr& node, const element::Type& k_element_type) const { - const auto k_constant = ov::as_type_ptr(node); + const auto k_constant = ov::as_type_ptr(node); size_t k = 0; @@ -536,7 +536,7 @@ bool op::v3::TopK::has_evaluate() const { return false; } - if (op::is_constant(input_value(1).get_node())) { + if (op::util::is_constant(input_value(1).get_node())) { switch (get_input_element_type(1)) { case ngraph::element::i8: case ngraph::element::i32: diff --git a/ngraph/core/src/op/transpose.cpp b/ngraph/core/src/op/transpose.cpp index e0c9bd37a74..2b68e94bd3f 100644 --- a/ngraph/core/src/op/transpose.cpp +++ b/ngraph/core/src/op/transpose.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::Transpose, "Transpose", 1); +OPENVINO_RTTI_DEFINITION(op::v1::Transpose, "Transpose", 1); op::v1::Transpose::Transpose(const Output& arg, const Output& input_order) : Op({arg, input_order}) { constructor_validate_and_infer_types(); diff --git 
a/ngraph/core/src/op/type_relaxed.cpp b/ngraph/core/src/op/type_relaxed.cpp index 6f5881d070d..cc3bf7253ec 100644 --- a/ngraph/core/src/op/type_relaxed.cpp +++ b/ngraph/core/src/op/type_relaxed.cpp @@ -10,7 +10,6 @@ namespace ngraph { namespace op { -TypeRelaxedBase::~TypeRelaxedBase() {} - +TypeRelaxedBase::~TypeRelaxedBase() = default; } // namespace op } // namespace ngraph diff --git a/ngraph/core/src/op/unsqueeze.cpp b/ngraph/core/src/op/unsqueeze.cpp index cec998664d3..5d186ba5873 100644 --- a/ngraph/core/src/op/unsqueeze.cpp +++ b/ngraph/core/src/op/unsqueeze.cpp @@ -19,7 +19,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Unsqueeze, "Unsqueeze", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Unsqueeze, "Unsqueeze", 0); op::v0::Unsqueeze::Unsqueeze(const Output& data, const Output& axes) : Op({data, axes}) { constructor_validate_and_infer_types(); @@ -166,8 +166,8 @@ bool op::v0::Unsqueeze::constant_fold(OutputVector& output_values, const OutputV const auto& shape = get_output_shape(0); - if (auto data_const = std::dynamic_pointer_cast(inputs_values[0].get_node_shared_ptr())) { - output_values[0] = std::make_shared(*data_const, shape); + if (auto data_const = std::dynamic_pointer_cast(inputs_values[0].get_node_shared_ptr())) { + output_values[0] = std::make_shared(*data_const, shape); return true; } return false; diff --git a/ngraph/core/src/op/util/scatter_base.cpp b/ngraph/core/src/op/util/scatter_base.cpp index 7a13b0801c2..219fecb832c 100644 --- a/ngraph/core/src/op/util/scatter_base.cpp +++ b/ngraph/core/src/op/util/scatter_base.cpp @@ -11,7 +11,7 @@ using namespace std; -constexpr ov::NodeTypeInfo ov::op::util::ScatterBase::type_info; +OPENVINO_RTTI_DEFINITION(ov::op::util::ScatterBase, "ScatterBase", 0); ov::op::util::ScatterBase::ScatterBase(const Output& data, const Output& indices, diff --git a/ngraph/core/src/op/util/scatter_nd_base.cpp b/ngraph/core/src/op/util/scatter_nd_base.cpp index 3fd67100bbd..0c2b6a2f52e 100644 --- 
a/ngraph/core/src/op/util/scatter_nd_base.cpp +++ b/ngraph/core/src/op/util/scatter_nd_base.cpp @@ -10,7 +10,7 @@ using namespace std; -constexpr ov::NodeTypeInfo ov::op::util::ScatterNDBase::type_info; +OPENVINO_RTTI_DEFINITION(ov::op::util::ScatterNDBase, "ScatterNDBase", 0); constexpr int ov::op::util::ScatterNDBase::INPUTS; constexpr int ov::op::util::ScatterNDBase::INDICES; constexpr int ov::op::util::ScatterNDBase::UPDATES; diff --git a/ngraph/core/src/op/variadic_split.cpp b/ngraph/core/src/op/variadic_split.cpp index 6482f7d4f6b..2dd58826146 100644 --- a/ngraph/core/src/op/variadic_split.cpp +++ b/ngraph/core/src/op/variadic_split.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::VariadicSplit, "VariadicSplit", 1); +OPENVINO_RTTI_DEFINITION(op::v1::VariadicSplit, "VariadicSplit", 1); op::v1::VariadicSplit::VariadicSplit(const Output& data, const Output& axis, diff --git a/ngraph/core/src/op/xor.cpp b/ngraph/core/src/op/xor.cpp index 00610fc9ceb..3277da80116 100644 --- a/ngraph/core/src/op/xor.cpp +++ b/ngraph/core/src/op/xor.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::LogicalXor, "LogicalXor", 1, util::BinaryElementwiseLogical); +OPENVINO_RTTI_DEFINITION(op::v1::LogicalXor, "LogicalXor", 1, util::BinaryElementwiseLogical); op::v1::LogicalXor::LogicalXor(const Output& arg0, const Output& arg1, @@ -75,7 +75,7 @@ bool op::v1::LogicalXor::has_evaluate() const { return false; } -constexpr NodeTypeInfo op::v0::Xor::type_info; +OPENVINO_RTTI_DEFINITION(op::v0::Xor, "Xor", 0, util::BinaryElementwiseLogical); op::v0::Xor::Xor(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseLogical(arg0, arg1, auto_broadcast) { From 8985feff6fb19b275065090bdd1bde8e77ee1bf6 Mon Sep 17 00:00:00 2001 From: Evgeny Kotov Date: Tue, 7 Sep 2021 10:56:41 +0300 Subject: [PATCH 25/52] [GNA] Rewrite RemoveSingleInputConcatPass using ngraph 
(#7208) * initial matcher pass * write test implementation; + add unit tests * base * add unit tests * code review fixes * code review fixes * fix * fix * move RemoveSingleInputConcat before opset to legacy conversion --- .../src/gna_plugin/gna_plugin.cpp | 5 +- .../remove_single_input_concat.cpp | 47 ++++++ .../remove_single_input_concat.hpp | 20 +++ .../gna_remove_single_input_concat.cpp | 143 ++++++++++++++++++ 4 files changed, 213 insertions(+), 2 deletions(-) create mode 100644 inference-engine/src/gna_plugin/transformations/remove_single_input_concat.cpp create mode 100644 inference-engine/src/gna_plugin/transformations/remove_single_input_concat.hpp create mode 100644 inference-engine/tests/unit/gna/ngraph/transformations/gna_remove_single_input_concat.cpp diff --git a/inference-engine/src/gna_plugin/gna_plugin.cpp b/inference-engine/src/gna_plugin/gna_plugin.cpp index 3f61d3289c7..e1d615dbdec 100644 --- a/inference-engine/src/gna_plugin/gna_plugin.cpp +++ b/inference-engine/src/gna_plugin/gna_plugin.cpp @@ -67,6 +67,7 @@ #include "transformations/decompose_2d_conv.hpp" #include "transformations/convert_padded2valid_conv.hpp" #include "transformations/op_conversions/lstm_cell_decomposition.hpp" +#include "transformations/remove_single_input_concat.hpp" #include @@ -738,6 +739,7 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) { manager.register_pass(); manager.register_pass(); manager.register_pass(); + manager.register_pass(); manager.register_pass(); manager.register_pass(); manager.register_pass(); @@ -793,10 +795,9 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) { passes->registerPass(); passes->registerPass(); passes->registerPass(); + passes->registerPass(); } - passes->registerPass(); - // fake quantisation aware passes passes->registerPass(); passes->registerPass(); diff --git a/inference-engine/src/gna_plugin/transformations/remove_single_input_concat.cpp b/inference-engine/src/gna_plugin/transformations/remove_single_input_concat.cpp new 
file mode 100644 index 00000000000..b367bd63811 --- /dev/null +++ b/inference-engine/src/gna_plugin/transformations/remove_single_input_concat.cpp @@ -0,0 +1,47 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "transformations/remove_single_input_concat.hpp" + +#include +#include + +#include +#include +#include + +using NodeInput = ngraph::Input; +using NodeOutput = ngraph::Output; + +namespace GNAPluginNS { + NGRAPH_RTTI_DEFINITION(RemoveSingleInputConcat, "RemoveSingleInputConcat", 0); + + RemoveSingleInputConcat::RemoveSingleInputConcat() { + MATCHER_SCOPE(RemoveSingleInputConcat); + + auto is_required_node = [](const ngraph::Output& value) { + return value.get_node_shared_ptr()->get_input_size() == 1; + }; + + auto concat_operation = ngraph::pattern::wrap_type(is_required_node); + + ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + auto concat_operation_node = pattern_map.find(concat_operation)->second.get_node_shared_ptr(); + + NodeOutput prev_node_output = concat_operation_node->get_input_source_output(0); + + for (NodeInput child_input : concat_operation_node->get_output_target_inputs(0)) + child_input.replace_source_output(prev_node_output); + + return true; + }; + + auto m = std::make_shared(concat_operation, matcher_name); + this->register_matcher(m, callback); + } + +} // namespace GNAPluginNS diff --git a/inference-engine/src/gna_plugin/transformations/remove_single_input_concat.hpp b/inference-engine/src/gna_plugin/transformations/remove_single_input_concat.hpp new file mode 100644 index 00000000000..7730c36d9af --- /dev/null +++ b/inference-engine/src/gna_plugin/transformations/remove_single_input_concat.hpp @@ -0,0 +1,20 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace GNAPluginNS { + +/** + * @brief remove concat layers with 
single input + */ +class RemoveSingleInputConcat : public ngraph::pass::MatcherPass { +public: + NGRAPH_RTTI_DECLARATION; + RemoveSingleInputConcat(); +}; + +} // namespace GNAPluginNS diff --git a/inference-engine/tests/unit/gna/ngraph/transformations/gna_remove_single_input_concat.cpp b/inference-engine/tests/unit/gna/ngraph/transformations/gna_remove_single_input_concat.cpp new file mode 100644 index 00000000000..dfb2a0f0a2d --- /dev/null +++ b/inference-engine/tests/unit/gna/ngraph/transformations/gna_remove_single_input_concat.cpp @@ -0,0 +1,143 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "transformations/remove_single_input_concat.hpp" + +#include "common_test_utils/ngraph_test_utils.hpp" +#include +#include +#include +#include + +namespace testing { +namespace { + +using GraphInputs = std::vector>; +using GraphOutputs = ngraph::OutputVector; + +struct Graph { + std::shared_ptr createFunction(); + + GraphInputs inputs; + GraphOutputs outputs; +}; + +std::shared_ptr Graph::createFunction() { + ngraph::ResultVector results; + std::transform(outputs.begin(), outputs.end(), std::back_inserter(results), + [] (ngraph::Output output) { + return std::make_shared(output); + }); + + ngraph::ParameterVector params(inputs.begin(), inputs.end()); + + return std::make_shared(results, params); +} + +// ------------------------------------------------------------------------------------------------------- + +using Operations = std::vector>; + +Graph createGraph(int n_inputs, bool has_concat, int n_outputs) { + GraphInputs inputs; + Operations outputs; + + for (int i = 0; i < n_inputs; ++i) { + auto input = std::make_shared(ngraph::element::i64, + ngraph::Shape{1, 3, 64}); + inputs.push_back(input); + outputs.push_back(input); + } + + { + Operations new_outputs; + for (auto output : outputs) { + auto add_bias = ngraph::opset8::Constant::create(ngraph::element::i64, {1, 1, 1}, {2}); + auto add_operation = 
std::make_shared(output, add_bias); + new_outputs.push_back(add_operation); + } + outputs.swap(new_outputs); + } + + if (has_concat) { + auto concat_operation = std::make_shared(ngraph::OutputVector(outputs.begin(), + outputs.end()), + 0); + outputs = {concat_operation}; + } + + { + Operations new_outputs; + for (auto output : outputs) { + for (int i = 0; i < n_outputs; ++i) { + auto add_bias = ngraph::opset8::Constant::create(ngraph::element::i64, {1, 1, 1}, {3}); + auto add_operation = std::make_shared(output, add_bias); + new_outputs.push_back(add_operation); + } + } + outputs.swap(new_outputs); + } + + Graph graph; + graph.inputs.swap(inputs); + graph.outputs.insert(graph.outputs.end(), + std::make_move_iterator(outputs.begin()), + std::make_move_iterator(outputs.end())); + + return graph; +} + +// ------------------------------------------------------------------------------------------------------- + +class RemoveSingleInputConcatFixture: public CommonTestUtils::TestsCommon, + public ::testing::WithParamInterface> { +public: + void SetUp() override; +public: + std::shared_ptr function, reference_function; +}; + +void RemoveSingleInputConcatFixture::SetUp() { + // TODO: use auto & [transformed_graph, reference_graph] = this->GetParam() when C++17 + Graph transformed_graph; + Graph reference_graph; + std::tie(transformed_graph, reference_graph) = this->GetParam(); + + function = transformed_graph.createFunction(); + reference_function = reference_graph.createFunction(); +} + +ngraph::pass::Manager createPassManager() { + ngraph::pass::Manager manager; + manager.register_pass(); + manager.register_pass(); + return manager; +} + +void execute_test(std::shared_ptr function, + std::shared_ptr reference_function) { + ngraph::pass::Manager pass_manager = createPassManager(); + pass_manager.run_passes(function); + const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::ATTRIBUTES); + const FunctionsComparator::Result 
result = func_comparator(function, reference_function); + ASSERT_TRUE(result.valid); +} + +TEST_P(RemoveSingleInputConcatFixture, CompareFunctions) { + execute_test(function, reference_function); +} + +INSTANTIATE_TEST_SUITE_P(RemoveSingleInputConcatTestSuite, RemoveSingleInputConcatFixture, + ::testing::Values(std::make_tuple(createGraph(1 /* n_inputs */, true /* has_concat */, 1 /* n_outputs */), + createGraph(1 /* n_inputs */, false /* has_concat */, 1 /* n_outputs */)), + std::make_tuple(createGraph(1 /* n_inputs */, true /* has_concat */, 2 /* n_outputs */), + createGraph(1 /* n_inputs */, false /* has_concat */, 2 /* n_outputs */)), + std::make_tuple(createGraph(2 /* n_inputs */, true /* has_concat */, 1 /* n_outputs */), + createGraph(2 /* n_inputs */, true /* has_concat */, 1 /* n_outputs */)))); + +} // namespace +} // namespace testing From 5d6ef444a5daf4518c007f67d193742781feb337 Mon Sep 17 00:00:00 2001 From: Mateusz Tabaka Date: Tue, 7 Sep 2021 10:14:25 +0200 Subject: [PATCH 26/52] Reenable AddFakeQuantizeFusion and MulFakeQuantizeFusion (#5574) * Reenable AddFakeQuantizeFusion and MulFakeQuantizeFusion * remove unused variable * is_single_value simplify * skip transformations for low precision types * add comment regarding restriction in AddFakeQuantizeFusion * remove fp16 test * remove negative const handling --- .../src/layer_transformation.cpp | 13 +- .../add_fake_quantize_fusion.cpp | 72 ++++++-- .../common_optimizations.cpp | 2 + .../common_optimizations/fq_mul_fusion.cpp | 85 +++++---- .../mul_fake_quantize_fusion.cpp | 100 +++++----- .../pull_transpose_through_fq.cpp | 24 ++- .../add_fake_quantize_fusion.cpp | 158 +++++++++++++++- .../transformations/fq_mul_fusion_test.cpp | 61 ++++++- .../mul_fake_quantize_fusion.cpp | 171 +++++++++--------- .../ngraph_fq_transpose_test.cpp | 50 ++--- 10 files changed, 511 insertions(+), 225 deletions(-) diff --git a/inference-engine/src/low_precision_transformations/src/layer_transformation.cpp 
b/inference-engine/src/low_precision_transformations/src/layer_transformation.cpp index 1b05f965dc1..72c08bb5c78 100644 --- a/inference-engine/src/low_precision_transformations/src/layer_transformation.cpp +++ b/inference-engine/src/low_precision_transformations/src/layer_transformation.cpp @@ -171,11 +171,14 @@ std::stringstream toStream(const std::vector& dequantizationValues) { } void LayerTransformation::printDequantizationInfo(const std::shared_ptr& layer) { - const QuantizationDetails quantizationDetails = QuantizationDetails::getDetails(ov::as_type_ptr(layer)); - std::cout << - layer->get_type_name() << (NetworkHelper::isConstantPath(layer) ? " on weights " : " on activations ") << - layer->get_friendly_name() << ":" << std::endl << - " details : " << quantizationDetails << std::endl; + auto fq = as_type_ptr(layer); + if (fq) { + const QuantizationDetails quantizationDetails = QuantizationDetails::getDetails(ov::as_type_ptr(layer)); + std::cout << + layer->get_type_name() << (NetworkHelper::isConstantPath(layer) ? 
" on weights " : " on activations ") << + layer->get_friendly_name() << ":" << std::endl << + " details : " << quantizationDetails << std::endl; + } } void LayerTransformation::printDequantizationInfo(const DataPrecision& dataPrecision) { diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/add_fake_quantize_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/add_fake_quantize_fusion.cpp index 6a421a15c41..31fa2531652 100644 --- a/inference-engine/src/transformations/src/transformations/common_optimizations/add_fake_quantize_fusion.cpp +++ b/inference-engine/src/transformations/src/transformations/common_optimizations/add_fake_quantize_fusion.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include "itt.hpp" @@ -29,38 +30,85 @@ ngraph::pass::AddFakeQuantizeFusion::AddFakeQuantizeFusion() { ngraph::pattern::any_input()}); ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) { const auto& pattern_value_map = m.get_pattern_value_map(); + const auto& input = pattern_value_map.at(input_pattern); + const auto& type = input.get_element_type(); + if (type.bitwidth() < element::f32.bitwidth()) + return false; auto fq = std::dynamic_pointer_cast(pattern_value_map.at(fq_pattern).get_node_shared_ptr()); if (!fq) return false; - std::shared_ptr add_const = std::dynamic_pointer_cast(pattern_value_map.at(const_pattern).get_node_shared_ptr()); + const auto& add_node = pattern_value_map.at(add_pattern).get_node_shared_ptr(); + auto add_const = std::dynamic_pointer_cast(pattern_value_map.at(const_pattern).get_node_shared_ptr()); if (!add_const) return false; + std::shared_ptr new_const = add_const; auto const_shape = add_const->get_shape(); size_t const_shape_size = shape_size(const_shape); - if (const_shape_size > 1) { + bool is_single_value = const_shape_size == 1; + + if (!is_single_value) { + float v; + is_single_value = op::util::get_single_value(add_const, v); + if 
(is_single_value) { + new_const = std::make_shared(add_const->get_element_type(), Shape{1}, v); + } + } + + if (!is_single_value) { // disallow constant shapes other than (N, 1, 1, ..., 1) or (1, C, 1, ..., 1) if (!(const_shape[0] > 1 && const_shape[0] == const_shape_size) && !(const_shape.size() > 1 && const_shape[1] == const_shape_size)) { return false; } + + // Convolution+Add or MatMul+Add can be fused later + // so don't fuse Add+FQ in that situation + const auto& add_inputs = add_node->input_values(); + bool add_parent_is_conv_or_mm = std::any_of(add_inputs.begin(), add_inputs.end(), + [] (const Output& node) -> bool { + auto node_ptr = node.get_node(); + return is_type(node_ptr) || + is_type(node_ptr) || + is_type(node_ptr) || + is_type(node_ptr) || + is_type(node_ptr); + }); + if (add_parent_is_conv_or_mm) + return false; + auto fq_users = fq->get_users(); + // Concat LPT transformation supports per tensor quantization only + bool fq_user_is_concat = std::any_of(fq_users.begin(), fq_users.end(), + [] (const Output& node) -> bool { + auto node_ptr = node.get_node(); + return is_type(node_ptr); + }); + if (fq_user_is_concat) + return false; + auto diff = fq->get_input_partial_shape(0).rank().get_length() - static_cast(const_shape.size()); + if (diff > 0) { + // Reshape constants like (C, 1, 1) to (1, C, 1, 1) + const_shape.insert(const_shape.begin(), diff, 1); + new_const = std::make_shared(new_const, + op::Constant::create(element::u64, Shape{const_shape.size()}, const_shape), false); + } } - if (const_shape_size > 1 && - static_cast(const_shape.size()) < fq->get_input_partial_shape(0).rank().get_length()) { - // Reshape constants like (C, 1, 1) to (1, C, 1, 1) - const_shape.insert(const_shape.begin(), fq->get_input_partial_shape(0).rank().get_length() - const_shape.size(), 1); - add_const = std::make_shared(add_const, op::Constant::create(element::u64, Shape{const_shape.size()}, const_shape), false); - } - auto new_input_low = 
std::make_shared(fq->input_value(1), add_const); - auto new_input_high = std::make_shared(fq->input_value(2), add_const); - auto new_fq = register_new_node(pattern_value_map.at(input_pattern), + auto input_low_sub = std::make_shared(fq->input_value(1), new_const); + std::shared_ptr new_input_low = get_constant_from_source(input_low_sub); + if (!new_input_low) + new_input_low = input_low_sub; + auto input_high_sub = std::make_shared(fq->input_value(2), new_const); + std::shared_ptr new_input_high = get_constant_from_source(input_high_sub); + if (!new_input_high) + new_input_high = input_high_sub; + auto new_fq = register_new_node(input, new_input_low, new_input_high, fq->input_value(3), fq->input_value(4), fq->get_levels()); new_fq->set_friendly_name(fq->get_friendly_name()); - copy_runtime_info({pattern_value_map.at(add_pattern).get_node_shared_ptr(), fq}, {new_input_low, new_input_high, new_fq}); + copy_runtime_info({add_node, fq}, {new_input_low, new_input_high, new_fq}); replace_node(fq, new_fq); return true; }; diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp index 4e176543504..af0b44446f7 100644 --- a/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp +++ b/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp @@ -195,6 +195,8 @@ bool ngraph::pass::CommonOptimizations::run_on_function(std::shared_ptradd_matcher(); fq_fusions->add_matcher(); fq_fusions->add_matcher(); + fq_fusions->add_matcher(); + fq_fusions->add_matcher(); fq_fusions->set_name("ngraph::pass::FakeQuantizeFusions"); // StridesOptimization should be at the very end diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/fq_mul_fusion.cpp 
b/inference-engine/src/transformations/src/transformations/common_optimizations/fq_mul_fusion.cpp index ff023b56b50..3fbe15eeb18 100644 --- a/inference-engine/src/transformations/src/transformations/common_optimizations/fq_mul_fusion.cpp +++ b/inference-engine/src/transformations/src/transformations/common_optimizations/fq_mul_fusion.cpp @@ -12,35 +12,10 @@ #include #include #include +#include NGRAPH_RTTI_DEFINITION(ngraph::pass::FakeQuantizeMulFusion, "FakeQuantizeMulFusion", 0); -namespace { -std::pair, ngraph::Output> - get_adjusted_output_range(ngraph::Output out_low, - ngraph::Output out_high, - ngraph::Output multiplier) { - const auto mul_out_low = std::make_shared(out_low, multiplier); - const auto mul_out_high = std::make_shared(out_high, multiplier); - copy_runtime_info({out_low.get_node_shared_ptr(), multiplier.get_node_shared_ptr()}, - mul_out_low); - copy_runtime_info({out_high.get_node_shared_ptr(), multiplier.get_node_shared_ptr()}, - mul_out_high); - - ngraph::OutputVector new_out_low(1), new_out_high(1); - - if (!mul_out_low->constant_fold(new_out_low, {out_low, multiplier})) { - new_out_low[0] = mul_out_low; - } - - if (!mul_out_high->constant_fold(new_out_high, {out_high, multiplier})) { - new_out_high[0] = mul_out_high; - } - - return {new_out_low[0], new_out_high[0]}; - } -} // namespace - // This transformation multiplies the "output_low" and "output_high" inputs of the FQ operation // by the constant value that before transormation is used to multiply the output of FQ. // Both output_low and output_high are multiplied by the value represented as C (a constant) below. 
@@ -64,10 +39,11 @@ std::pair, ngraph::Output> ngraph::pass::FakeQuantizeMulFusion::FakeQuantizeMulFusion() { MATCHER_SCOPE(FakeQuantizeMulFusion); + const auto data_p = ngraph::pattern::any_input(); const auto fq_output_low_p = ngraph::pattern::any_input(); const auto fq_output_high_p = ngraph::pattern::any_input(); - const auto fq_node_p = ngraph::pattern::wrap_type({ngraph::pattern::any_input(), + const auto fq_node_p = ngraph::pattern::wrap_type({data_p, ngraph::pattern::any_input(), ngraph::pattern::any_input(), fq_output_low_p, @@ -81,20 +57,65 @@ ngraph::pass::FakeQuantizeMulFusion::FakeQuantizeMulFusion() { ngraph::matcher_pass_callback callback = [=](pattern::Matcher &m) { const auto& pattern_map = m.get_pattern_value_map(); + const auto& data = pattern_map.at(data_p); const auto fq_node = pattern_map.at(fq_node_p).get_node_shared_ptr(); const auto & original_output_low = pattern_map.at(fq_output_low_p); const auto & original_output_high = pattern_map.at(fq_output_high_p); - const auto & mul_constant = pattern_map.at(mul_constant_p); + auto mul_constant = pattern_map.at(mul_constant_p).get_node_shared_ptr(); + auto mul_constant_shape = mul_constant->get_shape(); + bool is_single_value = shape_size(mul_constant_shape) == 1; - const auto new_output_limits = get_adjusted_output_range( - original_output_low, original_output_high, mul_constant); + if (!is_single_value) { + float v; + auto constant = std::dynamic_pointer_cast(mul_constant); + if (constant) { + is_single_value = op::util::get_single_value(constant, v); + if (is_single_value) { + mul_constant_shape = Shape{1}; + mul_constant = std::make_shared(mul_constant->get_element_type(), mul_constant_shape, v); + } + } + } + + if (!is_single_value) { + auto fq_outputs = fq_node->get_users(); + // Convolution and GroupConvolution LP transformations require output low/high to have the same values + bool fq_output_is_conv = std::any_of(fq_outputs.begin(), fq_outputs.end(), + [] (const std::shared_ptr& node) -> 
bool { + return is_type(node) || + is_type(node); + }); + if (fq_output_is_conv) { + return false; + } + const auto & data_rank = data.get_partial_shape().rank(); + if (data_rank.is_dynamic()) { + return false; + } + auto rank = data_rank.get_length(); + auto diff = rank - mul_constant_shape.size(); + if (diff > 0) { + mul_constant_shape.insert(mul_constant_shape.begin(), diff, 1); + mul_constant = std::make_shared(mul_constant, + op::Constant::create(element::i64, Shape{mul_constant_shape.size()}, mul_constant_shape), false); + } + } + + auto get_adjusted_output_range = [&] (const Output& node) -> std::shared_ptr { + auto ret = std::make_shared(node, mul_constant); + copy_runtime_info(node.get_node_shared_ptr(), ret); + auto constant = get_constant_from_source(ret); + if (constant) + return constant; + return ret; + }; const auto new_fq_node = fq_node->clone_with_new_inputs({fq_node->input_value(0), fq_node->input_value(1), fq_node->input_value(2), - new_output_limits.first, - new_output_limits.second}); + get_adjusted_output_range(original_output_low), + get_adjusted_output_range(original_output_high)}); const auto mul_node = pattern_map.at(mul_node_p).get_node_shared_ptr(); diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/mul_fake_quantize_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/mul_fake_quantize_fusion.cpp index 1fcff0ac15c..cc64c79cba8 100644 --- a/inference-engine/src/transformations/src/transformations/common_optimizations/mul_fake_quantize_fusion.cpp +++ b/inference-engine/src/transformations/src/transformations/common_optimizations/mul_fake_quantize_fusion.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include "itt.hpp" @@ -29,6 +30,10 @@ ngraph::pass::MulFakeQuantizeFusion::MulFakeQuantizeFusion() { ngraph::pattern::any_input()}); ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) { const auto& pattern_value_map = 
m.get_pattern_value_map(); + const auto& input = pattern_value_map.at(input_pattern); + const auto& type = input.get_element_type(); + if (type.bitwidth() < element::f32.bitwidth()) + return false; auto fq = std::dynamic_pointer_cast(pattern_value_map.at(fq_pattern).get_node_shared_ptr()); if (!fq) return false; @@ -37,74 +42,61 @@ ngraph::pass::MulFakeQuantizeFusion::MulFakeQuantizeFusion() { return false; auto mul_const_value = mul_const->cast_vector(); - if (std::any_of(mul_const_value.begin(), mul_const_value.end(), [] (float f) -> bool { return f == 0.0f; })) + if (std::any_of(mul_const_value.begin(), mul_const_value.end(), [] (float f) -> bool { return f <= 0.0f; })) return false; + std::shared_ptr new_const = mul_const; auto const_shape = mul_const->get_shape(); size_t const_shape_size = shape_size(const_shape); - if (const_shape_size > 1) { + bool is_single_value = const_shape_size == 1; + + if (!is_single_value) { + float v; + is_single_value = op::util::get_single_value(mul_const, v); + if (is_single_value) { + new_const = std::make_shared(mul_const->get_element_type(), Shape{1}, v); + const_shape = Shape{1}; + } + } + + if (!is_single_value) { // disallow constant shapes other than (N, 1, 1, ..., 1) or (1, C, 1, ..., 1) if (!(const_shape[0] > 1 && const_shape[0] == const_shape_size) && !(const_shape.size() > 1 && const_shape[1] == const_shape_size)) { return false; } - } - - std::shared_ptr mul_const_node = mul_const; - if (const_shape_size > 1 && - static_cast(const_shape.size()) < fq->get_input_partial_shape(0).rank().get_length()) { + const auto& rank = fq->get_input_partial_shape(0).rank(); + if (rank.is_dynamic()) + return false; + auto fq_users = fq->get_users(); + // Concat LPT transformation supports per tensor quantization only + bool fq_user_is_concat = std::any_of(fq_users.begin(), fq_users.end(), + [] (const Output& node) -> bool { + auto node_ptr = node.get_node(); + return is_type(node_ptr); + }); + if (fq_user_is_concat) + return false; + 
auto diff = rank.get_length() - static_cast(const_shape.size()); // Reshape constants like (C, 1, 1) to (1, C, 1, 1) - const_shape.insert(const_shape.begin(), fq->get_input_partial_shape(0).rank().get_length() - const_shape.size(), 1); - mul_const_node = std::make_shared(mul_const_node, + const_shape.insert(const_shape.begin(), diff, 1); + new_const = std::make_shared(new_const, op::Constant::create(element::u64, Shape{const_shape.size()}, const_shape), false); } - auto new_input_low = std::make_shared(fq->input_value(1), mul_const_node); - auto new_input_high = std::make_shared(fq->input_value(2), mul_const_node); + auto input_low_div = std::make_shared(fq->input_value(1), new_const); + std::shared_ptr new_input_low = get_constant_from_source(input_low_div); + if (!new_input_low) + new_input_low = input_low_div; + auto input_high_div = std::make_shared(fq->input_value(2), new_const); + std::shared_ptr new_input_high = get_constant_from_source(input_high_div); + if (!new_input_high) + new_input_high = input_high_div; - auto mul = pattern_value_map.at(mul_pattern).get_node_shared_ptr(); - const auto& mul_data = pattern_value_map.at(input_pattern); - - std::shared_ptr new_fq; - if (std::all_of(mul_const_value.begin(), mul_const_value.end(), [] (float f) -> bool { return f < 0.0f; })) { - new_fq = register_new_node(mul_data, new_input_low, new_input_high, - fq->input_value(4), fq->input_value(3), fq->get_levels()); - copy_runtime_info({mul, fq}, {mul_const_node, new_input_low, new_input_high, new_fq}); - } else if (std::any_of(mul_const_value.begin(), mul_const_value.end(), [] (float f) -> bool { return f < 0.0f; })) { - const auto& output_low = fq->input_value(3); - const auto& output_high = fq->input_value(4); - // get the mask of the values from mul_const that are less than zero - std::vector less_than_zero; - less_than_zero.reserve(mul_const_value.size()); - // and greater or equal to zero - std::vector greater_eq_zero; - 
greater_eq_zero.reserve(mul_const_value.size()); - for (size_t i = 0; i < mul_const_value.size(); i++) { - less_than_zero.push_back(mul_const_value[i] < 0); - greater_eq_zero.push_back(mul_const_value[i] >= 0); - } - auto less_const = op::Constant::create(output_low.get_element_type(), const_shape, less_than_zero); - auto greater_eq_const = op::Constant::create(output_low.get_element_type(), const_shape, greater_eq_zero); - // new_output_low is defined as follows: - // output_low[i], when mul_const[i] >= 0 - // output_high[i], when mul_const[i] < 0 - auto new_output_low = std::make_shared( - std::make_shared(greater_eq_const, output_low), - std::make_shared(less_const, output_high)); - // new_output_high is defined as follows: - // output_high[i], when mul_const[i] >= 0 - // output_low[i], when mul_const[i] < 0 - auto new_output_high = std::make_shared( - std::make_shared(greater_eq_const, output_high), - std::make_shared(less_const, output_low)); - new_fq = register_new_node(mul_data, new_input_low, - new_input_high, new_output_low, new_output_high, fq->get_levels()); - } else { - new_fq = register_new_node(mul_data, new_input_low, new_input_high, - fq->input_value(3), fq->input_value(4), fq->get_levels()); - } - - copy_runtime_info({mul, fq}, {mul_const_node, new_input_low, new_input_high, new_fq}); + auto new_fq = register_new_node(input, new_input_low, new_input_high, + fq->input_value(3), fq->input_value(4), fq->get_levels()); + copy_runtime_info({pattern_value_map.at(mul_pattern).get_node_shared_ptr(), fq}, + {new_const, new_input_low, new_input_high, new_fq}); new_fq->set_friendly_name(fq->get_friendly_name()); replace_node(fq, new_fq); return true; diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/pull_transpose_through_fq.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/pull_transpose_through_fq.cpp index 618b633260e..68eb7eb337c 100644 --- 
a/inference-engine/src/transformations/src/transformations/common_optimizations/pull_transpose_through_fq.cpp +++ b/inference-engine/src/transformations/src/transformations/common_optimizations/pull_transpose_through_fq.cpp @@ -17,18 +17,32 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::PullTransposeThroughFQUp, "PullTransposeThr ngraph::pass::PullTransposeThroughFQUp::PullTransposeThroughFQUp() { MATCHER_SCOPE(PullTransposeThroughFQUp); auto m_fq = pattern::wrap_type({pattern::any_input(pattern::has_static_rank()), - pattern::any_input(pattern::has_static_rank()), - pattern::any_input(pattern::has_static_rank()), - pattern::any_input(pattern::has_static_rank()), - pattern::any_input(pattern::has_static_rank())}, + pattern::any_input(pattern::has_static_shape()), + pattern::any_input(pattern::has_static_shape()), + pattern::any_input(pattern::has_static_shape()), + pattern::any_input(pattern::has_static_shape())}, pattern::consumers_count(1)); - auto m_transpose = pattern::wrap_type({m_fq, pattern::wrap_type()}); + auto m_transpose_perm = pattern::wrap_type(); + auto m_transpose = pattern::wrap_type({m_fq, m_transpose_perm}); ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) { auto & pattern_map = m.get_pattern_value_map(); auto transpose = pattern_map[m_transpose].get_node_shared_ptr(); auto fq = pattern_map[m_fq].get_node_shared_ptr(); + auto are_inputs_scalars = shape_size(fq->input_value(1).get_shape()) == 1 && + shape_size(fq->input_value(2).get_shape()) == 1 && + shape_size(fq->input_value(3).get_shape()) == 1 && + shape_size(fq->input_value(4).get_shape()) == 1; + if (!are_inputs_scalars) { + auto perm = std::dynamic_pointer_cast(pattern_map[m_transpose_perm].get_node_shared_ptr()); + if (!perm) + return false; + auto perm_val = perm->cast_vector(); + if (!(perm_val[0] == 0 && perm_val[1] == 1)) + return false; + } + auto input_rank = fq->input(0).get_partial_shape().rank().get_length(); ngraph::NodeVector new_ops; diff --git 
a/inference-engine/tests/functional/inference_engine/transformations/add_fake_quantize_fusion.cpp b/inference-engine/tests/functional/inference_engine/transformations/add_fake_quantize_fusion.cpp index 6896c77f091..5c215aea4d4 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/add_fake_quantize_fusion.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/add_fake_quantize_fusion.cpp @@ -42,7 +42,6 @@ TEST(TransformationTests, AddFakeQuantizeFusion) { pass::Manager m; m.register_pass(); m.register_pass(); - m.register_pass(); m.run_passes(f); ASSERT_NO_THROW(check_rt_info(f)); } @@ -62,6 +61,50 @@ TEST(TransformationTests, AddFakeQuantizeFusion) { ASSERT_TRUE(res.first) << res.second; } +TEST(TransformationTests, AddFakeQuantizeFusionWithConvolutionAndScalarConstant) { + std::shared_ptr f(nullptr), f_ref(nullptr); + + Shape data_shape{1, 3, 14, 14}; + { + auto data = std::make_shared(element::f32, data_shape); + auto filter = std::make_shared(element::f32, Shape{1, 3, 2, 2}); + auto conv = std::make_shared(data, filter, Strides{1, 1}, + CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1}); + auto add_const = opset5::Constant::create(element::f32, Shape{1}, {2}); + auto add = std::make_shared(conv, add_const); + auto input_low = opset5::Constant::create(element::f32, Shape{1}, {0}); + auto input_high = opset5::Constant::create(element::f32, Shape{1}, {20}); + auto output_low = opset5::Constant::create(element::f32, Shape{}, {0}); + auto output_high = opset5::Constant::create(element::f32, Shape{}, {10}); + auto fq = std::make_shared(add, input_low, + input_high, output_low, + output_high, 11); + f = std::make_shared(NodeVector{fq}, ParameterVector{data, filter}); + pass::Manager m; + m.register_pass(); + m.register_pass(); + m.run_passes(f); + ASSERT_NO_THROW(check_rt_info(f)); + } + { + auto data = std::make_shared(element::f32, data_shape); + auto filter = std::make_shared(element::f32, Shape{1, 3, 2, 2}); + 
auto conv = std::make_shared(data, filter, Strides{1, 1}, + CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1}); + auto input_low = opset5::Constant::create(element::f32, Shape{1}, {-2}); + auto input_high = opset5::Constant::create(element::f32, Shape{1}, {18}); + auto output_low = opset5::Constant::create(element::f32, Shape{}, {0}); + auto output_high = opset5::Constant::create(element::f32, Shape{}, {10}); + auto fq = std::make_shared(conv, input_low, + input_high, output_low, + output_high, 11); + f_ref = std::make_shared(NodeVector{fq}, ParameterVector{data, filter}); + } + + auto res = compare_functions(f, f_ref, true); + ASSERT_TRUE(res.first) << res.second; +} + TEST(TransformationTests, AddFakeQuantizeFusionConstantOnFirstInput) { std::shared_ptr f(nullptr), f_ref(nullptr); @@ -81,7 +124,44 @@ TEST(TransformationTests, AddFakeQuantizeFusionConstantOnFirstInput) { pass::Manager m; m.register_pass(); m.register_pass(); - m.register_pass(); + m.run_passes(f); + ASSERT_NO_THROW(check_rt_info(f)); + } + { + auto data = std::make_shared(element::f32, data_shape); + auto input_low = opset5::Constant::create(element::f32, Shape{1}, {-2}); + auto input_high = opset5::Constant::create(element::f32, Shape{1}, {18}); + auto output_low = opset5::Constant::create(element::f32, Shape{}, {0}); + auto output_high = opset5::Constant::create(element::f32, Shape{}, {10}); + auto fq = std::make_shared(data, input_low, + input_high, output_low, + output_high, 11); + f_ref = std::make_shared(NodeVector{fq}, ParameterVector{data}); + } + + auto res = compare_functions(f, f_ref, true); + ASSERT_TRUE(res.first) << res.second; +} + +TEST(TransformationTests, AddFakeQuantizeFusionConstantWithEqualValues) { + std::shared_ptr f(nullptr), f_ref(nullptr); + + Shape data_shape{1, 3, 14, 14}; + { + auto data = std::make_shared(element::f32, data_shape); + auto add_const = opset5::Constant::create(element::f32, Shape{1, 3, 1, 1}, {2, 2, 2}); + auto add = std::make_shared(add_const, 
data); + auto input_low = opset5::Constant::create(element::f32, Shape{1}, {0}); + auto input_high = opset5::Constant::create(element::f32, Shape{1}, {20}); + auto output_low = opset5::Constant::create(element::f32, Shape{}, {0}); + auto output_high = opset5::Constant::create(element::f32, Shape{}, {10}); + auto fq = std::make_shared(add, input_low, + input_high, output_low, + output_high, 11); + f = std::make_shared(NodeVector{fq}, ParameterVector{data}); + pass::Manager m; + m.register_pass(); + m.register_pass(); m.run_passes(f); ASSERT_NO_THROW(check_rt_info(f)); } @@ -120,7 +200,6 @@ TEST(TransformationTests, AddFakeQuantizeFusionReshape) { pass::Manager m; m.register_pass(); m.register_pass(); - m.register_pass(); m.run_passes(f); ASSERT_NO_THROW(check_rt_info(f)); } @@ -159,7 +238,6 @@ TEST(TransformationTests, NegativeAddFakeQuantizeFusionNotAConstant) { pass::Manager m; m.register_pass(); m.register_pass(); - m.register_pass(); m.run_passes(f); ASSERT_NO_THROW(check_rt_info(f)); } @@ -180,3 +258,75 @@ TEST(TransformationTests, NegativeAddFakeQuantizeFusionNotAConstant) { auto res = compare_functions(f, f_ref, true); ASSERT_TRUE(res.first) << res.second; } + +TEST(TransformationTests, NegativeAddFakeQuantizeFusionWithConvolutionAndNonScalarConstant) { + std::shared_ptr f(nullptr), f_ref(nullptr); + + Shape data_shape{1, 3, 14, 14}; + { + auto data = std::make_shared(element::f32, data_shape); + auto filter = std::make_shared(element::f32, Shape{4, 3, 2, 2}); + auto conv = std::make_shared(data, filter, Strides{1, 1}, + CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1}); + auto add_const = opset5::Constant::create(element::f32, Shape{1, 4, 1, 1}, {1, 2, 3, 4}); + auto add = std::make_shared(conv, add_const); + auto input_low = opset5::Constant::create(element::f32, Shape{1}, {0}); + auto input_high = opset5::Constant::create(element::f32, Shape{1}, {20}); + auto output_low = opset5::Constant::create(element::f32, Shape{}, {0}); + auto output_high = 
opset5::Constant::create(element::f32, Shape{}, {10}); + auto fq = std::make_shared(add, input_low, + input_high, output_low, + output_high, 11); + f = std::make_shared(NodeVector{fq}, ParameterVector{data, filter}); + pass::Manager m; + m.register_pass(); + m.register_pass(); + m.run_passes(f); + ASSERT_NO_THROW(check_rt_info(f)); + } + { + auto data = std::make_shared(element::f32, data_shape); + auto filter = std::make_shared(element::f32, Shape{4, 3, 2, 2}); + auto conv = std::make_shared(data, filter, Strides{1, 1}, + CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1}); + auto add_const = opset5::Constant::create(element::f32, Shape{1, 4, 1, 1}, {1, 2, 3, 4}); + auto add = std::make_shared(conv, add_const); + auto input_low = opset5::Constant::create(element::f32, Shape{1}, {0}); + auto input_high = opset5::Constant::create(element::f32, Shape{1}, {20}); + auto output_low = opset5::Constant::create(element::f32, Shape{}, {0}); + auto output_high = opset5::Constant::create(element::f32, Shape{}, {10}); + auto fq = std::make_shared(add, input_low, + input_high, output_low, + output_high, 11); + f_ref = std::make_shared(NodeVector{fq}, ParameterVector{data, filter}); + } + + auto res = compare_functions(f, f_ref, true); + ASSERT_TRUE(res.first) << res.second; +} + +TEST(TransformationTests, NegativeAddFakeQuantizeFusionLowPrecision) { + std::shared_ptr f(nullptr), f_ref(nullptr); + + Shape data_shape{1, 3, 14, 14}; + auto data = std::make_shared(element::f16, data_shape); + auto add_const = opset5::Constant::create(element::f16, Shape{1}, {2}); + auto add = std::make_shared(data, add_const); + auto input_low = opset5::Constant::create(element::f16, Shape{1}, {0}); + auto input_high = opset5::Constant::create(element::f16, Shape{1}, {20}); + auto output_low = opset5::Constant::create(element::f16, Shape{}, {0}); + auto output_high = opset5::Constant::create(element::f16, Shape{}, {10}); + auto fq = std::make_shared(add, input_low, + input_high, output_low, 
+ output_high, 11); + f = std::make_shared(NodeVector{fq}, ParameterVector{data}); + f_ref = clone_function(*f); + pass::Manager m; + m.register_pass(); + m.register_pass(); + m.run_passes(f); + ASSERT_NO_THROW(check_rt_info(f)); + + auto res = compare_functions(f, f_ref, true); + ASSERT_TRUE(res.first) << res.second; +} diff --git a/inference-engine/tests/functional/inference_engine/transformations/fq_mul_fusion_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/fq_mul_fusion_test.cpp index 57135f0285a..8be09d1dd38 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/fq_mul_fusion_test.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/fq_mul_fusion_test.cpp @@ -49,8 +49,10 @@ public: const auto fq = std::make_shared( data, in_low, in_high, out_low, out_high, 255); + std::vector mul_const(shape_size(mul_const_shape)); + std::iota(mul_const.begin(), mul_const.end(), 0); const auto mul_value = ngraph::opset4::Constant::create( - ngraph::element::Type_t::f32, mul_const_shape, {3.14f}); + ngraph::element::Type_t::f32, mul_const_shape, mul_const); const auto mul = std::make_shared(fq, mul_value); m_function = std::make_shared( @@ -167,7 +169,7 @@ INSTANTIATE_TEST_SUITE_P(FQOutputs_1D__multiplier_3D, FQMulFusion, ::testing::Values(ngraph::Shape{1, 64, 1, 1}), ::testing::Values(ngraph::Shape{1}), ::testing::Values(ngraph::Shape{1, 3, 1}), - ::testing::Values(ngraph::Shape{1, 3, 1}))); + ::testing::Values(ngraph::Shape{1, 1, 3, 1}))); INSTANTIATE_TEST_SUITE_P(FQInOUt_ones__multiplier_4D_with_channel, FQMulFusion, ::testing::Combine(::testing::Values(ngraph::Shape{1, 64, 3, 3}), @@ -176,6 +178,14 @@ INSTANTIATE_TEST_SUITE_P(FQInOUt_ones__multiplier_4D_with_channel, FQMulFusion, ::testing::Values(ngraph::Shape{1, 64, 3, 3}), ::testing::Values(ngraph::Shape{1, 64, 3, 3}))); +INSTANTIATE_TEST_CASE_P(FQInOUt_ones__multiplier_3D, FQMulFusion, + ::testing::Combine(::testing::Values(ngraph::Shape{1, 
128, 512}), + ::testing::Values(ngraph::Shape{1}), + ::testing::Values(ngraph::Shape{1}), + ::testing::Values(ngraph::Shape{512}), + ::testing::Values(ngraph::Shape{1, 1, 512}))); + + TEST(FQMulFusion_NonConstInputs, AllInputsNonConst) { const auto data = std::make_shared( ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 224, 224}); @@ -383,6 +393,53 @@ TEST(TransformationTests, FakeQuantizeMultiplyFusionNegative) { ASSERT_EQ(function->get_output_shape(0), ngraph::Shape({1, 300, 16})); } +TEST(TransformationTests, FakeQuantizeMultiplyFusionMulConstWithEqualValues) { + const auto data = std::make_shared( + ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 224, 224}); + const auto in_low = + std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{}); + const auto in_high = + std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{}); + const auto out_low = ngraph::opset4::Constant::create( + ngraph::element::Type_t::f32, ngraph::Shape{}, {1.0f}); + const auto out_high = ngraph::opset4::Constant::create( + ngraph::element::Type_t::f32, ngraph::Shape{}, {100.0f}); + const auto fq = std::make_shared( + data, in_low, in_high, out_low, out_high, 42); + + const auto mul_value = ngraph::opset4::Constant::create( + ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 1, 1}, {3, 3, 3}); + const auto mul = std::make_shared(fq, mul_value); + + auto function = std::make_shared(ngraph::OutputVector{mul}, + ngraph::ParameterVector{data, in_low, in_high}); + + const auto expected_out_low = ngraph::opset4::Constant::create( + ngraph::element::Type_t::f32, ngraph::Shape{1}, {3.0f}); + // this constant should be created by constant folding of the last FQ input + const auto expected_out_high = ngraph::opset4::Constant::create( + ngraph::element::Type_t::f32, ngraph::Shape{1}, {300.0f}); + + const auto expected_fq = std::make_shared( + data, in_low, in_high, expected_out_low, expected_out_high, 42); + + const auto expected_function = + 
std::make_shared(ngraph::OutputVector{expected_fq}, + ngraph::ParameterVector{data, in_low, in_high}); + + ngraph::pass::Manager manager; + manager.register_pass(); + manager.register_pass(); + + manager.run_passes(function); + ASSERT_NO_THROW(check_rt_info(function)); + + const auto res = compare_functions(function, expected_function, true); + ASSERT_TRUE(res.first) << res.second; +} + + + } // namespace diff --git a/inference-engine/tests/functional/inference_engine/transformations/mul_fake_quantize_fusion.cpp b/inference-engine/tests/functional/inference_engine/transformations/mul_fake_quantize_fusion.cpp index b8133abffc2..4d67c713857 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/mul_fake_quantize_fusion.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/mul_fake_quantize_fusion.cpp @@ -42,7 +42,6 @@ TEST(TransformationTests, MulFakeQuantizeFusionPositiveConstant) { pass::Manager m; m.register_pass(); m.register_pass(); - m.register_pass(); m.run_passes(f); ASSERT_NO_THROW(check_rt_info(f)); } @@ -81,7 +80,6 @@ TEST(TransformationTests, MulFakeQuantizeFusionConstantOnFirstInput) { pass::Manager m; m.register_pass(); m.register_pass(); - m.register_pass(); m.run_passes(f); ASSERT_NO_THROW(check_rt_info(f)); } @@ -120,7 +118,6 @@ TEST(TransformationTests, MulFakeQuantizeFusionReshape) { pass::Manager m; m.register_pass(); m.register_pass(); - m.register_pass(); m.run_passes(f); ASSERT_NO_THROW(check_rt_info(f)); } @@ -140,13 +137,13 @@ TEST(TransformationTests, MulFakeQuantizeFusionReshape) { ASSERT_TRUE(res.first) << res.second; } -TEST(TransformationTests, MulFakeQuantizeFusionConstantAllNegative) { +TEST(TransformationTests, MulFakeQuantizeFusionConstantNonScalarWithEqualValues) { std::shared_ptr f(nullptr), f_ref(nullptr); Shape data_shape{1, 3, 14, 14}; { auto data = std::make_shared(element::f32, data_shape); - auto mul_const = opset5::Constant::create(element::f32, Shape{1}, {-2}); + auto 
mul_const = opset5::Constant::create(element::f32, Shape{1, 3, 1, 1}, {2, 2, 2}); auto mul = std::make_shared(data, mul_const); auto input_low = opset5::Constant::create(element::f32, Shape{1}, {1}); auto input_high = opset5::Constant::create(element::f32, Shape{1}, {20}); @@ -159,16 +156,15 @@ TEST(TransformationTests, MulFakeQuantizeFusionConstantAllNegative) { pass::Manager m; m.register_pass(); m.register_pass(); - m.register_pass(); m.run_passes(f); ASSERT_NO_THROW(check_rt_info(f)); } { auto data = std::make_shared(element::f32, data_shape); - auto input_low = opset5::Constant::create(element::f32, Shape{1}, {-0.5}); - auto input_high = opset5::Constant::create(element::f32, Shape{1}, {-10}); - auto output_low = opset5::Constant::create(element::f32, Shape{1}, {10}); - auto output_high = opset5::Constant::create(element::f32, Shape{1, 3, 1, 1}, {-10, -10, -10}); + auto input_low = opset5::Constant::create(element::f32, Shape{1}, {0.5}); + auto input_high = opset5::Constant::create(element::f32, Shape{1}, {10}); + auto output_low = opset5::Constant::create(element::f32, Shape{1, 3, 1, 1}, {-10, -10, -10}); + auto output_high = opset5::Constant::create(element::f32, Shape{1}, {10}); auto fq = std::make_shared(data, input_low, input_high, output_low, output_high, 11); @@ -179,84 +175,6 @@ TEST(TransformationTests, MulFakeQuantizeFusionConstantAllNegative) { ASSERT_TRUE(res.first) << res.second; } -TEST(TransformationTests, MulFakeQuantizeFusionConstantSomeNegative) { - std::shared_ptr f(nullptr), f_ref(nullptr); - - Shape data_shape{1, 3, 14, 14}; - { - auto data = std::make_shared(element::f32, data_shape); - auto mul_const = opset5::Constant::create(element::f32, Shape{3, 1, 1}, {2, 1, -2}); - auto mul = std::make_shared(data, mul_const); - auto input_low = opset5::Constant::create(element::f32, Shape{1}, {1}); - auto input_high = opset5::Constant::create(element::f32, Shape{1}, {20}); - auto output_low = opset5::Constant::create(element::f32, Shape{1, 3, 1, 
1}, {-10, -10, -10}); - auto output_high = opset5::Constant::create(element::f32, Shape{1}, {10}); - auto fq = std::make_shared(mul, input_low, - input_high, output_low, - output_high, 20); - f = std::make_shared(NodeVector{fq}, ParameterVector{data}); - pass::Manager m; - m.register_pass(); - m.register_pass(); - m.register_pass(); - m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); - } - { - auto data = std::make_shared(element::f32, data_shape); - auto input_low = opset5::Constant::create(element::f32, Shape{1, 3, 1, 1}, {0.5f, 1.0f, -0.5f}); - auto input_high = opset5::Constant::create(element::f32, Shape{1, 3, 1, 1}, {10.0f, 20.0f, -10.0f}); - auto output_low = opset5::Constant::create(element::f32, Shape{1, 3, 1, 1}, {-10.0f, -10.0f, 10.0f}); - auto output_high = opset5::Constant::create(element::f32, Shape{1, 3, 1, 1}, {10.0f, 10.0f, -10.0f}); - auto fq = std::make_shared(data, input_low, - input_high, output_low, - output_high, 20); - f_ref = std::make_shared(NodeVector{fq}, ParameterVector{data}); - } - - auto res = compare_functions(f, f_ref, true); - ASSERT_TRUE(res.first) << res.second; -} - -TEST(TransformationTests, MulFakeQuantizeFusionConstantSomeNegativeF16) { - std::shared_ptr f(nullptr), f_ref(nullptr); - - Shape data_shape{1, 3, 14, 14}; - { - auto data = std::make_shared(element::f16, data_shape); - auto mul_const = opset5::Constant::create(element::f16, Shape{3, 1, 1}, {2, 1, -2}); - auto mul = std::make_shared(data, mul_const); - auto input_low = opset5::Constant::create(element::f16, Shape{1}, {1}); - auto input_high = opset5::Constant::create(element::f16, Shape{1}, {20}); - auto output_low = opset5::Constant::create(element::f16, Shape{1, 3, 1, 1}, {-10, -10, -10}); - auto output_high = opset5::Constant::create(element::f16, Shape{1}, {10}); - auto fq = std::make_shared(mul, input_low, - input_high, output_low, - output_high, 20); - f = std::make_shared(NodeVector{fq}, ParameterVector{data}); - pass::Manager m; - m.register_pass(); - 
m.register_pass(); - m.register_pass(); - m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); - } - { - auto data = std::make_shared(element::f16, data_shape); - auto input_low = opset5::Constant::create(element::f16, Shape{1, 3, 1, 1}, {0.5f, 1.0f, -0.5f}); - auto input_high = opset5::Constant::create(element::f16, Shape{1, 3, 1, 1}, {10.0f, 20.0f, -10.0f}); - auto output_low = opset5::Constant::create(element::f16, Shape{1, 3, 1, 1}, {-10.0f, -10.0f, 10.0f}); - auto output_high = opset5::Constant::create(element::f16, Shape{1, 3, 1, 1}, {10.0f, 10.0f, -10.0f}); - auto fq = std::make_shared(data, input_low, - input_high, output_low, - output_high, 20); - f_ref = std::make_shared(NodeVector{fq}, ParameterVector{data}); - } - - auto res = compare_functions(f, f_ref, true); - ASSERT_TRUE(res.first) << res.second; -} - TEST(TransformationTests, NegativeMulFakeQuantizeFusionNotAConstant) { std::shared_ptr f(nullptr), f_ref(nullptr); @@ -276,7 +194,6 @@ TEST(TransformationTests, NegativeMulFakeQuantizeFusionNotAConstant) { pass::Manager m; m.register_pass(); m.register_pass(); - m.register_pass(); m.run_passes(f); ASSERT_NO_THROW(check_rt_info(f)); } @@ -297,3 +214,79 @@ TEST(TransformationTests, NegativeMulFakeQuantizeFusionNotAConstant) { auto res = compare_functions(f, f_ref, true); ASSERT_TRUE(res.first) << res.second; } + +TEST(TransformationTests, NegativeMulFakeQuantizeFusionLowPrecision) { + std::shared_ptr f(nullptr), f_ref(nullptr); + + Shape data_shape{1, 3, 14, 14}; + auto data = std::make_shared(element::f16, data_shape); + auto mul_const = opset5::Constant::create(element::f16, Shape{1}, {2}); + auto mul = std::make_shared(data, mul_const); + auto input_low = opset5::Constant::create(element::f16, Shape{1}, {1}); + auto input_high = opset5::Constant::create(element::f16, Shape{1}, {20}); + auto output_low = opset5::Constant::create(element::f16, Shape{1, 3, 1, 1}, {0, 0, 0}); + auto output_high = opset5::Constant::create(element::f16, Shape{1}, {10}); + 
auto fq = std::make_shared(mul, input_low, + input_high, output_low, + output_high, 11); + f = std::make_shared(NodeVector{fq}, ParameterVector{data}); + f_ref = clone_function(*f); + pass::Manager m; + m.register_pass(); + m.register_pass(); + m.run_passes(f); + ASSERT_NO_THROW(check_rt_info(f)); + + auto res = compare_functions(f, f_ref, true); + ASSERT_TRUE(res.first) << res.second; +} + +TEST(TransformationTests, NegativeMulFakeQuantizeFusionConstantAllNegative) { + std::shared_ptr f(nullptr), f_ref(nullptr); + + Shape data_shape{1, 3, 14, 14}; + auto data = std::make_shared(element::f32, data_shape); + auto mul_const = opset5::Constant::create(element::f32, Shape{1}, {-2}); + auto mul = std::make_shared(data, mul_const); + auto input_low = opset5::Constant::create(element::f32, Shape{1}, {1}); + auto input_high = opset5::Constant::create(element::f32, Shape{1}, {20}); + auto output_low = opset5::Constant::create(element::f32, Shape{1, 3, 1, 1}, {-10, -10, -10}); + auto output_high = opset5::Constant::create(element::f32, Shape{1}, {10}); + auto fq = std::make_shared(mul, input_low, + input_high, output_low, + output_high, 11); + f = std::make_shared(NodeVector{fq}, ParameterVector{data}); + f_ref = clone_function(*f); + pass::Manager m; + m.register_pass(); + m.register_pass(); + m.run_passes(f); + ASSERT_NO_THROW(check_rt_info(f)); + auto res = compare_functions(f, f_ref, true); + ASSERT_TRUE(res.first) << res.second; +} + +TEST(TransformationTests, NegativeMulFakeQuantizeFusionConstantSomeNegative) { + std::shared_ptr f(nullptr), f_ref(nullptr); + + Shape data_shape{1, 3, 14, 14}; + auto data = std::make_shared(element::f32, data_shape); + auto mul_const = opset5::Constant::create(element::f32, Shape{3, 1, 1}, {2, 1, -2}); + auto mul = std::make_shared(data, mul_const); + auto input_low = opset5::Constant::create(element::f32, Shape{1}, {1}); + auto input_high = opset5::Constant::create(element::f32, Shape{1}, {20}); + auto output_low = 
opset5::Constant::create(element::f32, Shape{1, 3, 1, 1}, {-10, -10, -10}); + auto output_high = opset5::Constant::create(element::f32, Shape{1}, {10}); + auto fq = std::make_shared(mul, input_low, + input_high, output_low, + output_high, 20); + f = std::make_shared(NodeVector{fq}, ParameterVector{data}); + f_ref = clone_function(*f); + pass::Manager m; + m.register_pass(); + m.register_pass(); + m.run_passes(f); + ASSERT_NO_THROW(check_rt_info(f)); + auto res = compare_functions(f, f_ref, true); + ASSERT_TRUE(res.first) << res.second; +} diff --git a/inference-engine/tests/functional/inference_engine/transformations/ngraph_fq_transpose_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/ngraph_fq_transpose_test.cpp index e767a4a26fa..429eee56a7d 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/ngraph_fq_transpose_test.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/ngraph_fq_transpose_test.cpp @@ -24,16 +24,16 @@ using namespace testing; TEST(TransformationTests, FQTransposeTest1) { - auto data1 = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 1, 3}, {1, 2, 3}); - auto data2 = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{3}, {1, 2, 3}); - auto data3 = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 3}, {1, 2, 3}); - auto data4 = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 3}, {1, 2, 3}); - auto data5 = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 3}, {1, 2, 3}); - auto transpose_order = ngraph::op::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {0, 2, 1}); - - std::shared_ptr f(nullptr); + std::shared_ptr f(nullptr), f_ref(nullptr); { - auto fq = std::make_shared(data1, data2, data3, data4, data5, 1); + auto data = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 1, 3}, {1, 2, 3}); + auto input_low = 
ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {2}); + auto input_high = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {3}); + auto output_low = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {2}); + auto output_high = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {3}); + auto transpose_order = ngraph::op::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {0, 2, 1}); + + auto fq = std::make_shared(data, input_low, input_high, output_low, output_high, 1); auto transpose = std::make_shared(fq, transpose_order); f = std::make_shared(ngraph::NodeVector{transpose}, ngraph::ParameterVector{}); @@ -47,29 +47,35 @@ TEST(TransformationTests, FQTransposeTest1) { manager.register_pass(); ASSERT_NO_THROW(manager.run_passes(f)); } - std::vector ref_shape{1, 3, 1}; - for (auto op : f->get_ops()) { - if (auto constant = ngraph::as_type_ptr(op)) { - auto shape = constant->get_shape(); - ASSERT_EQ(shape, ref_shape); - } + { + auto data = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 3, 1}, {1, 2, 3}); + auto input_low = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 1, 1}, {2}); + auto input_high = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 1, 1}, {3}); + auto output_low = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 1, 1}, {2}); + auto output_high = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 1, 1}, {3}); + + auto fq = std::make_shared(data, input_low, input_high, output_low, output_high, 1); + + f_ref = std::make_shared(ngraph::NodeVector{fq}, ngraph::ParameterVector{}); } + auto res = compare_functions(f, f_ref, true); + ASSERT_TRUE(res.first) << res.second; } TEST(TransformationTests, FQTransposeDynamic) { - auto data1 = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic()); - auto data2 = ngraph::op::Constant::create(ngraph::element::f32, 
ngraph::Shape{3}, {1, 2, 3}); - auto data3 = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 3}, {1, 2, 3}); - auto data4 = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 3}, {1, 2, 3}); - auto data5 = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 3}, {1, 2, 3}); + auto data = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic()); + auto input_low = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {2}); + auto input_high = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {3}); + auto output_low = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {2}); + auto output_high = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {3}); auto transpose_order = ngraph::op::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {0, 2, 1}); std::shared_ptr f(nullptr); { - auto fq = std::make_shared(data1, data2, data3, data4, data5, 1); + auto fq = std::make_shared(data, input_low, input_high, output_low, output_high, 1); auto transpose = std::make_shared(fq, transpose_order); - f = std::make_shared(ngraph::NodeVector{transpose}, ngraph::ParameterVector{data1}); + f = std::make_shared(ngraph::NodeVector{transpose}, ngraph::ParameterVector{data}); ngraph::pass::Manager manager; manager.register_pass(); From 5fc0abe9cdfdb3c30a9f1e48111b55974e685217 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Do=C5=82bniak?= Date: Tue, 7 Sep 2021 10:18:57 +0200 Subject: [PATCH 27/52] Assertion message when blob precisions dont match (#7394) --- ngraph/test/util/engine/ie_engines.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ngraph/test/util/engine/ie_engines.cpp b/ngraph/test/util/engine/ie_engines.cpp index 321e1132076..a1da94ee61d 100644 --- a/ngraph/test/util/engine/ie_engines.cpp +++ b/ngraph/test/util/engine/ie_engines.cpp @@ -91,7 +91,7 @@ namespace if (computed_precision != expected_precision) { - return 
testing::AssertionFailure(); + return testing::AssertionFailure() << "Blob precisions mismatch"; } switch (static_cast(computed_precision)) From f890b12fd532e7d3ba45db70abcd6fe10745d1a6 Mon Sep 17 00:00:00 2001 From: Anastasiia Urlapova Date: Tue, 7 Sep 2021 12:13:12 +0300 Subject: [PATCH 28/52] [XXX-55386] Change nets version to v10 (#7289) * CVS-56144 Enable all OMZ scope * [CVS-55368] Change nets version to v10 * [CVS-55386] Fix some coments * [CVS-55386] Added float param to LSTM net * Updated nets * [CVS-55386] Change to ngraph way --- .../inference_engine/local_test.cpp | 397 +++++++++--------- 1 file changed, 203 insertions(+), 194 deletions(-) diff --git a/inference-engine/tests/functional/inference_engine/local_test.cpp b/inference-engine/tests/functional/inference_engine/local_test.cpp index b79e591dfb7..91f379a4fa7 100644 --- a/inference-engine/tests/functional/inference_engine/local_test.cpp +++ b/inference-engine/tests/functional/inference_engine/local_test.cpp @@ -3,10 +3,8 @@ // #include - #include -#include -#include "common_test_utils/common_utils.hpp" +#include "ngraph/ops.hpp" using namespace ::testing; using namespace std; @@ -15,183 +13,204 @@ using namespace InferenceEngine; class LocaleTests : public ::testing::Test { std::string originalLocale; std::string _model = R"V0G0N( - - - - - - 2 - 3 - 5 - 5 - - - - - - - - - 2 - 3 - 5 - 5 - - - - - 2 - 3 - 5 - 5 - - - - - - - - 2 - 3 - 5 - 5 - - - 2 - 3 - 5 - 5 - - - - - 2 - 3 - 5 - 5 - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + 1 + 256 + 200 + 272 + + + + + + + + 1000 + 4 + + + + + + + + 1000 + + + + + + + + 1 + 256 + 200 + 272 + + + 1000 + 4 + + + 1000 + + + + + 1000 + 256 + 7 + 7 + + + + + + + 1000 + 256 + 7 + 7 + + + + + + + + + + )V0G0N"; std::string _model_LSTM = R"V0G0N( - + - + + - + 1 - 30 + 512 - - - - + + + + 1 - 30 + 256 + + + + + + + + 1 + 256 + + + + + + + + 1024 + 512 + + + + + + + + 1024 + 256 + + + + + + + + 1024 + + + + + + + + 1 + 512 + + + 1 + 256 + + + 1 + 256 + 
+ + 1024 + 512 + + + 1024 + 256 + + + 1024 - - - 1 - 10 - - - 1 - 10 - - - 1 - 10 - - + + + 1 + 256 + + + 1 + 256 + + - - - - - 1 - 10 - - - 1 - 10 - - - 1 - 10 - - - - - 1 - 10 - - - 1 - 10 - - - - - - - - - - - - 1 - 10 - - - 1 - 10 - - - - - 1 - 10 - - - - - - - - - - - - - + + + + 1 + 256 + + + + + + + 1 + 256 + + + + + + + + + + + + + + + )V0G0N"; protected: @@ -205,40 +224,30 @@ protected: void testBody(bool isLSTM = false) const { InferenceEngine::Core core; - // This model contains layers with float attributes. - // Conversion from string may be affected by locale. std::string model = isLSTM ? _model_LSTM : _model; - auto blob = make_shared_blob(TensorDesc(Precision::U8, {3360}, Layout::C)); + auto blob = make_shared_blob(TensorDesc(Precision::U8, {26000000}, Layout::C)); blob->allocate(); auto net = core.ReadNetwork(model, blob); - IE_SUPPRESS_DEPRECATED_START - if (!isLSTM) { - auto power_layer = dynamic_pointer_cast(CommonTestUtils::getLayerByName(net, "power")); - ASSERT_EQ(power_layer->scale, 0.75f); - ASSERT_EQ(power_layer->offset, 0.35f); - ASSERT_EQ(power_layer->power, 0.5f); + auto funcs = net.getFunction(); - auto sum_layer = dynamic_pointer_cast(CommonTestUtils::getLayerByName(net, "sum")); - std::vector ref_coeff{0.77f, 0.33f}; - ASSERT_EQ(sum_layer->coeff, ref_coeff); - - auto info = net.getInputsInfo(); - auto preproc = info.begin()->second->getPreProcess(); - ASSERT_EQ(preproc[0]->stdScale, 0.1f); - ASSERT_EQ(preproc[0]->meanValue, 104.006f); - } else { - InferenceEngine::NetPass::UnrollRNN_if(net, [] (const RNNCellBase& rnn) -> bool { return true; }); - auto lstmcell_layer = dynamic_pointer_cast(CommonTestUtils::getLayerByName(net, "LSTMCell:split_clip")); - - float ref_coeff = 0.2f; - ASSERT_EQ(lstmcell_layer->min_value, -ref_coeff); - ASSERT_EQ(lstmcell_layer->max_value, ref_coeff); - - ASSERT_EQ(lstmcell_layer->GetParamAsFloat("min"), -ref_coeff); - ASSERT_EQ(lstmcell_layer->GetParamAsFloat("max"), ref_coeff); + for (const auto & op : 
funcs->get_ops()) { + if (!isLSTM) { + if (op->get_friendly_name() == "output") { + const auto roi = std::dynamic_pointer_cast(op); + ASSERT_EQ(roi->get_pooled_h(), 7); + ASSERT_EQ(roi->get_pooled_w(), 7); + ASSERT_EQ(roi->get_sampling_ratio(), 2); + ASSERT_EQ(roi->get_spatial_scale(), 0.25f); + } + } else { + if (op->get_friendly_name() == "LSTMCell") { + const auto lstm_seq = std::dynamic_pointer_cast(op); + ASSERT_EQ(lstm_seq->get_clip(), 0.0f); + ASSERT_EQ(lstm_seq->get_hidden_size(), 256); + } + } } - IE_SUPPRESS_DEPRECATED_END } }; From 72fb7d207cb61fd5b9bb630ee8785881cc656b72 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 7 Sep 2021 12:43:33 +0300 Subject: [PATCH 29/52] Merge tools and inference_engine/tools folders (#7359) * Merge tools folders * Fixed docs * Moved deployment_manager * Fixed path to benchmark_tool docs * python_tools -> python_tools_benchmark --- CODEOWNERS | 1 - docs/IE_DG/Intro_to_Performance.md | 2 +- docs/IE_DG/Model_caching_overview.md | 2 +- docs/IE_DG/Tools_Overview.md | 4 +- docs/index.md | 8 ++-- inference-engine/CMakeLists.txt | 1 - inference-engine/tools/CMakeLists.txt | 5 --- inference-engine/tools/package_BOM.txt | 6 --- scripts/CMakeLists.txt | 8 ---- tools/CMakeLists.txt | 38 ++++++++++++++----- .../compile_tool/CMakeLists.txt | 0 .../tools => tools}/compile_tool/README.md | 0 .../tools => tools}/compile_tool/main.cpp | 0 .../cross_check_tool/README.md | 0 .../cross_check_tool/__init__.py | 0 .../cross_check_tool/cross_check_tool.py | 0 .../cross_check_tool/requirements.txt | 0 .../tools => tools}/cross_check_tool/utils.py | 0 .../deployment_manager/configs/darwin.json | 0 .../deployment_manager/configs/linux.json | 0 .../deployment_manager/configs/windows.json | 0 .../deployment_manager/deployman/config.py | 0 .../deployment_manager/deployman/logger.py | 0 .../deployment_manager/deployman/main.py | 0 .../deployment_manager/deployman/ui.py | 0 .../deployment_manager/deployment_manager.py | 0 26 files changed, 37 
insertions(+), 38 deletions(-) delete mode 100644 inference-engine/tools/CMakeLists.txt delete mode 100644 inference-engine/tools/package_BOM.txt rename {inference-engine/tools => tools}/compile_tool/CMakeLists.txt (100%) rename {inference-engine/tools => tools}/compile_tool/README.md (100%) rename {inference-engine/tools => tools}/compile_tool/main.cpp (100%) rename {inference-engine/tools => tools}/cross_check_tool/README.md (100%) rename {inference-engine/tools => tools}/cross_check_tool/__init__.py (100%) rename {inference-engine/tools => tools}/cross_check_tool/cross_check_tool.py (100%) rename {inference-engine/tools => tools}/cross_check_tool/requirements.txt (100%) rename {inference-engine/tools => tools}/cross_check_tool/utils.py (100%) rename {scripts => tools}/deployment_manager/configs/darwin.json (100%) rename {scripts => tools}/deployment_manager/configs/linux.json (100%) rename {scripts => tools}/deployment_manager/configs/windows.json (100%) rename {scripts => tools}/deployment_manager/deployman/config.py (100%) rename {scripts => tools}/deployment_manager/deployman/logger.py (100%) rename {scripts => tools}/deployment_manager/deployman/main.py (100%) rename {scripts => tools}/deployment_manager/deployman/ui.py (100%) rename {scripts => tools}/deployment_manager/deployment_manager.py (100%) diff --git a/CODEOWNERS b/CODEOWNERS index 92a821790ea..bc7beb0841e 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -44,7 +44,6 @@ azure-pipelines.yml @openvinotoolkit/openvino-admins /inference-engine/tests/functional/plugin/myriad/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers /inference-engine/tests/unit/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers /inference-engine/tests/unit/engines/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers -/inference-engine/tools/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers 
/inference-engine/scripts/run_tests_myriad_multistick.sh @openvinotoolkit/openvino-ie-vpu-maintainers # IE GNA: diff --git a/docs/IE_DG/Intro_to_Performance.md b/docs/IE_DG/Intro_to_Performance.md index 48d1ea5c56c..ca360d0d06f 100644 --- a/docs/IE_DG/Intro_to_Performance.md +++ b/docs/IE_DG/Intro_to_Performance.md @@ -34,7 +34,7 @@ Refer to the [Benchmark App](../../inference-engine/samples/benchmark_app/README ## Using Caching API for first inference latency optimization Since with the 2021.4 release, Inference Engine provides an ability to enable internal caching of loaded networks. This can significantly reduce load network latency for some devices at application startup. -Internally caching uses plugin's Export/ImportNetwork flow, like it is done for [Compile tool](../../inference-engine/tools/compile_tool/README.md), using the regular ReadNetwork/LoadNetwork API. +Internally caching uses plugin's Export/ImportNetwork flow, like it is done for [Compile tool](../../tools/compile_tool/README.md), using the regular ReadNetwork/LoadNetwork API. Refer to the [Model Caching Overview](Model_caching_overview.md) for more detailed explanation. ## Using Async API diff --git a/docs/IE_DG/Model_caching_overview.md b/docs/IE_DG/Model_caching_overview.md index 10d3d6cf99e..d480d7626d9 100644 --- a/docs/IE_DG/Model_caching_overview.md +++ b/docs/IE_DG/Model_caching_overview.md @@ -20,7 +20,7 @@ As described in [Inference Engine Developer Guide](Deep_Learning_Inference_Engin Step #5 can potentially perform several time-consuming device-specific optimizations and network compilations, and such delays can lead to bad user experience on application startup. 
To avoid this, some devices offer -Import/Export network capability, and it is possible to either use [Compile tool](../../inference-engine/tools/compile_tool/README.md) +Import/Export network capability, and it is possible to either use [Compile tool](../../tools/compile_tool/README.md) or enable model caching to export compiled network automatically. Reusing cached networks can significantly reduce load network time. diff --git a/docs/IE_DG/Tools_Overview.md b/docs/IE_DG/Tools_Overview.md index f0741105387..0d408ebf1d3 100644 --- a/docs/IE_DG/Tools_Overview.md +++ b/docs/IE_DG/Tools_Overview.md @@ -9,8 +9,8 @@ The OpenVINO™ toolkit installation includes the following tools: |[Accuracy Checker Tool](@ref omz_tools_accuracy_checker) | `/deployment_tools/tools/open_model_zoo/tools/accuracy_checker`| |[Post-Training Optimization Tool](@ref pot_README) | `/deployment_tools/tools/post_training_optimization_toolkit`| |[Model Downloader](@ref omz_tools_downloader) | `/deployment_tools/tools/model_downloader`| -|[Cross Check Tool](../../inference-engine/tools/cross_check_tool/README.md) | `/deployment_tools/tools/cross_check_tool`| -|[Compile Tool](../../inference-engine/tools/compile_tool/README.md) | `/deployment_tools/inference_engine/lib/intel64/`| +|[Cross Check Tool](../../tools/cross_check_tool/README.md) | `/deployment_tools/tools/cross_check_tool`| +|[Compile Tool](../../tools/compile_tool/README.md) | `/deployment_tools/inference_engine/lib/intel64/`| ## See Also diff --git a/docs/index.md b/docs/index.md index 4f1012b5353..f767109e616 100644 --- a/docs/index.md +++ b/docs/index.md @@ -45,7 +45,7 @@ Useful documents for model optimization: ### Running and Tuning Inference The other core component of OpenVINO™ is the [Inference Engine](IE_DG/Deep_Learning_Inference_Engine_DevGuide.md), which manages the loading and compiling of the optimized neural network model, runs inference operations on input data, and outputs the results. 
Inference Engine can execute synchronously or asynchronously, and its plugin architecture manages the appropriate compilations for execution on multiple Intel® devices, including both workhorse CPUs and specialized graphics and video processing platforms (see below, Packaging and Deployment). -You can use OpenVINO™ Tuning Utilities with the Inference Engine to trial and test inference on your model. The Benchmark utility uses an input model to run iterative tests for throughput or latency measures, and the [Cross Check Utility](../inference-engine/tools/cross_check_tool/README.md) compares performance of differently configured inferences. +You can use OpenVINO™ Tuning Utilities with the Inference Engine to trial and test inference on your model. The Benchmark utility uses an input model to run iterative tests for throughput or latency measures, and the [Cross Check Utility](../tools/cross_check_tool/README.md) compares performance of differently configured inferences. For a full browser-based studio integrating these other key tuning utilities, try the [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction). ![](img/OV-diagram-step3.png) @@ -81,7 +81,7 @@ The Inference Engine's plug-in architecture can be extended to meet other specia * [Deployment Manager Guide](./install_guides/deployment-manager-tool.md) -## OpenVINO™ Toolkit Components +## OpenVINO™ Toolkit Components Intel® Distribution of OpenVINO™ toolkit includes the following components: @@ -90,8 +90,8 @@ Intel® Distribution of OpenVINO™ toolkit includes the following components: - [Inference Engine Samples](IE_DG/Samples_Overview.md): A set of simple console applications demonstrating how to use the Inference Engine in your applications. - [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction): A web-based graphical environment that allows you to easily use various sophisticated OpenVINO™ toolkit components. 
- [Post-training Optimization Tool](@ref pot_README): A tool to calibrate a model and then execute it in the INT8 precision. -- Additional Tools: A set of tools to work with your models including [Benchmark App](../inference-engine/tools/benchmark_tool/README.md), [Cross Check Tool](../inference-engine/tools/cross_check_tool/README.md), [Compile tool](../inference-engine/tools/compile_tool/README.md). -- [Open Model Zoo](@ref omz_models_group_intel) +- Additional Tools: A set of tools to work with your models including [Benchmark App](../tools/benchmark_tool/README.md), [Cross Check Tool](../tools/cross_check_tool/README.md), [Compile tool](../tools/compile_tool/README.md). +- [Open Model Zoo](@ref omz_models_group_intel) - [Demos](@ref omz_demos): Console applications that provide robust application templates to help you implement specific deep learning scenarios. - Additional Tools: A set of tools to work with your models including [Accuracy Checker Utility](@ref omz_tools_accuracy_checker) and [Model Downloader](@ref omz_tools_downloader). - [Documentation for Pretrained Models](@ref omz_models_group_intel): Documentation for pre-trained models that are available in the [Open Model Zoo repository](https://github.com/openvinotoolkit/open_model_zoo). 
diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt index 69a71eb55b5..fdefac633c7 100644 --- a/inference-engine/CMakeLists.txt +++ b/inference-engine/CMakeLists.txt @@ -12,7 +12,6 @@ if(ENABLE_PYTHON) add_subdirectory(ie_bridges/python) endif() -add_subdirectory(tools) add_subdirectory(samples) openvino_developer_export_targets(COMPONENT openvino_common TARGETS format_reader ie_samples_utils) diff --git a/inference-engine/tools/CMakeLists.txt b/inference-engine/tools/CMakeLists.txt deleted file mode 100644 index 9112658ce5e..00000000000 --- a/inference-engine/tools/CMakeLists.txt +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -add_subdirectory(compile_tool) diff --git a/inference-engine/tools/package_BOM.txt b/inference-engine/tools/package_BOM.txt deleted file mode 100644 index b1d58875c48..00000000000 --- a/inference-engine/tools/package_BOM.txt +++ /dev/null @@ -1,6 +0,0 @@ -cross_check_tool/__init__.py -cross_check_tool/utils.py -cross_check_tool/requirements.txt -cross_check_tool/README.md -cross_check_tool/cross_check_tool.py -compile_tool/README.md \ No newline at end of file diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index 6cf6e02c1c4..ac094ce648b 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -47,14 +47,6 @@ if(UNIX) COMPONENT install_dependencies) endif() -# install DeploymentManager - -ie_cpack_add_component(deployment_manager REQUIRED) -install(DIRECTORY deployment_manager/ - DESTINATION deployment_tools/tools/deployment_manager - COMPONENT deployment_manager - USE_SOURCE_PERMISSIONS) - # install files for demo ie_cpack_add_component(demo_scripts DEPENDS core) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index fc468719d16..4d52674546e 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -1,19 +1,35 @@ # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 + 
cmake_minimum_required(VERSION 3.13) -project(python_tools) +project(OpenVINO_Tools DESCRIPTION "OpenVINO toolkit Development Tools") if(NOT DEFINED OpenVINO_SOURCE_DIR) find_package(InferenceEngineDeveloperPackage QUIET) + set(python_tools_only ON) endif() +# C++ tools + +if(NOT python_tools_only) + add_subdirectory(compile_tool) +endif() + +# Python tools + +# install deployment_manager + +ie_cpack_add_component(deployment_manager REQUIRED) +install(DIRECTORY deployment_manager/ + DESTINATION deployment_tools/tools/deployment_manager + COMPONENT deployment_manager + USE_SOURCE_PERMISSIONS) + if(ENABLE_PYTHON) find_package(PythonInterp 3 REQUIRED) set(PYTHON_VERSION python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}) - set(TARGET_NAME "python_tools") - if(WIN32) set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python_api/${PYTHON_VERSION}/openvino) else() @@ -21,24 +37,28 @@ if(ENABLE_PYTHON) endif() # creates a copy inside bin directory for developers to have ability running python benchmark_app - add_custom_target(${TARGET_NAME} ALL + add_custom_target(python_tools_benchmark ALL COMMAND ${CMAKE_COMMAND} -E make_directory ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/tools - COMMAND ${CMAKE_COMMAND} -E copy_directory ${OpenVINO_SOURCE_DIR}/tools/benchmark_tool/openvino/tools/benchmark ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/tools/benchmark + COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/benchmark_tool/openvino/tools/benchmark + ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/tools/benchmark ) ie_cpack_add_component(python_tools_${PYTHON_VERSION}) ie_cpack_add_component(python_tools) + # install cross_check_tool tool + install(DIRECTORY cross_check_tool + DESTINATION deployment_tools/tools + COMPONENT python_tools) + + # install benchmark_app tool install(FILES benchmark_tool/benchmark_app.py benchmark_tool/README.md benchmark_tool/requirements.txt DESTINATION deployment_tools/tools/benchmark_tool COMPONENT python_tools) - install(DIRECTORY 
../inference-engine/tools/cross_check_tool - DESTINATION deployment_tools/tools - COMPONENT python_tools) - + # install openvino/tools/benchmark as a python package install(DIRECTORY benchmark_tool/openvino/tools/benchmark DESTINATION python/${PYTHON_VERSION}/openvino/tools USE_SOURCE_PERMISSIONS diff --git a/inference-engine/tools/compile_tool/CMakeLists.txt b/tools/compile_tool/CMakeLists.txt similarity index 100% rename from inference-engine/tools/compile_tool/CMakeLists.txt rename to tools/compile_tool/CMakeLists.txt diff --git a/inference-engine/tools/compile_tool/README.md b/tools/compile_tool/README.md similarity index 100% rename from inference-engine/tools/compile_tool/README.md rename to tools/compile_tool/README.md diff --git a/inference-engine/tools/compile_tool/main.cpp b/tools/compile_tool/main.cpp similarity index 100% rename from inference-engine/tools/compile_tool/main.cpp rename to tools/compile_tool/main.cpp diff --git a/inference-engine/tools/cross_check_tool/README.md b/tools/cross_check_tool/README.md similarity index 100% rename from inference-engine/tools/cross_check_tool/README.md rename to tools/cross_check_tool/README.md diff --git a/inference-engine/tools/cross_check_tool/__init__.py b/tools/cross_check_tool/__init__.py similarity index 100% rename from inference-engine/tools/cross_check_tool/__init__.py rename to tools/cross_check_tool/__init__.py diff --git a/inference-engine/tools/cross_check_tool/cross_check_tool.py b/tools/cross_check_tool/cross_check_tool.py similarity index 100% rename from inference-engine/tools/cross_check_tool/cross_check_tool.py rename to tools/cross_check_tool/cross_check_tool.py diff --git a/inference-engine/tools/cross_check_tool/requirements.txt b/tools/cross_check_tool/requirements.txt similarity index 100% rename from inference-engine/tools/cross_check_tool/requirements.txt rename to tools/cross_check_tool/requirements.txt diff --git a/inference-engine/tools/cross_check_tool/utils.py 
b/tools/cross_check_tool/utils.py similarity index 100% rename from inference-engine/tools/cross_check_tool/utils.py rename to tools/cross_check_tool/utils.py diff --git a/scripts/deployment_manager/configs/darwin.json b/tools/deployment_manager/configs/darwin.json similarity index 100% rename from scripts/deployment_manager/configs/darwin.json rename to tools/deployment_manager/configs/darwin.json diff --git a/scripts/deployment_manager/configs/linux.json b/tools/deployment_manager/configs/linux.json similarity index 100% rename from scripts/deployment_manager/configs/linux.json rename to tools/deployment_manager/configs/linux.json diff --git a/scripts/deployment_manager/configs/windows.json b/tools/deployment_manager/configs/windows.json similarity index 100% rename from scripts/deployment_manager/configs/windows.json rename to tools/deployment_manager/configs/windows.json diff --git a/scripts/deployment_manager/deployman/config.py b/tools/deployment_manager/deployman/config.py similarity index 100% rename from scripts/deployment_manager/deployman/config.py rename to tools/deployment_manager/deployman/config.py diff --git a/scripts/deployment_manager/deployman/logger.py b/tools/deployment_manager/deployman/logger.py similarity index 100% rename from scripts/deployment_manager/deployman/logger.py rename to tools/deployment_manager/deployman/logger.py diff --git a/scripts/deployment_manager/deployman/main.py b/tools/deployment_manager/deployman/main.py similarity index 100% rename from scripts/deployment_manager/deployman/main.py rename to tools/deployment_manager/deployman/main.py diff --git a/scripts/deployment_manager/deployman/ui.py b/tools/deployment_manager/deployman/ui.py similarity index 100% rename from scripts/deployment_manager/deployman/ui.py rename to tools/deployment_manager/deployman/ui.py diff --git a/scripts/deployment_manager/deployment_manager.py b/tools/deployment_manager/deployment_manager.py similarity index 100% rename from 
scripts/deployment_manager/deployment_manager.py rename to tools/deployment_manager/deployment_manager.py From c0a3cebe0be20635322280c644db6215959e3695 Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Tue, 7 Sep 2021 14:05:44 +0300 Subject: [PATCH 30/52] [IE TESTS] Enable Opset8 in Conformance report (#7369) --- .../functional_test_utils/src/layer_test_utils/summary.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/src/layer_test_utils/summary.cpp b/inference-engine/tests/ie_test_utils/functional_test_utils/src/layer_test_utils/summary.cpp index 91d6ee58c43..449d8b29491 100644 --- a/inference-engine/tests/ie_test_utils/functional_test_utils/src/layer_test_utils/summary.cpp +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/src/layer_test_utils/summary.cpp @@ -36,6 +36,7 @@ Summary::Summary() { opsets.push_back(ngraph::get_opset5()); opsets.push_back(ngraph::get_opset6()); opsets.push_back(ngraph::get_opset7()); + opsets.push_back(ngraph::get_opset8()); } Summary &Summary::getInstance() { From 27a287b9a750e262c44ba2f2aada8173bb19ace4 Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Tue, 7 Sep 2021 14:32:48 +0300 Subject: [PATCH 31/52] Extend coverage versions in requirements_dev.txt (#7404) Signed-off-by: Roman Kazantsev --- model-optimizer/requirements_dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model-optimizer/requirements_dev.txt b/model-optimizer/requirements_dev.txt index 67640a48229..1a896ac5f88 100644 --- a/model-optimizer/requirements_dev.txt +++ b/model-optimizer/requirements_dev.txt @@ -1,4 +1,4 @@ -coverage==4.4.2 +coverage>=4.4.2,<=5.5 astroid==2.4.2 pylint==2.5.0 pyenchant==1.6.11 From a2aae78f4bbcf0dbde21ffc359cacd22caba3fec Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Tue, 7 Sep 2021 16:30:06 +0300 Subject: [PATCH 32/52] Moved opsets to ov namespace (#7388) --- ngraph/core/include/ngraph/opsets/opset.hpp | 79 +------- 
.../core/include/ngraph/opsets/opset1_tbl.hpp | 113 +---------- .../core/include/ngraph/opsets/opset2_tbl.hpp | 137 +------------ .../core/include/ngraph/opsets/opset3_tbl.hpp | 153 +-------------- .../core/include/ngraph/opsets/opset4_tbl.hpp | 146 +------------- .../core/include/ngraph/opsets/opset5_tbl.hpp | 156 +-------------- .../core/include/ngraph/opsets/opset6_tbl.hpp | 165 +--------------- .../core/include/ngraph/opsets/opset7_tbl.hpp | 171 +--------------- .../core/include/ngraph/opsets/opset8_tbl.hpp | 179 +---------------- ngraph/core/include/openvino/op/ops.hpp | 171 ++++++++++++++++ ngraph/core/include/openvino/opsets/opset.hpp | 119 +++++++++++ .../core/include/openvino/opsets/opset1.hpp | 15 ++ .../include/openvino/opsets/opset1_tbl.hpp | 150 ++++++++++++++ .../core/include/openvino/opsets/opset2.hpp | 15 ++ .../include/openvino/opsets/opset2_tbl.hpp | 143 ++++++++++++++ .../core/include/openvino/opsets/opset3.hpp | 15 ++ .../include/openvino/opsets/opset3_tbl.hpp | 159 +++++++++++++++ .../core/include/openvino/opsets/opset4.hpp | 15 ++ .../include/openvino/opsets/opset4_tbl.hpp | 152 ++++++++++++++ .../core/include/openvino/opsets/opset5.hpp | 15 ++ .../include/openvino/opsets/opset5_tbl.hpp | 162 +++++++++++++++ .../core/include/openvino/opsets/opset6.hpp | 15 ++ .../include/openvino/opsets/opset6_tbl.hpp | 171 ++++++++++++++++ .../core/include/openvino/opsets/opset7.hpp | 15 ++ .../include/openvino/opsets/opset7_tbl.hpp | 177 +++++++++++++++++ .../core/include/openvino/opsets/opset8.hpp | 15 ++ .../include/openvino/opsets/opset8_tbl.hpp | 185 ++++++++++++++++++ ngraph/core/src/opsets/opset.cpp | 154 +++++++++------ ngraph/frontend/ir/src/model.cpp | 2 +- ngraph/test/opset1.cpp | 4 +- 30 files changed, 1843 insertions(+), 1325 deletions(-) create mode 100644 ngraph/core/include/openvino/op/ops.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset1.hpp create mode 100644 
ngraph/core/include/openvino/opsets/opset1_tbl.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset2.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset2_tbl.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset3.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset3_tbl.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset4.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset4_tbl.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset5.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset5_tbl.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset6.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset6_tbl.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset7.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset7_tbl.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset8.hpp create mode 100644 ngraph/core/include/openvino/opsets/opset8_tbl.hpp diff --git a/ngraph/core/include/ngraph/opsets/opset.hpp b/ngraph/core/include/ngraph/opsets/opset.hpp index c1f6430a8c8..039abf863d7 100644 --- a/ngraph/core/include/ngraph/opsets/opset.hpp +++ b/ngraph/core/include/ngraph/opsets/opset.hpp @@ -12,99 +12,36 @@ #include "ngraph/factory.hpp" #include "ngraph/ngraph_visibility.hpp" #include "ngraph/node.hpp" +#include "openvino/opsets/opset.hpp" namespace ngraph { /// \brief Run-time opset information -class NGRAPH_API OpSet { +class NGRAPH_API OpSet : public ov::OpSet { static std::mutex& get_mutex(); public: - OpSet() {} - std::set::size_type size() const { - std::lock_guard guard(get_mutex()); - return m_op_types.size(); - } + explicit OpSet(const ov::OpSet& opset); + OpSet(const ngraph::OpSet& opset) = default; + OpSet() = default; /// \brief Insert an op into the opset with a particular name and factory void insert(const std::string& name, const NodeTypeInfo& type_info, FactoryRegistry::Factory factory) { - std::lock_guard 
guard(get_mutex()); - m_op_types.insert(type_info); - m_name_type_info_map[name] = type_info; - m_case_insensitive_type_info_map[to_upper_name(name)] = type_info; - m_factory_registry.register_factory(type_info, factory); + return insert(name, type_info, std::move(factory)); } - /// \brief Insert OP_TYPE into the opset with a special name and the default factory template void insert(const std::string& name) { - insert(name, OP_TYPE::type_info, FactoryRegistry::get_default_factory()); + ov::OpSet::insert(name); } /// \brief Insert OP_TYPE into the opset with the default name and factory template void insert() { - insert(OP_TYPE::type_info.name); + ov::OpSet::insert(OP_TYPE::type_info.name); } - const std::set& get_types_info() const { - return m_op_types; - } - /// \brief Create the op named name using it's factory - ngraph::Node* create(const std::string& name) const; - - /// \brief Create the op named name using it's factory - ngraph::Node* create_insensitive(const std::string& name) const; - - /// \brief Return true if OP_TYPE is in the opset - bool contains_type(const NodeTypeInfo& type_info) const { - std::lock_guard guard(get_mutex()); - return m_op_types.find(type_info) != m_op_types.end(); - } - - /// \brief Return true if OP_TYPE is in the opset - template - bool contains_type() const { - return contains_type(OP_TYPE::type_info); - } - - /// \brief Return true if name is in the opset - bool contains_type(const std::string& name) const { - std::lock_guard guard(get_mutex()); - return m_name_type_info_map.find(name) != m_name_type_info_map.end(); - } - - /// \brief Return true if name is in the opset - bool contains_type_insensitive(const std::string& name) const { - std::lock_guard guard(get_mutex()); - return m_case_insensitive_type_info_map.find(to_upper_name(name)) != m_case_insensitive_type_info_map.end(); - } - - /// \brief Return true if node's type is in the opset - bool contains_op_type(const Node* node) const { - std::lock_guard guard(get_mutex()); 
- return m_op_types.find(node->get_type_info()) != m_op_types.end(); - } - - const std::set& get_type_info_set() const { - return m_op_types; - } ngraph::FactoryRegistry& get_factory_registry() { return m_factory_registry; } - -protected: - static std::string to_upper_name(const std::string& name) { - std::string upper_name = name; - std::locale loc; - std::transform(upper_name.begin(), upper_name.end(), upper_name.begin(), [&loc](char c) { - return std::toupper(c, loc); - }); - return upper_name; - } - - ngraph::FactoryRegistry m_factory_registry; - std::set m_op_types; - std::map m_name_type_info_map; - std::map m_case_insensitive_type_info_map; }; const NGRAPH_API OpSet& get_opset1(); diff --git a/ngraph/core/include/ngraph/opsets/opset1_tbl.hpp b/ngraph/core/include/ngraph/opsets/opset1_tbl.hpp index 03000cffbbb..3a088f9eb66 100644 --- a/ngraph/core/include/ngraph/opsets/opset1_tbl.hpp +++ b/ngraph/core/include/ngraph/opsets/opset1_tbl.hpp @@ -38,113 +38,6 @@ # define NGRAPH_OP(x, y) #endif -NGRAPH_OP(Abs, ngraph::op::v0) -NGRAPH_OP(Acos, ngraph::op::v0) -NGRAPH_OP(Add, ngraph::op::v1) -NGRAPH_OP(Asin, ngraph::op::v0) -NGRAPH_OP(Atan, ngraph::op::v0) -NGRAPH_OP(AvgPool, ngraph::op::v1) -NGRAPH_OP(BatchNormInference, ngraph::op::v0) -NGRAPH_OP(BinaryConvolution, ngraph::op::v1) -NGRAPH_OP(Broadcast, ngraph::op::v1) -NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0) -NGRAPH_OP(Ceiling, ngraph::op::v0) -NGRAPH_OP(Clamp, ngraph::op::v0) -NGRAPH_OP(Concat, ngraph::op::v0) -NGRAPH_OP(Constant, ngraph::op) -NGRAPH_OP(Convert, ngraph::op::v0) -NGRAPH_OP(ConvertLike, ngraph::op::v1) -NGRAPH_OP(Convolution, ngraph::op::v1) -NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(Cos, ngraph::op::v0) -NGRAPH_OP(Cosh, ngraph::op::v0) -NGRAPH_OP(DeformableConvolution, ngraph::op::v1) -NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1) -NGRAPH_OP(DepthToSpace, ngraph::op::v0) -NGRAPH_OP(DetectionOutput, ngraph::op::v0) -NGRAPH_OP(Divide, ngraph::op::v1) -NGRAPH_OP(Elu, 
ngraph::op::v0) -NGRAPH_OP(Erf, ngraph::op::v0) -NGRAPH_OP(Equal, ngraph::op::v1) -NGRAPH_OP(Exp, ngraph::op::v0) -NGRAPH_OP(FakeQuantize, ngraph::op::v0) -NGRAPH_OP(Floor, ngraph::op::v0) -NGRAPH_OP(FloorMod, ngraph::op::v1) -NGRAPH_OP(Gather, ngraph::op::v1) -NGRAPH_OP(GatherTree, ngraph::op::v1) -NGRAPH_OP(Greater, ngraph::op::v1) -NGRAPH_OP(GreaterEqual, ngraph::op::v1) -NGRAPH_OP(GroupConvolution, ngraph::op::v1) -NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(GRN, ngraph::op::v0) -NGRAPH_OP(HardSigmoid, ngraph::op::v0) -NGRAPH_OP(Interpolate, ngraph::op::v0) -NGRAPH_OP(Less, ngraph::op::v1) -NGRAPH_OP(LessEqual, ngraph::op::v1) -NGRAPH_OP(Log, ngraph::op::v0) -NGRAPH_OP(LogicalAnd, ngraph::op::v1) -NGRAPH_OP(LogicalNot, ngraph::op::v1) -NGRAPH_OP(LogicalOr, ngraph::op::v1) -NGRAPH_OP(LogicalXor, ngraph::op::v1) -NGRAPH_OP(LRN, ngraph::op::v0) -NGRAPH_OP(LSTMCell, ngraph::op::v0) -NGRAPH_OP(LSTMSequence, ngraph::op::v0) -NGRAPH_OP(MatMul, ngraph::op::v0) -NGRAPH_OP(MaxPool, ngraph::op::v1) -NGRAPH_OP(Maximum, ngraph::op::v1) -NGRAPH_OP(Minimum, ngraph::op::v1) -NGRAPH_OP(Mod, ngraph::op::v1) -NGRAPH_OP(Multiply, ngraph::op::v1) -NGRAPH_OP(Negative, ngraph::op::v0) -NGRAPH_OP(NonMaxSuppression, ngraph::op::v1) -NGRAPH_OP(NormalizeL2, ngraph::op::v0) -NGRAPH_OP(NotEqual, ngraph::op::v1) -NGRAPH_OP(OneHot, ngraph::op::v1) -NGRAPH_OP(PRelu, ngraph::op::v0) -NGRAPH_OP(PSROIPooling, ngraph::op::v0) -NGRAPH_OP(Pad, ngraph::op::v1) -NGRAPH_OP(Parameter, ngraph::op::v0) -NGRAPH_OP(Power, ngraph::op::v1) -NGRAPH_OP(PriorBox, ngraph::op::v0) -NGRAPH_OP(PriorBoxClustered, ngraph::op::v0) -NGRAPH_OP(Proposal, ngraph::op::v0) -NGRAPH_OP(Range, ngraph::op::v0) -NGRAPH_OP(Relu, ngraph::op::v0) -NGRAPH_OP(ReduceMax, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalAnd, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalOr, ngraph::op::v1) -NGRAPH_OP(ReduceMean, ngraph::op::v1) -NGRAPH_OP(ReduceMin, ngraph::op::v1) -NGRAPH_OP(ReduceProd, ngraph::op::v1) -NGRAPH_OP(ReduceSum, 
ngraph::op::v1) -NGRAPH_OP(RegionYolo, ngraph::op::v0) -NGRAPH_OP(Reshape, ngraph::op::v1) -NGRAPH_OP(Result, ngraph::op::v0) -NGRAPH_OP(Reverse, ngraph::op::v1) -NGRAPH_OP(ReverseSequence, ngraph::op::v0) -NGRAPH_OP(RNNCell, ngraph::op::v0) -NGRAPH_OP(Select, ngraph::op::v1) -NGRAPH_OP(Selu, ngraph::op::v0) -NGRAPH_OP(ShapeOf, ngraph::op::v0) -NGRAPH_OP(ShuffleChannels, ngraph::op::v0) -NGRAPH_OP(Sign, ngraph::op::v0) -NGRAPH_OP(Sigmoid, ngraph::op::v0) -NGRAPH_OP(Sin, ngraph::op::v0) -NGRAPH_OP(Sinh, ngraph::op::v0) -NGRAPH_OP(Softmax, ngraph::op::v1) -NGRAPH_OP(Sqrt, ngraph::op::v0) -NGRAPH_OP(SpaceToDepth, ngraph::op::v0) -NGRAPH_OP(Split, ngraph::op::v1) -NGRAPH_OP(SquaredDifference, ngraph::op::v0) -NGRAPH_OP(Squeeze, ngraph::op::v0) -NGRAPH_OP(StridedSlice, ngraph::op::v1) -NGRAPH_OP(Subtract, ngraph::op::v1) -NGRAPH_OP(Tan, ngraph::op::v0) -NGRAPH_OP(Tanh, ngraph::op::v0) -NGRAPH_OP(TensorIterator, ngraph::op::v0) -NGRAPH_OP(Tile, ngraph::op::v0) -NGRAPH_OP(TopK, ngraph::op::v1) -NGRAPH_OP(Transpose, ngraph::op::v1) -NGRAPH_OP(Unsqueeze, ngraph::op::v0) -NGRAPH_OP(VariadicSplit, ngraph::op::v1) -NGRAPH_OP(Xor, ngraph::op::v0) +#define OPENVINO_OP NGRAPH_OP +#include "openvino/opsets/opset1_tbl.hpp" +#undef OPENVINO_OP diff --git a/ngraph/core/include/ngraph/opsets/opset2_tbl.hpp b/ngraph/core/include/ngraph/opsets/opset2_tbl.hpp index 122cba77f90..3dc125d1daf 100644 --- a/ngraph/core/include/ngraph/opsets/opset2_tbl.hpp +++ b/ngraph/core/include/ngraph/opsets/opset2_tbl.hpp @@ -7,137 +7,6 @@ # define NGRAPH_OP(x, y) #endif -NGRAPH_OP(Abs, ngraph::op::v0) -NGRAPH_OP(Acos, ngraph::op::v0) -NGRAPH_OP(Add, ngraph::op::v1) -NGRAPH_OP(Asin, ngraph::op::v0) -NGRAPH_OP(Atan, ngraph::op::v0) -NGRAPH_OP(AvgPool, ngraph::op::v1) -NGRAPH_OP(BatchNormInference, ngraph::op::v0) -NGRAPH_OP(BinaryConvolution, ngraph::op::v1) -NGRAPH_OP(Broadcast, ngraph::op::v1) -NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0) -NGRAPH_OP(Ceiling, ngraph::op::v0) -NGRAPH_OP(Clamp, 
ngraph::op::v0) -NGRAPH_OP(Concat, ngraph::op::v0) -NGRAPH_OP(Constant, ngraph::op) -NGRAPH_OP(Convert, ngraph::op::v0) -NGRAPH_OP(ConvertLike, ngraph::op::v1) -NGRAPH_OP(Convolution, ngraph::op::v1) -NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(Cos, ngraph::op::v0) -NGRAPH_OP(Cosh, ngraph::op::v0) -NGRAPH_OP(DeformableConvolution, ngraph::op::v1) -NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1) -NGRAPH_OP(DepthToSpace, ngraph::op::v0) -NGRAPH_OP(DetectionOutput, ngraph::op::v0) -NGRAPH_OP(Divide, ngraph::op::v1) -NGRAPH_OP(Elu, ngraph::op::v0) -NGRAPH_OP(Erf, ngraph::op::v0) -NGRAPH_OP(Equal, ngraph::op::v1) -NGRAPH_OP(Exp, ngraph::op::v0) -NGRAPH_OP(FakeQuantize, ngraph::op::v0) -NGRAPH_OP(Floor, ngraph::op::v0) -NGRAPH_OP(FloorMod, ngraph::op::v1) -NGRAPH_OP(Gather, ngraph::op::v1) -NGRAPH_OP(GatherTree, ngraph::op::v1) -NGRAPH_OP(Greater, ngraph::op::v1) -NGRAPH_OP(GreaterEqual, ngraph::op::v1) -NGRAPH_OP(GroupConvolution, ngraph::op::v1) -NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(GRN, ngraph::op::v0) -NGRAPH_OP(HardSigmoid, ngraph::op::v0) -NGRAPH_OP(Interpolate, ngraph::op::v0) -NGRAPH_OP(Less, ngraph::op::v1) -NGRAPH_OP(LessEqual, ngraph::op::v1) -NGRAPH_OP(Log, ngraph::op::v0) -NGRAPH_OP(LogicalAnd, ngraph::op::v1) -NGRAPH_OP(LogicalNot, ngraph::op::v1) -NGRAPH_OP(LogicalOr, ngraph::op::v1) -NGRAPH_OP(LogicalXor, ngraph::op::v1) -NGRAPH_OP(LRN, ngraph::op::v0) -NGRAPH_OP(LSTMCell, ngraph::op::v0) -NGRAPH_OP(LSTMSequence, ngraph::op::v0) -NGRAPH_OP(MatMul, ngraph::op::v0) -NGRAPH_OP(MaxPool, ngraph::op::v1) -NGRAPH_OP(Maximum, ngraph::op::v1) -NGRAPH_OP(Minimum, ngraph::op::v1) -NGRAPH_OP(Mod, ngraph::op::v1) -NGRAPH_OP(Multiply, ngraph::op::v1) - -NGRAPH_OP(MVN, ngraph::op::v0) // Missing in opset1 - -NGRAPH_OP(Negative, ngraph::op::v0) -NGRAPH_OP(NonMaxSuppression, ngraph::op::v1) -NGRAPH_OP(NormalizeL2, ngraph::op::v0) -NGRAPH_OP(NotEqual, ngraph::op::v1) -NGRAPH_OP(OneHot, ngraph::op::v1) -NGRAPH_OP(PRelu, 
ngraph::op::v0) -NGRAPH_OP(PSROIPooling, ngraph::op::v0) -NGRAPH_OP(Pad, ngraph::op::v1) -NGRAPH_OP(Parameter, ngraph::op::v0) -NGRAPH_OP(Power, ngraph::op::v1) -NGRAPH_OP(PriorBox, ngraph::op::v0) -NGRAPH_OP(PriorBoxClustered, ngraph::op::v0) -NGRAPH_OP(Proposal, ngraph::op::v0) -NGRAPH_OP(Range, ngraph::op::v0) -NGRAPH_OP(Relu, ngraph::op::v0) -NGRAPH_OP(ReduceMax, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalAnd, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalOr, ngraph::op::v1) -NGRAPH_OP(ReduceMean, ngraph::op::v1) -NGRAPH_OP(ReduceMin, ngraph::op::v1) -NGRAPH_OP(ReduceProd, ngraph::op::v1) -NGRAPH_OP(ReduceSum, ngraph::op::v1) -NGRAPH_OP(RegionYolo, ngraph::op::v0) - -NGRAPH_OP(ReorgYolo, ngraph::op::v0) // Missing in opset1 - -NGRAPH_OP(Reshape, ngraph::op::v1) -NGRAPH_OP(Result, ngraph::op::v0) - -// Moved out of opset2, it was added to opset1 by mistake -// NGRAPH_OP(Reverse, ngraph::op::v1) - -NGRAPH_OP(ReverseSequence, ngraph::op::v0) - -// Moved out of opset2, it was added to opset1 by mistake -// NGRAPH_OP(RNNCell, ngraph::op::v0) - -NGRAPH_OP(ROIPooling, ngraph::op::v0) // Missing in opset1 - -NGRAPH_OP(Select, ngraph::op::v1) -NGRAPH_OP(Selu, ngraph::op::v0) -NGRAPH_OP(ShapeOf, ngraph::op::v0) - -// Moved out of opset2, it was added to opset1 by mistake -// NGRAPH_OP(ShuffleChannels, ngraph::op::v0) - -NGRAPH_OP(Sign, ngraph::op::v0) -NGRAPH_OP(Sigmoid, ngraph::op::v0) -NGRAPH_OP(Sin, ngraph::op::v0) -NGRAPH_OP(Sinh, ngraph::op::v0) -NGRAPH_OP(Softmax, ngraph::op::v1) -NGRAPH_OP(Sqrt, ngraph::op::v0) -NGRAPH_OP(SpaceToDepth, ngraph::op::v0) -NGRAPH_OP(Split, ngraph::op::v1) -NGRAPH_OP(SquaredDifference, ngraph::op::v0) -NGRAPH_OP(Squeeze, ngraph::op::v0) -NGRAPH_OP(StridedSlice, ngraph::op::v1) -NGRAPH_OP(Subtract, ngraph::op::v1) -NGRAPH_OP(Tan, ngraph::op::v0) -NGRAPH_OP(Tanh, ngraph::op::v0) -NGRAPH_OP(TensorIterator, ngraph::op::v0) -NGRAPH_OP(Tile, ngraph::op::v0) -NGRAPH_OP(TopK, ngraph::op::v1) -NGRAPH_OP(Transpose, ngraph::op::v1) -NGRAPH_OP(Unsqueeze, 
ngraph::op::v0) -NGRAPH_OP(VariadicSplit, ngraph::op::v1) - -// Moved out of opset2, it was added to opset1 by mistake -// NGRAPH_OP(Xor, ngraph::op::v0) - -// New operations added in opset2 -NGRAPH_OP(Gelu, ngraph::op::v0) -NGRAPH_OP(BatchToSpace, ngraph::op::v1) -NGRAPH_OP(SpaceToBatch, ngraph::op::v1) +#define OPENVINO_OP NGRAPH_OP +#include "openvino/opsets/opset2_tbl.hpp" +#undef OPENVINO_OP diff --git a/ngraph/core/include/ngraph/opsets/opset3_tbl.hpp b/ngraph/core/include/ngraph/opsets/opset3_tbl.hpp index a23d60c95f5..eccbdc2e64c 100644 --- a/ngraph/core/include/ngraph/opsets/opset3_tbl.hpp +++ b/ngraph/core/include/ngraph/opsets/opset3_tbl.hpp @@ -7,153 +7,6 @@ # define NGRAPH_OP(x, y) #endif -NGRAPH_OP(Abs, ngraph::op::v0) -NGRAPH_OP(Acos, ngraph::op::v0) -NGRAPH_OP(Add, ngraph::op::v1) -NGRAPH_OP(Asin, ngraph::op::v0) -NGRAPH_OP(Atan, ngraph::op::v0) -NGRAPH_OP(AvgPool, ngraph::op::v1) -NGRAPH_OP(BatchNormInference, ngraph::op::v0) -NGRAPH_OP(BinaryConvolution, ngraph::op::v1) -NGRAPH_OP(Broadcast, ngraph::op::v3) -NGRAPH_OP(Bucketize, ngraph::op::v3) -NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0) -NGRAPH_OP(Ceiling, ngraph::op::v0) -NGRAPH_OP(Clamp, ngraph::op::v0) -NGRAPH_OP(Concat, ngraph::op::v0) -NGRAPH_OP(Constant, ngraph::op) -NGRAPH_OP(Convert, ngraph::op::v0) -NGRAPH_OP(ConvertLike, ngraph::op::v1) -NGRAPH_OP(Convolution, ngraph::op::v1) -NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(Cos, ngraph::op::v0) -NGRAPH_OP(Cosh, ngraph::op::v0) -NGRAPH_OP(CumSum, ngraph::op::v0) -NGRAPH_OP(DeformableConvolution, ngraph::op::v1) -NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1) -NGRAPH_OP(DepthToSpace, ngraph::op::v0) -NGRAPH_OP(DetectionOutput, ngraph::op::v0) -NGRAPH_OP(Divide, ngraph::op::v1) -NGRAPH_OP(Elu, ngraph::op::v0) -NGRAPH_OP(Erf, ngraph::op::v0) -NGRAPH_OP(Equal, ngraph::op::v1) -NGRAPH_OP(Exp, ngraph::op::v0) -NGRAPH_OP(ExtractImagePatches, ngraph::op::v3) -NGRAPH_OP(FakeQuantize, ngraph::op::v0) -NGRAPH_OP(Floor, 
ngraph::op::v0) -NGRAPH_OP(FloorMod, ngraph::op::v1) -NGRAPH_OP(Gather, ngraph::op::v1) -NGRAPH_OP(GatherTree, ngraph::op::v1) -NGRAPH_OP(Greater, ngraph::op::v1) -NGRAPH_OP(GreaterEqual, ngraph::op::v1) -NGRAPH_OP(GroupConvolution, ngraph::op::v1) -NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(GRN, ngraph::op::v0) -NGRAPH_OP(HardSigmoid, ngraph::op::v0) -NGRAPH_OP(Interpolate, ngraph::op::v0) -NGRAPH_OP(Less, ngraph::op::v1) -NGRAPH_OP(LessEqual, ngraph::op::v1) -NGRAPH_OP(Log, ngraph::op::v0) -NGRAPH_OP(LogicalAnd, ngraph::op::v1) -NGRAPH_OP(LogicalNot, ngraph::op::v1) -NGRAPH_OP(LogicalOr, ngraph::op::v1) -NGRAPH_OP(LogicalXor, ngraph::op::v1) -NGRAPH_OP(LRN, ngraph::op::v0) -NGRAPH_OP(LSTMCell, ngraph::op::v0) -NGRAPH_OP(LSTMSequence, ngraph::op::v0) -NGRAPH_OP(MatMul, ngraph::op::v0) -NGRAPH_OP(MaxPool, ngraph::op::v1) -NGRAPH_OP(Maximum, ngraph::op::v1) -NGRAPH_OP(Minimum, ngraph::op::v1) -NGRAPH_OP(Mod, ngraph::op::v1) -NGRAPH_OP(Multiply, ngraph::op::v1) - -NGRAPH_OP(MVN, ngraph::op::v0) // Missing in opset1 - -NGRAPH_OP(Negative, ngraph::op::v0) -NGRAPH_OP(NonMaxSuppression, ngraph::op::v3) -NGRAPH_OP(NormalizeL2, ngraph::op::v0) -NGRAPH_OP(NotEqual, ngraph::op::v1) -NGRAPH_OP(OneHot, ngraph::op::v1) -NGRAPH_OP(PRelu, ngraph::op::v0) -NGRAPH_OP(PSROIPooling, ngraph::op::v0) -NGRAPH_OP(Pad, ngraph::op::v1) -NGRAPH_OP(Parameter, ngraph::op::v0) -NGRAPH_OP(Power, ngraph::op::v1) -NGRAPH_OP(PriorBox, ngraph::op::v0) -NGRAPH_OP(PriorBoxClustered, ngraph::op::v0) -NGRAPH_OP(Proposal, ngraph::op::v0) -NGRAPH_OP(Range, ngraph::op::v0) -NGRAPH_OP(Relu, ngraph::op::v0) -NGRAPH_OP(ReduceMax, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalAnd, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalOr, ngraph::op::v1) -NGRAPH_OP(ReduceMean, ngraph::op::v1) -NGRAPH_OP(ReduceMin, ngraph::op::v1) -NGRAPH_OP(ReduceProd, ngraph::op::v1) -NGRAPH_OP(ReduceSum, ngraph::op::v1) -NGRAPH_OP(RegionYolo, ngraph::op::v0) - -NGRAPH_OP(ReorgYolo, ngraph::op::v0) // Missing in opset1 - 
-NGRAPH_OP(Reshape, ngraph::op::v1) -NGRAPH_OP(Result, ngraph::op::v0) - -// Moved out of opset2, it was added to opset1 by mistake -// NGRAPH_OP(Reverse, ngraph::op::v1) - -NGRAPH_OP(ReverseSequence, ngraph::op::v0) - -// Moved out of opset2, it was added to opset1 by mistake -// NGRAPH_OP(RNNCell, ngraph::op::v0) - -NGRAPH_OP(ROIPooling, ngraph::op::v0) // Missing in opset1 - -NGRAPH_OP(Select, ngraph::op::v1) -NGRAPH_OP(Selu, ngraph::op::v0) -// Superseded -// NGRAPH_OP(ShapeOf, ngraph::op::v0) - -NGRAPH_OP(Sign, ngraph::op::v0) -NGRAPH_OP(Sigmoid, ngraph::op::v0) -NGRAPH_OP(Sin, ngraph::op::v0) -NGRAPH_OP(Sinh, ngraph::op::v0) -NGRAPH_OP(Softmax, ngraph::op::v1) -NGRAPH_OP(Sqrt, ngraph::op::v0) -NGRAPH_OP(SpaceToDepth, ngraph::op::v0) -NGRAPH_OP(Split, ngraph::op::v1) -NGRAPH_OP(SquaredDifference, ngraph::op::v0) -NGRAPH_OP(Squeeze, ngraph::op::v0) -NGRAPH_OP(StridedSlice, ngraph::op::v1) -NGRAPH_OP(Subtract, ngraph::op::v1) -NGRAPH_OP(Tan, ngraph::op::v0) -NGRAPH_OP(Tanh, ngraph::op::v0) -NGRAPH_OP(TensorIterator, ngraph::op::v0) -NGRAPH_OP(Tile, ngraph::op::v0) -NGRAPH_OP(Transpose, ngraph::op::v1) -NGRAPH_OP(Unsqueeze, ngraph::op::v0) -NGRAPH_OP(VariadicSplit, ngraph::op::v1) - -// Moved out of opset2, it was added to opset1 by mistake -// NGRAPH_OP(Xor, ngraph::op::v0) - -// New operations added in opset2 -NGRAPH_OP(Gelu, ngraph::op::v0) -NGRAPH_OP(BatchToSpace, ngraph::op::v1) -NGRAPH_OP(SpaceToBatch, ngraph::op::v1) - -// New operations added in opset3 -NGRAPH_OP(EmbeddingBagPackedSum, ngraph::op::v3) -NGRAPH_OP(EmbeddingSegmentsSum, ngraph::op::v3) -NGRAPH_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) -NGRAPH_OP(GRUCell, ngraph::op::v3) -NGRAPH_OP(NonZero, ngraph::op::v3) -NGRAPH_OP(RNNCell, ngraph::op::v0) -NGRAPH_OP(ROIAlign, ngraph::op::v3) -NGRAPH_OP(ScatterElementsUpdate, ngraph::op::v3) -NGRAPH_OP(ScatterUpdate, ngraph::op::v3) -NGRAPH_OP(ShuffleChannels, ngraph::op::v0) -NGRAPH_OP(ShapeOf, ngraph::op::v3) -NGRAPH_OP(Assign, ngraph::op::v3) 
-NGRAPH_OP(ReadValue, ngraph::op::v3) -NGRAPH_OP(TopK, ngraph::op::v3) +#define OPENVINO_OP NGRAPH_OP +#include "openvino/opsets/opset3_tbl.hpp" +#undef OPENVINO_OP diff --git a/ngraph/core/include/ngraph/opsets/opset4_tbl.hpp b/ngraph/core/include/ngraph/opsets/opset4_tbl.hpp index 23b907c1e24..70744995c9e 100644 --- a/ngraph/core/include/ngraph/opsets/opset4_tbl.hpp +++ b/ngraph/core/include/ngraph/opsets/opset4_tbl.hpp @@ -7,146 +7,6 @@ # define NGRAPH_OP(x, y) #endif -NGRAPH_OP(Abs, ngraph::op::v0) -NGRAPH_OP(Acos, ngraph::op::v0) -NGRAPH_OP(Add, ngraph::op::v1) -NGRAPH_OP(Asin, ngraph::op::v0) -NGRAPH_OP(Atan, ngraph::op::v0) -NGRAPH_OP(AvgPool, ngraph::op::v1) -NGRAPH_OP(BatchNormInference, ngraph::op::v0) -NGRAPH_OP(BinaryConvolution, ngraph::op::v1) -NGRAPH_OP(Broadcast, ngraph::op::v3) -NGRAPH_OP(Bucketize, ngraph::op::v3) -NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0) -NGRAPH_OP(Ceiling, ngraph::op::v0) -NGRAPH_OP(Clamp, ngraph::op::v0) -NGRAPH_OP(Concat, ngraph::op::v0) -NGRAPH_OP(Constant, ngraph::op) -NGRAPH_OP(Convert, ngraph::op::v0) -NGRAPH_OP(ConvertLike, ngraph::op::v1) -NGRAPH_OP(Convolution, ngraph::op::v1) -NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(Cos, ngraph::op::v0) -NGRAPH_OP(Cosh, ngraph::op::v0) -NGRAPH_OP(CumSum, ngraph::op::v0) -NGRAPH_OP(DeformableConvolution, ngraph::op::v1) -NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1) -NGRAPH_OP(DepthToSpace, ngraph::op::v0) -NGRAPH_OP(DetectionOutput, ngraph::op::v0) -NGRAPH_OP(Divide, ngraph::op::v1) -NGRAPH_OP(Elu, ngraph::op::v0) -NGRAPH_OP(Erf, ngraph::op::v0) -NGRAPH_OP(Equal, ngraph::op::v1) -NGRAPH_OP(Exp, ngraph::op::v0) -NGRAPH_OP(ExtractImagePatches, ngraph::op::v3) -NGRAPH_OP(FakeQuantize, ngraph::op::v0) -NGRAPH_OP(Floor, ngraph::op::v0) -NGRAPH_OP(FloorMod, ngraph::op::v1) -NGRAPH_OP(Gather, ngraph::op::v1) -NGRAPH_OP(GatherTree, ngraph::op::v1) -NGRAPH_OP(Greater, ngraph::op::v1) -NGRAPH_OP(GreaterEqual, ngraph::op::v1) -NGRAPH_OP(GroupConvolution, 
ngraph::op::v1) -NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(GRN, ngraph::op::v0) -NGRAPH_OP(HardSigmoid, ngraph::op::v0) -NGRAPH_OP(Less, ngraph::op::v1) -NGRAPH_OP(LessEqual, ngraph::op::v1) -NGRAPH_OP(Log, ngraph::op::v0) -NGRAPH_OP(LogicalAnd, ngraph::op::v1) -NGRAPH_OP(LogicalNot, ngraph::op::v1) -NGRAPH_OP(LogicalOr, ngraph::op::v1) -NGRAPH_OP(LogicalXor, ngraph::op::v1) -NGRAPH_OP(LRN, ngraph::op::v0) -NGRAPH_OP(LSTMCell, ngraph::op::v4) -NGRAPH_OP(MatMul, ngraph::op::v0) -NGRAPH_OP(MaxPool, ngraph::op::v1) -NGRAPH_OP(Maximum, ngraph::op::v1) -NGRAPH_OP(Minimum, ngraph::op::v1) -NGRAPH_OP(Mod, ngraph::op::v1) -NGRAPH_OP(Multiply, ngraph::op::v1) -NGRAPH_OP(MVN, ngraph::op::v0) -NGRAPH_OP(Negative, ngraph::op::v0) -NGRAPH_OP(NormalizeL2, ngraph::op::v0) -NGRAPH_OP(NotEqual, ngraph::op::v1) -NGRAPH_OP(OneHot, ngraph::op::v1) -NGRAPH_OP(PRelu, ngraph::op::v0) -NGRAPH_OP(PSROIPooling, ngraph::op::v0) -NGRAPH_OP(Pad, ngraph::op::v1) -NGRAPH_OP(Parameter, ngraph::op::v0) -NGRAPH_OP(Power, ngraph::op::v1) -NGRAPH_OP(PriorBox, ngraph::op::v0) -NGRAPH_OP(PriorBoxClustered, ngraph::op::v0) -NGRAPH_OP(Proposal, ngraph::op::v4) -NGRAPH_OP(Range, ngraph::op::v4) -NGRAPH_OP(Relu, ngraph::op::v0) -NGRAPH_OP(ReduceMax, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalAnd, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalOr, ngraph::op::v1) -NGRAPH_OP(ReduceMean, ngraph::op::v1) -NGRAPH_OP(ReduceMin, ngraph::op::v1) -NGRAPH_OP(ReduceProd, ngraph::op::v1) -NGRAPH_OP(ReduceSum, ngraph::op::v1) -NGRAPH_OP(RegionYolo, ngraph::op::v0) -NGRAPH_OP(ReorgYolo, ngraph::op::v0) -NGRAPH_OP(Reshape, ngraph::op::v1) -NGRAPH_OP(Result, ngraph::op::v0) -NGRAPH_OP(ReverseSequence, ngraph::op::v0) -NGRAPH_OP(ROIPooling, ngraph::op::v0) -NGRAPH_OP(ScatterNDUpdate, ngraph::op::v3) -NGRAPH_OP(Select, ngraph::op::v1) -NGRAPH_OP(Selu, ngraph::op::v0) -NGRAPH_OP(Sign, ngraph::op::v0) -NGRAPH_OP(Sigmoid, ngraph::op::v0) -NGRAPH_OP(Sin, ngraph::op::v0) -NGRAPH_OP(Sinh, ngraph::op::v0) 
-NGRAPH_OP(Softmax, ngraph::op::v1) -NGRAPH_OP(Sqrt, ngraph::op::v0) -NGRAPH_OP(SpaceToDepth, ngraph::op::v0) -NGRAPH_OP(Split, ngraph::op::v1) -NGRAPH_OP(SquaredDifference, ngraph::op::v0) -NGRAPH_OP(Squeeze, ngraph::op::v0) -NGRAPH_OP(StridedSlice, ngraph::op::v1) -NGRAPH_OP(Subtract, ngraph::op::v1) -NGRAPH_OP(Tan, ngraph::op::v0) -NGRAPH_OP(Tanh, ngraph::op::v0) -NGRAPH_OP(TensorIterator, ngraph::op::v0) -NGRAPH_OP(Tile, ngraph::op::v0) -NGRAPH_OP(Transpose, ngraph::op::v1) -NGRAPH_OP(Unsqueeze, ngraph::op::v0) -NGRAPH_OP(VariadicSplit, ngraph::op::v1) - -// New operations added in opset2 -NGRAPH_OP(Gelu, ngraph::op::v0) -NGRAPH_OP(BatchToSpace, ngraph::op::v1) -NGRAPH_OP(SpaceToBatch, ngraph::op::v1) - -// New operations added in opset3 -NGRAPH_OP(EmbeddingBagPackedSum, ngraph::op::v3) -NGRAPH_OP(EmbeddingSegmentsSum, ngraph::op::v3) -NGRAPH_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) -NGRAPH_OP(GRUCell, ngraph::op::v3) -NGRAPH_OP(NonZero, ngraph::op::v3) -NGRAPH_OP(RNNCell, ngraph::op::v0) -NGRAPH_OP(ROIAlign, ngraph::op::v3) -NGRAPH_OP(ScatterElementsUpdate, ngraph::op::v3) -NGRAPH_OP(ScatterUpdate, ngraph::op::v3) -NGRAPH_OP(ShuffleChannels, ngraph::op::v0) -NGRAPH_OP(ShapeOf, ngraph::op::v3) -NGRAPH_OP(Assign, ngraph::op::v3) -NGRAPH_OP(ReadValue, ngraph::op::v3) -NGRAPH_OP(TopK, ngraph::op::v3) - -// New operations added in opset4 -NGRAPH_OP(Acosh, ngraph::op::v3) -NGRAPH_OP(Asinh, ngraph::op::v3) -NGRAPH_OP(Atanh, ngraph::op::v3) -NGRAPH_OP(CTCLoss, ngraph::op::v4) -NGRAPH_OP(HSwish, ngraph::op::v4) -NGRAPH_OP(Interpolate, ngraph::op::v4) -NGRAPH_OP(Mish, ngraph::op::v4) -NGRAPH_OP(NonMaxSuppression, ngraph::op::v4) -NGRAPH_OP(ReduceL1, ngraph::op::v4) -NGRAPH_OP(ReduceL2, ngraph::op::v4) -NGRAPH_OP(SoftPlus, ngraph::op::v4) -NGRAPH_OP(Swish, ngraph::op::v4) +#define OPENVINO_OP NGRAPH_OP +#include "openvino/opsets/opset4_tbl.hpp" +#undef OPENVINO_OP diff --git a/ngraph/core/include/ngraph/opsets/opset5_tbl.hpp 
b/ngraph/core/include/ngraph/opsets/opset5_tbl.hpp index 3f81f851358..2e4b6ad7288 100644 --- a/ngraph/core/include/ngraph/opsets/opset5_tbl.hpp +++ b/ngraph/core/include/ngraph/opsets/opset5_tbl.hpp @@ -7,156 +7,6 @@ # define NGRAPH_OP(x, y) #endif -NGRAPH_OP(Abs, ngraph::op::v0) -NGRAPH_OP(Acos, ngraph::op::v0) -NGRAPH_OP(Add, ngraph::op::v1) -NGRAPH_OP(Asin, ngraph::op::v0) -NGRAPH_OP(Atan, ngraph::op::v0) -NGRAPH_OP(AvgPool, ngraph::op::v1) -NGRAPH_OP(BatchNormInference, ngraph::op::v5) -NGRAPH_OP(BinaryConvolution, ngraph::op::v1) -NGRAPH_OP(Broadcast, ngraph::op::v3) -NGRAPH_OP(Bucketize, ngraph::op::v3) -NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0) -NGRAPH_OP(Ceiling, ngraph::op::v0) -NGRAPH_OP(Clamp, ngraph::op::v0) -NGRAPH_OP(Concat, ngraph::op::v0) -NGRAPH_OP(Constant, ngraph::op) -NGRAPH_OP(Convert, ngraph::op::v0) -NGRAPH_OP(ConvertLike, ngraph::op::v1) -NGRAPH_OP(Convolution, ngraph::op::v1) -NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(Cos, ngraph::op::v0) -NGRAPH_OP(Cosh, ngraph::op::v0) -NGRAPH_OP(CumSum, ngraph::op::v0) -NGRAPH_OP(DeformableConvolution, ngraph::op::v1) -NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1) -NGRAPH_OP(DepthToSpace, ngraph::op::v0) -NGRAPH_OP(DetectionOutput, ngraph::op::v0) -NGRAPH_OP(Divide, ngraph::op::v1) -NGRAPH_OP(Elu, ngraph::op::v0) -NGRAPH_OP(Erf, ngraph::op::v0) -NGRAPH_OP(Equal, ngraph::op::v1) -NGRAPH_OP(Exp, ngraph::op::v0) -NGRAPH_OP(ExtractImagePatches, ngraph::op::v3) -NGRAPH_OP(FakeQuantize, ngraph::op::v0) -NGRAPH_OP(Floor, ngraph::op::v0) -NGRAPH_OP(FloorMod, ngraph::op::v1) -NGRAPH_OP(Gather, ngraph::op::v1) -NGRAPH_OP(GatherTree, ngraph::op::v1) -NGRAPH_OP(Greater, ngraph::op::v1) -NGRAPH_OP(GreaterEqual, ngraph::op::v1) -NGRAPH_OP(GroupConvolution, ngraph::op::v1) -NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(GRN, ngraph::op::v0) -NGRAPH_OP(HardSigmoid, ngraph::op::v0) -NGRAPH_OP(Less, ngraph::op::v1) -NGRAPH_OP(LessEqual, ngraph::op::v1) -NGRAPH_OP(Log, 
ngraph::op::v0) -NGRAPH_OP(LogicalAnd, ngraph::op::v1) -NGRAPH_OP(LogicalNot, ngraph::op::v1) -NGRAPH_OP(LogicalOr, ngraph::op::v1) -NGRAPH_OP(LogicalXor, ngraph::op::v1) -NGRAPH_OP(LRN, ngraph::op::v0) -NGRAPH_OP(LSTMCell, ngraph::op::v4) -NGRAPH_OP(MatMul, ngraph::op::v0) -NGRAPH_OP(MaxPool, ngraph::op::v1) -NGRAPH_OP(Maximum, ngraph::op::v1) -NGRAPH_OP(Minimum, ngraph::op::v1) -NGRAPH_OP(Mod, ngraph::op::v1) -NGRAPH_OP(Multiply, ngraph::op::v1) -NGRAPH_OP(MVN, ngraph::op::v0) -NGRAPH_OP(Negative, ngraph::op::v0) -NGRAPH_OP(NormalizeL2, ngraph::op::v0) -NGRAPH_OP(NotEqual, ngraph::op::v1) -NGRAPH_OP(OneHot, ngraph::op::v1) -NGRAPH_OP(PRelu, ngraph::op::v0) -NGRAPH_OP(PSROIPooling, ngraph::op::v0) -NGRAPH_OP(Pad, ngraph::op::v1) -NGRAPH_OP(Parameter, ngraph::op::v0) -NGRAPH_OP(Power, ngraph::op::v1) -NGRAPH_OP(PriorBox, ngraph::op::v0) -NGRAPH_OP(PriorBoxClustered, ngraph::op::v0) -NGRAPH_OP(Proposal, ngraph::op::v4) -NGRAPH_OP(Range, ngraph::op::v4) -NGRAPH_OP(Relu, ngraph::op::v0) -NGRAPH_OP(ReduceMax, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalAnd, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalOr, ngraph::op::v1) -NGRAPH_OP(ReduceMean, ngraph::op::v1) -NGRAPH_OP(ReduceMin, ngraph::op::v1) -NGRAPH_OP(ReduceProd, ngraph::op::v1) -NGRAPH_OP(ReduceSum, ngraph::op::v1) -NGRAPH_OP(RegionYolo, ngraph::op::v0) -NGRAPH_OP(ReorgYolo, ngraph::op::v0) -NGRAPH_OP(Reshape, ngraph::op::v1) -NGRAPH_OP(Result, ngraph::op::v0) -NGRAPH_OP(ReverseSequence, ngraph::op::v0) -NGRAPH_OP(ROIPooling, ngraph::op::v0) -NGRAPH_OP(ScatterNDUpdate, ngraph::op::v3) -NGRAPH_OP(Select, ngraph::op::v1) -NGRAPH_OP(Selu, ngraph::op::v0) -NGRAPH_OP(Sign, ngraph::op::v0) -NGRAPH_OP(Sigmoid, ngraph::op::v0) -NGRAPH_OP(Sin, ngraph::op::v0) -NGRAPH_OP(Sinh, ngraph::op::v0) -NGRAPH_OP(Softmax, ngraph::op::v1) -NGRAPH_OP(Sqrt, ngraph::op::v0) -NGRAPH_OP(SpaceToDepth, ngraph::op::v0) -NGRAPH_OP(Split, ngraph::op::v1) -NGRAPH_OP(SquaredDifference, ngraph::op::v0) -NGRAPH_OP(Squeeze, ngraph::op::v0) 
-NGRAPH_OP(StridedSlice, ngraph::op::v1) -NGRAPH_OP(Subtract, ngraph::op::v1) -NGRAPH_OP(Tan, ngraph::op::v0) -NGRAPH_OP(Tanh, ngraph::op::v0) -NGRAPH_OP(TensorIterator, ngraph::op::v0) -NGRAPH_OP(Tile, ngraph::op::v0) -NGRAPH_OP(Transpose, ngraph::op::v1) -NGRAPH_OP(Unsqueeze, ngraph::op::v0) -NGRAPH_OP(VariadicSplit, ngraph::op::v1) - -// New operations added in opset2 -NGRAPH_OP(Gelu, ngraph::op::v0) -NGRAPH_OP(BatchToSpace, ngraph::op::v1) -NGRAPH_OP(SpaceToBatch, ngraph::op::v1) - -// New operations added in opset3 -NGRAPH_OP(EmbeddingBagPackedSum, ngraph::op::v3) -NGRAPH_OP(EmbeddingSegmentsSum, ngraph::op::v3) -NGRAPH_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) -NGRAPH_OP(GRUCell, ngraph::op::v3) -NGRAPH_OP(NonZero, ngraph::op::v3) -NGRAPH_OP(RNNCell, ngraph::op::v0) -NGRAPH_OP(ROIAlign, ngraph::op::v3) -NGRAPH_OP(ScatterElementsUpdate, ngraph::op::v3) -NGRAPH_OP(ScatterUpdate, ngraph::op::v3) -NGRAPH_OP(ShuffleChannels, ngraph::op::v0) -NGRAPH_OP(ShapeOf, ngraph::op::v3) -NGRAPH_OP(Assign, ngraph::op::v3) -NGRAPH_OP(ReadValue, ngraph::op::v3) -NGRAPH_OP(TopK, ngraph::op::v3) - -// New operations added in opset4 -NGRAPH_OP(Acosh, ngraph::op::v3) -NGRAPH_OP(Asinh, ngraph::op::v3) -NGRAPH_OP(Atanh, ngraph::op::v3) -NGRAPH_OP(CTCLoss, ngraph::op::v4) -NGRAPH_OP(HSwish, ngraph::op::v4) -NGRAPH_OP(Interpolate, ngraph::op::v4) -NGRAPH_OP(Mish, ngraph::op::v4) -NGRAPH_OP(ReduceL1, ngraph::op::v4) -NGRAPH_OP(ReduceL2, ngraph::op::v4) -NGRAPH_OP(SoftPlus, ngraph::op::v4) -NGRAPH_OP(Swish, ngraph::op::v4) - -// New operations added in opset5 -NGRAPH_OP(GatherND, ngraph::op::v5) -NGRAPH_OP(GRUSequence, ngraph::op::v5) -NGRAPH_OP(HSigmoid, ngraph::op::v5) -NGRAPH_OP(LogSoftmax, ngraph::op::v5) -NGRAPH_OP(Loop, ngraph::op::v5) -NGRAPH_OP(LSTMSequence, ngraph::op::v5) -NGRAPH_OP(NonMaxSuppression, ngraph::op::v5) -NGRAPH_OP(RNNSequence, ngraph::op::v5) -NGRAPH_OP(Round, ngraph::op::v5) +#define OPENVINO_OP NGRAPH_OP +#include "openvino/opsets/opset5_tbl.hpp" +#undef 
OPENVINO_OP diff --git a/ngraph/core/include/ngraph/opsets/opset6_tbl.hpp b/ngraph/core/include/ngraph/opsets/opset6_tbl.hpp index d188304b53c..e9cb2a0c12a 100644 --- a/ngraph/core/include/ngraph/opsets/opset6_tbl.hpp +++ b/ngraph/core/include/ngraph/opsets/opset6_tbl.hpp @@ -7,165 +7,6 @@ # define NGRAPH_OP(x, y) #endif -NGRAPH_OP(Abs, ngraph::op::v0) -NGRAPH_OP(Acos, ngraph::op::v0) -NGRAPH_OP(Add, ngraph::op::v1) -NGRAPH_OP(Asin, ngraph::op::v0) -NGRAPH_OP(Atan, ngraph::op::v0) -NGRAPH_OP(AvgPool, ngraph::op::v1) -NGRAPH_OP(BatchNormInference, ngraph::op::v5) -NGRAPH_OP(BinaryConvolution, ngraph::op::v1) -NGRAPH_OP(Broadcast, ngraph::op::v3) -NGRAPH_OP(Bucketize, ngraph::op::v3) -NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0) -NGRAPH_OP(Ceiling, ngraph::op::v0) -NGRAPH_OP(Clamp, ngraph::op::v0) -NGRAPH_OP(Concat, ngraph::op::v0) -NGRAPH_OP(Constant, ngraph::op) -NGRAPH_OP(Convert, ngraph::op::v0) -NGRAPH_OP(ConvertLike, ngraph::op::v1) -NGRAPH_OP(Convolution, ngraph::op::v1) -NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(Cos, ngraph::op::v0) -NGRAPH_OP(Cosh, ngraph::op::v0) -NGRAPH_OP(CumSum, ngraph::op::v0) -NGRAPH_OP(DeformableConvolution, ngraph::op::v1) -NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1) -NGRAPH_OP(DepthToSpace, ngraph::op::v0) -NGRAPH_OP(DetectionOutput, ngraph::op::v0) -NGRAPH_OP(Divide, ngraph::op::v1) -NGRAPH_OP(Elu, ngraph::op::v0) -NGRAPH_OP(Erf, ngraph::op::v0) -NGRAPH_OP(Equal, ngraph::op::v1) -NGRAPH_OP(Exp, ngraph::op::v0) -NGRAPH_OP(ExtractImagePatches, ngraph::op::v3) -NGRAPH_OP(FakeQuantize, ngraph::op::v0) -NGRAPH_OP(Floor, ngraph::op::v0) -NGRAPH_OP(FloorMod, ngraph::op::v1) -NGRAPH_OP(Gather, ngraph::op::v1) -NGRAPH_OP(GatherTree, ngraph::op::v1) -NGRAPH_OP(Greater, ngraph::op::v1) -NGRAPH_OP(GreaterEqual, ngraph::op::v1) -NGRAPH_OP(GroupConvolution, ngraph::op::v1) -NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(GRN, ngraph::op::v0) -NGRAPH_OP(HardSigmoid, ngraph::op::v0) -NGRAPH_OP(Less, 
ngraph::op::v1) -NGRAPH_OP(LessEqual, ngraph::op::v1) -NGRAPH_OP(Log, ngraph::op::v0) -NGRAPH_OP(LogicalAnd, ngraph::op::v1) -NGRAPH_OP(LogicalNot, ngraph::op::v1) -NGRAPH_OP(LogicalOr, ngraph::op::v1) -NGRAPH_OP(LogicalXor, ngraph::op::v1) -NGRAPH_OP(LRN, ngraph::op::v0) -NGRAPH_OP(LSTMCell, ngraph::op::v4) -NGRAPH_OP(MatMul, ngraph::op::v0) -NGRAPH_OP(MaxPool, ngraph::op::v1) -NGRAPH_OP(Maximum, ngraph::op::v1) -NGRAPH_OP(Minimum, ngraph::op::v1) -NGRAPH_OP(Mod, ngraph::op::v1) -NGRAPH_OP(Multiply, ngraph::op::v1) -NGRAPH_OP(Negative, ngraph::op::v0) -NGRAPH_OP(NormalizeL2, ngraph::op::v0) -NGRAPH_OP(NotEqual, ngraph::op::v1) -NGRAPH_OP(OneHot, ngraph::op::v1) -NGRAPH_OP(PRelu, ngraph::op::v0) -NGRAPH_OP(PSROIPooling, ngraph::op::v0) -NGRAPH_OP(Pad, ngraph::op::v1) -NGRAPH_OP(Parameter, ngraph::op::v0) -NGRAPH_OP(Power, ngraph::op::v1) -NGRAPH_OP(PriorBox, ngraph::op::v0) -NGRAPH_OP(PriorBoxClustered, ngraph::op::v0) -NGRAPH_OP(Proposal, ngraph::op::v4) -NGRAPH_OP(Range, ngraph::op::v4) -NGRAPH_OP(Relu, ngraph::op::v0) -NGRAPH_OP(ReduceMax, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalAnd, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalOr, ngraph::op::v1) -NGRAPH_OP(ReduceMean, ngraph::op::v1) -NGRAPH_OP(ReduceMin, ngraph::op::v1) -NGRAPH_OP(ReduceProd, ngraph::op::v1) -NGRAPH_OP(ReduceSum, ngraph::op::v1) -NGRAPH_OP(RegionYolo, ngraph::op::v0) -NGRAPH_OP(ReorgYolo, ngraph::op::v0) -NGRAPH_OP(Reshape, ngraph::op::v1) -NGRAPH_OP(Result, ngraph::op::v0) -NGRAPH_OP(ReverseSequence, ngraph::op::v0) -NGRAPH_OP(ROIPooling, ngraph::op::v0) -NGRAPH_OP(ScatterNDUpdate, ngraph::op::v3) -NGRAPH_OP(Select, ngraph::op::v1) -NGRAPH_OP(Selu, ngraph::op::v0) -NGRAPH_OP(Sign, ngraph::op::v0) -NGRAPH_OP(Sigmoid, ngraph::op::v0) -NGRAPH_OP(Sin, ngraph::op::v0) -NGRAPH_OP(Sinh, ngraph::op::v0) -NGRAPH_OP(Softmax, ngraph::op::v1) -NGRAPH_OP(Sqrt, ngraph::op::v0) -NGRAPH_OP(SpaceToDepth, ngraph::op::v0) -NGRAPH_OP(Split, ngraph::op::v1) -NGRAPH_OP(SquaredDifference, ngraph::op::v0) 
-NGRAPH_OP(Squeeze, ngraph::op::v0) -NGRAPH_OP(StridedSlice, ngraph::op::v1) -NGRAPH_OP(Subtract, ngraph::op::v1) -NGRAPH_OP(Tan, ngraph::op::v0) -NGRAPH_OP(Tanh, ngraph::op::v0) -NGRAPH_OP(TensorIterator, ngraph::op::v0) -NGRAPH_OP(Tile, ngraph::op::v0) -NGRAPH_OP(Transpose, ngraph::op::v1) -NGRAPH_OP(Unsqueeze, ngraph::op::v0) -NGRAPH_OP(VariadicSplit, ngraph::op::v1) - -// New operations added in opset2 -NGRAPH_OP(Gelu, ngraph::op::v0) -NGRAPH_OP(BatchToSpace, ngraph::op::v1) -NGRAPH_OP(SpaceToBatch, ngraph::op::v1) - -// New operations added in opset3 -NGRAPH_OP(EmbeddingBagPackedSum, ngraph::op::v3) -NGRAPH_OP(EmbeddingSegmentsSum, ngraph::op::v3) -NGRAPH_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) -NGRAPH_OP(GRUCell, ngraph::op::v3) -NGRAPH_OP(NonZero, ngraph::op::v3) -NGRAPH_OP(RNNCell, ngraph::op::v0) -NGRAPH_OP(ROIAlign, ngraph::op::v3) -NGRAPH_OP(ScatterElementsUpdate, ngraph::op::v3) -NGRAPH_OP(ScatterUpdate, ngraph::op::v3) -NGRAPH_OP(ShuffleChannels, ngraph::op::v0) -NGRAPH_OP(ShapeOf, ngraph::op::v3) -NGRAPH_OP(TopK, ngraph::op::v3) - -// New operations added in opset4 -NGRAPH_OP(Acosh, ngraph::op::v3) -NGRAPH_OP(Asinh, ngraph::op::v3) -NGRAPH_OP(Atanh, ngraph::op::v3) -NGRAPH_OP(CTCLoss, ngraph::op::v4) -NGRAPH_OP(HSwish, ngraph::op::v4) -NGRAPH_OP(Interpolate, ngraph::op::v4) -NGRAPH_OP(Mish, ngraph::op::v4) -NGRAPH_OP(ReduceL1, ngraph::op::v4) -NGRAPH_OP(ReduceL2, ngraph::op::v4) -NGRAPH_OP(SoftPlus, ngraph::op::v4) -NGRAPH_OP(Swish, ngraph::op::v4) - -// New operations added in opset5 -NGRAPH_OP(GatherND, ngraph::op::v5) -NGRAPH_OP(GRUSequence, ngraph::op::v5) -NGRAPH_OP(HSigmoid, ngraph::op::v5) -NGRAPH_OP(LogSoftmax, ngraph::op::v5) -NGRAPH_OP(Loop, ngraph::op::v5) -NGRAPH_OP(LSTMSequence, ngraph::op::v5) -NGRAPH_OP(NonMaxSuppression, ngraph::op::v5) -NGRAPH_OP(RNNSequence, ngraph::op::v5) -NGRAPH_OP(Round, ngraph::op::v5) - -// New operations added in opset6 -NGRAPH_OP(CTCGreedyDecoderSeqLen, ngraph::op::v6) 
-NGRAPH_OP(ExperimentalDetectronDetectionOutput, ngraph::op::v6) -NGRAPH_OP(ExperimentalDetectronGenerateProposalsSingleImage, ngraph::op::v6) -NGRAPH_OP(ExperimentalDetectronPriorGridGenerator, ngraph::op::v6) -NGRAPH_OP(ExperimentalDetectronROIFeatureExtractor, ngraph::op::v6) -NGRAPH_OP(ExperimentalDetectronTopKROIs, ngraph::op::v6) -NGRAPH_OP(GatherElements, ngraph::op::v6) -NGRAPH_OP(MVN, ngraph::op::v6) -NGRAPH_OP(Assign, ngraph::op::v6) // new version -NGRAPH_OP(ReadValue, ngraph::op::v6) // new version +#define OPENVINO_OP NGRAPH_OP +#include "openvino/opsets/opset6_tbl.hpp" +#undef OPENVINO_OP diff --git a/ngraph/core/include/ngraph/opsets/opset7_tbl.hpp b/ngraph/core/include/ngraph/opsets/opset7_tbl.hpp index 38ffbd373aa..02802da1b94 100644 --- a/ngraph/core/include/ngraph/opsets/opset7_tbl.hpp +++ b/ngraph/core/include/ngraph/opsets/opset7_tbl.hpp @@ -7,171 +7,6 @@ # define NGRAPH_OP(x, y) #endif -NGRAPH_OP(Abs, ngraph::op::v0) -NGRAPH_OP(Acos, ngraph::op::v0) -NGRAPH_OP(Add, ngraph::op::v1) -NGRAPH_OP(Asin, ngraph::op::v0) -NGRAPH_OP(Atan, ngraph::op::v0) -NGRAPH_OP(AvgPool, ngraph::op::v1) -NGRAPH_OP(BatchNormInference, ngraph::op::v5) -NGRAPH_OP(BinaryConvolution, ngraph::op::v1) -NGRAPH_OP(Broadcast, ngraph::op::v3) -NGRAPH_OP(Bucketize, ngraph::op::v3) -NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0) -NGRAPH_OP(Ceiling, ngraph::op::v0) -NGRAPH_OP(Clamp, ngraph::op::v0) -NGRAPH_OP(Concat, ngraph::op::v0) -NGRAPH_OP(Constant, ngraph::op) -NGRAPH_OP(Convert, ngraph::op::v0) -NGRAPH_OP(ConvertLike, ngraph::op::v1) -NGRAPH_OP(Convolution, ngraph::op::v1) -NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(Cos, ngraph::op::v0) -NGRAPH_OP(Cosh, ngraph::op::v0) -NGRAPH_OP(CumSum, ngraph::op::v0) -NGRAPH_OP(DeformableConvolution, ngraph::op::v1) -NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1) -NGRAPH_OP(DepthToSpace, ngraph::op::v0) -NGRAPH_OP(DetectionOutput, ngraph::op::v0) -NGRAPH_OP(Divide, ngraph::op::v1) -NGRAPH_OP(Elu, ngraph::op::v0) 
-NGRAPH_OP(Erf, ngraph::op::v0) -NGRAPH_OP(Equal, ngraph::op::v1) -NGRAPH_OP(Exp, ngraph::op::v0) -NGRAPH_OP(ExtractImagePatches, ngraph::op::v3) -NGRAPH_OP(FakeQuantize, ngraph::op::v0) -NGRAPH_OP(Floor, ngraph::op::v0) -NGRAPH_OP(FloorMod, ngraph::op::v1) -NGRAPH_OP(Gather, ngraph::op::v7) -NGRAPH_OP(GatherTree, ngraph::op::v1) -NGRAPH_OP(Greater, ngraph::op::v1) -NGRAPH_OP(GreaterEqual, ngraph::op::v1) -NGRAPH_OP(GroupConvolution, ngraph::op::v1) -NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(GRN, ngraph::op::v0) -NGRAPH_OP(HardSigmoid, ngraph::op::v0) -NGRAPH_OP(Less, ngraph::op::v1) -NGRAPH_OP(LessEqual, ngraph::op::v1) -NGRAPH_OP(Log, ngraph::op::v0) -NGRAPH_OP(LogicalAnd, ngraph::op::v1) -NGRAPH_OP(LogicalNot, ngraph::op::v1) -NGRAPH_OP(LogicalOr, ngraph::op::v1) -NGRAPH_OP(LogicalXor, ngraph::op::v1) -NGRAPH_OP(LRN, ngraph::op::v0) -NGRAPH_OP(LSTMCell, ngraph::op::v4) -NGRAPH_OP(MatMul, ngraph::op::v0) -NGRAPH_OP(MaxPool, ngraph::op::v1) -NGRAPH_OP(Maximum, ngraph::op::v1) -NGRAPH_OP(Minimum, ngraph::op::v1) -NGRAPH_OP(Mod, ngraph::op::v1) -NGRAPH_OP(Multiply, ngraph::op::v1) -NGRAPH_OP(Negative, ngraph::op::v0) -NGRAPH_OP(NormalizeL2, ngraph::op::v0) -NGRAPH_OP(NotEqual, ngraph::op::v1) -NGRAPH_OP(OneHot, ngraph::op::v1) -NGRAPH_OP(PRelu, ngraph::op::v0) -NGRAPH_OP(PSROIPooling, ngraph::op::v0) -NGRAPH_OP(Pad, ngraph::op::v1) -NGRAPH_OP(Parameter, ngraph::op::v0) -NGRAPH_OP(Power, ngraph::op::v1) -NGRAPH_OP(PriorBox, ngraph::op::v0) -NGRAPH_OP(PriorBoxClustered, ngraph::op::v0) -NGRAPH_OP(Proposal, ngraph::op::v4) -NGRAPH_OP(Range, ngraph::op::v4) -NGRAPH_OP(Relu, ngraph::op::v0) -NGRAPH_OP(ReduceMax, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalAnd, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalOr, ngraph::op::v1) -NGRAPH_OP(ReduceMean, ngraph::op::v1) -NGRAPH_OP(ReduceMin, ngraph::op::v1) -NGRAPH_OP(ReduceProd, ngraph::op::v1) -NGRAPH_OP(ReduceSum, ngraph::op::v1) -NGRAPH_OP(RegionYolo, ngraph::op::v0) -NGRAPH_OP(ReorgYolo, ngraph::op::v0) 
-NGRAPH_OP(Reshape, ngraph::op::v1) -NGRAPH_OP(Result, ngraph::op::v0) -NGRAPH_OP(ReverseSequence, ngraph::op::v0) -NGRAPH_OP(ROIPooling, ngraph::op::v0) -NGRAPH_OP(ScatterNDUpdate, ngraph::op::v3) -NGRAPH_OP(Select, ngraph::op::v1) -NGRAPH_OP(Selu, ngraph::op::v0) -NGRAPH_OP(Sign, ngraph::op::v0) -NGRAPH_OP(Sigmoid, ngraph::op::v0) -NGRAPH_OP(Sin, ngraph::op::v0) -NGRAPH_OP(Sinh, ngraph::op::v0) -NGRAPH_OP(Softmax, ngraph::op::v1) -NGRAPH_OP(Sqrt, ngraph::op::v0) -NGRAPH_OP(SpaceToDepth, ngraph::op::v0) -NGRAPH_OP(Split, ngraph::op::v1) -NGRAPH_OP(SquaredDifference, ngraph::op::v0) -NGRAPH_OP(Squeeze, ngraph::op::v0) -NGRAPH_OP(StridedSlice, ngraph::op::v1) -NGRAPH_OP(Subtract, ngraph::op::v1) -NGRAPH_OP(Tan, ngraph::op::v0) -NGRAPH_OP(Tanh, ngraph::op::v0) -NGRAPH_OP(TensorIterator, ngraph::op::v0) -NGRAPH_OP(Tile, ngraph::op::v0) -NGRAPH_OP(Transpose, ngraph::op::v1) -NGRAPH_OP(Unsqueeze, ngraph::op::v0) -NGRAPH_OP(VariadicSplit, ngraph::op::v1) - -// New operations added in opset2 -NGRAPH_OP(BatchToSpace, ngraph::op::v1) -NGRAPH_OP(SpaceToBatch, ngraph::op::v1) - -// New operations added in opset3 -NGRAPH_OP(EmbeddingBagPackedSum, ngraph::op::v3) -NGRAPH_OP(EmbeddingSegmentsSum, ngraph::op::v3) -NGRAPH_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) -NGRAPH_OP(GRUCell, ngraph::op::v3) -NGRAPH_OP(NonZero, ngraph::op::v3) -NGRAPH_OP(RNNCell, ngraph::op::v0) -NGRAPH_OP(ROIAlign, ngraph::op::v3) -NGRAPH_OP(ScatterElementsUpdate, ngraph::op::v3) -NGRAPH_OP(ScatterUpdate, ngraph::op::v3) -NGRAPH_OP(ShuffleChannels, ngraph::op::v0) -NGRAPH_OP(ShapeOf, ngraph::op::v3) -NGRAPH_OP(TopK, ngraph::op::v3) - -// New operations added in opset4 -NGRAPH_OP(Acosh, ngraph::op::v3) -NGRAPH_OP(Asinh, ngraph::op::v3) -NGRAPH_OP(Atanh, ngraph::op::v3) -NGRAPH_OP(CTCLoss, ngraph::op::v4) -NGRAPH_OP(HSwish, ngraph::op::v4) -NGRAPH_OP(Interpolate, ngraph::op::v4) -NGRAPH_OP(Mish, ngraph::op::v4) -NGRAPH_OP(ReduceL1, ngraph::op::v4) -NGRAPH_OP(ReduceL2, ngraph::op::v4) -NGRAPH_OP(SoftPlus, 
ngraph::op::v4) -NGRAPH_OP(Swish, ngraph::op::v4) - -// New operations added in opset5 -NGRAPH_OP(GatherND, ngraph::op::v5) -NGRAPH_OP(GRUSequence, ngraph::op::v5) -NGRAPH_OP(HSigmoid, ngraph::op::v5) -NGRAPH_OP(LogSoftmax, ngraph::op::v5) -NGRAPH_OP(Loop, ngraph::op::v5) -NGRAPH_OP(LSTMSequence, ngraph::op::v5) -NGRAPH_OP(NonMaxSuppression, ngraph::op::v5) -NGRAPH_OP(RNNSequence, ngraph::op::v5) -NGRAPH_OP(Round, ngraph::op::v5) - -// New operations added in opset6 -NGRAPH_OP(CTCGreedyDecoderSeqLen, ngraph::op::v6) -NGRAPH_OP(ExperimentalDetectronDetectionOutput, ngraph::op::v6) -NGRAPH_OP(ExperimentalDetectronGenerateProposalsSingleImage, ngraph::op::v6) -NGRAPH_OP(ExperimentalDetectronPriorGridGenerator, ngraph::op::v6) -NGRAPH_OP(ExperimentalDetectronROIFeatureExtractor, ngraph::op::v6) -NGRAPH_OP(ExperimentalDetectronTopKROIs, ngraph::op::v6) -NGRAPH_OP(GatherElements, ngraph::op::v6) -NGRAPH_OP(MVN, ngraph::op::v6) -NGRAPH_OP(Assign, ngraph::op::v6) // new version -NGRAPH_OP(ReadValue, ngraph::op::v6) // new version - -// New operations added in opset7 -NGRAPH_OP(DFT, ngraph::op::v7) -NGRAPH_OP(Einsum, ngraph::op::v7) -NGRAPH_OP(Gelu, ngraph::op::v7) -NGRAPH_OP(IDFT, ngraph::op::v7) -NGRAPH_OP(Roll, ngraph::op::v7) +#define OPENVINO_OP NGRAPH_OP +#include "openvino/opsets/opset7_tbl.hpp" +#undef OPENVINO_OP diff --git a/ngraph/core/include/ngraph/opsets/opset8_tbl.hpp b/ngraph/core/include/ngraph/opsets/opset8_tbl.hpp index faf3d65593a..a57cb4318da 100644 --- a/ngraph/core/include/ngraph/opsets/opset8_tbl.hpp +++ b/ngraph/core/include/ngraph/opsets/opset8_tbl.hpp @@ -7,179 +7,6 @@ # define NGRAPH_OP(x, y) #endif -NGRAPH_OP(Abs, ngraph::op::v0) -NGRAPH_OP(Acos, ngraph::op::v0) -NGRAPH_OP(Add, ngraph::op::v1) -NGRAPH_OP(Asin, ngraph::op::v0) -NGRAPH_OP(Atan, ngraph::op::v0) -NGRAPH_OP(AvgPool, ngraph::op::v1) -NGRAPH_OP(BatchNormInference, ngraph::op::v5) -NGRAPH_OP(BinaryConvolution, ngraph::op::v1) -NGRAPH_OP(Broadcast, ngraph::op::v3) -NGRAPH_OP(Bucketize, 
ngraph::op::v3) -NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0) -NGRAPH_OP(Ceiling, ngraph::op::v0) -NGRAPH_OP(Clamp, ngraph::op::v0) -NGRAPH_OP(Concat, ngraph::op::v0) -NGRAPH_OP(Constant, ngraph::op) -NGRAPH_OP(Convert, ngraph::op::v0) -NGRAPH_OP(ConvertLike, ngraph::op::v1) -NGRAPH_OP(Convolution, ngraph::op::v1) -NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(Cos, ngraph::op::v0) -NGRAPH_OP(Cosh, ngraph::op::v0) -NGRAPH_OP(CumSum, ngraph::op::v0) -NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1) -NGRAPH_OP(DepthToSpace, ngraph::op::v0) -NGRAPH_OP(DetectionOutput, ngraph::op::v0) -NGRAPH_OP(Divide, ngraph::op::v1) -NGRAPH_OP(Elu, ngraph::op::v0) -NGRAPH_OP(Erf, ngraph::op::v0) -NGRAPH_OP(Equal, ngraph::op::v1) -NGRAPH_OP(Exp, ngraph::op::v0) -NGRAPH_OP(ExtractImagePatches, ngraph::op::v3) -NGRAPH_OP(FakeQuantize, ngraph::op::v0) -NGRAPH_OP(Floor, ngraph::op::v0) -NGRAPH_OP(FloorMod, ngraph::op::v1) -NGRAPH_OP(GatherTree, ngraph::op::v1) -NGRAPH_OP(Greater, ngraph::op::v1) -NGRAPH_OP(GreaterEqual, ngraph::op::v1) -NGRAPH_OP(GroupConvolution, ngraph::op::v1) -NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v1) -NGRAPH_OP(GRN, ngraph::op::v0) -NGRAPH_OP(HardSigmoid, ngraph::op::v0) -NGRAPH_OP(Less, ngraph::op::v1) -NGRAPH_OP(LessEqual, ngraph::op::v1) -NGRAPH_OP(Log, ngraph::op::v0) -NGRAPH_OP(LogicalAnd, ngraph::op::v1) -NGRAPH_OP(LogicalNot, ngraph::op::v1) -NGRAPH_OP(LogicalOr, ngraph::op::v1) -NGRAPH_OP(LogicalXor, ngraph::op::v1) -NGRAPH_OP(LRN, ngraph::op::v0) -NGRAPH_OP(LSTMCell, ngraph::op::v4) -NGRAPH_OP(MatMul, ngraph::op::v0) -NGRAPH_OP(Maximum, ngraph::op::v1) -NGRAPH_OP(Minimum, ngraph::op::v1) -NGRAPH_OP(Mod, ngraph::op::v1) -NGRAPH_OP(Multiply, ngraph::op::v1) -NGRAPH_OP(Negative, ngraph::op::v0) -NGRAPH_OP(NormalizeL2, ngraph::op::v0) -NGRAPH_OP(NotEqual, ngraph::op::v1) -NGRAPH_OP(OneHot, ngraph::op::v1) -NGRAPH_OP(PRelu, ngraph::op::v0) -NGRAPH_OP(PSROIPooling, ngraph::op::v0) -NGRAPH_OP(Pad, ngraph::op::v1) -NGRAPH_OP(Parameter, 
ngraph::op::v0) -NGRAPH_OP(Power, ngraph::op::v1) -NGRAPH_OP(PriorBox, ngraph::op::v0) -NGRAPH_OP(PriorBoxClustered, ngraph::op::v0) -NGRAPH_OP(Proposal, ngraph::op::v4) -NGRAPH_OP(Range, ngraph::op::v4) -NGRAPH_OP(Relu, ngraph::op::v0) -NGRAPH_OP(ReduceMax, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalAnd, ngraph::op::v1) -NGRAPH_OP(ReduceLogicalOr, ngraph::op::v1) -NGRAPH_OP(ReduceMean, ngraph::op::v1) -NGRAPH_OP(ReduceMin, ngraph::op::v1) -NGRAPH_OP(ReduceProd, ngraph::op::v1) -NGRAPH_OP(ReduceSum, ngraph::op::v1) -NGRAPH_OP(RegionYolo, ngraph::op::v0) -NGRAPH_OP(ReorgYolo, ngraph::op::v0) -NGRAPH_OP(Reshape, ngraph::op::v1) -NGRAPH_OP(Result, ngraph::op::v0) -NGRAPH_OP(ReverseSequence, ngraph::op::v0) -NGRAPH_OP(ROIPooling, ngraph::op::v0) -NGRAPH_OP(ScatterNDUpdate, ngraph::op::v3) -NGRAPH_OP(Select, ngraph::op::v1) -NGRAPH_OP(Selu, ngraph::op::v0) -NGRAPH_OP(Sign, ngraph::op::v0) -NGRAPH_OP(Sigmoid, ngraph::op::v0) -NGRAPH_OP(Sin, ngraph::op::v0) -NGRAPH_OP(Sinh, ngraph::op::v0) -NGRAPH_OP(Softmax, ngraph::op::v1) -NGRAPH_OP(Sqrt, ngraph::op::v0) -NGRAPH_OP(SpaceToDepth, ngraph::op::v0) -NGRAPH_OP(Split, ngraph::op::v1) -NGRAPH_OP(SquaredDifference, ngraph::op::v0) -NGRAPH_OP(Squeeze, ngraph::op::v0) -NGRAPH_OP(StridedSlice, ngraph::op::v1) -NGRAPH_OP(Subtract, ngraph::op::v1) -NGRAPH_OP(Tan, ngraph::op::v0) -NGRAPH_OP(Tanh, ngraph::op::v0) -NGRAPH_OP(TensorIterator, ngraph::op::v0) -NGRAPH_OP(Tile, ngraph::op::v0) -NGRAPH_OP(Transpose, ngraph::op::v1) -NGRAPH_OP(Unsqueeze, ngraph::op::v0) -NGRAPH_OP(VariadicSplit, ngraph::op::v1) - -// New operations added in opset2 -NGRAPH_OP(BatchToSpace, ngraph::op::v1) -NGRAPH_OP(SpaceToBatch, ngraph::op::v1) - -// New operations added in opset3 -NGRAPH_OP(EmbeddingBagPackedSum, ngraph::op::v3) -NGRAPH_OP(EmbeddingSegmentsSum, ngraph::op::v3) -NGRAPH_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) -NGRAPH_OP(GRUCell, ngraph::op::v3) -NGRAPH_OP(NonZero, ngraph::op::v3) -NGRAPH_OP(RNNCell, ngraph::op::v0) -NGRAPH_OP(ROIAlign, 
ngraph::op::v3) -NGRAPH_OP(ScatterElementsUpdate, ngraph::op::v3) -NGRAPH_OP(ScatterUpdate, ngraph::op::v3) -NGRAPH_OP(ShuffleChannels, ngraph::op::v0) -NGRAPH_OP(ShapeOf, ngraph::op::v3) -NGRAPH_OP(TopK, ngraph::op::v3) - -// New operations added in opset4 -NGRAPH_OP(Acosh, ngraph::op::v3) -NGRAPH_OP(Asinh, ngraph::op::v3) -NGRAPH_OP(Atanh, ngraph::op::v3) -NGRAPH_OP(CTCLoss, ngraph::op::v4) -NGRAPH_OP(HSwish, ngraph::op::v4) -NGRAPH_OP(Interpolate, ngraph::op::v4) -NGRAPH_OP(Mish, ngraph::op::v4) -NGRAPH_OP(ReduceL1, ngraph::op::v4) -NGRAPH_OP(ReduceL2, ngraph::op::v4) -NGRAPH_OP(SoftPlus, ngraph::op::v4) -NGRAPH_OP(Swish, ngraph::op::v4) - -// New operations added in opset5 -NGRAPH_OP(GatherND, ngraph::op::v5) -NGRAPH_OP(GRUSequence, ngraph::op::v5) -NGRAPH_OP(HSigmoid, ngraph::op::v5) -NGRAPH_OP(LogSoftmax, ngraph::op::v5) -NGRAPH_OP(Loop, ngraph::op::v5) -NGRAPH_OP(LSTMSequence, ngraph::op::v5) -NGRAPH_OP(NonMaxSuppression, ngraph::op::v5) -NGRAPH_OP(RNNSequence, ngraph::op::v5) -NGRAPH_OP(Round, ngraph::op::v5) - -// New operations added in opset6 -NGRAPH_OP(CTCGreedyDecoderSeqLen, ngraph::op::v6) -NGRAPH_OP(ExperimentalDetectronDetectionOutput, ngraph::op::v6) -NGRAPH_OP(ExperimentalDetectronGenerateProposalsSingleImage, ngraph::op::v6) -NGRAPH_OP(ExperimentalDetectronPriorGridGenerator, ngraph::op::v6) -NGRAPH_OP(ExperimentalDetectronROIFeatureExtractor, ngraph::op::v6) -NGRAPH_OP(ExperimentalDetectronTopKROIs, ngraph::op::v6) -NGRAPH_OP(GatherElements, ngraph::op::v6) -NGRAPH_OP(MVN, ngraph::op::v6) -NGRAPH_OP(Assign, ngraph::op::v6) // new version -NGRAPH_OP(ReadValue, ngraph::op::v6) // new version - -// New operations added in opset7 -NGRAPH_OP(DFT, ngraph::op::v7) -NGRAPH_OP(Einsum, ngraph::op::v7) -NGRAPH_OP(Gelu, ngraph::op::v7) -NGRAPH_OP(IDFT, ngraph::op::v7) -NGRAPH_OP(Roll, ngraph::op::v7) - -// New operations added in opset8 -NGRAPH_OP(Gather, ngraph::op::v8) -NGRAPH_OP(AdaptiveAvgPool, ngraph::op::v8) -NGRAPH_OP(AdaptiveMaxPool, ngraph::op::v8) 
-NGRAPH_OP(DeformableConvolution, ngraph::op::v8) -NGRAPH_OP(MatrixNms, ngraph::op::v8) -NGRAPH_OP(MaxPool, ngraph::op::v8) -NGRAPH_OP(MulticlassNms, ngraph::op::v8) -NGRAPH_OP(RandomUniform, ngraph::op::v8) -NGRAPH_OP(If, ngraph::op::v8) +#define OPENVINO_OP NGRAPH_OP +#include "openvino/opsets/opset8_tbl.hpp" +#undef OPENVINO_OP diff --git a/ngraph/core/include/openvino/op/ops.hpp b/ngraph/core/include/openvino/op/ops.hpp new file mode 100644 index 00000000000..773986c918a --- /dev/null +++ b/ngraph/core/include/openvino/op/ops.hpp @@ -0,0 +1,171 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +// All OpenVINO Operation Headers + +#pragma once + +#include "openvino/op/abs.hpp" +#include "openvino/op/acos.hpp" +#include "openvino/op/acosh.hpp" +#include "openvino/op/adaptive_avg_pool.hpp" +#include "openvino/op/adaptive_max_pool.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/asin.hpp" +#include "openvino/op/asinh.hpp" +#include "openvino/op/assign.hpp" +#include "openvino/op/atan.hpp" +#include "openvino/op/atanh.hpp" +#include "openvino/op/avg_pool.hpp" +#include "openvino/op/batch_norm.hpp" +#include "openvino/op/batch_to_space.hpp" +#include "openvino/op/binary_convolution.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/bucketize.hpp" +#include "openvino/op/ceiling.hpp" +#include "openvino/op/clamp.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/convert_like.hpp" +#include "openvino/op/convolution.hpp" +#include "openvino/op/cos.hpp" +#include "openvino/op/cosh.hpp" +#include "openvino/op/ctc_greedy_decoder.hpp" +#include "openvino/op/ctc_greedy_decoder_seq_len.hpp" +#include "openvino/op/ctc_loss.hpp" +#include "openvino/op/cum_sum.hpp" +#include "openvino/op/deformable_convolution.hpp" +#include "openvino/op/deformable_psroi_pooling.hpp" +#include "openvino/op/depth_to_space.hpp" +#include 
"openvino/op/detection_output.hpp" +#include "openvino/op/dft.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/einsum.hpp" +#include "openvino/op/elu.hpp" +#include "openvino/op/embedding_segments_sum.hpp" +#include "openvino/op/embeddingbag_offsets_sum.hpp" +#include "openvino/op/embeddingbag_packedsum.hpp" +#include "openvino/op/equal.hpp" +#include "openvino/op/erf.hpp" +#include "openvino/op/exp.hpp" +#include "openvino/op/experimental_detectron_detection_output.hpp" +#include "openvino/op/experimental_detectron_generate_proposals.hpp" +#include "openvino/op/experimental_detectron_prior_grid_generator.hpp" +#include "openvino/op/experimental_detectron_roi_feature.hpp" +#include "openvino/op/experimental_detectron_topkrois.hpp" +#include "openvino/op/extractimagepatches.hpp" +#include "openvino/op/fake_quantize.hpp" +#include "openvino/op/floor.hpp" +#include "openvino/op/floor_mod.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/gather_elements.hpp" +#include "openvino/op/gather_nd.hpp" +#include "openvino/op/gather_tree.hpp" +#include "openvino/op/gelu.hpp" +#include "openvino/op/greater.hpp" +#include "openvino/op/greater_eq.hpp" +#include "openvino/op/grn.hpp" +#include "openvino/op/group_conv.hpp" +#include "openvino/op/gru_cell.hpp" +#include "openvino/op/gru_sequence.hpp" +#include "openvino/op/hard_sigmoid.hpp" +#include "openvino/op/hsigmoid.hpp" +#include "openvino/op/hswish.hpp" +#include "openvino/op/idft.hpp" +#include "openvino/op/if.hpp" +#include "openvino/op/interpolate.hpp" +#include "openvino/op/less.hpp" +#include "openvino/op/less_eq.hpp" +#include "openvino/op/log.hpp" +#include "openvino/op/log_softmax.hpp" +#include "openvino/op/logical_and.hpp" +#include "openvino/op/logical_not.hpp" +#include "openvino/op/logical_or.hpp" +#include "openvino/op/loop.hpp" +#include "openvino/op/lrn.hpp" +#include "openvino/op/lstm_cell.hpp" +#include "openvino/op/lstm_sequence.hpp" +#include "openvino/op/matmul.hpp" +#include 
"openvino/op/matrix_nms.hpp" +#include "openvino/op/max.hpp" +#include "openvino/op/max_pool.hpp" +#include "openvino/op/maximum.hpp" +#include "openvino/op/minimum.hpp" +#include "openvino/op/mish.hpp" +#include "openvino/op/mod.hpp" +#include "openvino/op/multiclass_nms.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/mvn.hpp" +#include "openvino/op/negative.hpp" +#include "openvino/op/non_max_suppression.hpp" +#include "openvino/op/non_zero.hpp" +#include "openvino/op/normalize_l2.hpp" +#include "openvino/op/not_equal.hpp" +#include "openvino/op/one_hot.hpp" +#include "openvino/op/pad.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/power.hpp" +#include "openvino/op/prelu.hpp" +#include "openvino/op/prior_box.hpp" +#include "openvino/op/prior_box_clustered.hpp" +#include "openvino/op/proposal.hpp" +#include "openvino/op/psroi_pooling.hpp" +#include "openvino/op/random_uniform.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/read_value.hpp" +#include "openvino/op/reduce_l1.hpp" +#include "openvino/op/reduce_l2.hpp" +#include "openvino/op/reduce_logical_and.hpp" +#include "openvino/op/reduce_logical_or.hpp" +#include "openvino/op/reduce_mean.hpp" +#include "openvino/op/reduce_min.hpp" +#include "openvino/op/reduce_prod.hpp" +#include "openvino/op/reduce_sum.hpp" +#include "openvino/op/region_yolo.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/reorg_yolo.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/reverse.hpp" +#include "openvino/op/reverse_sequence.hpp" +#include "openvino/op/rnn_cell.hpp" +#include "openvino/op/rnn_sequence.hpp" +#include "openvino/op/roi_align.hpp" +#include "openvino/op/roi_pooling.hpp" +#include "openvino/op/roll.hpp" +#include "openvino/op/round.hpp" +#include "openvino/op/scatter_elements_update.hpp" +#include "openvino/op/scatter_nd_update.hpp" +#include "openvino/op/scatter_update.hpp" +#include "openvino/op/select.hpp" 
+#include "openvino/op/selu.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/shuffle_channels.hpp" +#include "openvino/op/sigmoid.hpp" +#include "openvino/op/sign.hpp" +#include "openvino/op/sin.hpp" +#include "openvino/op/sinh.hpp" +#include "openvino/op/softmax.hpp" +#include "openvino/op/softplus.hpp" +#include "openvino/op/space_to_batch.hpp" +#include "openvino/op/space_to_depth.hpp" +#include "openvino/op/split.hpp" +#include "openvino/op/sqrt.hpp" +#include "openvino/op/squared_difference.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/strided_slice.hpp" +#include "openvino/op/subtract.hpp" +#include "openvino/op/swish.hpp" +#include "openvino/op/tan.hpp" +#include "openvino/op/tanh.hpp" +#include "openvino/op/tensor_iterator.hpp" +#include "openvino/op/tile.hpp" +#include "openvino/op/topk.hpp" +#include "openvino/op/transpose.hpp" +#include "openvino/op/unsqueeze.hpp" +#include "openvino/op/util/attr_types.hpp" +#include "openvino/op/util/op_types.hpp" +#include "openvino/op/variadic_split.hpp" +#include "openvino/op/xor.hpp" diff --git a/ngraph/core/include/openvino/opsets/opset.hpp b/ngraph/core/include/openvino/opsets/opset.hpp new file mode 100644 index 00000000000..8503d815313 --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset.hpp @@ -0,0 +1,119 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include + +#include "ngraph/factory.hpp" +#include "openvino/core/node.hpp" + +namespace ov { +/// \brief Run-time opset information +class OPENVINO_API OpSet { + static std::mutex& get_mutex(); + +public: + OpSet() = default; + virtual ~OpSet() = default; + std::set::size_type size() const { + std::lock_guard guard(get_mutex()); + return m_op_types.size(); + } + + /// \brief Insert OP_TYPE into the opset with a special name and the default factory + template + void insert(const std::string& name) { + insert(name, 
OP_TYPE::type_info, ngraph::FactoryRegistry::get_default_factory()); + } + + /// \brief Insert OP_TYPE into the opset with the default name and factory + template + void insert() { + insert(OP_TYPE::type_info.name); + } + + const std::set& get_types_info() const { + return m_op_types; + } + /// \brief Create the op named name using it's factory + ov::Node* create(const std::string& name) const; + + /// \brief Create the op named name using it's factory + ov::Node* create_insensitive(const std::string& name) const; + + /// \brief Return true if OP_TYPE is in the opset + bool contains_type(const NodeTypeInfo& type_info) const { + std::lock_guard guard(get_mutex()); + return m_op_types.find(type_info) != m_op_types.end(); + } + + /// \brief Return true if OP_TYPE is in the opset + template + bool contains_type() const { + return contains_type(OP_TYPE::type_info); + } + + /// \brief Return true if name is in the opset + bool contains_type(const std::string& name) const { + std::lock_guard guard(get_mutex()); + return m_name_type_info_map.find(name) != m_name_type_info_map.end(); + } + + /// \brief Return true if name is in the opset + bool contains_type_insensitive(const std::string& name) const { + std::lock_guard guard(get_mutex()); + return m_case_insensitive_type_info_map.find(to_upper_name(name)) != m_case_insensitive_type_info_map.end(); + } + + /// \brief Return true if node's type is in the opset + bool contains_op_type(const Node* node) const { + std::lock_guard guard(get_mutex()); + return m_op_types.find(node->get_type_info()) != m_op_types.end(); + } + + const std::set& get_type_info_set() const { + return m_op_types; + } + +protected: + static std::string to_upper_name(const std::string& name) { + std::string upper_name = name; + std::locale loc; + std::transform(upper_name.begin(), upper_name.end(), upper_name.begin(), [&loc](char c) { + return std::toupper(c, loc); + }); + return upper_name; + } + + ngraph::FactoryRegistry m_factory_registry; + std::set 
m_op_types; + std::map m_name_type_info_map; + std::map m_case_insensitive_type_info_map; + + /// \brief Insert an op into the opset with a particular name and factory + void insert(const std::string& name, + const NodeTypeInfo& type_info, + ngraph::FactoryRegistry::Factory factory) { + std::lock_guard guard(get_mutex()); + m_op_types.insert(type_info); + m_name_type_info_map[name] = type_info; + m_case_insensitive_type_info_map[to_upper_name(name)] = type_info; + m_factory_registry.register_factory(type_info, std::move(factory)); + } +}; + +const OPENVINO_API OpSet& get_opset1(); +const OPENVINO_API OpSet& get_opset2(); +const OPENVINO_API OpSet& get_opset3(); +const OPENVINO_API OpSet& get_opset4(); +const OPENVINO_API OpSet& get_opset5(); +const OPENVINO_API OpSet& get_opset6(); +const OPENVINO_API OpSet& get_opset7(); +const OPENVINO_API OpSet& get_opset8(); +} // namespace ov diff --git a/ngraph/core/include/openvino/opsets/opset1.hpp b/ngraph/core/include/openvino/opsets/opset1.hpp new file mode 100644 index 00000000000..47eb6effa88 --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset1.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/ops.hpp" + +namespace ov { +namespace opset1 { +#define OPENVINO_OP(a, b) using b::a; +#include "openvino/opsets/opset1_tbl.hpp" +#undef OPENVINO_OP +} // namespace opset1 +} // namespace ov diff --git a/ngraph/core/include/openvino/opsets/opset1_tbl.hpp b/ngraph/core/include/openvino/opsets/opset1_tbl.hpp new file mode 100644 index 00000000000..4bc2bfd7f60 --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset1_tbl.hpp @@ -0,0 +1,150 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +// This collection contains one entry for each op. If an op is added it must be +// added to this list. 
+// +// In order to use this list you want to define a macro named exactly OPENVINO_OP +// When you are done you should undef the macro +// As an example if you wanted to make a list of all op names as strings you could do this: +// +// #define OPENVINO_OP(a,b) #a, +// std::vector op_names{ +// #include "this include file name" +// }; +// #undef OPENVINO_OP +// +// This sample expands to a list like this: +// "Abs", +// "Acos", +// ... +// +// #define OPENVINO_OP(a,b) b::a, +// std::vector op_names{ +// #include "this include file name" +// }; +// #undef OPENVINO_OP +// +// This sample expands to a list like this: +// ngraph::op::Abs, +// ngraph::op::Acos, +// ... +// +// It's that easy. You can use this for fun and profit. + +#ifndef OPENVINO_OP +# warning "OPENVINO_OP not defined" +# define OPENVINO_OP(x, y) +#endif + +OPENVINO_OP(Abs, ngraph::op::v0) +OPENVINO_OP(Acos, ngraph::op::v0) +OPENVINO_OP(Add, ngraph::op::v1) +OPENVINO_OP(Asin, ngraph::op::v0) +OPENVINO_OP(Atan, ngraph::op::v0) +OPENVINO_OP(AvgPool, ngraph::op::v1) +OPENVINO_OP(BatchNormInference, ngraph::op::v0) +OPENVINO_OP(BinaryConvolution, ngraph::op::v1) +OPENVINO_OP(Broadcast, ngraph::op::v1) +OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) +OPENVINO_OP(Ceiling, ngraph::op::v0) +OPENVINO_OP(Clamp, ngraph::op::v0) +OPENVINO_OP(Concat, ngraph::op::v0) +OPENVINO_OP(Constant, ngraph::op) +OPENVINO_OP(Convert, ngraph::op::v0) +OPENVINO_OP(ConvertLike, ngraph::op::v1) +OPENVINO_OP(Convolution, ngraph::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(Cos, ngraph::op::v0) +OPENVINO_OP(Cosh, ngraph::op::v0) +OPENVINO_OP(DeformableConvolution, ngraph::op::v1) +OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) +OPENVINO_OP(DepthToSpace, ngraph::op::v0) +OPENVINO_OP(DetectionOutput, ngraph::op::v0) +OPENVINO_OP(Divide, ngraph::op::v1) +OPENVINO_OP(Elu, ngraph::op::v0) +OPENVINO_OP(Erf, ngraph::op::v0) +OPENVINO_OP(Equal, ngraph::op::v1) +OPENVINO_OP(Exp, ngraph::op::v0) 
+OPENVINO_OP(FakeQuantize, ngraph::op::v0) +OPENVINO_OP(Floor, ngraph::op::v0) +OPENVINO_OP(FloorMod, ngraph::op::v1) +OPENVINO_OP(Gather, ngraph::op::v1) +OPENVINO_OP(GatherTree, ngraph::op::v1) +OPENVINO_OP(Greater, ngraph::op::v1) +OPENVINO_OP(GreaterEqual, ngraph::op::v1) +OPENVINO_OP(GroupConvolution, ngraph::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(GRN, ngraph::op::v0) +OPENVINO_OP(HardSigmoid, ngraph::op::v0) +OPENVINO_OP(Interpolate, ngraph::op::v0) +OPENVINO_OP(Less, ngraph::op::v1) +OPENVINO_OP(LessEqual, ngraph::op::v1) +OPENVINO_OP(Log, ngraph::op::v0) +OPENVINO_OP(LogicalAnd, ngraph::op::v1) +OPENVINO_OP(LogicalNot, ngraph::op::v1) +OPENVINO_OP(LogicalOr, ngraph::op::v1) +OPENVINO_OP(LogicalXor, ngraph::op::v1) +OPENVINO_OP(LRN, ngraph::op::v0) +OPENVINO_OP(LSTMCell, ngraph::op::v0) +OPENVINO_OP(LSTMSequence, ngraph::op::v0) +OPENVINO_OP(MatMul, ngraph::op::v0) +OPENVINO_OP(MaxPool, ngraph::op::v1) +OPENVINO_OP(Maximum, ngraph::op::v1) +OPENVINO_OP(Minimum, ngraph::op::v1) +OPENVINO_OP(Mod, ngraph::op::v1) +OPENVINO_OP(Multiply, ngraph::op::v1) +OPENVINO_OP(Negative, ngraph::op::v0) +OPENVINO_OP(NonMaxSuppression, ngraph::op::v1) +OPENVINO_OP(NormalizeL2, ngraph::op::v0) +OPENVINO_OP(NotEqual, ngraph::op::v1) +OPENVINO_OP(OneHot, ngraph::op::v1) +OPENVINO_OP(PRelu, ngraph::op::v0) +OPENVINO_OP(PSROIPooling, ngraph::op::v0) +OPENVINO_OP(Pad, ngraph::op::v1) +OPENVINO_OP(Parameter, ngraph::op::v0) +OPENVINO_OP(Power, ngraph::op::v1) +OPENVINO_OP(PriorBox, ngraph::op::v0) +OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) +OPENVINO_OP(Proposal, ngraph::op::v0) +OPENVINO_OP(Range, ngraph::op::v0) +OPENVINO_OP(Relu, ngraph::op::v0) +OPENVINO_OP(ReduceMax, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) +OPENVINO_OP(ReduceMean, ngraph::op::v1) +OPENVINO_OP(ReduceMin, ngraph::op::v1) +OPENVINO_OP(ReduceProd, ngraph::op::v1) +OPENVINO_OP(ReduceSum, ngraph::op::v1) 
+OPENVINO_OP(RegionYolo, ngraph::op::v0) +OPENVINO_OP(Reshape, ngraph::op::v1) +OPENVINO_OP(Result, ngraph::op::v0) +OPENVINO_OP(Reverse, ngraph::op::v1) +OPENVINO_OP(ReverseSequence, ngraph::op::v0) +OPENVINO_OP(RNNCell, ngraph::op::v0) +OPENVINO_OP(Select, ngraph::op::v1) +OPENVINO_OP(Selu, ngraph::op::v0) +OPENVINO_OP(ShapeOf, ngraph::op::v0) +OPENVINO_OP(ShuffleChannels, ngraph::op::v0) +OPENVINO_OP(Sign, ngraph::op::v0) +OPENVINO_OP(Sigmoid, ngraph::op::v0) +OPENVINO_OP(Sin, ngraph::op::v0) +OPENVINO_OP(Sinh, ngraph::op::v0) +OPENVINO_OP(Softmax, ngraph::op::v1) +OPENVINO_OP(Sqrt, ngraph::op::v0) +OPENVINO_OP(SpaceToDepth, ngraph::op::v0) +OPENVINO_OP(Split, ngraph::op::v1) +OPENVINO_OP(SquaredDifference, ngraph::op::v0) +OPENVINO_OP(Squeeze, ngraph::op::v0) +OPENVINO_OP(StridedSlice, ngraph::op::v1) +OPENVINO_OP(Subtract, ngraph::op::v1) +OPENVINO_OP(Tan, ngraph::op::v0) +OPENVINO_OP(Tanh, ngraph::op::v0) +OPENVINO_OP(TensorIterator, ngraph::op::v0) +OPENVINO_OP(Tile, ngraph::op::v0) +OPENVINO_OP(TopK, ngraph::op::v1) +OPENVINO_OP(Transpose, ngraph::op::v1) +OPENVINO_OP(Unsqueeze, ngraph::op::v0) +OPENVINO_OP(VariadicSplit, ngraph::op::v1) +OPENVINO_OP(Xor, ngraph::op::v0) diff --git a/ngraph/core/include/openvino/opsets/opset2.hpp b/ngraph/core/include/openvino/opsets/opset2.hpp new file mode 100644 index 00000000000..24c1b1befe2 --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset2.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/ops.hpp" + +namespace ov { +namespace opset2 { +#define OPENVINO_OP(a, b) using b::a; +#include "openvino/opsets/opset2_tbl.hpp" +#undef OPENVINO_OP +} // namespace opset2 +} // namespace ov diff --git a/ngraph/core/include/openvino/opsets/opset2_tbl.hpp b/ngraph/core/include/openvino/opsets/opset2_tbl.hpp new file mode 100644 index 00000000000..139215d2f56 --- /dev/null +++ 
b/ngraph/core/include/openvino/opsets/opset2_tbl.hpp @@ -0,0 +1,143 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifndef OPENVINO_OP +# warning "OPENVINO_OP not defined" +# define OPENVINO_OP(x, y) +#endif + +OPENVINO_OP(Abs, ngraph::op::v0) +OPENVINO_OP(Acos, ngraph::op::v0) +OPENVINO_OP(Add, ngraph::op::v1) +OPENVINO_OP(Asin, ngraph::op::v0) +OPENVINO_OP(Atan, ngraph::op::v0) +OPENVINO_OP(AvgPool, ngraph::op::v1) +OPENVINO_OP(BatchNormInference, ngraph::op::v0) +OPENVINO_OP(BinaryConvolution, ngraph::op::v1) +OPENVINO_OP(Broadcast, ngraph::op::v1) +OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) +OPENVINO_OP(Ceiling, ngraph::op::v0) +OPENVINO_OP(Clamp, ngraph::op::v0) +OPENVINO_OP(Concat, ngraph::op::v0) +OPENVINO_OP(Constant, ngraph::op) +OPENVINO_OP(Convert, ngraph::op::v0) +OPENVINO_OP(ConvertLike, ngraph::op::v1) +OPENVINO_OP(Convolution, ngraph::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(Cos, ngraph::op::v0) +OPENVINO_OP(Cosh, ngraph::op::v0) +OPENVINO_OP(DeformableConvolution, ngraph::op::v1) +OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) +OPENVINO_OP(DepthToSpace, ngraph::op::v0) +OPENVINO_OP(DetectionOutput, ngraph::op::v0) +OPENVINO_OP(Divide, ngraph::op::v1) +OPENVINO_OP(Elu, ngraph::op::v0) +OPENVINO_OP(Erf, ngraph::op::v0) +OPENVINO_OP(Equal, ngraph::op::v1) +OPENVINO_OP(Exp, ngraph::op::v0) +OPENVINO_OP(FakeQuantize, ngraph::op::v0) +OPENVINO_OP(Floor, ngraph::op::v0) +OPENVINO_OP(FloorMod, ngraph::op::v1) +OPENVINO_OP(Gather, ngraph::op::v1) +OPENVINO_OP(GatherTree, ngraph::op::v1) +OPENVINO_OP(Greater, ngraph::op::v1) +OPENVINO_OP(GreaterEqual, ngraph::op::v1) +OPENVINO_OP(GroupConvolution, ngraph::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(GRN, ngraph::op::v0) +OPENVINO_OP(HardSigmoid, ngraph::op::v0) +OPENVINO_OP(Interpolate, ngraph::op::v0) +OPENVINO_OP(Less, ngraph::op::v1) +OPENVINO_OP(LessEqual, ngraph::op::v1) 
+OPENVINO_OP(Log, ngraph::op::v0) +OPENVINO_OP(LogicalAnd, ngraph::op::v1) +OPENVINO_OP(LogicalNot, ngraph::op::v1) +OPENVINO_OP(LogicalOr, ngraph::op::v1) +OPENVINO_OP(LogicalXor, ngraph::op::v1) +OPENVINO_OP(LRN, ngraph::op::v0) +OPENVINO_OP(LSTMCell, ngraph::op::v0) +OPENVINO_OP(LSTMSequence, ngraph::op::v0) +OPENVINO_OP(MatMul, ngraph::op::v0) +OPENVINO_OP(MaxPool, ngraph::op::v1) +OPENVINO_OP(Maximum, ngraph::op::v1) +OPENVINO_OP(Minimum, ngraph::op::v1) +OPENVINO_OP(Mod, ngraph::op::v1) +OPENVINO_OP(Multiply, ngraph::op::v1) + +OPENVINO_OP(MVN, ngraph::op::v0) // Missing in opset1 + +OPENVINO_OP(Negative, ngraph::op::v0) +OPENVINO_OP(NonMaxSuppression, ngraph::op::v1) +OPENVINO_OP(NormalizeL2, ngraph::op::v0) +OPENVINO_OP(NotEqual, ngraph::op::v1) +OPENVINO_OP(OneHot, ngraph::op::v1) +OPENVINO_OP(PRelu, ngraph::op::v0) +OPENVINO_OP(PSROIPooling, ngraph::op::v0) +OPENVINO_OP(Pad, ngraph::op::v1) +OPENVINO_OP(Parameter, ngraph::op::v0) +OPENVINO_OP(Power, ngraph::op::v1) +OPENVINO_OP(PriorBox, ngraph::op::v0) +OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) +OPENVINO_OP(Proposal, ngraph::op::v0) +OPENVINO_OP(Range, ngraph::op::v0) +OPENVINO_OP(Relu, ngraph::op::v0) +OPENVINO_OP(ReduceMax, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) +OPENVINO_OP(ReduceMean, ngraph::op::v1) +OPENVINO_OP(ReduceMin, ngraph::op::v1) +OPENVINO_OP(ReduceProd, ngraph::op::v1) +OPENVINO_OP(ReduceSum, ngraph::op::v1) +OPENVINO_OP(RegionYolo, ngraph::op::v0) + +OPENVINO_OP(ReorgYolo, ngraph::op::v0) // Missing in opset1 + +OPENVINO_OP(Reshape, ngraph::op::v1) +OPENVINO_OP(Result, ngraph::op::v0) + +// Moved out of opset2, it was added to opset1 by mistake +// OPENVINO_OP(Reverse, ngraph::op::v1) + +OPENVINO_OP(ReverseSequence, ngraph::op::v0) + +// Moved out of opset2, it was added to opset1 by mistake +// OPENVINO_OP(RNNCell, ngraph::op::v0) + +OPENVINO_OP(ROIPooling, ngraph::op::v0) // Missing in opset1 + 
+OPENVINO_OP(Select, ngraph::op::v1) +OPENVINO_OP(Selu, ngraph::op::v0) +OPENVINO_OP(ShapeOf, ngraph::op::v0) + +// Moved out of opset2, it was added to opset1 by mistake +// OPENVINO_OP(ShuffleChannels, ngraph::op::v0) + +OPENVINO_OP(Sign, ngraph::op::v0) +OPENVINO_OP(Sigmoid, ngraph::op::v0) +OPENVINO_OP(Sin, ngraph::op::v0) +OPENVINO_OP(Sinh, ngraph::op::v0) +OPENVINO_OP(Softmax, ngraph::op::v1) +OPENVINO_OP(Sqrt, ngraph::op::v0) +OPENVINO_OP(SpaceToDepth, ngraph::op::v0) +OPENVINO_OP(Split, ngraph::op::v1) +OPENVINO_OP(SquaredDifference, ngraph::op::v0) +OPENVINO_OP(Squeeze, ngraph::op::v0) +OPENVINO_OP(StridedSlice, ngraph::op::v1) +OPENVINO_OP(Subtract, ngraph::op::v1) +OPENVINO_OP(Tan, ngraph::op::v0) +OPENVINO_OP(Tanh, ngraph::op::v0) +OPENVINO_OP(TensorIterator, ngraph::op::v0) +OPENVINO_OP(Tile, ngraph::op::v0) +OPENVINO_OP(TopK, ngraph::op::v1) +OPENVINO_OP(Transpose, ngraph::op::v1) +OPENVINO_OP(Unsqueeze, ngraph::op::v0) +OPENVINO_OP(VariadicSplit, ngraph::op::v1) + +// Moved out of opset2, it was added to opset1 by mistake +// OPENVINO_OP(Xor, ngraph::op::v0) + +// New operations added in opset2 +OPENVINO_OP(Gelu, ngraph::op::v0) +OPENVINO_OP(BatchToSpace, ngraph::op::v1) +OPENVINO_OP(SpaceToBatch, ngraph::op::v1) diff --git a/ngraph/core/include/openvino/opsets/opset3.hpp b/ngraph/core/include/openvino/opsets/opset3.hpp new file mode 100644 index 00000000000..a2f60e8a9f1 --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset3.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/ops.hpp" + +namespace ov { +namespace opset3 { +#define OPENVINO_OP(a, b) using b::a; +#include "openvino/opsets/opset3_tbl.hpp" +#undef OPENVINO_OP +} // namespace opset3 +} // namespace ov diff --git a/ngraph/core/include/openvino/opsets/opset3_tbl.hpp b/ngraph/core/include/openvino/opsets/opset3_tbl.hpp new file mode 100644 index 00000000000..f1883c85370 --- /dev/null 
+++ b/ngraph/core/include/openvino/opsets/opset3_tbl.hpp @@ -0,0 +1,159 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifndef OPENVINO_OP +# warning "OPENVINO_OP not defined" +# define OPENVINO_OP(x, y) +#endif + +OPENVINO_OP(Abs, ngraph::op::v0) +OPENVINO_OP(Acos, ngraph::op::v0) +OPENVINO_OP(Add, ngraph::op::v1) +OPENVINO_OP(Asin, ngraph::op::v0) +OPENVINO_OP(Atan, ngraph::op::v0) +OPENVINO_OP(AvgPool, ngraph::op::v1) +OPENVINO_OP(BatchNormInference, ngraph::op::v0) +OPENVINO_OP(BinaryConvolution, ngraph::op::v1) +OPENVINO_OP(Broadcast, ngraph::op::v3) +OPENVINO_OP(Bucketize, ngraph::op::v3) +OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) +OPENVINO_OP(Ceiling, ngraph::op::v0) +OPENVINO_OP(Clamp, ngraph::op::v0) +OPENVINO_OP(Concat, ngraph::op::v0) +OPENVINO_OP(Constant, ngraph::op) +OPENVINO_OP(Convert, ngraph::op::v0) +OPENVINO_OP(ConvertLike, ngraph::op::v1) +OPENVINO_OP(Convolution, ngraph::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(Cos, ngraph::op::v0) +OPENVINO_OP(Cosh, ngraph::op::v0) +OPENVINO_OP(CumSum, ngraph::op::v0) +OPENVINO_OP(DeformableConvolution, ngraph::op::v1) +OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) +OPENVINO_OP(DepthToSpace, ngraph::op::v0) +OPENVINO_OP(DetectionOutput, ngraph::op::v0) +OPENVINO_OP(Divide, ngraph::op::v1) +OPENVINO_OP(Elu, ngraph::op::v0) +OPENVINO_OP(Erf, ngraph::op::v0) +OPENVINO_OP(Equal, ngraph::op::v1) +OPENVINO_OP(Exp, ngraph::op::v0) +OPENVINO_OP(ExtractImagePatches, ngraph::op::v3) +OPENVINO_OP(FakeQuantize, ngraph::op::v0) +OPENVINO_OP(Floor, ngraph::op::v0) +OPENVINO_OP(FloorMod, ngraph::op::v1) +OPENVINO_OP(Gather, ngraph::op::v1) +OPENVINO_OP(GatherTree, ngraph::op::v1) +OPENVINO_OP(Greater, ngraph::op::v1) +OPENVINO_OP(GreaterEqual, ngraph::op::v1) +OPENVINO_OP(GroupConvolution, ngraph::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(GRN, ngraph::op::v0) +OPENVINO_OP(HardSigmoid, 
ngraph::op::v0) +OPENVINO_OP(Interpolate, ngraph::op::v0) +OPENVINO_OP(Less, ngraph::op::v1) +OPENVINO_OP(LessEqual, ngraph::op::v1) +OPENVINO_OP(Log, ngraph::op::v0) +OPENVINO_OP(LogicalAnd, ngraph::op::v1) +OPENVINO_OP(LogicalNot, ngraph::op::v1) +OPENVINO_OP(LogicalOr, ngraph::op::v1) +OPENVINO_OP(LogicalXor, ngraph::op::v1) +OPENVINO_OP(LRN, ngraph::op::v0) +OPENVINO_OP(LSTMCell, ngraph::op::v0) +OPENVINO_OP(LSTMSequence, ngraph::op::v0) +OPENVINO_OP(MatMul, ngraph::op::v0) +OPENVINO_OP(MaxPool, ngraph::op::v1) +OPENVINO_OP(Maximum, ngraph::op::v1) +OPENVINO_OP(Minimum, ngraph::op::v1) +OPENVINO_OP(Mod, ngraph::op::v1) +OPENVINO_OP(Multiply, ngraph::op::v1) + +OPENVINO_OP(MVN, ngraph::op::v0) // Missing in opset1 + +OPENVINO_OP(Negative, ngraph::op::v0) +OPENVINO_OP(NonMaxSuppression, ngraph::op::v3) +OPENVINO_OP(NormalizeL2, ngraph::op::v0) +OPENVINO_OP(NotEqual, ngraph::op::v1) +OPENVINO_OP(OneHot, ngraph::op::v1) +OPENVINO_OP(PRelu, ngraph::op::v0) +OPENVINO_OP(PSROIPooling, ngraph::op::v0) +OPENVINO_OP(Pad, ngraph::op::v1) +OPENVINO_OP(Parameter, ngraph::op::v0) +OPENVINO_OP(Power, ngraph::op::v1) +OPENVINO_OP(PriorBox, ngraph::op::v0) +OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) +OPENVINO_OP(Proposal, ngraph::op::v0) +OPENVINO_OP(Range, ngraph::op::v0) +OPENVINO_OP(Relu, ngraph::op::v0) +OPENVINO_OP(ReduceMax, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) +OPENVINO_OP(ReduceMean, ngraph::op::v1) +OPENVINO_OP(ReduceMin, ngraph::op::v1) +OPENVINO_OP(ReduceProd, ngraph::op::v1) +OPENVINO_OP(ReduceSum, ngraph::op::v1) +OPENVINO_OP(RegionYolo, ngraph::op::v0) + +OPENVINO_OP(ReorgYolo, ngraph::op::v0) // Missing in opset1 + +OPENVINO_OP(Reshape, ngraph::op::v1) +OPENVINO_OP(Result, ngraph::op::v0) + +// Moved out of opset2, it was added to opset1 by mistake +// OPENVINO_OP(Reverse, ngraph::op::v1) + +OPENVINO_OP(ReverseSequence, ngraph::op::v0) + +// Moved out of opset2, it was added to opset1 by 
mistake +// OPENVINO_OP(RNNCell, ngraph::op::v0) + +OPENVINO_OP(ROIPooling, ngraph::op::v0) // Missing in opset1 + +OPENVINO_OP(Select, ngraph::op::v1) +OPENVINO_OP(Selu, ngraph::op::v0) +// Superseded +// OPENVINO_OP(ShapeOf, ngraph::op::v0) + +OPENVINO_OP(Sign, ngraph::op::v0) +OPENVINO_OP(Sigmoid, ngraph::op::v0) +OPENVINO_OP(Sin, ngraph::op::v0) +OPENVINO_OP(Sinh, ngraph::op::v0) +OPENVINO_OP(Softmax, ngraph::op::v1) +OPENVINO_OP(Sqrt, ngraph::op::v0) +OPENVINO_OP(SpaceToDepth, ngraph::op::v0) +OPENVINO_OP(Split, ngraph::op::v1) +OPENVINO_OP(SquaredDifference, ngraph::op::v0) +OPENVINO_OP(Squeeze, ngraph::op::v0) +OPENVINO_OP(StridedSlice, ngraph::op::v1) +OPENVINO_OP(Subtract, ngraph::op::v1) +OPENVINO_OP(Tan, ngraph::op::v0) +OPENVINO_OP(Tanh, ngraph::op::v0) +OPENVINO_OP(TensorIterator, ngraph::op::v0) +OPENVINO_OP(Tile, ngraph::op::v0) +OPENVINO_OP(Transpose, ngraph::op::v1) +OPENVINO_OP(Unsqueeze, ngraph::op::v0) +OPENVINO_OP(VariadicSplit, ngraph::op::v1) + +// Moved out of opset2, it was added to opset1 by mistake +// OPENVINO_OP(Xor, ngraph::op::v0) + +// New operations added in opset2 +OPENVINO_OP(Gelu, ngraph::op::v0) +OPENVINO_OP(BatchToSpace, ngraph::op::v1) +OPENVINO_OP(SpaceToBatch, ngraph::op::v1) + +// New operations added in opset3 +OPENVINO_OP(EmbeddingBagPackedSum, ngraph::op::v3) +OPENVINO_OP(EmbeddingSegmentsSum, ngraph::op::v3) +OPENVINO_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) +OPENVINO_OP(GRUCell, ngraph::op::v3) +OPENVINO_OP(NonZero, ngraph::op::v3) +OPENVINO_OP(RNNCell, ngraph::op::v0) +OPENVINO_OP(ROIAlign, ngraph::op::v3) +OPENVINO_OP(ScatterElementsUpdate, ngraph::op::v3) +OPENVINO_OP(ScatterUpdate, ngraph::op::v3) +OPENVINO_OP(ShuffleChannels, ngraph::op::v0) +OPENVINO_OP(ShapeOf, ngraph::op::v3) +OPENVINO_OP(Assign, ngraph::op::v3) +OPENVINO_OP(ReadValue, ngraph::op::v3) +OPENVINO_OP(TopK, ngraph::op::v3) diff --git a/ngraph/core/include/openvino/opsets/opset4.hpp b/ngraph/core/include/openvino/opsets/opset4.hpp new file mode 
100644 index 00000000000..369ea427e9e --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset4.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/ops.hpp" + +namespace ov { +namespace opset4 { +#define OPENVINO_OP(a, b) using b::a; +#include "openvino/opsets/opset4_tbl.hpp" +#undef OPENVINO_OP +} // namespace opset4 +} // namespace ov diff --git a/ngraph/core/include/openvino/opsets/opset4_tbl.hpp b/ngraph/core/include/openvino/opsets/opset4_tbl.hpp new file mode 100644 index 00000000000..142ea8cb157 --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset4_tbl.hpp @@ -0,0 +1,152 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifndef OPENVINO_OP +# warning "OPENVINO_OP not defined" +# define OPENVINO_OP(x, y) +#endif + +OPENVINO_OP(Abs, ngraph::op::v0) +OPENVINO_OP(Acos, ngraph::op::v0) +OPENVINO_OP(Add, ngraph::op::v1) +OPENVINO_OP(Asin, ngraph::op::v0) +OPENVINO_OP(Atan, ngraph::op::v0) +OPENVINO_OP(AvgPool, ngraph::op::v1) +OPENVINO_OP(BatchNormInference, ngraph::op::v0) +OPENVINO_OP(BinaryConvolution, ngraph::op::v1) +OPENVINO_OP(Broadcast, ngraph::op::v3) +OPENVINO_OP(Bucketize, ngraph::op::v3) +OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) +OPENVINO_OP(Ceiling, ngraph::op::v0) +OPENVINO_OP(Clamp, ngraph::op::v0) +OPENVINO_OP(Concat, ngraph::op::v0) +OPENVINO_OP(Constant, ngraph::op) +OPENVINO_OP(Convert, ngraph::op::v0) +OPENVINO_OP(ConvertLike, ngraph::op::v1) +OPENVINO_OP(Convolution, ngraph::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(Cos, ngraph::op::v0) +OPENVINO_OP(Cosh, ngraph::op::v0) +OPENVINO_OP(CumSum, ngraph::op::v0) +OPENVINO_OP(DeformableConvolution, ngraph::op::v1) +OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) +OPENVINO_OP(DepthToSpace, ngraph::op::v0) +OPENVINO_OP(DetectionOutput, ngraph::op::v0) +OPENVINO_OP(Divide, ngraph::op::v1) 
+OPENVINO_OP(Elu, ngraph::op::v0) +OPENVINO_OP(Erf, ngraph::op::v0) +OPENVINO_OP(Equal, ngraph::op::v1) +OPENVINO_OP(Exp, ngraph::op::v0) +OPENVINO_OP(ExtractImagePatches, ngraph::op::v3) +OPENVINO_OP(FakeQuantize, ngraph::op::v0) +OPENVINO_OP(Floor, ngraph::op::v0) +OPENVINO_OP(FloorMod, ngraph::op::v1) +OPENVINO_OP(Gather, ngraph::op::v1) +OPENVINO_OP(GatherTree, ngraph::op::v1) +OPENVINO_OP(Greater, ngraph::op::v1) +OPENVINO_OP(GreaterEqual, ngraph::op::v1) +OPENVINO_OP(GroupConvolution, ngraph::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(GRN, ngraph::op::v0) +OPENVINO_OP(HardSigmoid, ngraph::op::v0) +OPENVINO_OP(Less, ngraph::op::v1) +OPENVINO_OP(LessEqual, ngraph::op::v1) +OPENVINO_OP(Log, ngraph::op::v0) +OPENVINO_OP(LogicalAnd, ngraph::op::v1) +OPENVINO_OP(LogicalNot, ngraph::op::v1) +OPENVINO_OP(LogicalOr, ngraph::op::v1) +OPENVINO_OP(LogicalXor, ngraph::op::v1) +OPENVINO_OP(LRN, ngraph::op::v0) +OPENVINO_OP(LSTMCell, ngraph::op::v4) +OPENVINO_OP(MatMul, ngraph::op::v0) +OPENVINO_OP(MaxPool, ngraph::op::v1) +OPENVINO_OP(Maximum, ngraph::op::v1) +OPENVINO_OP(Minimum, ngraph::op::v1) +OPENVINO_OP(Mod, ngraph::op::v1) +OPENVINO_OP(Multiply, ngraph::op::v1) +OPENVINO_OP(MVN, ngraph::op::v0) +OPENVINO_OP(Negative, ngraph::op::v0) +OPENVINO_OP(NormalizeL2, ngraph::op::v0) +OPENVINO_OP(NotEqual, ngraph::op::v1) +OPENVINO_OP(OneHot, ngraph::op::v1) +OPENVINO_OP(PRelu, ngraph::op::v0) +OPENVINO_OP(PSROIPooling, ngraph::op::v0) +OPENVINO_OP(Pad, ngraph::op::v1) +OPENVINO_OP(Parameter, ngraph::op::v0) +OPENVINO_OP(Power, ngraph::op::v1) +OPENVINO_OP(PriorBox, ngraph::op::v0) +OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) +OPENVINO_OP(Proposal, ngraph::op::v4) +OPENVINO_OP(Range, ngraph::op::v4) +OPENVINO_OP(Relu, ngraph::op::v0) +OPENVINO_OP(ReduceMax, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) +OPENVINO_OP(ReduceMean, ngraph::op::v1) +OPENVINO_OP(ReduceMin, 
ngraph::op::v1) +OPENVINO_OP(ReduceProd, ngraph::op::v1) +OPENVINO_OP(ReduceSum, ngraph::op::v1) +OPENVINO_OP(RegionYolo, ngraph::op::v0) +OPENVINO_OP(ReorgYolo, ngraph::op::v0) +OPENVINO_OP(Reshape, ngraph::op::v1) +OPENVINO_OP(Result, ngraph::op::v0) +OPENVINO_OP(ReverseSequence, ngraph::op::v0) +OPENVINO_OP(ROIPooling, ngraph::op::v0) +OPENVINO_OP(ScatterNDUpdate, ngraph::op::v3) +OPENVINO_OP(Select, ngraph::op::v1) +OPENVINO_OP(Selu, ngraph::op::v0) +OPENVINO_OP(Sign, ngraph::op::v0) +OPENVINO_OP(Sigmoid, ngraph::op::v0) +OPENVINO_OP(Sin, ngraph::op::v0) +OPENVINO_OP(Sinh, ngraph::op::v0) +OPENVINO_OP(Softmax, ngraph::op::v1) +OPENVINO_OP(Sqrt, ngraph::op::v0) +OPENVINO_OP(SpaceToDepth, ngraph::op::v0) +OPENVINO_OP(Split, ngraph::op::v1) +OPENVINO_OP(SquaredDifference, ngraph::op::v0) +OPENVINO_OP(Squeeze, ngraph::op::v0) +OPENVINO_OP(StridedSlice, ngraph::op::v1) +OPENVINO_OP(Subtract, ngraph::op::v1) +OPENVINO_OP(Tan, ngraph::op::v0) +OPENVINO_OP(Tanh, ngraph::op::v0) +OPENVINO_OP(TensorIterator, ngraph::op::v0) +OPENVINO_OP(Tile, ngraph::op::v0) +OPENVINO_OP(Transpose, ngraph::op::v1) +OPENVINO_OP(Unsqueeze, ngraph::op::v0) +OPENVINO_OP(VariadicSplit, ngraph::op::v1) + +// New operations added in opset2 +OPENVINO_OP(Gelu, ngraph::op::v0) +OPENVINO_OP(BatchToSpace, ngraph::op::v1) +OPENVINO_OP(SpaceToBatch, ngraph::op::v1) + +// New operations added in opset3 +OPENVINO_OP(EmbeddingBagPackedSum, ngraph::op::v3) +OPENVINO_OP(EmbeddingSegmentsSum, ngraph::op::v3) +OPENVINO_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) +OPENVINO_OP(GRUCell, ngraph::op::v3) +OPENVINO_OP(NonZero, ngraph::op::v3) +OPENVINO_OP(RNNCell, ngraph::op::v0) +OPENVINO_OP(ROIAlign, ngraph::op::v3) +OPENVINO_OP(ScatterElementsUpdate, ngraph::op::v3) +OPENVINO_OP(ScatterUpdate, ngraph::op::v3) +OPENVINO_OP(ShuffleChannels, ngraph::op::v0) +OPENVINO_OP(ShapeOf, ngraph::op::v3) +OPENVINO_OP(Assign, ngraph::op::v3) +OPENVINO_OP(ReadValue, ngraph::op::v3) +OPENVINO_OP(TopK, ngraph::op::v3) + +// New 
operations added in opset4 +OPENVINO_OP(Acosh, ngraph::op::v3) +OPENVINO_OP(Asinh, ngraph::op::v3) +OPENVINO_OP(Atanh, ngraph::op::v3) +OPENVINO_OP(CTCLoss, ngraph::op::v4) +OPENVINO_OP(HSwish, ngraph::op::v4) +OPENVINO_OP(Interpolate, ngraph::op::v4) +OPENVINO_OP(Mish, ngraph::op::v4) +OPENVINO_OP(NonMaxSuppression, ngraph::op::v4) +OPENVINO_OP(ReduceL1, ngraph::op::v4) +OPENVINO_OP(ReduceL2, ngraph::op::v4) +OPENVINO_OP(SoftPlus, ngraph::op::v4) +OPENVINO_OP(Swish, ngraph::op::v4) diff --git a/ngraph/core/include/openvino/opsets/opset5.hpp b/ngraph/core/include/openvino/opsets/opset5.hpp new file mode 100644 index 00000000000..736fa00f529 --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset5.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/ops.hpp" + +namespace ov { +namespace opset5 { +#define OPENVINO_OP(a, b) using b::a; +#include "openvino/opsets/opset5_tbl.hpp" +#undef OPENVINO_OP +} // namespace opset5 +} // namespace ov diff --git a/ngraph/core/include/openvino/opsets/opset5_tbl.hpp b/ngraph/core/include/openvino/opsets/opset5_tbl.hpp new file mode 100644 index 00000000000..bde996f3252 --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset5_tbl.hpp @@ -0,0 +1,162 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifndef OPENVINO_OP +# warning "OPENVINO_OP not defined" +# define OPENVINO_OP(x, y) +#endif + +OPENVINO_OP(Abs, ngraph::op::v0) +OPENVINO_OP(Acos, ngraph::op::v0) +OPENVINO_OP(Add, ngraph::op::v1) +OPENVINO_OP(Asin, ngraph::op::v0) +OPENVINO_OP(Atan, ngraph::op::v0) +OPENVINO_OP(AvgPool, ngraph::op::v1) +OPENVINO_OP(BatchNormInference, ngraph::op::v5) +OPENVINO_OP(BinaryConvolution, ngraph::op::v1) +OPENVINO_OP(Broadcast, ngraph::op::v3) +OPENVINO_OP(Bucketize, ngraph::op::v3) +OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) +OPENVINO_OP(Ceiling, ngraph::op::v0) +OPENVINO_OP(Clamp, 
ngraph::op::v0) +OPENVINO_OP(Concat, ngraph::op::v0) +OPENVINO_OP(Constant, ngraph::op) +OPENVINO_OP(Convert, ngraph::op::v0) +OPENVINO_OP(ConvertLike, ngraph::op::v1) +OPENVINO_OP(Convolution, ngraph::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(Cos, ngraph::op::v0) +OPENVINO_OP(Cosh, ngraph::op::v0) +OPENVINO_OP(CumSum, ngraph::op::v0) +OPENVINO_OP(DeformableConvolution, ngraph::op::v1) +OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) +OPENVINO_OP(DepthToSpace, ngraph::op::v0) +OPENVINO_OP(DetectionOutput, ngraph::op::v0) +OPENVINO_OP(Divide, ngraph::op::v1) +OPENVINO_OP(Elu, ngraph::op::v0) +OPENVINO_OP(Erf, ngraph::op::v0) +OPENVINO_OP(Equal, ngraph::op::v1) +OPENVINO_OP(Exp, ngraph::op::v0) +OPENVINO_OP(ExtractImagePatches, ngraph::op::v3) +OPENVINO_OP(FakeQuantize, ngraph::op::v0) +OPENVINO_OP(Floor, ngraph::op::v0) +OPENVINO_OP(FloorMod, ngraph::op::v1) +OPENVINO_OP(Gather, ngraph::op::v1) +OPENVINO_OP(GatherTree, ngraph::op::v1) +OPENVINO_OP(Greater, ngraph::op::v1) +OPENVINO_OP(GreaterEqual, ngraph::op::v1) +OPENVINO_OP(GroupConvolution, ngraph::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(GRN, ngraph::op::v0) +OPENVINO_OP(HardSigmoid, ngraph::op::v0) +OPENVINO_OP(Less, ngraph::op::v1) +OPENVINO_OP(LessEqual, ngraph::op::v1) +OPENVINO_OP(Log, ngraph::op::v0) +OPENVINO_OP(LogicalAnd, ngraph::op::v1) +OPENVINO_OP(LogicalNot, ngraph::op::v1) +OPENVINO_OP(LogicalOr, ngraph::op::v1) +OPENVINO_OP(LogicalXor, ngraph::op::v1) +OPENVINO_OP(LRN, ngraph::op::v0) +OPENVINO_OP(LSTMCell, ngraph::op::v4) +OPENVINO_OP(MatMul, ngraph::op::v0) +OPENVINO_OP(MaxPool, ngraph::op::v1) +OPENVINO_OP(Maximum, ngraph::op::v1) +OPENVINO_OP(Minimum, ngraph::op::v1) +OPENVINO_OP(Mod, ngraph::op::v1) +OPENVINO_OP(Multiply, ngraph::op::v1) +OPENVINO_OP(MVN, ngraph::op::v0) +OPENVINO_OP(Negative, ngraph::op::v0) +OPENVINO_OP(NormalizeL2, ngraph::op::v0) +OPENVINO_OP(NotEqual, ngraph::op::v1) +OPENVINO_OP(OneHot, 
ngraph::op::v1) +OPENVINO_OP(PRelu, ngraph::op::v0) +OPENVINO_OP(PSROIPooling, ngraph::op::v0) +OPENVINO_OP(Pad, ngraph::op::v1) +OPENVINO_OP(Parameter, ngraph::op::v0) +OPENVINO_OP(Power, ngraph::op::v1) +OPENVINO_OP(PriorBox, ngraph::op::v0) +OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) +OPENVINO_OP(Proposal, ngraph::op::v4) +OPENVINO_OP(Range, ngraph::op::v4) +OPENVINO_OP(Relu, ngraph::op::v0) +OPENVINO_OP(ReduceMax, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) +OPENVINO_OP(ReduceMean, ngraph::op::v1) +OPENVINO_OP(ReduceMin, ngraph::op::v1) +OPENVINO_OP(ReduceProd, ngraph::op::v1) +OPENVINO_OP(ReduceSum, ngraph::op::v1) +OPENVINO_OP(RegionYolo, ngraph::op::v0) +OPENVINO_OP(ReorgYolo, ngraph::op::v0) +OPENVINO_OP(Reshape, ngraph::op::v1) +OPENVINO_OP(Result, ngraph::op::v0) +OPENVINO_OP(ReverseSequence, ngraph::op::v0) +OPENVINO_OP(ROIPooling, ngraph::op::v0) +OPENVINO_OP(ScatterNDUpdate, ngraph::op::v3) +OPENVINO_OP(Select, ngraph::op::v1) +OPENVINO_OP(Selu, ngraph::op::v0) +OPENVINO_OP(Sign, ngraph::op::v0) +OPENVINO_OP(Sigmoid, ngraph::op::v0) +OPENVINO_OP(Sin, ngraph::op::v0) +OPENVINO_OP(Sinh, ngraph::op::v0) +OPENVINO_OP(Softmax, ngraph::op::v1) +OPENVINO_OP(Sqrt, ngraph::op::v0) +OPENVINO_OP(SpaceToDepth, ngraph::op::v0) +OPENVINO_OP(Split, ngraph::op::v1) +OPENVINO_OP(SquaredDifference, ngraph::op::v0) +OPENVINO_OP(Squeeze, ngraph::op::v0) +OPENVINO_OP(StridedSlice, ngraph::op::v1) +OPENVINO_OP(Subtract, ngraph::op::v1) +OPENVINO_OP(Tan, ngraph::op::v0) +OPENVINO_OP(Tanh, ngraph::op::v0) +OPENVINO_OP(TensorIterator, ngraph::op::v0) +OPENVINO_OP(Tile, ngraph::op::v0) +OPENVINO_OP(Transpose, ngraph::op::v1) +OPENVINO_OP(Unsqueeze, ngraph::op::v0) +OPENVINO_OP(VariadicSplit, ngraph::op::v1) + +// New operations added in opset2 +OPENVINO_OP(Gelu, ngraph::op::v0) +OPENVINO_OP(BatchToSpace, ngraph::op::v1) +OPENVINO_OP(SpaceToBatch, ngraph::op::v1) + +// New operations added in opset3 
+OPENVINO_OP(EmbeddingBagPackedSum, ngraph::op::v3) +OPENVINO_OP(EmbeddingSegmentsSum, ngraph::op::v3) +OPENVINO_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) +OPENVINO_OP(GRUCell, ngraph::op::v3) +OPENVINO_OP(NonZero, ngraph::op::v3) +OPENVINO_OP(RNNCell, ngraph::op::v0) +OPENVINO_OP(ROIAlign, ngraph::op::v3) +OPENVINO_OP(ScatterElementsUpdate, ngraph::op::v3) +OPENVINO_OP(ScatterUpdate, ngraph::op::v3) +OPENVINO_OP(ShuffleChannels, ngraph::op::v0) +OPENVINO_OP(ShapeOf, ngraph::op::v3) +OPENVINO_OP(Assign, ngraph::op::v3) +OPENVINO_OP(ReadValue, ngraph::op::v3) +OPENVINO_OP(TopK, ngraph::op::v3) + +// New operations added in opset4 +OPENVINO_OP(Acosh, ngraph::op::v3) +OPENVINO_OP(Asinh, ngraph::op::v3) +OPENVINO_OP(Atanh, ngraph::op::v3) +OPENVINO_OP(CTCLoss, ngraph::op::v4) +OPENVINO_OP(HSwish, ngraph::op::v4) +OPENVINO_OP(Interpolate, ngraph::op::v4) +OPENVINO_OP(Mish, ngraph::op::v4) +OPENVINO_OP(ReduceL1, ngraph::op::v4) +OPENVINO_OP(ReduceL2, ngraph::op::v4) +OPENVINO_OP(SoftPlus, ngraph::op::v4) +OPENVINO_OP(Swish, ngraph::op::v4) + +// New operations added in opset5 +OPENVINO_OP(GatherND, ngraph::op::v5) +OPENVINO_OP(GRUSequence, ngraph::op::v5) +OPENVINO_OP(HSigmoid, ngraph::op::v5) +OPENVINO_OP(LogSoftmax, ngraph::op::v5) +OPENVINO_OP(Loop, ngraph::op::v5) +OPENVINO_OP(LSTMSequence, ngraph::op::v5) +OPENVINO_OP(NonMaxSuppression, ngraph::op::v5) +OPENVINO_OP(RNNSequence, ngraph::op::v5) +OPENVINO_OP(Round, ngraph::op::v5) diff --git a/ngraph/core/include/openvino/opsets/opset6.hpp b/ngraph/core/include/openvino/opsets/opset6.hpp new file mode 100644 index 00000000000..82b792da09a --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset6.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/ops.hpp" + +namespace ov { +namespace opset6 { +#define OPENVINO_OP(a, b) using b::a; +#include "openvino/opsets/opset6_tbl.hpp" +#undef OPENVINO_OP +} // namespace 
opset6 +} // namespace ov diff --git a/ngraph/core/include/openvino/opsets/opset6_tbl.hpp b/ngraph/core/include/openvino/opsets/opset6_tbl.hpp new file mode 100644 index 00000000000..27d04eacd42 --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset6_tbl.hpp @@ -0,0 +1,171 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifndef OPENVINO_OP +# warning "OPENVINO_OP not defined" +# define OPENVINO_OP(x, y) +#endif + +OPENVINO_OP(Abs, ngraph::op::v0) +OPENVINO_OP(Acos, ngraph::op::v0) +OPENVINO_OP(Add, ngraph::op::v1) +OPENVINO_OP(Asin, ngraph::op::v0) +OPENVINO_OP(Atan, ngraph::op::v0) +OPENVINO_OP(AvgPool, ngraph::op::v1) +OPENVINO_OP(BatchNormInference, ngraph::op::v5) +OPENVINO_OP(BinaryConvolution, ngraph::op::v1) +OPENVINO_OP(Broadcast, ngraph::op::v3) +OPENVINO_OP(Bucketize, ngraph::op::v3) +OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) +OPENVINO_OP(Ceiling, ngraph::op::v0) +OPENVINO_OP(Clamp, ngraph::op::v0) +OPENVINO_OP(Concat, ngraph::op::v0) +OPENVINO_OP(Constant, ngraph::op) +OPENVINO_OP(Convert, ngraph::op::v0) +OPENVINO_OP(ConvertLike, ngraph::op::v1) +OPENVINO_OP(Convolution, ngraph::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(Cos, ngraph::op::v0) +OPENVINO_OP(Cosh, ngraph::op::v0) +OPENVINO_OP(CumSum, ngraph::op::v0) +OPENVINO_OP(DeformableConvolution, ngraph::op::v1) +OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) +OPENVINO_OP(DepthToSpace, ngraph::op::v0) +OPENVINO_OP(DetectionOutput, ngraph::op::v0) +OPENVINO_OP(Divide, ngraph::op::v1) +OPENVINO_OP(Elu, ngraph::op::v0) +OPENVINO_OP(Erf, ngraph::op::v0) +OPENVINO_OP(Equal, ngraph::op::v1) +OPENVINO_OP(Exp, ngraph::op::v0) +OPENVINO_OP(ExtractImagePatches, ngraph::op::v3) +OPENVINO_OP(FakeQuantize, ngraph::op::v0) +OPENVINO_OP(Floor, ngraph::op::v0) +OPENVINO_OP(FloorMod, ngraph::op::v1) +OPENVINO_OP(Gather, ngraph::op::v1) +OPENVINO_OP(GatherTree, ngraph::op::v1) +OPENVINO_OP(Greater, ngraph::op::v1) 
+OPENVINO_OP(GreaterEqual, ngraph::op::v1) +OPENVINO_OP(GroupConvolution, ngraph::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(GRN, ngraph::op::v0) +OPENVINO_OP(HardSigmoid, ngraph::op::v0) +OPENVINO_OP(Less, ngraph::op::v1) +OPENVINO_OP(LessEqual, ngraph::op::v1) +OPENVINO_OP(Log, ngraph::op::v0) +OPENVINO_OP(LogicalAnd, ngraph::op::v1) +OPENVINO_OP(LogicalNot, ngraph::op::v1) +OPENVINO_OP(LogicalOr, ngraph::op::v1) +OPENVINO_OP(LogicalXor, ngraph::op::v1) +OPENVINO_OP(LRN, ngraph::op::v0) +OPENVINO_OP(LSTMCell, ngraph::op::v4) +OPENVINO_OP(MatMul, ngraph::op::v0) +OPENVINO_OP(MaxPool, ngraph::op::v1) +OPENVINO_OP(Maximum, ngraph::op::v1) +OPENVINO_OP(Minimum, ngraph::op::v1) +OPENVINO_OP(Mod, ngraph::op::v1) +OPENVINO_OP(Multiply, ngraph::op::v1) +OPENVINO_OP(Negative, ngraph::op::v0) +OPENVINO_OP(NormalizeL2, ngraph::op::v0) +OPENVINO_OP(NotEqual, ngraph::op::v1) +OPENVINO_OP(OneHot, ngraph::op::v1) +OPENVINO_OP(PRelu, ngraph::op::v0) +OPENVINO_OP(PSROIPooling, ngraph::op::v0) +OPENVINO_OP(Pad, ngraph::op::v1) +OPENVINO_OP(Parameter, ngraph::op::v0) +OPENVINO_OP(Power, ngraph::op::v1) +OPENVINO_OP(PriorBox, ngraph::op::v0) +OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) +OPENVINO_OP(Proposal, ngraph::op::v4) +OPENVINO_OP(Range, ngraph::op::v4) +OPENVINO_OP(Relu, ngraph::op::v0) +OPENVINO_OP(ReduceMax, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) +OPENVINO_OP(ReduceMean, ngraph::op::v1) +OPENVINO_OP(ReduceMin, ngraph::op::v1) +OPENVINO_OP(ReduceProd, ngraph::op::v1) +OPENVINO_OP(ReduceSum, ngraph::op::v1) +OPENVINO_OP(RegionYolo, ngraph::op::v0) +OPENVINO_OP(ReorgYolo, ngraph::op::v0) +OPENVINO_OP(Reshape, ngraph::op::v1) +OPENVINO_OP(Result, ngraph::op::v0) +OPENVINO_OP(ReverseSequence, ngraph::op::v0) +OPENVINO_OP(ROIPooling, ngraph::op::v0) +OPENVINO_OP(ScatterNDUpdate, ngraph::op::v3) +OPENVINO_OP(Select, ngraph::op::v1) +OPENVINO_OP(Selu, ngraph::op::v0) 
+OPENVINO_OP(Sign, ngraph::op::v0) +OPENVINO_OP(Sigmoid, ngraph::op::v0) +OPENVINO_OP(Sin, ngraph::op::v0) +OPENVINO_OP(Sinh, ngraph::op::v0) +OPENVINO_OP(Softmax, ngraph::op::v1) +OPENVINO_OP(Sqrt, ngraph::op::v0) +OPENVINO_OP(SpaceToDepth, ngraph::op::v0) +OPENVINO_OP(Split, ngraph::op::v1) +OPENVINO_OP(SquaredDifference, ngraph::op::v0) +OPENVINO_OP(Squeeze, ngraph::op::v0) +OPENVINO_OP(StridedSlice, ngraph::op::v1) +OPENVINO_OP(Subtract, ngraph::op::v1) +OPENVINO_OP(Tan, ngraph::op::v0) +OPENVINO_OP(Tanh, ngraph::op::v0) +OPENVINO_OP(TensorIterator, ngraph::op::v0) +OPENVINO_OP(Tile, ngraph::op::v0) +OPENVINO_OP(Transpose, ngraph::op::v1) +OPENVINO_OP(Unsqueeze, ngraph::op::v0) +OPENVINO_OP(VariadicSplit, ngraph::op::v1) + +// New operations added in opset2 +OPENVINO_OP(Gelu, ngraph::op::v0) +OPENVINO_OP(BatchToSpace, ngraph::op::v1) +OPENVINO_OP(SpaceToBatch, ngraph::op::v1) + +// New operations added in opset3 +OPENVINO_OP(EmbeddingBagPackedSum, ngraph::op::v3) +OPENVINO_OP(EmbeddingSegmentsSum, ngraph::op::v3) +OPENVINO_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) +OPENVINO_OP(GRUCell, ngraph::op::v3) +OPENVINO_OP(NonZero, ngraph::op::v3) +OPENVINO_OP(RNNCell, ngraph::op::v0) +OPENVINO_OP(ROIAlign, ngraph::op::v3) +OPENVINO_OP(ScatterElementsUpdate, ngraph::op::v3) +OPENVINO_OP(ScatterUpdate, ngraph::op::v3) +OPENVINO_OP(ShuffleChannels, ngraph::op::v0) +OPENVINO_OP(ShapeOf, ngraph::op::v3) +OPENVINO_OP(TopK, ngraph::op::v3) + +// New operations added in opset4 +OPENVINO_OP(Acosh, ngraph::op::v3) +OPENVINO_OP(Asinh, ngraph::op::v3) +OPENVINO_OP(Atanh, ngraph::op::v3) +OPENVINO_OP(CTCLoss, ngraph::op::v4) +OPENVINO_OP(HSwish, ngraph::op::v4) +OPENVINO_OP(Interpolate, ngraph::op::v4) +OPENVINO_OP(Mish, ngraph::op::v4) +OPENVINO_OP(ReduceL1, ngraph::op::v4) +OPENVINO_OP(ReduceL2, ngraph::op::v4) +OPENVINO_OP(SoftPlus, ngraph::op::v4) +OPENVINO_OP(Swish, ngraph::op::v4) + +// New operations added in opset5 +OPENVINO_OP(GatherND, ngraph::op::v5) 
+OPENVINO_OP(GRUSequence, ngraph::op::v5) +OPENVINO_OP(HSigmoid, ngraph::op::v5) +OPENVINO_OP(LogSoftmax, ngraph::op::v5) +OPENVINO_OP(Loop, ngraph::op::v5) +OPENVINO_OP(LSTMSequence, ngraph::op::v5) +OPENVINO_OP(NonMaxSuppression, ngraph::op::v5) +OPENVINO_OP(RNNSequence, ngraph::op::v5) +OPENVINO_OP(Round, ngraph::op::v5) + +// New operations added in opset6 +OPENVINO_OP(CTCGreedyDecoderSeqLen, ngraph::op::v6) +OPENVINO_OP(ExperimentalDetectronDetectionOutput, ngraph::op::v6) +OPENVINO_OP(ExperimentalDetectronGenerateProposalsSingleImage, ngraph::op::v6) +OPENVINO_OP(ExperimentalDetectronPriorGridGenerator, ngraph::op::v6) +OPENVINO_OP(ExperimentalDetectronROIFeatureExtractor, ngraph::op::v6) +OPENVINO_OP(ExperimentalDetectronTopKROIs, ngraph::op::v6) +OPENVINO_OP(GatherElements, ngraph::op::v6) +OPENVINO_OP(MVN, ngraph::op::v6) +OPENVINO_OP(Assign, ngraph::op::v6) // new version +OPENVINO_OP(ReadValue, ngraph::op::v6) // new version diff --git a/ngraph/core/include/openvino/opsets/opset7.hpp b/ngraph/core/include/openvino/opsets/opset7.hpp new file mode 100644 index 00000000000..87343585747 --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset7.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/ops.hpp" + +namespace ov { +namespace opset7 { +#define OPENVINO_OP(a, b) using b::a; +#include "openvino/opsets/opset7_tbl.hpp" +#undef OPENVINO_OP +} // namespace opset7 +} // namespace ov diff --git a/ngraph/core/include/openvino/opsets/opset7_tbl.hpp b/ngraph/core/include/openvino/opsets/opset7_tbl.hpp new file mode 100644 index 00000000000..7804a8dfe7e --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset7_tbl.hpp @@ -0,0 +1,177 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifndef OPENVINO_OP +# warning "OPENVINO_OP not defined" +# define OPENVINO_OP(x, y) +#endif + +OPENVINO_OP(Abs, ngraph::op::v0) 
+OPENVINO_OP(Acos, ngraph::op::v0) +OPENVINO_OP(Add, ngraph::op::v1) +OPENVINO_OP(Asin, ngraph::op::v0) +OPENVINO_OP(Atan, ngraph::op::v0) +OPENVINO_OP(AvgPool, ngraph::op::v1) +OPENVINO_OP(BatchNormInference, ngraph::op::v5) +OPENVINO_OP(BinaryConvolution, ngraph::op::v1) +OPENVINO_OP(Broadcast, ngraph::op::v3) +OPENVINO_OP(Bucketize, ngraph::op::v3) +OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) +OPENVINO_OP(Ceiling, ngraph::op::v0) +OPENVINO_OP(Clamp, ngraph::op::v0) +OPENVINO_OP(Concat, ngraph::op::v0) +OPENVINO_OP(Constant, ngraph::op) +OPENVINO_OP(Convert, ngraph::op::v0) +OPENVINO_OP(ConvertLike, ngraph::op::v1) +OPENVINO_OP(Convolution, ngraph::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(Cos, ngraph::op::v0) +OPENVINO_OP(Cosh, ngraph::op::v0) +OPENVINO_OP(CumSum, ngraph::op::v0) +OPENVINO_OP(DeformableConvolution, ngraph::op::v1) +OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) +OPENVINO_OP(DepthToSpace, ngraph::op::v0) +OPENVINO_OP(DetectionOutput, ngraph::op::v0) +OPENVINO_OP(Divide, ngraph::op::v1) +OPENVINO_OP(Elu, ngraph::op::v0) +OPENVINO_OP(Erf, ngraph::op::v0) +OPENVINO_OP(Equal, ngraph::op::v1) +OPENVINO_OP(Exp, ngraph::op::v0) +OPENVINO_OP(ExtractImagePatches, ngraph::op::v3) +OPENVINO_OP(FakeQuantize, ngraph::op::v0) +OPENVINO_OP(Floor, ngraph::op::v0) +OPENVINO_OP(FloorMod, ngraph::op::v1) +OPENVINO_OP(Gather, ngraph::op::v7) +OPENVINO_OP(GatherTree, ngraph::op::v1) +OPENVINO_OP(Greater, ngraph::op::v1) +OPENVINO_OP(GreaterEqual, ngraph::op::v1) +OPENVINO_OP(GroupConvolution, ngraph::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(GRN, ngraph::op::v0) +OPENVINO_OP(HardSigmoid, ngraph::op::v0) +OPENVINO_OP(Less, ngraph::op::v1) +OPENVINO_OP(LessEqual, ngraph::op::v1) +OPENVINO_OP(Log, ngraph::op::v0) +OPENVINO_OP(LogicalAnd, ngraph::op::v1) +OPENVINO_OP(LogicalNot, ngraph::op::v1) +OPENVINO_OP(LogicalOr, ngraph::op::v1) +OPENVINO_OP(LogicalXor, ngraph::op::v1) +OPENVINO_OP(LRN, 
ngraph::op::v0) +OPENVINO_OP(LSTMCell, ngraph::op::v4) +OPENVINO_OP(MatMul, ngraph::op::v0) +OPENVINO_OP(MaxPool, ngraph::op::v1) +OPENVINO_OP(Maximum, ngraph::op::v1) +OPENVINO_OP(Minimum, ngraph::op::v1) +OPENVINO_OP(Mod, ngraph::op::v1) +OPENVINO_OP(Multiply, ngraph::op::v1) +OPENVINO_OP(Negative, ngraph::op::v0) +OPENVINO_OP(NormalizeL2, ngraph::op::v0) +OPENVINO_OP(NotEqual, ngraph::op::v1) +OPENVINO_OP(OneHot, ngraph::op::v1) +OPENVINO_OP(PRelu, ngraph::op::v0) +OPENVINO_OP(PSROIPooling, ngraph::op::v0) +OPENVINO_OP(Pad, ngraph::op::v1) +OPENVINO_OP(Parameter, ngraph::op::v0) +OPENVINO_OP(Power, ngraph::op::v1) +OPENVINO_OP(PriorBox, ngraph::op::v0) +OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) +OPENVINO_OP(Proposal, ngraph::op::v4) +OPENVINO_OP(Range, ngraph::op::v4) +OPENVINO_OP(Relu, ngraph::op::v0) +OPENVINO_OP(ReduceMax, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) +OPENVINO_OP(ReduceMean, ngraph::op::v1) +OPENVINO_OP(ReduceMin, ngraph::op::v1) +OPENVINO_OP(ReduceProd, ngraph::op::v1) +OPENVINO_OP(ReduceSum, ngraph::op::v1) +OPENVINO_OP(RegionYolo, ngraph::op::v0) +OPENVINO_OP(ReorgYolo, ngraph::op::v0) +OPENVINO_OP(Reshape, ngraph::op::v1) +OPENVINO_OP(Result, ngraph::op::v0) +OPENVINO_OP(ReverseSequence, ngraph::op::v0) +OPENVINO_OP(ROIPooling, ngraph::op::v0) +OPENVINO_OP(ScatterNDUpdate, ngraph::op::v3) +OPENVINO_OP(Select, ngraph::op::v1) +OPENVINO_OP(Selu, ngraph::op::v0) +OPENVINO_OP(Sign, ngraph::op::v0) +OPENVINO_OP(Sigmoid, ngraph::op::v0) +OPENVINO_OP(Sin, ngraph::op::v0) +OPENVINO_OP(Sinh, ngraph::op::v0) +OPENVINO_OP(Softmax, ngraph::op::v1) +OPENVINO_OP(Sqrt, ngraph::op::v0) +OPENVINO_OP(SpaceToDepth, ngraph::op::v0) +OPENVINO_OP(Split, ngraph::op::v1) +OPENVINO_OP(SquaredDifference, ngraph::op::v0) +OPENVINO_OP(Squeeze, ngraph::op::v0) +OPENVINO_OP(StridedSlice, ngraph::op::v1) +OPENVINO_OP(Subtract, ngraph::op::v1) +OPENVINO_OP(Tan, ngraph::op::v0) +OPENVINO_OP(Tanh, 
ngraph::op::v0) +OPENVINO_OP(TensorIterator, ngraph::op::v0) +OPENVINO_OP(Tile, ngraph::op::v0) +OPENVINO_OP(Transpose, ngraph::op::v1) +OPENVINO_OP(Unsqueeze, ngraph::op::v0) +OPENVINO_OP(VariadicSplit, ngraph::op::v1) + +// New operations added in opset2 +OPENVINO_OP(BatchToSpace, ngraph::op::v1) +OPENVINO_OP(SpaceToBatch, ngraph::op::v1) + +// New operations added in opset3 +OPENVINO_OP(EmbeddingBagPackedSum, ngraph::op::v3) +OPENVINO_OP(EmbeddingSegmentsSum, ngraph::op::v3) +OPENVINO_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) +OPENVINO_OP(GRUCell, ngraph::op::v3) +OPENVINO_OP(NonZero, ngraph::op::v3) +OPENVINO_OP(RNNCell, ngraph::op::v0) +OPENVINO_OP(ROIAlign, ngraph::op::v3) +OPENVINO_OP(ScatterElementsUpdate, ngraph::op::v3) +OPENVINO_OP(ScatterUpdate, ngraph::op::v3) +OPENVINO_OP(ShuffleChannels, ngraph::op::v0) +OPENVINO_OP(ShapeOf, ngraph::op::v3) +OPENVINO_OP(TopK, ngraph::op::v3) + +// New operations added in opset4 +OPENVINO_OP(Acosh, ngraph::op::v3) +OPENVINO_OP(Asinh, ngraph::op::v3) +OPENVINO_OP(Atanh, ngraph::op::v3) +OPENVINO_OP(CTCLoss, ngraph::op::v4) +OPENVINO_OP(HSwish, ngraph::op::v4) +OPENVINO_OP(Interpolate, ngraph::op::v4) +OPENVINO_OP(Mish, ngraph::op::v4) +OPENVINO_OP(ReduceL1, ngraph::op::v4) +OPENVINO_OP(ReduceL2, ngraph::op::v4) +OPENVINO_OP(SoftPlus, ngraph::op::v4) +OPENVINO_OP(Swish, ngraph::op::v4) + +// New operations added in opset5 +OPENVINO_OP(GatherND, ngraph::op::v5) +OPENVINO_OP(GRUSequence, ngraph::op::v5) +OPENVINO_OP(HSigmoid, ngraph::op::v5) +OPENVINO_OP(LogSoftmax, ngraph::op::v5) +OPENVINO_OP(Loop, ngraph::op::v5) +OPENVINO_OP(LSTMSequence, ngraph::op::v5) +OPENVINO_OP(NonMaxSuppression, ngraph::op::v5) +OPENVINO_OP(RNNSequence, ngraph::op::v5) +OPENVINO_OP(Round, ngraph::op::v5) + +// New operations added in opset6 +OPENVINO_OP(CTCGreedyDecoderSeqLen, ngraph::op::v6) +OPENVINO_OP(ExperimentalDetectronDetectionOutput, ngraph::op::v6) +OPENVINO_OP(ExperimentalDetectronGenerateProposalsSingleImage, ngraph::op::v6) 
+OPENVINO_OP(ExperimentalDetectronPriorGridGenerator, ngraph::op::v6) +OPENVINO_OP(ExperimentalDetectronROIFeatureExtractor, ngraph::op::v6) +OPENVINO_OP(ExperimentalDetectronTopKROIs, ngraph::op::v6) +OPENVINO_OP(GatherElements, ngraph::op::v6) +OPENVINO_OP(MVN, ngraph::op::v6) +OPENVINO_OP(Assign, ngraph::op::v6) // new version +OPENVINO_OP(ReadValue, ngraph::op::v6) // new version + +// New operations added in opset7 +OPENVINO_OP(DFT, ngraph::op::v7) +OPENVINO_OP(Einsum, ngraph::op::v7) +OPENVINO_OP(Gelu, ngraph::op::v7) +OPENVINO_OP(IDFT, ngraph::op::v7) +OPENVINO_OP(Roll, ngraph::op::v7) diff --git a/ngraph/core/include/openvino/opsets/opset8.hpp b/ngraph/core/include/openvino/opsets/opset8.hpp new file mode 100644 index 00000000000..c41f5fb8af6 --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset8.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/ops.hpp" + +namespace ov { +namespace opset8 { +#define OPENVINO_OP(a, b) using b::a; +#include "openvino/opsets/opset8_tbl.hpp" +#undef OPENVINO_OP +} // namespace opset8 +} // namespace ov diff --git a/ngraph/core/include/openvino/opsets/opset8_tbl.hpp b/ngraph/core/include/openvino/opsets/opset8_tbl.hpp new file mode 100644 index 00000000000..be2b7c303de --- /dev/null +++ b/ngraph/core/include/openvino/opsets/opset8_tbl.hpp @@ -0,0 +1,185 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifndef OPENVINO_OP +# warning "OPENVINO_OP not defined" +# define OPENVINO_OP(x, y) +#endif + +OPENVINO_OP(Abs, ngraph::op::v0) +OPENVINO_OP(Acos, ngraph::op::v0) +OPENVINO_OP(Add, ngraph::op::v1) +OPENVINO_OP(Asin, ngraph::op::v0) +OPENVINO_OP(Atan, ngraph::op::v0) +OPENVINO_OP(AvgPool, ngraph::op::v1) +OPENVINO_OP(BatchNormInference, ngraph::op::v5) +OPENVINO_OP(BinaryConvolution, ngraph::op::v1) +OPENVINO_OP(Broadcast, ngraph::op::v3) +OPENVINO_OP(Bucketize, 
ngraph::op::v3) +OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) +OPENVINO_OP(Ceiling, ngraph::op::v0) +OPENVINO_OP(Clamp, ngraph::op::v0) +OPENVINO_OP(Concat, ngraph::op::v0) +OPENVINO_OP(Constant, ngraph::op) +OPENVINO_OP(Convert, ngraph::op::v0) +OPENVINO_OP(ConvertLike, ngraph::op::v1) +OPENVINO_OP(Convolution, ngraph::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(Cos, ngraph::op::v0) +OPENVINO_OP(Cosh, ngraph::op::v0) +OPENVINO_OP(CumSum, ngraph::op::v0) +OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) +OPENVINO_OP(DepthToSpace, ngraph::op::v0) +OPENVINO_OP(DetectionOutput, ngraph::op::v0) +OPENVINO_OP(Divide, ngraph::op::v1) +OPENVINO_OP(Elu, ngraph::op::v0) +OPENVINO_OP(Erf, ngraph::op::v0) +OPENVINO_OP(Equal, ngraph::op::v1) +OPENVINO_OP(Exp, ngraph::op::v0) +OPENVINO_OP(ExtractImagePatches, ngraph::op::v3) +OPENVINO_OP(FakeQuantize, ngraph::op::v0) +OPENVINO_OP(Floor, ngraph::op::v0) +OPENVINO_OP(FloorMod, ngraph::op::v1) +OPENVINO_OP(GatherTree, ngraph::op::v1) +OPENVINO_OP(Greater, ngraph::op::v1) +OPENVINO_OP(GreaterEqual, ngraph::op::v1) +OPENVINO_OP(GroupConvolution, ngraph::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) +OPENVINO_OP(GRN, ngraph::op::v0) +OPENVINO_OP(HardSigmoid, ngraph::op::v0) +OPENVINO_OP(Less, ngraph::op::v1) +OPENVINO_OP(LessEqual, ngraph::op::v1) +OPENVINO_OP(Log, ngraph::op::v0) +OPENVINO_OP(LogicalAnd, ngraph::op::v1) +OPENVINO_OP(LogicalNot, ngraph::op::v1) +OPENVINO_OP(LogicalOr, ngraph::op::v1) +OPENVINO_OP(LogicalXor, ngraph::op::v1) +OPENVINO_OP(LRN, ngraph::op::v0) +OPENVINO_OP(LSTMCell, ngraph::op::v4) +OPENVINO_OP(MatMul, ngraph::op::v0) +OPENVINO_OP(Maximum, ngraph::op::v1) +OPENVINO_OP(Minimum, ngraph::op::v1) +OPENVINO_OP(Mod, ngraph::op::v1) +OPENVINO_OP(Multiply, ngraph::op::v1) +OPENVINO_OP(Negative, ngraph::op::v0) +OPENVINO_OP(NormalizeL2, ngraph::op::v0) +OPENVINO_OP(NotEqual, ngraph::op::v1) +OPENVINO_OP(OneHot, ngraph::op::v1) +OPENVINO_OP(PRelu, 
ngraph::op::v0) +OPENVINO_OP(PSROIPooling, ngraph::op::v0) +OPENVINO_OP(Pad, ngraph::op::v1) +OPENVINO_OP(Parameter, ngraph::op::v0) +OPENVINO_OP(Power, ngraph::op::v1) +OPENVINO_OP(PriorBox, ngraph::op::v0) +OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) +OPENVINO_OP(Proposal, ngraph::op::v4) +OPENVINO_OP(Range, ngraph::op::v4) +OPENVINO_OP(Relu, ngraph::op::v0) +OPENVINO_OP(ReduceMax, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) +OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) +OPENVINO_OP(ReduceMean, ngraph::op::v1) +OPENVINO_OP(ReduceMin, ngraph::op::v1) +OPENVINO_OP(ReduceProd, ngraph::op::v1) +OPENVINO_OP(ReduceSum, ngraph::op::v1) +OPENVINO_OP(RegionYolo, ngraph::op::v0) +OPENVINO_OP(ReorgYolo, ngraph::op::v0) +OPENVINO_OP(Reshape, ngraph::op::v1) +OPENVINO_OP(Result, ngraph::op::v0) +OPENVINO_OP(ReverseSequence, ngraph::op::v0) +OPENVINO_OP(ROIPooling, ngraph::op::v0) +OPENVINO_OP(ScatterNDUpdate, ngraph::op::v3) +OPENVINO_OP(Select, ngraph::op::v1) +OPENVINO_OP(Selu, ngraph::op::v0) +OPENVINO_OP(Sign, ngraph::op::v0) +OPENVINO_OP(Sigmoid, ngraph::op::v0) +OPENVINO_OP(Sin, ngraph::op::v0) +OPENVINO_OP(Sinh, ngraph::op::v0) +OPENVINO_OP(Softmax, ngraph::op::v1) +OPENVINO_OP(Sqrt, ngraph::op::v0) +OPENVINO_OP(SpaceToDepth, ngraph::op::v0) +OPENVINO_OP(Split, ngraph::op::v1) +OPENVINO_OP(SquaredDifference, ngraph::op::v0) +OPENVINO_OP(Squeeze, ngraph::op::v0) +OPENVINO_OP(StridedSlice, ngraph::op::v1) +OPENVINO_OP(Subtract, ngraph::op::v1) +OPENVINO_OP(Tan, ngraph::op::v0) +OPENVINO_OP(Tanh, ngraph::op::v0) +OPENVINO_OP(TensorIterator, ngraph::op::v0) +OPENVINO_OP(Tile, ngraph::op::v0) +OPENVINO_OP(Transpose, ngraph::op::v1) +OPENVINO_OP(Unsqueeze, ngraph::op::v0) +OPENVINO_OP(VariadicSplit, ngraph::op::v1) + +// New operations added in opset2 +OPENVINO_OP(BatchToSpace, ngraph::op::v1) +OPENVINO_OP(SpaceToBatch, ngraph::op::v1) + +// New operations added in opset3 +OPENVINO_OP(EmbeddingBagPackedSum, ngraph::op::v3) 
+OPENVINO_OP(EmbeddingSegmentsSum, ngraph::op::v3) +OPENVINO_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) +OPENVINO_OP(GRUCell, ngraph::op::v3) +OPENVINO_OP(NonZero, ngraph::op::v3) +OPENVINO_OP(RNNCell, ngraph::op::v0) +OPENVINO_OP(ROIAlign, ngraph::op::v3) +OPENVINO_OP(ScatterElementsUpdate, ngraph::op::v3) +OPENVINO_OP(ScatterUpdate, ngraph::op::v3) +OPENVINO_OP(ShuffleChannels, ngraph::op::v0) +OPENVINO_OP(ShapeOf, ngraph::op::v3) +OPENVINO_OP(TopK, ngraph::op::v3) + +// New operations added in opset4 +OPENVINO_OP(Acosh, ngraph::op::v3) +OPENVINO_OP(Asinh, ngraph::op::v3) +OPENVINO_OP(Atanh, ngraph::op::v3) +OPENVINO_OP(CTCLoss, ngraph::op::v4) +OPENVINO_OP(HSwish, ngraph::op::v4) +OPENVINO_OP(Interpolate, ngraph::op::v4) +OPENVINO_OP(Mish, ngraph::op::v4) +OPENVINO_OP(ReduceL1, ngraph::op::v4) +OPENVINO_OP(ReduceL2, ngraph::op::v4) +OPENVINO_OP(SoftPlus, ngraph::op::v4) +OPENVINO_OP(Swish, ngraph::op::v4) + +// New operations added in opset5 +OPENVINO_OP(GatherND, ngraph::op::v5) +OPENVINO_OP(GRUSequence, ngraph::op::v5) +OPENVINO_OP(HSigmoid, ngraph::op::v5) +OPENVINO_OP(LogSoftmax, ngraph::op::v5) +OPENVINO_OP(Loop, ngraph::op::v5) +OPENVINO_OP(LSTMSequence, ngraph::op::v5) +OPENVINO_OP(NonMaxSuppression, ngraph::op::v5) +OPENVINO_OP(RNNSequence, ngraph::op::v5) +OPENVINO_OP(Round, ngraph::op::v5) + +// New operations added in opset6 +OPENVINO_OP(CTCGreedyDecoderSeqLen, ngraph::op::v6) +OPENVINO_OP(ExperimentalDetectronDetectionOutput, ngraph::op::v6) +OPENVINO_OP(ExperimentalDetectronGenerateProposalsSingleImage, ngraph::op::v6) +OPENVINO_OP(ExperimentalDetectronPriorGridGenerator, ngraph::op::v6) +OPENVINO_OP(ExperimentalDetectronROIFeatureExtractor, ngraph::op::v6) +OPENVINO_OP(ExperimentalDetectronTopKROIs, ngraph::op::v6) +OPENVINO_OP(GatherElements, ngraph::op::v6) +OPENVINO_OP(MVN, ngraph::op::v6) +OPENVINO_OP(Assign, ngraph::op::v6) // new version +OPENVINO_OP(ReadValue, ngraph::op::v6) // new version + +// New operations added in opset7 
+OPENVINO_OP(DFT, ngraph::op::v7) +OPENVINO_OP(Einsum, ngraph::op::v7) +OPENVINO_OP(Gelu, ngraph::op::v7) +OPENVINO_OP(IDFT, ngraph::op::v7) +OPENVINO_OP(Roll, ngraph::op::v7) + +// New operations added in opset8 +OPENVINO_OP(Gather, ngraph::op::v8) +OPENVINO_OP(AdaptiveAvgPool, ngraph::op::v8) +OPENVINO_OP(AdaptiveMaxPool, ngraph::op::v8) +OPENVINO_OP(DeformableConvolution, ngraph::op::v8) +OPENVINO_OP(MatrixNms, ngraph::op::v8) +OPENVINO_OP(MaxPool, ngraph::op::v8) +OPENVINO_OP(MulticlassNms, ngraph::op::v8) +OPENVINO_OP(RandomUniform, ngraph::op::v8) +OPENVINO_OP(If, ngraph::op::v8) diff --git a/ngraph/core/src/opsets/opset.cpp b/ngraph/core/src/opsets/opset.cpp index b9a67b10427..72a23e9f74b 100644 --- a/ngraph/core/src/opsets/opset.cpp +++ b/ngraph/core/src/opsets/opset.cpp @@ -7,12 +7,14 @@ #include "ngraph/log.hpp" #include "ngraph/ops.hpp" -std::mutex& ngraph::OpSet::get_mutex() { +ngraph::OpSet::OpSet(const ov::OpSet& opset) : ov::OpSet(opset) {} + +std::mutex& ov::OpSet::get_mutex() { static std::mutex opset_mutex; return opset_mutex; } -ngraph::Node* ngraph::OpSet::create(const std::string& name) const { +ov::Node* ov::OpSet::create(const std::string& name) const { auto type_info_it = m_name_type_info_map.find(name); if (type_info_it == m_name_type_info_map.end()) { NGRAPH_WARN << "Couldn't create operator of type: " << name << " . Operation not registered in opset."; @@ -21,96 +23,136 @@ ngraph::Node* ngraph::OpSet::create(const std::string& name) const { return m_factory_registry.create(type_info_it->second); } -ngraph::Node* ngraph::OpSet::create_insensitive(const std::string& name) const { +ov::Node* ov::OpSet::create_insensitive(const std::string& name) const { auto type_info_it = m_case_insensitive_type_info_map.find(to_upper_name(name)); return type_info_it == m_case_insensitive_type_info_map.end() ? 
nullptr : m_factory_registry.create(type_info_it->second); } -const ngraph::OpSet& ngraph::get_opset1() { +const ov::OpSet& ov::get_opset1() { static OpSet opset; static std::once_flag flag; std::call_once(flag, [&]() { -#define NGRAPH_OP(NAME, NAMESPACE) opset.insert(); -#include "ngraph/opsets/opset1_tbl.hpp" -#undef NGRAPH_OP +#define OPENVINO_OP(NAME, NAMESPACE) opset.insert(); +#include "openvino/opsets/opset1_tbl.hpp" +#undef OPENVINO_OP }); return opset; } +const ov::OpSet& ov::get_opset2() { + static OpSet opset; + static std::once_flag flag; + std::call_once(flag, [&]() { +#define OPENVINO_OP(NAME, NAMESPACE) opset.insert(); +#include "openvino/opsets/opset2_tbl.hpp" +#undef OPENVINO_OP + }); + return opset; +} + +const ov::OpSet& ov::get_opset3() { + static OpSet opset; + static std::once_flag flag; + std::call_once(flag, [&]() { +#define OPENVINO_OP(NAME, NAMESPACE) opset.insert(); +#include "openvino/opsets/opset3_tbl.hpp" +#undef OPENVINO_OP + }); + return opset; +} + +const ov::OpSet& ov::get_opset4() { + static OpSet opset; + static std::once_flag flag; + std::call_once(flag, [&]() { +#define OPENVINO_OP(NAME, NAMESPACE) opset.insert(); +#include "openvino/opsets/opset4_tbl.hpp" +#undef OPENVINO_OP + }); + return opset; +} + +const ov::OpSet& ov::get_opset5() { + static OpSet opset; + static std::once_flag flag; + std::call_once(flag, [&]() { +#define OPENVINO_OP(NAME, NAMESPACE) opset.insert(); +#include "openvino/opsets/opset5_tbl.hpp" +#undef OPENVINO_OP + }); + return opset; +} + +const ov::OpSet& ov::get_opset6() { + static OpSet opset; + static std::once_flag flag; + std::call_once(flag, [&]() { +#define OPENVINO_OP(NAME, NAMESPACE) opset.insert(); +#include "openvino/opsets/opset6_tbl.hpp" +#undef OPENVINO_OP + }); + return opset; +} + +const ov::OpSet& ov::get_opset7() { + static OpSet opset; + static std::once_flag flag; + std::call_once(flag, [&]() { +#define OPENVINO_OP(NAME, NAMESPACE) opset.insert(); +#include 
"openvino/opsets/opset7_tbl.hpp" +#undef OPENVINO_OP + }); + return opset; +} + +const ov::OpSet& ov::get_opset8() { + static OpSet opset; + static std::once_flag flag; + std::call_once(flag, [&]() { +#define OPENVINO_OP(NAME, NAMESPACE) opset.insert(); +#include "openvino/opsets/opset8_tbl.hpp" +#undef OPENVINO_OP + }); + return opset; +} + +const ngraph::OpSet& ngraph::get_opset1() { + static OpSet opset(ov::get_opset1()); + return opset; +} + const ngraph::OpSet& ngraph::get_opset2() { - static OpSet opset; - static std::once_flag flag; - std::call_once(flag, [&]() { -#define NGRAPH_OP(NAME, NAMESPACE) opset.insert(); -#include "ngraph/opsets/opset2_tbl.hpp" -#undef NGRAPH_OP - }); + static OpSet opset(ov::get_opset2()); return opset; } const ngraph::OpSet& ngraph::get_opset3() { - static OpSet opset; - static std::once_flag flag; - std::call_once(flag, [&]() { -#define NGRAPH_OP(NAME, NAMESPACE) opset.insert(); -#include "ngraph/opsets/opset3_tbl.hpp" -#undef NGRAPH_OP - }); + static OpSet opset(ov::get_opset3()); return opset; } const ngraph::OpSet& ngraph::get_opset4() { - static OpSet opset; - static std::once_flag flag; - std::call_once(flag, [&]() { -#define NGRAPH_OP(NAME, NAMESPACE) opset.insert(); -#include "ngraph/opsets/opset4_tbl.hpp" -#undef NGRAPH_OP - }); + static OpSet opset(ov::get_opset4()); return opset; } const ngraph::OpSet& ngraph::get_opset5() { - static OpSet opset; - static std::once_flag flag; - std::call_once(flag, [&]() { -#define NGRAPH_OP(NAME, NAMESPACE) opset.insert(); -#include "ngraph/opsets/opset5_tbl.hpp" -#undef NGRAPH_OP - }); + static OpSet opset(ov::get_opset5()); return opset; } const ngraph::OpSet& ngraph::get_opset6() { - static OpSet opset; - static std::once_flag flag; - std::call_once(flag, [&]() { -#define NGRAPH_OP(NAME, NAMESPACE) opset.insert(); -#include "ngraph/opsets/opset6_tbl.hpp" -#undef NGRAPH_OP - }); + static OpSet opset(ov::get_opset6()); return opset; } const ngraph::OpSet& ngraph::get_opset7() { - 
static OpSet opset; - static std::once_flag flag; - std::call_once(flag, [&]() { -#define NGRAPH_OP(NAME, NAMESPACE) opset.insert(); -#include "ngraph/opsets/opset7_tbl.hpp" -#undef NGRAPH_OP - }); + static OpSet opset(ov::get_opset7()); return opset; } const ngraph::OpSet& ngraph::get_opset8() { - static OpSet opset; - static std::once_flag flag; - std::call_once(flag, [&]() { -#define NGRAPH_OP(NAME, NAMESPACE) opset.insert(); -#include "ngraph/opsets/opset8_tbl.hpp" -#undef NGRAPH_OP - }); + static OpSet opset(ov::get_opset8()); return opset; } diff --git a/ngraph/frontend/ir/src/model.cpp b/ngraph/frontend/ir/src/model.cpp index 096e1ebcc65..7ec59eb724b 100644 --- a/ngraph/frontend/ir/src/model.cpp +++ b/ngraph/frontend/ir/src/model.cpp @@ -935,4 +935,4 @@ std::shared_ptr InputModelIR::convert() { return function; } } // namespace frontend -} // namespace ngraph \ No newline at end of file +} // namespace ngraph diff --git a/ngraph/test/opset1.cpp b/ngraph/test/opset1.cpp index 7f5a2cfdba3..969944a75d9 100644 --- a/ngraph/test/opset1.cpp +++ b/ngraph/test/opset1.cpp @@ -162,7 +162,7 @@ public: } void validate_and_infer_types() override{}; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& /* new_args */) const override { + std::shared_ptr clone_with_new_inputs(const OutputVector& /* new_args */) const override { return make_shared(); }; }; @@ -173,6 +173,7 @@ TEST(opset, new_op) { // Copy opset1; don't bash the real thing in a test OpSet opset1_copy(get_opset1()); opset1_copy.insert(); + ASSERT_TRUE(opset1_copy.contains_type()); { shared_ptr op(opset1_copy.create(NewOp::type_info.name)); ASSERT_TRUE(op); @@ -193,6 +194,7 @@ TEST(opset, new_op) { EXPECT_TRUE(fred); // Fred should not be in the registry ASSERT_FALSE(get_opset1().contains_type(NewOp::type_info)); + ASSERT_FALSE(get_opset1().contains_type()); } TEST(opset, dump) { From 322c87411399f83ab9c9b99c941c1ba2ed623214 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Tue, 7 Sep 2021 
19:21:41 +0300 Subject: [PATCH 33/52] Feature/azaytsev/cherry picks from 2021 4 (#7389) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Added info on DockerHub CI Framework * Feature/azaytsev/change layout (#3295) * Changes according to feedback comments * Replaced @ref's with html links * Fixed links, added a title page for installing from repos and images, fixed formatting issues * Added links * minor fix * Added DL Streamer to the list of components installed by default * Link fixes * Link fixes * ovms doc fix (#2988) * added OpenVINO Model Server * ovms doc fixes Co-authored-by: Trawinski, Dariusz * Updated openvino_docs.xml * Updated the link to software license agreements * Revert "Updated the link to software license agreements" This reverts commit 706dac500e764bd7534f7005ac6197f827d68cb5. * Updated legal info (#6409) # Conflicts: # thirdparty/ade * Cherry-pick 4833c8db726140427d41beedd4b11398f32c612f [DOCS]Changed DL WB related docs and tips (#6318) * changed DL WB related docs and tips * added two tips to benchmark and changed layout * changed layout * changed links * page title added * changed tips * ie layout fixed * updated diagram and hints * changed tooltip and ref link * changet tooltip link * changed DL WB description * typo fix # Conflicts: # docs/doxygen/ie_docs.xml # thirdparty/ade * Cherry-pick 6405 Feature/azaytsev/mo devguide changes (#6405) * MO devguide edits * MO devguide edits * MO devguide edits * MO devguide edits * MO devguide edits * Experimenting with videos * Experimenting with videos * Experimenting with videos * Experimenting with videos * Experimenting with videos * Experimenting with videos * Experimenting with videos * Experimenting with videos * Experimenting with videos * Additional edits * Additional edits * Updated the workflow diagram * Minor fix * Experimenting with videos * Updated the workflow diagram * Removed Prepare_Trained_Model, changed the title for Config_Model_Optimizer 
* Rolled back * Revert "Rolled back" This reverts commit 6a4a3e17653105874d75650e5ebfadb5f6c42b41. * Revert "Removed Prepare_Trained_Model, changed the title for Config_Model_Optimizer" This reverts commit 0810bd534f680257a6a32af2c8153b1802d8643d. * Fixed ie_docs.xml, Removed Prepare_Trained_Model, changed the title for Config_Model_Optimizer * Fixed ie_docs.xml * Minor fix *
tag issue *
tag issue * Fix
tag issue * Fix
tag issue * Fix
tag issue # Conflicts: # thirdparty/ade * Cherry-pick #6419 * [Runtime] INT8 inference documentation update * [Runtime] INT8 inference documentation: typo was fixed * Update docs/IE_DG/Int8Inference.md Co-authored-by: Anastasiya Ageeva * Update docs/IE_DG/Int8Inference.md Co-authored-by: Anastasiya Ageeva * Update docs/IE_DG/Int8Inference.md Co-authored-by: Anastasiya Ageeva * Update docs/IE_DG/Int8Inference.md Co-authored-by: Anastasiya Ageeva * Update docs/IE_DG/Int8Inference.md Co-authored-by: Anastasiya Ageeva * Table of Contents was removed Co-authored-by: Anastasiya Ageeva # Conflicts: # docs/IE_DG/Int8Inference.md # thirdparty/ade * Cherry pick (#6437) * Q2 changes * Changed Convert_RNNT.md Co-authored-by: baychub # Conflicts: # docs/IE_DG/Int8Inference.md # docs/install_guides/installing-openvino-conda.md # docs/install_guides/pypi-openvino-dev.md # thirdparty/ade * Cherry-pick (#6447) * Added benchmark page changes * Make the picture smaller * Added Intel® Iris® Xe MAX Graphics * Changed the TIP about DL WB * Added Note on the driver for Intel® Iris® Xe MAX Graphics * Fixed formatting * Added the link to Intel® software for general purpose GPU capabilities * OVSA ovsa_get_started updates * Fixed link # Conflicts: # thirdparty/ade * Cherry-pick #6450 * fix layout * 4 # Conflicts: # thirdparty/ade * Cherry-pick #6466 * Cherry-pick #6548 * install docs fixes * changed video width * CMake reference added * fixed table * added backtics and table formating * new table changes * GPU table changes * added more backtics and changed table format * gpu table changes * Update get_started_dl_workbench.md Co-authored-by: Andrey Zaytsev # Conflicts: # thirdparty/ade * [Runtime] INT8 inference documentation update (#6419) * [Runtime] INT8 inference documentation update * [Runtime] INT8 inference documentation: typo was fixed * Update docs/IE_DG/Int8Inference.md Co-authored-by: Anastasiya Ageeva * Update docs/IE_DG/Int8Inference.md Co-authored-by: Anastasiya Ageeva * 
Update docs/IE_DG/Int8Inference.md Co-authored-by: Anastasiya Ageeva * Update docs/IE_DG/Int8Inference.md Co-authored-by: Anastasiya Ageeva * Update docs/IE_DG/Int8Inference.md Co-authored-by: Anastasiya Ageeva * Table of Contents was removed Co-authored-by: Anastasiya Ageeva # Conflicts: # docs/IE_DG/Int8Inference.md # thirdparty/ade * Cherry-pick #6651 * Edits to MO Per findings spreadsheet * macOS changes per issue spreadsheet * Fixes from review spreadsheet Mostly IE_DG fixes * Consistency changes * Make doc fixes from last round of review * Add GSG build-all details * Fix links to samples and demos pages * Make MO_DG v2 changes * Add image view step to classify demo * Put MO dependency with others * Edit docs per issues spreadsheet * Add file to pytorch_specific * More fixes per spreadsheet * Prototype sample page * Add build section * Update README.md * Batch download/convert by default * Add detail to How It Works * Minor change * Temporary restored topics * corrected layout * Resized * Added white background into the picture * fixed link to omz_tools_downloader * fixed title in the layout Co-authored-by: baychub Co-authored-by: baychub <31420038+baychub@users.noreply.github.com> # Conflicts: # docs/doxygen/ie_docs.xml * Cherry-pick (#6789) [59449][DOCS] GPU table layout change * changed argument display * added br tag to more arguments * changed argument display in GPU table * changed more arguments * changed Quantized_ models display # Conflicts: # thirdparty/ade * Sync doxygen-ignore * Removed ref to FPGA.md * Fixed link to ONNX format doc Co-authored-by: Trawinski, Dariusz Co-authored-by: Tatiana Savina Co-authored-by: Edward Shogulin Co-authored-by: Nikolay Tyukaev --- ...Deep_Learning_Inference_Engine_DevGuide.md | 6 +- .../IE_DG/Extensibility_DG/AddingNGraphOps.md | 2 +- docs/IE_DG/Extensibility_DG/Extension.md | 5 +- docs/IE_DG/Int8Inference.md | 28 +- docs/IE_DG/Legal_Information.md | 12 - docs/IE_DG/Samples_Overview.md | 2 +- 
docs/IE_DG/ShapeInference.md | 2 +- docs/IE_DG/supported_plugins/CPU.md | 17 +- docs/IE_DG/supported_plugins/GPU.md | 25 +- docs/IE_DG/supported_plugins/MULTI.md | 4 +- docs/Legal_Information.md | 22 +- .../Deep_Learning_Model_Optimizer_DevGuide.md | 156 +++-------- .../img/small_IR_graph_demonstration.png | 4 +- docs/MO_DG/img/workflow_steps.png | 4 +- .../prepare_model/Config_Model_Optimizer.md | 16 +- .../prepare_model/Prepare_Trained_Model.md | 63 ----- .../convert_model/Convert_Model_From_MxNet.md | 2 - .../Convert_Model_From_TensorFlow.md | 6 +- .../convert_model/Converting_Model.md | 35 +-- .../convert_model/Converting_Model_General.md | 3 +- .../convert_model/Cutting_Model.md | 2 +- .../IR_suitable_for_INT8_inference.md | 4 +- .../Convert_Style_Transfer_From_MXNet.md | 2 + .../pytorch_specific/Convert_F3Net.md | 14 +- .../pytorch_specific/Convert_RNNT.md | 19 +- .../pytorch_specific/Convert_YOLACT.md | 5 +- .../Convert_XLNet_From_Tensorflow.md | 16 +- .../Convert_YOLO_From_Tensorflow.md | 6 +- .../Customize_Model_Optimizer.md | 2 +- docs/benchmarks/performance_benchmarks_faq.md | 53 ++-- .../performance_benchmarks_openvino.md | 221 ++++++++------- .../benchmarks/performance_benchmarks_ovms.md | 94 ++++++- docs/benchmarks/performance_int8_vs_fp32.md | 263 ++++++++++-------- docs/doxygen/doxy_md_filter.py | 10 + docs/doxygen/doxygen-ignore.txt | 19 +- docs/doxygen/ie_docs.xml | 23 +- docs/doxygen/openvino_docs.xml | 12 +- .../dl_workbench_img/active_projects_page.png | 3 + .../dl_workbench_img/openvino_in_dl_wb.png | 3 + docs/get_started/get_started_dl_workbench.md | 158 +++-------- docs/how_tos/how-to-links.md | 6 - docs/img/OpenVINO-diagram.png | 4 +- docs/img/int8vsfp32.png | 4 +- docs/img/throughput_ovms_3dunet.png | 4 +- docs/img/throughput_ovms_bertlarge_fp32.png | 4 +- docs/img/throughput_ovms_bertlarge_int8.png | 4 +- docs/img/throughput_ovms_bertsmall_fp32.png | 3 + docs/img/throughput_ovms_bertsmall_int8.png | 3 + 
.../throughput_ovms_mobilenet3large_fp32.png | 3 + .../throughput_ovms_mobilenet3small_fp32.png | 3 + .../throughput_ovms_resnet50_fp32_bs_1.png | 3 + docs/img/throughput_ovms_resnet50_int8.png | 4 +- .../throughput_ovms_ssdmobilenet1_fp32.png | 3 + docs/img/throughput_ovms_yolo3_fp32.png | 3 + docs/img/throughput_ovms_yolo3tiny_fp32.png | 3 + docs/img/throughput_ovms_yolo4_fp32.png | 3 + docs/img/workflow_steps.png | 4 +- docs/index.md | 2 +- .../installing-openvino-conda.md | 2 +- .../installing-openvino-images.md | 2 +- .../installing-openvino-linux.md | 47 ++-- .../installing-openvino-macos.md | 29 +- .../install_guides/installing-openvino-pip.md | 14 +- .../installing-openvino-windows.md | 33 +-- .../install_guides/installing-openvino-yum.md | 3 +- docs/install_guides/pypi-openvino-dev.md | 10 +- docs/install_guides/pypi-openvino-rt.md | 2 +- .../dldt_optimization_guide.md | 136 +++++---- docs/ovsa/ovsa_get_started.md | 192 +++++++------ .../c/samples/hello_classification/README.md | 2 +- .../hello_nv12_input_classification/README.md | 2 +- .../object_detection_sample_ssd/README.md | 2 +- .../classification_sample_async/README.md | 2 +- .../sample/hello_classification/README.md | 2 +- .../python/sample/hello_reshape_ssd/README.md | 2 +- .../object_detection_sample_ssd/README.md | 2 +- .../sample/style_transfer_sample/README.md | 2 +- .../samples/benchmark_app/README.md | 11 +- .../classification_sample_async/README.md | 2 +- .../samples/hello_classification/README.md | 2 +- .../hello_nv12_input_classification/README.md | 2 +- .../samples/hello_reshape_ssd/README.md | 2 +- .../object_detection_sample_ssd/README.md | 2 +- .../samples/style_transfer_sample/README.md | 2 +- tools/benchmark_tool/README.md | 10 +- 85 files changed, 907 insertions(+), 1018 deletions(-) delete mode 100644 docs/IE_DG/Legal_Information.md delete mode 100644 docs/MO_DG/prepare_model/Prepare_Trained_Model.md create mode 100644 docs/get_started/dl_workbench_img/active_projects_page.png 
create mode 100644 docs/get_started/dl_workbench_img/openvino_in_dl_wb.png create mode 100644 docs/img/throughput_ovms_bertsmall_fp32.png create mode 100644 docs/img/throughput_ovms_bertsmall_int8.png create mode 100644 docs/img/throughput_ovms_mobilenet3large_fp32.png create mode 100644 docs/img/throughput_ovms_mobilenet3small_fp32.png create mode 100644 docs/img/throughput_ovms_resnet50_fp32_bs_1.png create mode 100644 docs/img/throughput_ovms_ssdmobilenet1_fp32.png create mode 100644 docs/img/throughput_ovms_yolo3_fp32.png create mode 100644 docs/img/throughput_ovms_yolo3tiny_fp32.png create mode 100644 docs/img/throughput_ovms_yolo4_fp32.png diff --git a/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md b/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md index 0f07f550381..f8362188ab2 100644 --- a/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md +++ b/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md @@ -1,7 +1,5 @@ # Inference Engine Developer Guide {#openvino_docs_IE_DG_Deep_Learning_Inference_Engine_DevGuide} -> **NOTE:** [Intel® System Studio](https://software.intel.com/content/www/us/en/develop/tools/oneapi/commercial-base-iot.html) (click "Intel® System Studio Users" tab) is an all-in-one, cross-platform tool suite, purpose-built to simplify system bring-up and improve system and IoT device application performance on Intel® platforms. If you are using the Intel® Distribution of OpenVINO™ with Intel® System Studio, go to [Get Started with Intel® System Studio](https://software.intel.com/en-us/articles/get-started-with-openvino-and-intel-system-studio-2019). - This Guide provides an overview of the Inference Engine describing the typical workflow for performing inference of a pre-trained and optimized deep learning model and a set of sample applications. 
> **NOTE:** Before you perform inference with the Inference Engine, your models should be converted to the Inference Engine format using the Model Optimizer or built directly in runtime using nGraph API. To learn about how to use Model Optimizer, refer to the [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). To learn about the pre-trained and optimized models delivered with the OpenVINO™ toolkit, refer to [Pre-Trained Models](@ref omz_models_group_intel). @@ -111,10 +109,8 @@ The common workflow contains the following steps: 8. **Get the output** - After inference is completed, get the output memory or read the memory you provided earlier. Do this with the `InferenceEngine::IInferRequest::GetBlob()` method. ## Video: Inference Engine Concept -[![](https://img.youtube.com/vi/e6R13V8nbak/0.jpg)](https://www.youtube.com/watch?v=e6R13V8nbak) -\htmlonly + -\endhtmlonly ## Further Reading diff --git a/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md b/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md index 8ca911f7d0c..ed4d6559532 100644 --- a/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md +++ b/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md @@ -1,6 +1,6 @@ # Custom nGraph Operation {#openvino_docs_IE_DG_Extensibility_DG_AddingNGraphOps} -Inference Engine Extension API allows you to register operation sets (opsets) with custom nGraph operations to support models with operations which OpenVINO™ does not support out-of-the-box. +The Inference Engine Extension API allows you to register operation sets (opsets) with custom nGraph operations to support models with operations that OpenVINO™ does not support out-of-the-box. 
## Operation Class diff --git a/docs/IE_DG/Extensibility_DG/Extension.md b/docs/IE_DG/Extensibility_DG/Extension.md index 178d0099df6..e941cb9c13c 100644 --- a/docs/IE_DG/Extensibility_DG/Extension.md +++ b/docs/IE_DG/Extensibility_DG/Extension.md @@ -25,5 +25,6 @@ Also, an `Extension` object should implement the following methods: Implement the InferenceEngine::IExtension::getOpSets method if the extension contains custom layers. Read [Custom nGraph Operation](AddingNGraphOps.md) for more information. -To integrate execution kernels to the extension library, read [How to Implement Custom CPU Operations](CPU_Kernel.md). -To register a custom ONNX\* operator to the extension library, read [Custom ONNX Operators](Custom_ONNX_Ops.md). +To understand how to integrate execution kernels to the extension library, read the [documentation about development of custom CPU kernels](CPU_Kernel.md). + +To understand how to register custom ONNX operator to the extension library, read the [documentation about custom ONNX operators](Custom_ONNX_Ops.md). diff --git a/docs/IE_DG/Int8Inference.md b/docs/IE_DG/Int8Inference.md index 889af6a5327..2577e7dc4ec 100644 --- a/docs/IE_DG/Int8Inference.md +++ b/docs/IE_DG/Int8Inference.md @@ -1,12 +1,5 @@ # Low-Precision 8-bit Integer Inference {#openvino_docs_IE_DG_Int8Inference} -## Table of Contents -1. [Supported devices](#supported-devices) -2. [Low-Precision 8-bit Integer Inference Workflow](#low-precision-8-bit-integer-inference-workflow) -3. [Prerequisites](#prerequisites) -4. [Inference](#inference) -5. 
[Results analysis](#results-analysis) - ## Supported devices Low-precision 8-bit inference is optimized for: @@ -24,34 +17,35 @@ Low-precision 8-bit inference is optimized for: ## Low-Precision 8-bit Integer Inference Workflow -8-bit computations (referred to as `int8`) offer better performance compared to the results of inference in higher precision (for example, `fp32`), because they allow loading more data into a single processor instruction. Usually the cost for significant boost is a reduced accuracy. However, it is proved that an accuracy drop can be negligible and depends on task requirements, so that the application engineer can set up the maximum accuracy drop that is acceptable. +8-bit computations (referred to as `int8`) offer better performance compared to the results of inference in higher precision (for example, `fp32`), because they allow loading more data into a single processor instruction. Usually the cost for significant boost is reduced accuracy. However, it is proved that an accuracy drop can be negligible and depends on task requirements, so that the application engineer can set up the maximum accuracy drop that is acceptable. For 8-bit integer computations, a model must be quantized. Quantized models can be downloaded from [Overview of OpenVINO™ Toolkit Intel's Pre-Trained Models](@ref omz_models_group_intel). If the model is not quantized, you can use the [Post-Training Optimization Tool](@ref pot_README) to quantize the model. The quantization process adds [FakeQuantize](../ops/quantization/FakeQuantize_1.md) layers on activations and weights for most layers. Read more about mathematical computations in the [Uniform Quantization with Fine-Tuning](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/Quantization.md). When you pass the quantized IR to the OpenVINO™ plugin, the plugin automatically recognizes it as a quantized model and performs 8-bit inference. 
Note, if you pass a quantized model to another plugin that does not support 8-bit inference but supports all operations from the model, the model is inferred in precision that this plugin supports. -In *Runtime stage* stage, the quantized model is loaded to the plugin. The plugin uses `Low Precision Transformation` component to update the model to infer it in low precision: - - Update `FakeQuantize` layers to have quantized output tensors in low precision range and add dequantization layers to compensate the update. Dequantization layers are pushed through as many layers as possible to have more layers in low precision. After that, most layers have quantized input tensors in low precision range and can be inferred in low precision. Ideally, dequantization layers should be fused in the next `FakeQuantize` layer. - - Weights are quantized and stored in `Constant` layers. +In *Runtime stage*, the quantized model is loaded to the plugin. The plugin uses the `Low Precision Transformation` component to update the model to infer it in low precision: + - Update `FakeQuantize` layers to have quantized output tensors in a low precision range and add dequantization layers to compensate the update. Dequantization layers are pushed through as many layers as possible to have more layers in low precision. After that, most layers quantized input tensors in the low precision range and can be inferred in low precision. Ideally, dequantization layers should be fused in the next `FakeQuantize` layer. + - Quantize weights and store them in `Constant` layers. ## Prerequisites -Let's explore quantized [TensorFlow* implementation of ResNet-50](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-50-tf) model. 
Use [Model Downloader](@ref omz_tools_downloader) tool to download the `fp16` model from [OpenVINO™ Toolkit - Open Model Zoo repository](https://github.com/openvinotoolkit/open_model_zoo): +Let's explore the quantized [TensorFlow* implementation of ResNet-50](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-50-tf) model. Use the [Model Downloader](@ref omz_tools_downloader) tool to download the `fp16` model from [OpenVINO™ Toolkit - Open Model Zoo repository](https://github.com/openvinotoolkit/open_model_zoo): ```sh -./downloader.py --name resnet-50-tf --precisions FP16-INT8 +cd $INTEL_OPENVINO_DIR/deployment_tools/tools/model_downloader +./downloader.py --name resnet-50-tf --precisions FP16-INT8 --output_dir ``` -After that you should quantize model by the [Model Quantizer](@ref omz_tools_downloader) tool. +After that, you should quantize the model by the [Model Quantizer](@ref omz_tools_downloader) tool. For the dataset, you can choose to download the ImageNet dataset from [here](https://www.image-net.org/download.php). ```sh -./quantizer.py --model_dir public/resnet-50-tf --dataset_dir --precisions=FP16-INT8 +./quantizer.py --model_dir --name public/resnet-50-tf --dataset_dir --precisions=FP16-INT8 ``` ## Inference -The simplest way to infer the model and collect performance counters is [C++ Benchmark Application](../../inference-engine/samples/benchmark_app/README.md). +The simplest way to infer the model and collect performance counters is the [C++ Benchmark Application](../../inference-engine/samples/benchmark_app/README.md). ```sh ./benchmark_app -m resnet-50-tf.xml -d CPU -niter 1 -api sync -report_type average_counters -report_folder pc_report_dir ``` -If you infer the model with the OpenVINO™ CPU plugin and collect performance counters, all operations (except last not quantized SoftMax) are executed in INT8 precision. 
+If you infer the model with the Inference Engine CPU plugin and collect performance counters, all operations (except the last non-quantized SoftMax) are executed in INT8 precision. ## Results analysis diff --git a/docs/IE_DG/Legal_Information.md b/docs/IE_DG/Legal_Information.md deleted file mode 100644 index 3b39dba5810..00000000000 --- a/docs/IE_DG/Legal_Information.md +++ /dev/null @@ -1,12 +0,0 @@ -# Legal Information {#openvino_docs_IE_DG_Legal_Information} - -No license (express or implied, by estoppel or otherwise) to any intellectual property rights is granted by this document.
-Intel disclaims all express and implied warranties, including without limitation, the implied warranties of merchantability, fitness for a particular purpose, and non-infringement, as well as any warranty arising from course of performance, course of dealing, or usage in trade.
-This document contains information on products, services and/or processes in development. All information provided here is subject to change without notice. Contact your Intel representative to obtain the latest forecast, schedule, specifications and roadmaps.
-The products and services described may contain defects or errors known as errata which may cause deviations from published specifications. Current characterized errata are available on request.
-Copies of documents which have an order number and are referenced in this document may be obtained by calling 1-800-548-4725 or by visiting [www.intel.com/design/literature.htm](http://www.intel.com/design/literature.htm).
-Intel, Intel logo, Intel Core, VTune, Xeon are trademarks of Intel Corporation in the U.S. and other countries.
-\* Other names and brands may be claimed as the property of others.
-Copyright © 2016-2018 Intel Corporation.
-This software and the related documents are Intel copyrighted materials, and your use of them is governed by the express license under which they were provided to you (License). Unless the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose or transmit this software or the related documents without Intel's prior written permission.
-This software and the related documents are provided as is, with no express or implied warranties, other than those that are expressly stated in the License.
diff --git a/docs/IE_DG/Samples_Overview.md b/docs/IE_DG/Samples_Overview.md index f9e21cf5e4d..6d3cb495831 100644 --- a/docs/IE_DG/Samples_Overview.md +++ b/docs/IE_DG/Samples_Overview.md @@ -109,7 +109,7 @@ for the debug configuration — in `/intel64/Debug/`. The recommended Windows* build environment is the following: * Microsoft Windows* 10 -* Microsoft Visual Studio* 2017, or 2019 +* Microsoft Visual Studio* 2017, or 2019. Make sure that C++ CMake tools for Windows is [enabled](https://docs.microsoft.com/en-us/cpp/build/cmake-projects-in-visual-studio?view=msvc-160#:~:text=The%20Visual%20C%2B%2B%20Tools%20for,Visual%20Studio%20generators%20are%20supported). * CMake* version 3.10 or higher > **NOTE**: If you want to use Microsoft Visual Studio 2019, you are required to install CMake 3.14. diff --git a/docs/IE_DG/ShapeInference.md b/docs/IE_DG/ShapeInference.md index dcc4b5c3f88..a265f2e9703 100644 --- a/docs/IE_DG/ShapeInference.md +++ b/docs/IE_DG/ShapeInference.md @@ -33,7 +33,7 @@ If a model has a hard-coded batch dimension, use `InferenceEngine::CNNNetwork::s Inference Engine takes three kinds of a model description as an input, which are converted into an `InferenceEngine::CNNNetwork` object: 1. [Intermediate Representation (IR)](../MO_DG/IR_and_opsets.md) through `InferenceEngine::Core::ReadNetwork` -2. [ONNX model](../IE_DG/OnnxImporterTutorial.md) through `InferenceEngine::Core::ReadNetwork` +2. [ONNX model](../IE_DG/ONNX_Support.md) through `InferenceEngine::Core::ReadNetwork` 3. [nGraph function](../nGraph_DG/nGraph_dg.md) through the constructor of `InferenceEngine::CNNNetwork` `InferenceEngine::CNNNetwork` keeps an `ngraph::Function` object with the model description internally. 
diff --git a/docs/IE_DG/supported_plugins/CPU.md b/docs/IE_DG/supported_plugins/CPU.md index 8f75a792ade..12b005099ba 100644 --- a/docs/IE_DG/supported_plugins/CPU.md +++ b/docs/IE_DG/supported_plugins/CPU.md @@ -105,17 +105,18 @@ These are general options, also supported by other plugins: | Parameter name | Parameter values | Default | Description | | :--- | :--- | :--- | :----------------------------------------------------------------------------------------------------------------------------| -| KEY_EXCLUSIVE_ASYNC_REQUESTS | YES/NO | NO | Forces async requests (also from different executable networks) to execute serially. This prevents potential oversubscription| -| KEY_PERF_COUNT | YES/NO | NO | Enables gathering performance counters | +| `KEY_EXCLUSIVE_ASYNC_REQUESTS` | `YES`/`NO` | `NO` | Forces async requests (also from different executable networks) to execute serially. This prevents potential oversubscription| +| `KEY_PERF_COUNT` | `YES`/`NO` | `NO` | Enables gathering performance counters | CPU-specific settings: -| Parameter name | Parameter values | Default | Description | -| :--- | :--- | :--- | :--- | -| KEY_CPU_THREADS_NUM | positive integer values| 0 | Specifies the number of threads that CPU plugin should use for inference. Zero (default) means using all (logical) cores| -| KEY_CPU_BIND_THREAD | YES/NUMA/NO | YES | Binds inference threads to CPU cores. 'YES' (default) binding option maps threads to cores - this works best for static/synthetic scenarios like benchmarks. The 'NUMA' binding is more relaxed, binding inference threads only to NUMA nodes, leaving further scheduling to specific cores to the OS. This option might perform better in the real-life/contended scenarios. Note that for the latency-oriented cases (number of the streams is less or equal to the number of NUMA nodes, see below) both YES and NUMA options limit number of inference threads to the number of hardware cores (ignoring hyper-threading) on the multi-socket machines. 
| -| KEY_CPU_THROUGHPUT_STREAMS | KEY_CPU_THROUGHPUT_NUMA, KEY_CPU_THROUGHPUT_AUTO, or positive integer values| 1 | Specifies number of CPU "execution" streams for the throughput mode. Upper bound for the number of inference requests that can be executed simultaneously. All available CPU cores are evenly distributed between the streams. The default value is 1, which implies latency-oriented behavior for single NUMA-node machine, with all available cores processing requests one by one. On the multi-socket (multiple NUMA nodes) machine, the best latency numbers usually achieved with a number of streams matching the number of NUMA-nodes.
KEY_CPU_THROUGHPUT_NUMA creates as many streams as needed to accommodate NUMA and avoid associated penalties.
KEY_CPU_THROUGHPUT_AUTO creates bare minimum of streams to improve the performance; this is the most portable option if you don't know how many cores your target machine has (and what would be the optimal number of streams). Note that your application should provide enough parallel slack (for example, run many inference requests) to leverage the throughput mode.
Non-negative integer value creates the requested number of streams. If a number of streams is 0, no internal streams are created and user threads are interpreted as stream master threads.| -| KEY_ENFORCE_BF16 | YES/NO| YES | The name for setting to execute in bfloat16 precision whenever it is possible. This option lets plugin know to downscale the precision where it sees performance benefits from bfloat16 execution. Such option does not guarantee accuracy of the network, you need to verify the accuracy in this mode separately, based on performance and accuracy results. It should be your decision whether to use this option or not. | + +| Parameter name | Parameter values | Default | Description | +| :--- | :--- | :--- |:-----------------------------------------------------------------------------| +| `KEY_CPU_THREADS_NUM` | `positive integer values`| `0` | Specifies the number of threads that CPU plugin should use for inference. Zero (default) means using all (logical) cores| +| `KEY_CPU_BIND_THREAD` | `YES`/`NUMA`/`NO` | `YES` | Binds inference threads to CPU cores. 'YES' (default) binding option maps threads to cores - this works best for static/synthetic scenarios like benchmarks. The 'NUMA' binding is more relaxed, binding inference threads only to NUMA nodes, leaving further scheduling to specific cores to the OS. This option might perform better in the real-life/contended scenarios. Note that for the latency-oriented cases (number of the streams is less or equal to the number of NUMA nodes, see below) both YES and NUMA options limit number of inference threads to the number of hardware cores (ignoring hyper-threading) on the multi-socket machines. | +| `KEY_CPU_THROUGHPUT_STREAMS` | `KEY_CPU_THROUGHPUT_NUMA`, `KEY_CPU_THROUGHPUT_AUTO`, or `positive integer values`| `1` | Specifies number of CPU "execution" streams for the throughput mode. Upper bound for the number of inference requests that can be executed simultaneously. 
All available CPU cores are evenly distributed between the streams. The default value is 1, which implies latency-oriented behavior for single NUMA-node machine, with all available cores processing requests one by one. On the multi-socket (multiple NUMA nodes) machine, the best latency numbers usually achieved with a number of streams matching the number of NUMA-nodes.
`KEY_CPU_THROUGHPUT_NUMA` creates as many streams as needed to accommodate NUMA and avoid associated penalties.
`KEY_CPU_THROUGHPUT_AUTO` creates bare minimum of streams to improve the performance; this is the most portable option if you don't know how many cores your target machine has (and what would be the optimal number of streams). Note that your application should provide enough parallel slack (for example, run many inference requests) to leverage the throughput mode.
Non-negative integer value creates the requested number of streams. If a number of streams is 0, no internal streams are created and user threads are interpreted as stream master threads.| +| `KEY_ENFORCE_BF16` | `YES`/`NO`| `YES` | The name for setting to execute in bfloat16 precision whenever it is possible. This option lets plugin know to downscale the precision where it sees performance benefits from bfloat16 execution. Such option does not guarantee accuracy of the network, you need to verify the accuracy in this mode separately, based on performance and accuracy results. It should be your decision whether to use this option or not. | > **NOTE**: To disable all internal threading, use the following set of configuration parameters: `KEY_CPU_THROUGHPUT_STREAMS=0`, `KEY_CPU_THREADS_NUM=1`, `KEY_CPU_BIND_THREAD=NO`. diff --git a/docs/IE_DG/supported_plugins/GPU.md b/docs/IE_DG/supported_plugins/GPU.md index cc12be98a12..ab84dfbac06 100644 --- a/docs/IE_DG/supported_plugins/GPU.md +++ b/docs/IE_DG/supported_plugins/GPU.md @@ -99,23 +99,24 @@ The plugin supports the configuration parameters listed below. All parameters must be set before calling InferenceEngine::Core::LoadNetwork() in order to take effect. When specifying key values as raw strings (that is, when using Python API), omit the `KEY_` prefix. + | Parameter Name | Parameter Values | Default | Description | |---------------------|-----------------------------|-----------------|-----------------------------------------------------------| | `KEY_CACHE_DIR` | `""` | `""` | Specifies a directory where compiled OCL binaries can be cached. First model loading generates the cache, and all subsequent LoadNetwork calls use precompiled kernels which significantly improves load time. 
If empty - caching is disabled | | `KEY_PERF_COUNT` | `YES` / `NO` | `NO` | Collect performance counters during inference | | `KEY_CONFIG_FILE` | `" [ ...]"` | `""` | Load custom layer configuration files | -| `KEY_GPU_PLUGIN_PRIORITY` | `<0-3>` | `0` | OpenCL queue priority (before usage, make sure your OpenCL driver supports appropriate extension)
Higher value means higher priority for OpenCL queue. 0 disables the setting. | -| `KEY_GPU_PLUGIN_THROTTLE` | `<0-3>` | `0` | OpenCL queue throttling (before usage, make sure your OpenCL driver supports appropriate extension)
Lower value means lower driver thread priority and longer sleep time for it. 0 disables the setting. | -| `KEY_CLDNN_ENABLE_FP16_FOR_QUANTIZED_MODELS` | `YES` / `NO` | `YES` | Allows using FP16+INT8 mixed precision mode, so non-quantized parts of a model will be executed in FP16 precision for FP16 IR. Does not affect quantized FP32 IRs | -| `KEY_GPU_NV12_TWO_INPUTS` | `YES` / `NO` | `NO` | Controls preprocessing logic for nv12 input. If it's set to YES, then device graph will expect that user will set biplanar nv12 blob as input wich will be directly passed to device execution graph. Otherwise, preprocessing via GAPI is used to convert NV12->BGR, thus GPU graph have to expect single input | -| `KEY_GPU_THROUGHPUT_STREAMS` | `KEY_GPU_THROUGHPUT_AUTO`, or positive integer| 1 | Specifies a number of GPU "execution" streams for the throughput mode (upper bound for a number of inference requests that can be executed simultaneously).
This option is can be used to decrease GPU stall time by providing more effective load from several streams. Increasing the number of streams usually is more effective for smaller topologies or smaller input sizes. Note that your application should provide enough parallel slack (e.g. running many inference requests) to leverage full GPU bandwidth. Additional streams consume several times more GPU memory, so make sure the system has enough memory available to suit parallel stream execution. Multiple streams might also put additional load on CPU. If CPU load increases, it can be regulated by setting an appropriate `KEY_GPU_PLUGIN_THROTTLE` option value (see above). If your target system has relatively weak CPU, keep throttling low.
The default value is 1, which implies latency-oriented behavior.
`KEY_GPU_THROUGHPUT_AUTO` creates bare minimum of streams to improve the performance; this is the most portable option if you are not sure how many resources your target machine has (and what would be the optimal number of streams).
A positive integer value creates the requested number of streams. | -| `KEY_EXCLUSIVE_ASYNC_REQUESTS` | `YES` / `NO` | `NO` | Forces async requests (also from different executable networks) to execute serially.| -| `KEY_GPU_MAX_NUM_THREADS` | `integer value` | `maximum # of HW threads available in host environment` | Specifies the number of CPU threads that can be used for GPU engine, e.g, JIT compilation of GPU kernels or cpu kernel processing within GPU plugin. The default value is set as the number of maximum available threads in host environment to minimize the time for LoadNetwork, where the GPU kernel build time occupies a large portion. Note that if the specified value is larger than the maximum available # of threads or less than zero, it is set as maximum available # of threads. It can be specified with a smaller number than the available HW threads according to the usage scenario, e.g., when the user wants to assign more CPU threads while GPU plugin is running. Note that setting this value with lower number will affect not only the network loading time but also the cpu layers of GPU networks that are optimized with multi-threading. | -| `KEY_GPU_ENABLE_LOOP_UNROLLING` | `YES` / `NO` | `YES` | Enables recurrent layers such as TensorIterator or Loop with fixed iteration count to be unrolled. It is turned on by default. Turning this key on will achieve better inference performance for loops with not too many iteration counts (less than 16, as a rule of thumb). Turning this key off will achieve better performance for both graph loading time and inference time with many iteration counts (greater than 16). Note that turning this key on will increase the graph loading time in proportion to the iteration counts. Thus, this key should be turned off if graph loading time is considered to be most important target to optimize. 
| -| `KEY_CLDNN_PLUGIN_PRIORITY` | `<0-3>` | `0` | OpenCL queue priority (before usage, make sure your OpenCL driver supports appropriate extension)
Higher value means higher priority for OpenCL queue. 0 disables the setting. **Deprecated**. Please use KEY_GPU_PLUGIN_PRIORITY | -| `KEY_CLDNN_PLUGIN_THROTTLE` | `<0-3>` | `0` | OpenCL queue throttling (before usage, make sure your OpenCL driver supports appropriate extension)
Lower value means lower driver thread priority and longer sleep time for it. 0 disables the setting. **Deprecated**. Please use KEY_GPU_PLUGIN_THROTTLE | -| `KEY_CLDNN_GRAPH_DUMPS_DIR` | `""` | `""` | clDNN graph optimizer stages dump output directory (in GraphViz format) **Deprecated**. Will be removed in the next release | -| `KEY_CLDNN_SOURCES_DUMPS_DIR` | `""` | `""` | Final optimized clDNN OpenCL sources dump output directory. **Deprecated**. Will be removed in the next release | +| `KEY_GPU_PLUGIN_`
`PRIORITY` | `<0-3>` | `0` | OpenCL queue priority (before usage, make sure your OpenCL driver supports appropriate extension)
Higher value means higher priority for OpenCL queue. 0 disables the setting. | +| `KEY_GPU_PLUGIN_`
`THROTTLE` | `<0-3>` | `0` | OpenCL queue throttling (before usage, make sure your OpenCL driver supports appropriate extension)
Lower value means lower driver thread priority and longer sleep time for it. 0 disables the setting. | +| `KEY_CLDNN_ENABLE_`
`FP16_FOR_QUANTIZED_`
`MODELS` | `YES` / `NO` | `YES` | Allows using FP16+INT8 mixed precision mode, so non-quantized parts of a model will be executed in FP16 precision for FP16 IR. Does not affect quantized FP32 IRs | +| `KEY_GPU_NV12_`
`TWO_INPUTS` | `YES` / `NO` | `NO` | Controls preprocessing logic for nv12 input. If it's set to YES, then device graph will expect that user will set biplanar nv12 blob as input which will be directly passed to device execution graph. Otherwise, preprocessing via GAPI is used to convert NV12->BGR, thus GPU graph has to expect a single input | +| `KEY_GPU_THROUGHPUT_`
`STREAMS` | `KEY_GPU_THROUGHPUT_AUTO`, or positive integer| 1 | Specifies a number of GPU "execution" streams for the throughput mode (upper bound for a number of inference requests that can be executed simultaneously).
This option can be used to decrease GPU stall time by providing more effective load from several streams. Increasing the number of streams usually is more effective for smaller topologies or smaller input sizes. Note that your application should provide enough parallel slack (e.g. running many inference requests) to leverage full GPU bandwidth. Additional streams consume several times more GPU memory, so make sure the system has enough memory available to suit parallel stream execution. Multiple streams might also put additional load on CPU. If CPU load increases, it can be regulated by setting an appropriate `KEY_GPU_PLUGIN_THROTTLE` option value (see above). If your target system has relatively weak CPU, keep throttling low.
The default value is 1, which implies latency-oriented behavior.
`KEY_GPU_THROUGHPUT_AUTO` creates bare minimum of streams to improve the performance; this is the most portable option if you are not sure how many resources your target machine has (and what would be the optimal number of streams).
A positive integer value creates the requested number of streams. | +| `KEY_EXCLUSIVE_ASYNC_`
`REQUESTS` | `YES` / `NO` | `NO` | Forces async requests (also from different executable networks) to execute serially.| +| `KEY_GPU_MAX_NUM_`
`THREADS` | `integer value` | `maximum # of HW threads available in host environment` | Specifies the number of CPU threads that can be used for GPU engine, e.g, JIT compilation of GPU kernels or cpu kernel processing within GPU plugin. The default value is set as the number of maximum available threads in host environment to minimize the time for LoadNetwork, where the GPU kernel build time occupies a large portion. Note that if the specified value is larger than the maximum available # of threads or less than zero, it is set as maximum available # of threads. It can be specified with a smaller number than the available HW threads according to the usage scenario, e.g., when the user wants to assign more CPU threads while GPU plugin is running. Note that setting this value with lower number will affect not only the network loading time but also the cpu layers of GPU networks that are optimized with multi-threading. | +| `KEY_GPU_ENABLE_`
`LOOP_UNROLLING` | `YES` / `NO` | `YES` | Enables recurrent layers such as TensorIterator or Loop with fixed iteration count to be unrolled. It is turned on by default. Turning this key on will achieve better inference performance for loops with not too many iteration counts (less than 16, as a rule of thumb). Turning this key off will achieve better performance for both graph loading time and inference time with many iteration counts (greater than 16). Note that turning this key on will increase the graph loading time in proportion to the iteration counts. Thus, this key should be turned off if graph loading time is considered to be most important target to optimize. | +| `KEY_CLDNN_PLUGIN_`
`PRIORITY` | `<0-3>` | `0` | OpenCL queue priority (before usage, make sure your OpenCL driver supports appropriate extension)
Higher value means higher priority for OpenCL queue. 0 disables the setting. **Deprecated**. Please use KEY_GPU_PLUGIN_PRIORITY | +| `KEY_CLDNN_PLUGIN_`
`THROTTLE` | `<0-3>` | `0` | OpenCL queue throttling (before usage, make sure your OpenCL driver supports appropriate extension)
Lower value means lower driver thread priority and longer sleep time for it. 0 disables the setting. **Deprecated**. Please use KEY_GPU_PLUGIN_THROTTLE | +| `KEY_CLDNN_GRAPH_`
`DUMPS_DIR` | `""` | `""` | clDNN graph optimizer stages dump output directory (in GraphViz format) **Deprecated**. Will be removed in the next release | +| `KEY_CLDNN_SOURCES_`
`DUMPS_DIR` | `""` | `""` | Final optimized clDNN OpenCL sources dump output directory. **Deprecated**. Will be removed in the next release | | `KEY_DUMP_KERNELS` | `YES` / `NO` | `NO` | Dump the final kernels used for custom layers. **Deprecated**. Will be removed in the next release | | `KEY_TUNING_MODE` | `TUNING_DISABLED`
`TUNING_CREATE`
`TUNING_USE_EXISTING` | `TUNING_DISABLED` | Disable inference kernel tuning
Create tuning file (expect much longer runtime)
Use an existing tuning file. **Deprecated**. Will be removed in the next release | | `KEY_TUNING_FILE` | `""` | `""` | Tuning file to create / use. **Deprecated**. Will be removed in the next release | diff --git a/docs/IE_DG/supported_plugins/MULTI.md b/docs/IE_DG/supported_plugins/MULTI.md index a3f7dc2afc9..cebc03ba135 100644 --- a/docs/IE_DG/supported_plugins/MULTI.md +++ b/docs/IE_DG/supported_plugins/MULTI.md @@ -96,10 +96,8 @@ Notice that you can use the FP16 IR to work with multi-device (as CPU automatica Also notice that no demos are (yet) fully optimized for the multi-device, by means of supporting the OPTIMAL_NUMBER_OF_INFER_REQUESTS metric, using the GPU streams/throttling, and so on. ## Video: MULTI Plugin -[![](https://img.youtube.com/vi/xbORYFEmrqU/0.jpg)](https://www.youtube.com/watch?v=xbORYFEmrqU) -\htmlonly + -\endhtmlonly ## See Also * [Supported Devices](Supported_Devices.md) diff --git a/docs/Legal_Information.md b/docs/Legal_Information.md index 2f3526f2902..2936ae2a949 100644 --- a/docs/Legal_Information.md +++ b/docs/Legal_Information.md @@ -1,22 +1,20 @@ # Legal Information {#openvino_docs_Legal_Information} -This software and the related documents are Intel copyrighted materials, and your use of them is governed by the express license (the “License”) under which they were provided to you. No license (express or implied, by estoppel or otherwise) to any intellectual property rights is granted by this document. Unless the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose or transmit this software or the related documents without Intel's prior written permission. This software and the related documents are provided as is, with no express or implied warranties, other than those that are expressly stated in the License. 
Intel disclaims all express and implied warranties, including without limitation, the implied warranties of merchantability, fitness for a particular purpose, and non-infringement, as well as any warranty arising from course of performance, course of dealing, or usage in trade. - -This document contains information on products, services and/or processes in development. All information provided here is subject to change without notice. Contact your Intel representative to obtain the latest forecast, schedule, specifications and roadmaps. The products and services described may contain defects or errors known as errata which may cause deviations from published specifications. Current characterized errata are available on request. Copies of documents which have an order number and are referenced in this document may be obtained by calling 1-800-548-4725 or by visiting [www.intel.com/design/literature.htm](https://www.intel.com/design/literature.htm). - Performance varies by use, configuration and other factors. Learn more at [www.intel.com/PerformanceIndex](https://www.intel.com/PerformanceIndex). - -Performance results are based on testing as of dates shown in configurations and may not reflect all publicly available updates. See backup for configuration details. No product or component can be absolutely secure. - -Your costs and results may vary. - + +Performance results are based on testing as of dates shown in configurations and may not reflect all publicly available updates. See backup for configuration details. No product or component can be absolutely secure. + +Your costs and results may vary. + Intel technologies may require enabled hardware, software or service activation. -© Intel Corporation. Intel, the Intel logo, and other Intel marks are trademarks of Intel Corporation or its subsidiaries. \*Other names and brands may be claimed as the property of others. +OpenCL and the OpenCL logo are trademarks of Apple Inc. used by permission by Khronos. 
+© Intel Corporation. Intel, the Intel logo, and other Intel marks are trademarks of Intel Corporation or its subsidiaries. Other names and brands may be claimed as the property of others. + ## OpenVINO™ Logo To build equity around the project, the OpenVINO logo was created for both Intel and community usage. The logo may only be used to represent the OpenVINO toolkit and offerings built using the OpenVINO toolkit. - + ## Logo Usage Guidelines The OpenVINO logo must be used in connection with truthful, non-misleading references to the OpenVINO toolkit, and for no other purpose. -Modification of the logo or use of any separate element(s) of the logo alone is not allowed. +Modification of the logo or use of any separate element(s) of the logo alone is not allowed. \ No newline at end of file diff --git a/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md b/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md index 2aed66ba719..378d559f895 100644 --- a/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md +++ b/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md @@ -1,136 +1,54 @@ # Model Optimizer Developer Guide {#openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide} +## Introduction + Model Optimizer is a cross-platform command-line tool that facilitates the transition between the training and deployment environment, performs static model analysis, and adjusts deep learning models for optimal execution on end-point target devices. -Model Optimizer process assumes you have a network model trained using a supported deep learning framework. The scheme below illustrates the typical workflow for deploying a trained deep learning model: +Model Optimizer process assumes you have a network model trained using supported deep learning frameworks: Caffe*, TensorFlow*, Kaldi*, MXNet* or converted to the ONNX* format. 
Model Optimizer produces an Intermediate Representation (IR) of the network, which can be inferred with the [Inference Engine](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md). + +> **NOTE**: Model Optimizer does not infer models. Model Optimizer is an offline tool that runs before the inference takes place. + +The scheme below illustrates the typical workflow for deploying a trained deep learning model: ![](img/workflow_steps.png) -Model Optimizer produces an Intermediate Representation (IR) of the network, which can be read, loaded, and inferred with the Inference Engine. The Inference Engine API offers a unified API across a number of supported Intel® platforms. The Intermediate Representation is a pair of files describing the model: +The IR is a pair of files describing the model: * .xml - Describes the network topology * .bin - Contains the weights and biases binary data. -> **TIP**: You also can work with the Model Optimizer inside the OpenVINO™ [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction) (DL Workbench). -> [DL Workbench](@ref workbench_docs_Workbench_DG_Introduction) is a platform built upon OpenVINO™ and provides a web-based graphical environment that enables you to optimize, fine-tune, analyze, visualize, and compare -> performance of deep learning models on various Intel® architecture -> configurations. In the DL Workbench, you can use most of OpenVINO™ toolkit components. ->
-> Proceed to an [easy installation from Docker](@ref workbench_docs_Workbench_DG_Install_from_Docker_Hub) to get started. +Below is a simple command running Model Optimizer to generate an IR for the input model: -## What's New in the Model Optimizer in this Release? +```sh +python3 mo.py --input_model INPUT_MODEL +``` +To learn about all Model Optimizer parameters and conversion technics, see the [Converting a Model to IR](prepare_model/convert_model/Converting_Model.md) page. -* Common changes: - * Implemented several optimization transformations to replace sub-graphs of operations with HSwish, Mish, Swish and SoftPlus operations. - * Model Optimizer generates IR keeping shape-calculating sub-graphs **by default**. Previously, this behavior was triggered if the "--keep_shape_ops" command line parameter was provided. The key is ignored in this release and will be deleted in the next release. To trigger the legacy behavior to generate an IR for a fixed input shape (folding ShapeOf operations and shape-calculating sub-graphs to Constant), use the "--static_shape" command line parameter. Changing model input shape using the Inference Engine API in runtime may fail for such an IR. - * Fixed Model Optimizer conversion issues resulted in non-reshapeable IR using the Inference Engine reshape API. - * Enabled transformations to fix non-reshapeable patterns in the original networks: - * Hardcoded Reshape - * In Reshape(2D)->MatMul pattern - * Reshape->Transpose->Reshape when the pattern can be fused to the ShuffleChannels or DepthToSpace operation - * Hardcoded Interpolate - * In Interpolate->Concat pattern - * Added a dedicated requirements file for TensorFlow 2.X as well as the dedicated install prerequisites scripts. - * Replaced the SparseToDense operation with ScatterNDUpdate-4. -* ONNX*: - * Enabled an ability to specify the model output **tensor** name using the "--output" command line parameter. 
- * Added support for the following operations: - * Acosh - * Asinh - * Atanh - * DepthToSpace-11, 13 - * DequantizeLinear-10 (zero_point must be constant) - * HardSigmoid-1,6 - * QuantizeLinear-10 (zero_point must be constant) - * ReduceL1-11, 13 - * ReduceL2-11, 13 - * Resize-11, 13 (except mode="nearest" with 5D+ input, mode="tf_crop_and_resize", and attributes exclude_outside and extrapolation_value with non-zero values) - * ScatterND-11, 13 - * SpaceToDepth-11, 13 -* TensorFlow*: - * Added support for the following operations: - * Acosh - * Asinh - * Atanh - * CTCLoss - * EuclideanNorm - * ExtractImagePatches - * FloorDiv -* MXNet*: - * Added support for the following operations: - * Acosh - * Asinh - * Atanh -* Kaldi*: - * Fixed bug with ParallelComponent support. Now it is fully supported with no restrictions. +> **TIP**: You can quick start with the Model Optimizer inside the OpenVINO™ [Deep Learning Workbench](@ref +> openvino_docs_get_started_get_started_dl_workbench) (DL Workbench). +> [DL Workbench](@ref workbench_docs_Workbench_DG_Introduction) is the OpenVINO™ toolkit UI that enables you to +> import a model, analyze its performance and accuracy, visualize the outputs, optimize and prepare the model for +> deployment on various Intel® platforms. -> **NOTE:** -> [Intel® System Studio](https://software.intel.com/en-us/system-studio) is an all-in-one, cross-platform tool suite, purpose-built to simplify system bring-up and improve system and IoT device application performance on Intel® platforms. If you are using the Intel® Distribution of OpenVINO™ with Intel® System Studio, go to [Get Started with Intel® System Studio](https://software.intel.com/en-us/articles/get-started-with-openvino-and-intel-system-studio-2019). +## Videos -## Table of Contents + + + + + + + + + + + +
+ + + + + +
Model Optimizer Concept.
Duration: 3:56
Model Optimizer Basic
Operation
.
Duration: 2:57.
Choosing the Right Precision.
Duration: 4:18.
-* [Preparing and Optimizing your Trained Model with Model Optimizer](prepare_model/Prepare_Trained_Model.md) - * [Configuring Model Optimizer](prepare_model/Config_Model_Optimizer.md) - * [Converting a Model to Intermediate Representation (IR)](prepare_model/convert_model/Converting_Model.md) - * [Converting a Model Using General Conversion Parameters](prepare_model/convert_model/Converting_Model_General.md) - * [Converting Your Caffe* Model](prepare_model/convert_model/Convert_Model_From_Caffe.md) - * [Converting Your TensorFlow* Model](prepare_model/convert_model/Convert_Model_From_TensorFlow.md) - * [Converting BERT from TensorFlow](prepare_model/convert_model/tf_specific/Convert_BERT_From_Tensorflow.md) - * [Converting GNMT from TensorFlow](prepare_model/convert_model/tf_specific/Convert_GNMT_From_Tensorflow.md) - * [Converting YOLO from DarkNet to TensorFlow and then to IR](prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md) - * [Converting Wide and Deep Models from TensorFlow](prepare_model/convert_model/tf_specific/Convert_WideAndDeep_Family_Models.md) - * [Converting FaceNet from TensorFlow](prepare_model/convert_model/tf_specific/Convert_FaceNet_From_Tensorflow.md) - * [Converting DeepSpeech from TensorFlow](prepare_model/convert_model/tf_specific/Convert_DeepSpeech_From_Tensorflow.md) - * [Converting Language Model on One Billion Word Benchmark from TensorFlow](prepare_model/convert_model/tf_specific/Convert_lm_1b_From_Tensorflow.md) - * [Converting Neural Collaborative Filtering Model from TensorFlow*](prepare_model/convert_model/tf_specific/Convert_NCF_From_Tensorflow.md) - * [Converting TensorFlow* Object Detection API Models](prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md) - * [Converting TensorFlow*-Slim Image Classification Model Library Models](prepare_model/convert_model/tf_specific/Convert_Slim_Library_Models.md) - * [Converting CRNN Model from 
TensorFlow*](prepare_model/convert_model/tf_specific/Convert_CRNN_From_Tensorflow.md) - * [Converting Your MXNet* Model](prepare_model/convert_model/Convert_Model_From_MxNet.md) - * [Converting a Style Transfer Model from MXNet](prepare_model/convert_model/mxnet_specific/Convert_Style_Transfer_From_MXNet.md) - * [Converting Your Kaldi* Model](prepare_model/convert_model/Convert_Model_From_Kaldi.md) - * [Converting Your ONNX* Model](prepare_model/convert_model/Convert_Model_From_ONNX.md) - * [Converting Faster-RCNN ONNX* Model](prepare_model/convert_model/onnx_specific/Convert_Faster_RCNN.md) - * [Converting Mask-RCNN ONNX* Model](prepare_model/convert_model/onnx_specific/Convert_Mask_RCNN.md) - * [Converting GPT2 ONNX* Model](prepare_model/convert_model/onnx_specific/Convert_GPT2.md) - * [Converting Your PyTorch* Model](prepare_model/convert_model/Convert_Model_From_PyTorch.md) - * [Converting F3Net PyTorch* Model](prepare_model/convert_model/pytorch_specific/Convert_F3Net.md) - * [Converting QuartzNet PyTorch* Model](prepare_model/convert_model/pytorch_specific/Convert_QuartzNet.md) - * [Converting YOLACT PyTorch* Model](prepare_model/convert_model/pytorch_specific/Convert_YOLACT.md) - * [Model Optimizations Techniques](prepare_model/Model_Optimization_Techniques.md) - * [Cutting parts of the model](prepare_model/convert_model/Cutting_Model.md) - * [Sub-graph Replacement in Model Optimizer](prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md) - * [Supported Framework Layers](prepare_model/Supported_Frameworks_Layers.md) - * [Intermediate Representation and Operation Sets](IR_and_opsets.md) - * [Operations Specification](../ops/opset.md) - * [Intermediate Representation suitable for INT8 inference](prepare_model/convert_model/IR_suitable_for_INT8_inference.md) - * [Model Optimizer Extensibility](prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) - * [Extending Model Optimizer with New 
Primitives](prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md) - * [Extending Model Optimizer with Caffe Python Layers](prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md) - * [Extending Model Optimizer with Custom MXNet* Operations](prepare_model/customize_model_optimizer/Extending_MXNet_Model_Optimizer_with_New_Primitives.md) - * [Legacy Mode for Caffe* Custom Layers](prepare_model/customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md) - * [Model Optimizer Frequently Asked Questions](prepare_model/Model_Optimizer_FAQ.md) - -* [Known Issues](Known_Issues_Limitations.md) - -**Typical Next Step:** [Preparing and Optimizing your Trained Model with Model Optimizer](prepare_model/Prepare_Trained_Model.md) - -## Video: Model Optimizer Concept - -[![](https://img.youtube.com/vi/Kl1ptVb7aI8/0.jpg)](https://www.youtube.com/watch?v=Kl1ptVb7aI8) -\htmlonly - -\endhtmlonly - -## Video: Model Optimizer Basic Operation -[![](https://img.youtube.com/vi/BBt1rseDcy0/0.jpg)](https://www.youtube.com/watch?v=BBt1rseDcy0) -\htmlonly - -\endhtmlonly - -## Video: Choosing the Right Precision -[![](https://img.youtube.com/vi/RF8ypHyiKrY/0.jpg)](https://www.youtube.com/watch?v=RF8ypHyiKrY) -\htmlonly - -\endhtmlonly diff --git a/docs/MO_DG/img/small_IR_graph_demonstration.png b/docs/MO_DG/img/small_IR_graph_demonstration.png index 91a3fe385ae..332c11fdb65 100644 --- a/docs/MO_DG/img/small_IR_graph_demonstration.png +++ b/docs/MO_DG/img/small_IR_graph_demonstration.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8ae479880ab43cdb12eeb2fbaaf3b7861f786413c583eeba906c5fdf4b66730 -size 30696 +oid sha256:e8a86ea362473121a266c0ec1257c8d428a4bb6438fecdc9d4a4f1ff5cfc9047 +size 26220 diff --git a/docs/MO_DG/img/workflow_steps.png b/docs/MO_DG/img/workflow_steps.png index 6bf780127ad..fee04b7cb33 100644 --- a/docs/MO_DG/img/workflow_steps.png +++ b/docs/MO_DG/img/workflow_steps.png 
@@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e22bc22d614c7335ae461a8ce449ea8695973d755faca718cf74b95972c94e2 -size 19773 +oid sha256:5281f26cbaa468dc4cafa4ce2fde35d338fe0f658bbb796abaaf793e951939f6 +size 13943 diff --git a/docs/MO_DG/prepare_model/Config_Model_Optimizer.md b/docs/MO_DG/prepare_model/Config_Model_Optimizer.md index 9b978d750aa..3b190dd6272 100644 --- a/docs/MO_DG/prepare_model/Config_Model_Optimizer.md +++ b/docs/MO_DG/prepare_model/Config_Model_Optimizer.md @@ -1,8 +1,6 @@ -# Configuring the Model Optimizer {#openvino_docs_MO_DG_prepare_model_Config_Model_Optimizer} +# Installing Model Optimizer Pre-Requisites {#openvino_docs_MO_DG_prepare_model_Config_Model_Optimizer} -You must configure the Model Optimizer for the framework that was used to train -the model. This section tells you how to configure the Model Optimizer either -through scripts or by using a manual process. +Before running the Model Optimizer, you must install the Model Optimizer pre-requisites for the framework that was used to train the model. This section tells you how to install the pre-requisites either through scripts or by using a manual process. ## Using Configuration Scripts @@ -154,6 +152,10 @@ pip3 install -r requirements_onnx.txt ``` ## Using the protobuf Library in the Model Optimizer for Caffe\* +
+ Click to expand + + These procedures require: @@ -166,7 +168,7 @@ By default, the library executes pure Python\* language implementation, which is slow. These steps show how to use the faster C++ implementation of the protobuf library on Windows OS or Linux OS. -### Using the protobuf Library on Linux\* OS +#### Using the protobuf Library on Linux\* OS To use the C++ implementation of the protobuf library on Linux, it is enough to set up the environment variable: @@ -174,7 +176,7 @@ set up the environment variable: export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp ``` -### Using the protobuf Library on Windows\* OS +#### Using the protobuf Library on Windows\* OS On Windows, pre-built protobuf packages for Python versions 3.4, 3.5, 3.6, and 3.7 are provided with the installation package and can be found in @@ -262,6 +264,8 @@ python3 -m easy_install dist/protobuf-3.6.1-py3.6-win-amd64.egg set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp ``` +
+ ## See Also * [Converting a Model to Intermediate Representation (IR)](convert_model/Converting_Model.md) diff --git a/docs/MO_DG/prepare_model/Prepare_Trained_Model.md b/docs/MO_DG/prepare_model/Prepare_Trained_Model.md deleted file mode 100644 index a74d1b789a2..00000000000 --- a/docs/MO_DG/prepare_model/Prepare_Trained_Model.md +++ /dev/null @@ -1,63 +0,0 @@ -# Preparing and Optimizing Your Trained Model {#openvino_docs_MO_DG_prepare_model_Prepare_Trained_Model} - -Inference Engine enables _deploying_ your network model trained with any of supported deep learning frameworks: Caffe\*, TensorFlow\*, Kaldi\*, MXNet\* or converted to the ONNX\* format. To perform the inference, the Inference Engine does not operate with the original model, but with its Intermediate Representation (IR), which is optimized for execution on end-point target devices. To generate an IR for your trained model, the Model Optimizer tool is used. - -## How the Model Optimizer Works - -Model Optimizer loads a model into memory, reads it, builds the internal representation of the model, optimizes it, and produces the Intermediate Representation. Intermediate Representation is the only format the Inference Engine accepts. - -> **NOTE**: Model Optimizer does not infer models. Model Optimizer is an offline tool that runs before the inference takes place. - -Model Optimizer has two main purposes: - -* **Produce a valid Intermediate Representation**. If this main conversion artifact is not valid, the Inference Engine cannot run. The primary responsibility of the Model Optimizer is to produce the two files (`.xml` and `.bin`) that form the Intermediate Representation. -* **Produce an optimized Intermediate Representation**. Pre-trained models contain layers that are important for training, such as the `Dropout` layer. These layers are useless during inference and might increase the inference time. 
In many cases, these operations can be automatically removed from the resulting Intermediate Representation. However, if a group of operations can be represented as a single mathematical operation, and thus as a single operation node in a model graph, the Model Optimizer recognizes such patterns and replaces this group of operation nodes with the only one operation. The result is an Intermediate Representation that has fewer operation nodes than the original model. This decreases the inference time. - -To produce a valid Intermediate Representation, the Model Optimizer must be able to read the original model operations, handle their properties and represent them in Intermediate Representation format, while maintaining validity of the resulting Intermediate Representation. The resulting model consists of operations described in the [Operations Specification](../../ops/opset.md). - -## What You Need to Know about Your Model - -Many common layers exist across known frameworks and neural network topologies. Examples of these layers are `Convolution`, `Pooling`, and `Activation`. To read the original model and produce the Intermediate Representation of a model, the Model Optimizer must be able to work with these layers. - -The full list of them depends on the framework and can be found in the [Supported Framework Layers](Supported_Frameworks_Layers.md) section. If your topology contains only layers from the list of layers, as is the case for the topologies used by most users, the Model Optimizer easily creates the Intermediate Representation. After that you can proceed to work with the Inference Engine. - -However, if you use a topology with layers that are not recognized by the Model Optimizer out of the box, see [Custom Layers in the Model Optimizer](customize_model_optimizer/Customize_Model_Optimizer.md) to learn how to work with custom layers. 
- -## Model Optimizer Directory Structure - -After installation with OpenVINO™ toolkit or Intel® Deep Learning Deployment Toolkit, the Model Optimizer folder has the following structure (some directories omitted for clarity): -``` -|-- model_optimizer - |-- extensions - |-- front - Front-End framework agnostic transformations (operations output shapes are not defined yet). - |-- caffe - Front-End Caffe-specific transformations and Caffe layers extractors - |-- CustomLayersMapping.xml.example - example of file for registering custom Caffe layers (compatible with the 2017R3 release) - |-- kaldi - Front-End Kaldi-specific transformations and Kaldi operations extractors - |-- mxnet - Front-End MxNet-specific transformations and MxNet symbols extractors - |-- onnx - Front-End ONNX-specific transformations and ONNX operators extractors - |-- tf - Front-End TensorFlow-specific transformations, TensorFlow operations extractors, sub-graph replacements configuration files. - |-- middle - Middle-End framework agnostic transformations (layers output shapes are defined). - |-- back - Back-End framework agnostic transformations (preparation for IR generation). 
- |-- mo - |-- back - Back-End logic: contains IR emitting logic - |-- front - Front-End logic: contains matching between Framework-specific layers and IR specific, calculation of output shapes for each registered layer - |-- graph - Graph utilities to work with internal IR representation - |-- middle - Graph transformations - optimizations of the model - |-- pipeline - Sequence of steps required to create IR for each framework - |-- utils - Utility functions - |-- tf_call_ie_layer - Source code that enables TensorFlow fallback in Inference Engine during model inference - |-- mo.py - Centralized entry point that can be used for any supported framework - |-- mo_caffe.py - Entry point particularly for Caffe - |-- mo_kaldi.py - Entry point particularly for Kaldi - |-- mo_mxnet.py - Entry point particularly for MXNet - |-- mo_onnx.py - Entry point particularly for ONNX - |-- mo_tf.py - Entry point particularly for TensorFlow -``` - -The following sections provide the information about how to use the Model Optimizer, from configuring the tool and generating an IR for a given model to customizing the tool for your needs: - -* [Configuring Model Optimizer](Config_Model_Optimizer.md) -* [Converting a Model to Intermediate Representation](convert_model/Converting_Model.md) -* [Custom Layers in Model Optimizer](customize_model_optimizer/Customize_Model_Optimizer.md) -* [Model Optimization Techniques](Model_Optimization_Techniques.md) -* [Model Optimizer Frequently Asked Questions](Model_Optimizer_FAQ.md) diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md index 4b8c1816e8b..85218eaf1a0 100644 --- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md +++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md @@ -27,14 +27,12 @@ A summary of the steps for optimizing and deploying a model that was trained wit |SSD-ResNet-50| 
[Repo](https://github.com/zhreshold/mxnet-ssd), [Symbol + Params](https://github.com/zhreshold/mxnet-ssd/releases/download/v0.6/resnet50_ssd_512_voc0712_trainval.zip)| |SSD-VGG-16-300| [Repo](https://github.com/zhreshold/mxnet-ssd), [Symbol + Params](https://github.com/zhreshold/mxnet-ssd/releases/download/v0.5-beta/vgg16_ssd_300_voc0712_trainval.zip)| |SSD-Inception v3| [Repo](https://github.com/zhreshold/mxnet-ssd), [Symbol + Params](https://github.com/zhreshold/mxnet-ssd/releases/download/v0.7-alpha/ssd_inceptionv3_512_voc0712trainval.zip)| -|FCN8 (Semantic Segmentation)| [Repo](https://github.com/apache/incubator-mxnet/tree/master/example/fcn-xs), [Symbol](https://www.dropbox.com/sh/578n5cxej7ofd6m/AAA9SFCBN8R_uL2CnAd3WQ5ia/FCN8s_VGG16-symbol.json?dl=0), [Params](https://www.dropbox.com/sh/578n5cxej7ofd6m/AABHWZHCtA2P6iR6LUflkxb_a/FCN8s_VGG16-0019-cpu.params?dl=0)| |MTCNN part 1 (Face Detection)| [Repo](https://github.com/pangyupo/mxnet_mtcnn_face_detection), [Symbol](https://github.com/pangyupo/mxnet_mtcnn_face_detection/blob/master/model/det1-symbol.json), [Params](https://github.com/pangyupo/mxnet_mtcnn_face_detection/blob/master/model/det1-0001.params)| |MTCNN part 2 (Face Detection)| [Repo](https://github.com/pangyupo/mxnet_mtcnn_face_detection), [Symbol](https://github.com/pangyupo/mxnet_mtcnn_face_detection/blob/master/model/det2-symbol.json), [Params](https://github.com/pangyupo/mxnet_mtcnn_face_detection/blob/master/model/det2-0001.params)| |MTCNN part 3 (Face Detection)| [Repo](https://github.com/pangyupo/mxnet_mtcnn_face_detection), [Symbol](https://github.com/pangyupo/mxnet_mtcnn_face_detection/blob/master/model/det3-symbol.json), [Params](https://github.com/pangyupo/mxnet_mtcnn_face_detection/blob/master/model/det3-0001.params)| |MTCNN part 4 (Face Detection)| [Repo](https://github.com/pangyupo/mxnet_mtcnn_face_detection), [Symbol](https://github.com/pangyupo/mxnet_mtcnn_face_detection/blob/master/model/det4-symbol.json), 
[Params](https://github.com/pangyupo/mxnet_mtcnn_face_detection/blob/master/model/det4-0001.params)| |Lightened_moon| [Repo](https://github.com/tornadomeet/mxnet-face/tree/master/model/lightened_moon), [Symbol](https://github.com/tornadomeet/mxnet-face/blob/master/model/lightened_moon/lightened_moon_fuse-symbol.json), [Params](https://github.com/tornadomeet/mxnet-face/blob/master/model/lightened_moon/lightened_moon_fuse-0082.params)| |RNN-Transducer| [Repo](https://github.com/HawkAaron/mxnet-transducer) | -|word_lm| [Repo](https://github.com/apache/incubator-mxnet/tree/master/example/rnn/word_lm) | **Other supported topologies** diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md index 7e29a7668b2..17465ef6e62 100644 --- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md +++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md @@ -37,7 +37,7 @@ Detailed information on how to convert models from the TensorFlow 1 Detection Model Zoo is available in the [Converting TensorFlow Object Detection API Models](tf_specific/Convert_Object_Detection_API_Models.md) chapter. The table below contains models from the Object Detection Models zoo that are supported. +Detailed information on how to convert models from the TensorFlow 1 Object Detection Models Zoo and TensorFlow 2 Object Detection Models Zoo is available in the [Converting TensorFlow Object Detection API Models](tf_specific/Convert_Object_Detection_API_Models.md) chapter. The table below contains models from the Object Detection Models Zoo that are supported. 
| Model Name| TensorFlow 1 Object Detection API Models| | :------------- | -----:| @@ -405,10 +405,8 @@ Refer to [Supported Framework Layers ](../Supported_Frameworks_Layers.md) for th The Model Optimizer provides explanatory messages if it is unable to run to completion due to issues like typographical errors, incorrectly used options, or other issues. The message describes the potential cause of the problem and gives a link to the [Model Optimizer FAQ](../Model_Optimizer_FAQ.md). The FAQ has instructions on how to resolve most issues. The FAQ also includes links to relevant sections in the Model Optimizer Developer Guide to help you understand what went wrong. ## Video: Converting a TensorFlow Model -[![](https://img.youtube.com/vi/QW6532LtiTc/0.jpg)](https://www.youtube.com/watch?v=QW6532LtiTc) -\htmlonly + -\endhtmlonly ## Summary In this document, you learned: diff --git a/docs/MO_DG/prepare_model/convert_model/Converting_Model.md b/docs/MO_DG/prepare_model/convert_model/Converting_Model.md index 26ce1289b8c..60ab7e2ac71 100644 --- a/docs/MO_DG/prepare_model/convert_model/Converting_Model.md +++ b/docs/MO_DG/prepare_model/convert_model/Converting_Model.md @@ -1,39 +1,20 @@ # Converting a Model to Intermediate Representation (IR) {#openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model} -Use the mo.py script from the `/deployment_tools/model_optimizer` directory to run the Model Optimizer and convert the model to the Intermediate Representation (IR). -The simplest way to convert a model is to run mo.py with a path to the input model file and an output directory where you have write permissions: +Use the mo.py script from the `/deployment_tools/model_optimizer` directory to run the Model Optimizer and convert the model to the Intermediate Representation (IR): ```sh python3 mo.py --input_model INPUT_MODEL --output_dir ``` +You need to have write permissions for an output directory.
-> **NOTE**: Some models require using additional arguments to specify conversion parameters, such as `--scale`, `--scale_values`, `--mean_values`, `--mean_file`. To learn about when you need to use these parameters, refer to [Converting a Model Using General Conversion Parameters](Converting_Model_General.md). - -The mo.py script is the universal entry point that can deduce the framework that has produced the input model by a standard extension of the model file: - -* `.caffemodel` - Caffe\* models -* `.pb` - TensorFlow\* models -* `.params` - MXNet\* models -* `.onnx` - ONNX\* models -* `.nnet` - Kaldi\* models. - -If the model files do not have standard extensions, you can use the ``--framework {tf,caffe,kaldi,onnx,mxnet,paddle}`` option to specify the framework type explicitly. - -For example, the following commands are equivalent: -```sh -python3 mo.py --input_model /user/models/model.pb -``` -```sh -python3 mo.py --framework tf --input_model /user/models/model.pb -``` +> **NOTE**: Some models require using additional arguments to specify conversion parameters, such as `--input_shape`, `--scale`, `--scale_values`, `--mean_values`, `--mean_file`. To learn about when you need to use these parameters, refer to [Converting a Model Using General Conversion Parameters](Converting_Model_General.md). To adjust the conversion process, you may use general parameters defined in the [Converting a Model Using General Conversion Parameters](Converting_Model_General.md) and Framework-specific parameters for: -* [Caffe](Convert_Model_From_Caffe.md), -* [TensorFlow](Convert_Model_From_TensorFlow.md), -* [MXNet](Convert_Model_From_MxNet.md), -* [ONNX](Convert_Model_From_ONNX.md), -* [Kaldi](Convert_Model_From_Kaldi.md). -* [Paddle](Convert_Model_From_Paddle.md). 
+* [Caffe](Convert_Model_From_Caffe.md) +* [TensorFlow](Convert_Model_From_TensorFlow.md) +* [MXNet](Convert_Model_From_MxNet.md) +* [ONNX](Convert_Model_From_ONNX.md) +* [Kaldi](Convert_Model_From_Kaldi.md) ## See Also diff --git a/docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md b/docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md index 2d267cda3e7..913278a8e2a 100644 --- a/docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md +++ b/docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md @@ -212,8 +212,7 @@ Launch the Model Optimizer for the Caffe bvlc_alexnet model with reversed input python3 mo.py --input_model bvlc_alexnet.caffemodel --reverse_input_channels --mean_values [255,255,255] --data_type FP16 --output_dir ``` -Launch the Model Optimizer for the Caffe bvlc_alexnet model with extensions listed in specified directories, specified mean_images binaryproto. - file For more information about extensions, please refer to [this](../customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md) page. +Launch the Model Optimizer for the Caffe bvlc_alexnet model with extensions listed in specified directories, specified mean_images binaryproto file. For more information about extensions, please refer to [this](../customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md) page. 
```sh python3 mo.py --input_model bvlc_alexnet.caffemodel --extensions /home/,/some/other/path/ --mean_file /path/to/binaryproto --output_dir ``` diff --git a/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md b/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md index d86368a9f70..203fc94862a 100644 --- a/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md +++ b/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md @@ -19,7 +19,7 @@ Model Optimizer provides command line options `--input` and `--output` to specif * `--input` option accepts a comma-separated list of layer names of the input model that should be treated as new entry points to the model. * `--output` option accepts a comma-separated list of layer names of the input model that should be treated as new exit points from the model. -The `--input` option is required for cases unrelated to model cutting. For example, when the model contains several inputs and `--input_shape` or `--mean_values` options are used, you should use the `--input` option to specify the order of input nodes for correct mapping between multiple items provided in `--input_shape` and `--mean_values` and the inputs in the model. This is out of scope. +The `--input` option is required for cases unrelated to model cutting. For example, when the model contains several inputs and `--input_shape` or `--mean_values` options are used, you should use the `--input` option to specify the order of input nodes for correct mapping between multiple items provided in `--input_shape` and `--mean_values` and the inputs in the model. Details on these options are out of scope for this document, which focuses on model cutting. Model cutting is illustrated with Inception V1. This model is in `models/research/slim` repository. [This section](Converting_Model.md) describes pre-work to prepare the model for the Model Optimizer to be ready to proceed with this chapter. 
diff --git a/docs/MO_DG/prepare_model/convert_model/IR_suitable_for_INT8_inference.md b/docs/MO_DG/prepare_model/convert_model/IR_suitable_for_INT8_inference.md index fa4bdb50554..4f9baa1386c 100644 --- a/docs/MO_DG/prepare_model/convert_model/IR_suitable_for_INT8_inference.md +++ b/docs/MO_DG/prepare_model/convert_model/IR_suitable_for_INT8_inference.md @@ -9,7 +9,7 @@ Intermediate Representation (IR) should be specifically formed to be suitable fo Such an IR is called a Low Precision IR and you can generate it in two ways: - [Quantize regular IR with the Post-Training Optimization tool](@ref pot_README) - Use the Model Optimizer for a model pretrained for Low Precision inference: TensorFlow\* pre-TFLite models (`.pb` model file with `FakeQuantize*` operations) and ONNX\* quantized models. -Both Tensorflow and ONNX quantized models could be prepared by [Neural Network Compression Framework](https://github.com/openvinotoolkit/nncf/blob/develop/README.md) +Both TensorFlow and ONNX quantized models could be prepared by [Neural Network Compression Framework](https://github.com/openvinotoolkit/nncf/blob/develop/README.md). For an operation to be executed in INT8, it must have `FakeQuantize` operations as inputs. See the [specification of `FakeQuantize` operation](../../../ops/quantization/FakeQuantize_1.md) for details. @@ -17,7 +17,7 @@ See the [specification of `FakeQuantize` operation](../../../ops/quantization/Fa To execute the `Convolution` operation in INT8 on CPU, both data and weight inputs should have `FakeQuantize` as an input operation: ![](../../img/expanded_int8_Convolution_weights.png) -Low pecision IR is also suitable for FP32 and FP16 inference if a chosen plugin supports all operations of the IR, because the only difference between a Low Precision IR and FP16 or FP32 IR is the existence of `FakeQuantize` in the Low Precision IR. 
+Low precision IR is also suitable for FP32 and FP16 inference if a chosen plugin supports all operations of the IR, because the only difference between a Low Precision IR and FP16 or FP32 IR is the existence of `FakeQuantize` in the Low Precision IR. Plugins with Low Precision Inference support recognize these sub-graphs and quantize them during the inference time. Plugins without Low Precision support execute all operations, including `FakeQuantize`, as is in the FP32 or FP16 precision. diff --git a/docs/MO_DG/prepare_model/convert_model/mxnet_specific/Convert_Style_Transfer_From_MXNet.md b/docs/MO_DG/prepare_model/convert_model/mxnet_specific/Convert_Style_Transfer_From_MXNet.md index f0ec23d5a9f..eb1a7094673 100644 --- a/docs/MO_DG/prepare_model/convert_model/mxnet_specific/Convert_Style_Transfer_From_MXNet.md +++ b/docs/MO_DG/prepare_model/convert_model/mxnet_specific/Convert_Style_Transfer_From_MXNet.md @@ -90,6 +90,8 @@ Where the `models/13` string is composed of the following substrings: * `models/`: path to the folder that contains .nd files with pre-trained styles weights * `13`: prefix pointing to 13_decoder, which is the default decoder for the repository +>**NOTE**: If you get an error saying "No module named 'cPickle'", try running the script from this step in Python 2. Then return to Python 3 for the remaining steps. + You can choose any style from [collection of pre-trained weights](https://pan.baidu.com/s/1skMHqYp). (On the Chinese-language page, click the down arrow next to a size in megabytes. Then wait for an overlay box to appear, and click the blue button in it to download.) The `generate()` function generates `nst_vgg19-symbol.json` and `vgg19-symbol.json` files for the specified shape. In the code, it is [1024 x 768] for a 4:3 ratio, and you can specify another, for example, [224,224] for a square ratio. #### 6. 
Run the Model Optimizer to generate an Intermediate Representation (IR): diff --git a/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_F3Net.md b/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_F3Net.md index ffb16eb5f7c..0d130197f74 100644 --- a/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_F3Net.md +++ b/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_F3Net.md @@ -2,15 +2,19 @@ [F3Net](https://github.com/weijun88/F3Net): Fusion, Feedback and Focus for Salient Object Detection +## Clone the F3Net Model Repository + +To clone the repository, run the following command: +```bash +git clone http://github.com/weijun88/F3Net.git +``` + ## Download and Convert the Model to ONNX* To download the pre-trained model or train the model yourself, refer to the -[instruction](https://github.com/weijun88/F3Net/blob/master/README.md) in the F3Net model repository. Firstly, -convert the model to ONNX\* format. Create and run the script with the following content in the `src` -directory of the model repository: +[instruction](https://github.com/weijun88/F3Net/blob/master/README.md) in the F3Net model repository. First, convert the model to ONNX\* format. Create and run the script with the following content in the `src` directory of the model repository: ```python import torch - from dataset import Config from net import F3Net @@ -19,7 +23,7 @@ net = F3Net(cfg) image = torch.zeros([1, 3, 352, 352]) torch.onnx.export(net, image, 'f3net.onnx', export_params=True, do_constant_folding=True, opset_version=11) ``` -The script generates the ONNX\* model file f3net.onnx. The model conversion was tested with the repository hash commit `eecace3adf1e8946b571a4f4397681252f9dc1b8`. +The script generates the ONNX\* model file `f3net.onnx`. This model conversion was tested with the repository hash commit `eecace3adf1e8946b571a4f4397681252f9dc1b8`. 
## Convert ONNX* F3Net Model to IR diff --git a/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_RNNT.md b/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_RNNT.md index a58e886d4f4..31de647f379 100644 --- a/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_RNNT.md +++ b/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_RNNT.md @@ -20,15 +20,15 @@ mkdir rnnt_for_openvino cd rnnt_for_openvino ``` -**Step 3**. Download pretrained weights for PyTorch implementation from https://zenodo.org/record/3662521#.YG21DugzZaQ. -For UNIX*-like systems you can use wget: +**Step 3**. Download pretrained weights for PyTorch implementation from [https://zenodo.org/record/3662521#.YG21DugzZaQ](https://zenodo.org/record/3662521#.YG21DugzZaQ). +For UNIX*-like systems you can use `wget`: ```bash wget https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt ``` The link was taken from `setup.sh` in the `speech_recoginitin/rnnt` subfolder. You will get exactly the same weights as -if you were following the steps from https://github.com/mlcommons/inference/tree/master/speech_recognition/rnnt. +if you were following the steps from [https://github.com/mlcommons/inference/tree/master/speech_recognition/rnnt](https://github.com/mlcommons/inference/tree/master/speech_recognition/rnnt). -**Step 4**. Install required python* packages: +**Step 4**. Install required Python packages: ```bash pip3 install torch toml ``` @@ -37,7 +37,7 @@ pip3 install torch toml `export_rnnt_to_onnx.py` and run it in the current directory `rnnt_for_openvino`: > **NOTE**: If you already have a full clone of MLCommons inference repository, you need to -> specify `mlcommons_inference_path` variable. +> specify the `mlcommons_inference_path` variable. 
```python import toml @@ -92,8 +92,7 @@ torch.onnx.export(model.joint, (f, g), "rnnt_joint.onnx", opset_version=12, python3 export_rnnt_to_onnx.py ``` -After completing this step, the files rnnt_encoder.onnx, rnnt_prediction.onnx, and rnnt_joint.onnx will be saved in -the current directory. +After completing this step, the files `rnnt_encoder.onnx`, `rnnt_prediction.onnx`, and `rnnt_joint.onnx` will be saved in the current directory. **Step 6**. Run the conversion command: @@ -102,6 +101,6 @@ python3 {path_to_openvino}/mo.py --input_model rnnt_encoder.onnx --input "input. python3 {path_to_openvino}/mo.py --input_model rnnt_prediction.onnx --input "input.1[1 1],1[2 1 320],2[2 1 320]" python3 {path_to_openvino}/mo.py --input_model rnnt_joint.onnx --input "0[1 1 1024],1[1 1 320]" ``` -Please note that hardcoded value for sequence length = 157 was taken from the MLCommons, but conversion to IR preserves -network [reshapeability](../../../../IE_DG/ShapeInference.md); this means you can change input shapes manually to any value either during conversion or -inference. +Please note that hardcoded value for sequence length = 157 was taken from the MLCommons but conversion to IR preserves +network [reshapeability](../../../../IE_DG/ShapeInference.md), this means you can change input shapes manually to any value either during conversion or +inference. \ No newline at end of file diff --git a/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_YOLACT.md b/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_YOLACT.md index 9fb7e1ca9e9..50272a33f74 100644 --- a/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_YOLACT.md +++ b/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_YOLACT.md @@ -138,7 +138,7 @@ git checkout 57b8f2d95e62e2e649b382f516ab41f949b57239 3. Set up the environment as described in `README.md`. -**Step 2**. 
Download a pre-trained model from the list attached in the `Evaluation` section of `README.md` document, for example `yolact_base_54_800000.pth`. +**Step 2**. Download a pre-trained model from the list attached in the `Evaluation` section of the [README.md](https://github.com/dbolya/yolact/blob/master/README.md) document, for example `yolact_base_54_800000.pth`. **Step 3**. Export the model to ONNX* format. @@ -187,5 +187,4 @@ python path/to/model_optimizer/mo.py \ --input_model /path/to/yolact.onnx \ --reverse_input_channels \ --scale 255 -``` - +``` \ No newline at end of file diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_XLNet_From_Tensorflow.md b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_XLNet_From_Tensorflow.md index cc121ab19e1..ac706c664f2 100644 --- a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_XLNet_From_Tensorflow.md +++ b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_XLNet_From_Tensorflow.md @@ -24,13 +24,15 @@ To get pb-file from the archive contents, you need to do the following. 1. 
Run commands ```sh - cd ~ - mkdir XLNet-Base - cd XLNet-Base - git clone https://github.com/zihangdai/xlnet - wget https://storage.googleapis.com/xlnet/released_models/cased_L-12_H-768_A-12.zip - unzip cased_L-12_H-768_A-12.zip - mkdir try_save +cd ~ +mkdir XLNet-Base +cd XLNet-Base +git clone https://github.com/zihangdai/xlnet +wget https://storage.googleapis.com/xlnet/released_models/cased_L-12_H-768_A-12.zip +unzip cased_L-12_H-768_A-12.zip +mkdir try_save +cd xlnet +sed -i "s/tf\.train\.Optimizer/tf\.train.Optimizer if tf.version < '1.15' else tf.compat.v1.train.Optimizer/g" model_utils.py ``` diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md index 60674b1c768..ae2de000433 100644 --- a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md +++ b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md @@ -67,7 +67,11 @@ git checkout ed60b90 ``` 3. Download [coco.names](https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names) file from the DarkNet website **OR** use labels that fit your task. 4. Download the [yolov3.weights](https://pjreddie.com/media/files/yolov3.weights) (for the YOLOv3 model) or [yolov3-tiny.weights](https://pjreddie.com/media/files/yolov3-tiny.weights) (for the YOLOv3-tiny model) file **OR** use your pre-trained weights with the same structure -5. Run a converter: +5. Install PIL (provided by the Pillow package), which is used by the conversion script in the repo: +```sh +pip install Pillow +``` +6.
Run a converter: - for YOLO-v3: ```sh python3 convert_weights_pb.py --class_names coco.names --data_format NHWC --weights_file yolov3.weights diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md index cda8458e4dd..567543a01a8 100644 --- a/docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md +++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md @@ -34,7 +34,7 @@ Model Optimizer extensibility mechanism enables support of new operations and custom transformations to generate the optimized intermediate representation (IR) as described in the [Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™](../../IR_and_opsets.md). This -mechanism is a core part of the Model Optimizer. The Model Optimizer itself uses it under the hood, being a huge set of examples on how to add custom logic to support your model. +mechanism is a core part of the Model Optimizer, which uses it under the hood, so the Model Optimizer itself is a huge set of examples for adding custom logic to support your model. There are several cases when the customization is needed: diff --git a/docs/benchmarks/performance_benchmarks_faq.md b/docs/benchmarks/performance_benchmarks_faq.md index 2ff33612097..b833f03c531 100644 --- a/docs/benchmarks/performance_benchmarks_faq.md +++ b/docs/benchmarks/performance_benchmarks_faq.md @@ -19,31 +19,34 @@ All of the performance benchmarks were generated using the open-sourced tool wit #### 6. What image sizes are used for the classification network models? The image size used in the inference depends on the network being benchmarked. The following table shows the list of input sizes for each network model. 
-| **Model** | **Public Network** | **Task** | **Input Size** (Height x Width) | -|------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------|-----------------------------|-----------------------------------| -| [bert-large-uncased-whole-word-masking-squad](https://github.com/openvinotoolkit/open_model_zoo/tree/develop/models/intel/bert-large-uncased-whole-word-masking-squad-int8-0001) | BERT-large |question / answer |384| -| [deeplabv3-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/deeplabv3) | DeepLab v3 Tf |semantic segmentation | 513x513 | -| [densenet-121-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/densenet-121-tf) | Densenet-121 Tf |classification | 224x224 | -| [facenet-20180408-102900-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/facenet-20180408-102900) | FaceNet TF | face recognition | 160x160 | -| [faster_rcnn_resnet50_coco-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/faster_rcnn_resnet50_coco) | Faster RCNN Tf | object detection | 600x1024 | -| [googlenet-v1-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/googlenet-v1-tf) | GoogLeNet_ILSVRC-2012 | classification | 224x224 | -| [inception-v3-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/googlenet-v3) | Inception v3 Tf | classification | 299x299 | -| [mobilenet-ssd-CF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-ssd) | SSD (MobileNet)_COCO-2017_Caffe | object detection | 300x300 | -| [mobilenet-v1-1.0-224-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-v1-1.0-224-tf) | MobileNet v1 Tf | classification | 224x224 | -| 
[mobilenet-v2-1.0-224-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-v2-1.0-224) | MobileNet v2 Tf | classification | 224x224 | -| [mobilenet-v2-pytorch](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-v2-pytorch ) | Mobilenet V2 PyTorch | classification | 224x224 | -| [resnet-18-pytorch](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-18-pytorch) | ResNet-18 PyTorch | classification | 224x224 | -| [resnet-50-pytorch](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-50-pytorch) | ResNet-50 v1 PyTorch | classification | 224x224 | -| [resnet-50-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-50-tf) | ResNet-50_v1_ILSVRC-2012 | classification | 224x224 | -| [se-resnext-50-CF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/se-resnext-50) | Se-ResNext-50_ILSVRC-2012_Caffe | classification | 224x224 | -| [squeezenet1.1-CF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/squeezenet1.1) | SqueezeNet_v1.1_ILSVRC-2012_Caffe | classification | 227x227 | -| [ssd300-CF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssd300) | SSD (VGG-16)_VOC-2007_Caffe | object detection | 300x300 | -| [yolo_v3-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v3-tf) | TF Keras YOLO v3 Modelset | object detection | 300x300 | -| [yolo_v4-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v4-tf) | Yolo-V4 TF | object detection | 608x608 | -| [ssd_mobilenet_v1_coco-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssd_mobilenet_v1_coco) | ssd_mobilenet_v1_coco | object detection | 300x300 | -| [ssdlite_mobilenet_v2-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) | 
ssd_mobilenet_v2 | object detection | 300x300 | -| [unet-camvid-onnx-0001](https://github.com/openvinotoolkit/open_model_zoo/blob/master/models/intel/unet-camvid-onnx-0001/description/unet-camvid-onnx-0001.md) | U-Net | semantic segmentation | 368x480 | - +| **Model** | **Public Network** | **Task** | **Input Size** (Height x Width) | +|------------------------------------------------------------------------------------------------------------------------------------|------------------------------------|-----------------------------|-----------------------------------| +| [bert-large-uncased-whole-word-masking-squad](https://github.com/openvinotoolkit/open_model_zoo/tree/develop/models/intel/bert-large-uncased-whole-word-masking-squad-int8-0001) | BERT-large |question / answer |384| +| [brain-tumor-segmentation-0001-MXNET](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/brain-tumor-segmentation-0001) | brain-tumor-segmentation-0001 | semantic segmentation | 128x128x128 | +| [brain-tumor-segmentation-0002-CF2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/brain-tumor-segmentation-0002) | brain-tumor-segmentation-0002 | semantic segmentation | 128x128x128 | +| [deeplabv3-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/deeplabv3) | DeepLab v3 Tf | semantic segmentation | 513x513 | +| [densenet-121-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/densenet-121-tf) | Densenet-121 Tf | classification | 224x224 | +| [facenet-20180408-102900-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/facenet-20180408-102900) | FaceNet TF | face recognition | 160x160 | +| [faster_rcnn_resnet50_coco-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/faster_rcnn_resnet50_coco) | Faster RCNN Tf | object detection | 600x1024 | +| 
[inception-v4-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/develop/models/public/googlenet-v4-tf) | Inception v4 Tf (aka GoogleNet-V4) | classification | 299x299 | +| [inception-v3-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/googlenet-v3) | Inception v3 Tf | classification | 299x299 | +| [mobilenet-ssd-CF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-ssd) | SSD (MobileNet)_COCO-2017_Caffe | object detection | 300x300 | +| [mobilenet-v2-1.0-224-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-v2-1.0-224) | MobileNet v2 Tf | classification | 224x224 | +| [mobilenet-v2-pytorch](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-v2-pytorch ) | Mobilenet V2 PyTorch | classification | 224x224 | +| [resnet-18-pytorch](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-18-pytorch) | ResNet-18 PyTorch | classification | 224x224 | +| [resnet-50-pytorch](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-50-pytorch) | ResNet-50 v1 PyTorch | classification | 224x224 | +| [resnet-50-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-50-tf) | ResNet-50_v1_ILSVRC-2012 | classification | 224x224 | +| [se-resnext-50-CF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/se-resnext-50) | Se-ResNext-50_ILSVRC-2012_Caffe | classification | 224x224 | +| [squeezenet1.1-CF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/squeezenet1.1) | SqueezeNet_v1.1_ILSVRC-2012_Caffe | classification | 227x227 | +| [ssd300-CF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssd300) | SSD (VGG-16)_VOC-2007_Caffe | object detection | 300x300 | +| [yolo_v4-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v4-tf) | Yolo-V4 TF | 
object detection | 608x608 | +| [ssd_mobilenet_v1_coco-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssd_mobilenet_v1_coco) | ssd_mobilenet_v1_coco | object detection | 300x300 | +| [ssdlite_mobilenet_v2-TF](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) | ssdlite_mobilenet_v2 | object detection | 300x300 | +| [unet-camvid-onnx-0001](https://github.com/openvinotoolkit/open_model_zoo/blob/master/models/intel/unet-camvid-onnx-0001/description/unet-camvid-onnx-0001.md) | U-Net | semantic segmentation | 368x480 | +| [yolo-v3-tiny-tf](https://github.com/openvinotoolkit/open_model_zoo/tree/develop/models/public/yolo-v3-tiny-tf) | YOLO v3 Tiny | object detection | 416x416 | +| [ssd-resnet34-1200-onnx](https://github.com/openvinotoolkit/open_model_zoo/tree/develop/models/public/ssd-resnet34-1200-onnx) | ssd-resnet34 onnx model | object detection | 1200x1200 | +| [vgg19-caffe](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/vgg19-caffe2) | VGG-19 | classification | 224x224| + #### 7. Where can I purchase the specific hardware used in the benchmarking? Intel partners with various vendors all over the world. Visit the [Intel® AI: In Production Partners & Solutions Catalog](https://www.intel.com/content/www/us/en/internet-of-things/ai-in-production/partners-solutions-catalog.html) for a list of Equipment Makers and the [Supported Devices](../IE_DG/supported_plugins/Supported_Devices.md) documentation. You can also remotely test and run models before purchasing any hardware by using [Intel® DevCloud for the Edge](http://devcloud.intel.com/edge/). 
diff --git a/docs/benchmarks/performance_benchmarks_openvino.md b/docs/benchmarks/performance_benchmarks_openvino.md index 456f593db14..be7c46410d7 100644 --- a/docs/benchmarks/performance_benchmarks_openvino.md +++ b/docs/benchmarks/performance_benchmarks_openvino.md @@ -29,81 +29,86 @@ Measuring inference performance involves many variables and is extremely use-cas \htmlonly - + \endhtmlonly \htmlonly - + \endhtmlonly \htmlonly - + \endhtmlonly \htmlonly - + \endhtmlonly \htmlonly - + \endhtmlonly \htmlonly - + \endhtmlonly \htmlonly - + \endhtmlonly \htmlonly - + \endhtmlonly \htmlonly - + \endhtmlonly \htmlonly - + \endhtmlonly \htmlonly - + +\endhtmlonly + +\htmlonly + \endhtmlonly \htmlonly - + \endhtmlonly \htmlonly - -\endhtmlonly - - -\htmlonly - + \endhtmlonly \htmlonly - + \endhtmlonly \htmlonly - + \endhtmlonly \htmlonly - + \endhtmlonly +\htmlonly + +\endhtmlonly + + + ## Platform Configurations -Intel® Distribution of OpenVINO™ toolkit performance benchmark numbers are based on release 2021.3. +Intel® Distribution of OpenVINO™ toolkit performance benchmark numbers are based on release 2021.4. -Intel technologies’ features and benefits depend on system configuration and may require enabled hardware, software or service activation. Learn more at intel.com, or from the OEM or retailer. Performance results are based on testing as of March 15, 2021 and may not reflect all publicly available updates. See configuration disclosure for details. No product can be absolutely secure. +Intel technologies’ features and benefits depend on system configuration and may require enabled hardware, software or service activation. Learn more at intel.com, or from the OEM or retailer. Performance results are based on testing as of June 18, 2021 and may not reflect all publicly available updates. See configuration disclosure for details. No product can be absolutely secure. Performance varies by use, configuration and other factors. 
Learn more at [www.intel.com/PerformanceIndex](https://www.intel.com/PerformanceIndex). @@ -127,15 +132,15 @@ Testing by Intel done on: see test date for each HW platform below. | Operating System | Ubuntu* 18.04 LTS | Ubuntu* 18.04 LTS | Ubuntu* 18.04 LTS | | Kernel Version | 5.3.0-24-generic | 5.3.0-24-generic | 5.3.0-24-generic | | BIOS Vendor | American Megatrends Inc.* | American Megatrends Inc. | Intel Corporation | -| BIOS Version | 0904 | 607 | SE5C620.86B.02.01.
0009.092820190230 | -| BIOS Release | April 12, 2019 | May 29, 2020 | September 28, 2019 | +| BIOS Version | 0904 | 607 | SE5C620.86B.02.01.
0013.121520200651 | +| BIOS Release | April 12, 2019 | May 29, 2020 | December 15, 2020 | | BIOS Settings | Select optimized default settings,
save & exit | Select optimized default settings,
save & exit | Select optimized default settings,
change power policy
to "performance",
save & exit | | Batch size | 1 | 1 | 1 | Precision | INT8 | INT8 | INT8 | Number of concurrent inference requests | 4 | 5 | 32 -| Test Date | March 15, 2021 | March 15, 2021 | March 15, 2021 -| Power dissipation, TDP in Watt | [71](https://ark.intel.com/content/www/us/en/ark/products/134854/intel-xeon-e-2124g-processor-8m-cache-up-to-4-50-ghz.html#tab-blade-1-0-1) | [125](https://ark.intel.com/content/www/us/en/ark/products/199336/intel-xeon-w-1290p-processor-20m-cache-3-70-ghz.html) | [125](https://ark.intel.com/content/www/us/en/ark/products/193394/intel-xeon-silver-4216-processor-22m-cache-2-10-ghz.html#tab-blade-1-0-1) | -| CPU Price on Mach 15th, 2021, USD
Prices may vary | [213](https://ark.intel.com/content/www/us/en/ark/products/134854/intel-xeon-e-2124g-processor-8m-cache-up-to-4-50-ghz.html) | [539](https://ark.intel.com/content/www/us/en/ark/products/199336/intel-xeon-w-1290p-processor-20m-cache-3-70-ghz.html) |[1,002](https://ark.intel.com/content/www/us/en/ark/products/193394/intel-xeon-silver-4216-processor-22m-cache-2-10-ghz.html) | +| Test Date | June 18, 2021 | June 18, 2021 | June 18, 2021 +| Rated maximum TDP/socket in Watt | [71](https://ark.intel.com/content/www/us/en/ark/products/134854/intel-xeon-e-2124g-processor-8m-cache-up-to-4-50-ghz.html#tab-blade-1-0-1) | [125](https://ark.intel.com/content/www/us/en/ark/products/199336/intel-xeon-w-1290p-processor-20m-cache-3-70-ghz.html) | [125](https://ark.intel.com/content/www/us/en/ark/products/193394/intel-xeon-silver-4216-processor-22m-cache-2-10-ghz.html#tab-blade-1-0-1) | +| CPU Price/socket on June 21, 2021, USD
Prices may vary | [213](https://ark.intel.com/content/www/us/en/ark/products/134854/intel-xeon-e-2124g-processor-8m-cache-up-to-4-50-ghz.html) | [539](https://ark.intel.com/content/www/us/en/ark/products/199336/intel-xeon-w-1290p-processor-20m-cache-3-70-ghz.html) |[1,002](https://ark.intel.com/content/www/us/en/ark/products/193394/intel-xeon-silver-4216-processor-22m-cache-2-10-ghz.html) | **CPU Inference Engines (continue)** @@ -149,84 +154,104 @@ Testing by Intel done on: see test date for each HW platform below. | Operating System | Ubuntu* 18.04 LTS | Ubuntu* 18.04 LTS | Ubuntu* 18.04 LTS | | Kernel Version | 5.3.0-24-generic | 5.3.0-24-generic | 5.3.0-24-generic | | BIOS Vendor | Intel Corporation | Intel Corporation | Intel Corporation | -| BIOS Version | SE5C620.86B.02.01.
0009.092820190230 | SE5C620.86B.02.01.
0009.092820190230 | WLYDCRB1.SYS.0020.
P86.2103050636 | -| BIOS Release | September 28, 2019 | September 28, 2019 | March 5, 2021 | +| BIOS Version | SE5C620.86B.02.01.
0013.121520200651 | SE5C620.86B.02.01.
0013.121520200651 | WLYDCRB1.SYS.0020.
P86.2103050636 | +| BIOS Release | December 15, 2020 | December 15, 2020 | March 5, 2021 | | BIOS Settings | Select optimized default settings,
change power policy to "performance",
save & exit | Select optimized default settings,
change power policy to "performance",
save & exit | Select optimized default settings,
change power policy to "performance",
save & exit | | Batch size | 1 | 1 | 1 | | Precision | INT8 | INT8 | INT8 | | Number of concurrent inference requests |32 | 52 | 80 | -| Test Date | March 15, 2021 | March 15, 2021 | March 22, 2021 | -| Power dissipation, TDP in Watt | [105](https://ark.intel.com/content/www/us/en/ark/products/193953/intel-xeon-gold-5218t-processor-22m-cache-2-10-ghz.html#tab-blade-1-0-1) | [205](https://ark.intel.com/content/www/us/en/ark/products/192482/intel-xeon-platinum-8270-processor-35-75m-cache-2-70-ghz.html#tab-blade-1-0-1) | [270](https://ark.intel.com/content/www/us/en/ark/products/212287/intel-xeon-platinum-8380-processor-60m-cache-2-30-ghz.html) | -| CPU Price, USD
Prices may vary | [1,349](https://ark.intel.com/content/www/us/en/ark/products/193953/intel-xeon-gold-5218t-processor-22m-cache-2-10-ghz.html) (on Mach 15th, 2021) | [7,405](https://ark.intel.com/content/www/us/en/ark/products/192482/intel-xeon-platinum-8270-processor-35-75m-cache-2-70-ghz.html) (on Mach 15th, 2021) | [8,099](https://ark.intel.com/content/www/us/en/ark/products/212287/intel-xeon-platinum-8380-processor-60m-cache-2-30-ghz.html) (on March 26th, 2021) | +| Test Date | June 18, 2021 | June 18, 2021 | June 18, 2021 | +| Rated maximum TDP/socket in Watt | [105](https://ark.intel.com/content/www/us/en/ark/products/193953/intel-xeon-gold-5218t-processor-22m-cache-2-10-ghz.html#tab-blade-1-0-1) | [205](https://ark.intel.com/content/www/us/en/ark/products/192482/intel-xeon-platinum-8270-processor-35-75m-cache-2-70-ghz.html#tab-blade-1-0-1) | [270](https://ark.intel.com/content/www/us/en/ark/products/212287/intel-xeon-platinum-8380-processor-60m-cache-2-30-ghz.html) | +| CPU Price/socket on June 21, 2021, USD
Prices may vary | [1,349](https://ark.intel.com/content/www/us/en/ark/products/193953/intel-xeon-gold-5218t-processor-22m-cache-2-10-ghz.html) | [7,405](https://ark.intel.com/content/www/us/en/ark/products/192482/intel-xeon-platinum-8270-processor-35-75m-cache-2-70-ghz.html) | [8,099](https://ark.intel.com/content/www/us/en/ark/products/212287/intel-xeon-platinum-8380-processor-60m-cache-2-30-ghz.html) | **CPU Inference Engines (continue)** -| | Intel® Core™ i7-8700T | Intel® Core™ i9-10920X | 11th Gen Intel® Core™ i7-1185G7 | -| -------------------- | ----------------------------------- |--------------------------------------| --------------------------------| -| Motherboard | GIGABYTE* Z370M DS3H-CF | ASUS* PRIME X299-A II | Intel Corporation
internal/Reference
Validation Platform | -| CPU | Intel® Core™ i7-8700T CPU @ 2.40GHz | Intel® Core™ i9-10920X CPU @ 3.50GHz | 11th Gen Intel® Core™ i7-1185G7 @ 3.00GHz | -| Hyper Threading | ON | ON | ON | -| Turbo Setting | ON | ON | ON | -| Memory | 4 x 16 GB DDR4 2400MHz | 4 x 16 GB DDR4 2666MHz | 2 x 8 GB DDR4 3200MHz | -| Operating System | Ubuntu* 18.04 LTS | Ubuntu* 18.04 LTS | Ubuntu* 18.04 LTS | -| Kernel Version | 5.3.0-24-generic | 5.3.0-24-generic | 5.8.0-05-generic | -| BIOS Vendor | American Megatrends Inc.* | American Megatrends Inc.* | Intel Corporation | -| BIOS Version | F11 | 505 | TGLSFWI1.R00.3425.
A00.2010162309 | -| BIOS Release | March 13, 2019 | December 17, 2019 | October 16, 2020 | -| BIOS Settings | Select optimized default settings,
set OS type to "other",
save & exit | Default Settings | Default Settings | -| Batch size | 1 | 1 | 1 | -| Precision | INT8 | INT8 | INT8 | -| Number of concurrent inference requests |4 | 24 | 4 | -| Test Date | March 15, 2021 | March 15, 2021 | March 15, 2021 | -| Power dissipation, TDP in Watt | [35](https://ark.intel.com/content/www/us/en/ark/products/129948/intel-core-i7-8700t-processor-12m-cache-up-to-4-00-ghz.html#tab-blade-1-0-1) | [165](https://ark.intel.com/content/www/us/en/ark/products/198012/intel-core-i9-10920x-x-series-processor-19-25m-cache-3-50-ghz.html) | [28](https://ark.intel.com/content/www/us/en/ark/products/208664/intel-core-i7-1185g7-processor-12m-cache-up-to-4-80-ghz-with-ipu.html#tab-blade-1-0-1) | -| CPU Price on Mach 15th, 2021, USD
Prices may vary | [303](https://ark.intel.com/content/www/us/en/ark/products/129948/intel-core-i7-8700t-processor-12m-cache-up-to-4-00-ghz.html) | [700](https://ark.intel.com/content/www/us/en/ark/products/198012/intel-core-i9-10920x-x-series-processor-19-25m-cache-3-50-ghz.html) | [426](https://ark.intel.com/content/www/us/en/ark/products/208664/intel-core-i7-1185g7-processor-12m-cache-up-to-4-80-ghz-with-ipu.html#tab-blade-1-0-0) | +| | Intel® Core™ i7-8700T | Intel® Core™ i9-10920X | +| -------------------- | ----------------------------------- |--------------------------------------| +| Motherboard | GIGABYTE* Z370M DS3H-CF | ASUS* PRIME X299-A II | +| CPU | Intel® Core™ i7-8700T CPU @ 2.40GHz | Intel® Core™ i9-10920X CPU @ 3.50GHz | +| Hyper Threading | ON | ON | +| Turbo Setting | ON | ON | +| Memory | 4 x 16 GB DDR4 2400MHz | 4 x 16 GB DDR4 2666MHz | +| Operating System | Ubuntu* 18.04 LTS | Ubuntu* 18.04 LTS | +| Kernel Version | 5.3.0-24-generic | 5.3.0-24-generic | +| BIOS Vendor | American Megatrends Inc.* | American Megatrends Inc.* | +| BIOS Version | F14c | 1004 | +| BIOS Release | March 23, 2021 | March 19, 2021 | +| BIOS Settings | Select optimized default settings,
set OS type to "other",
save & exit | Default Settings | +| Batch size | 1 | 1 | +| Precision | INT8 | INT8 | +| Number of concurrent inference requests |4 | 24 | +| Test Date | June 18, 2021 | June 18, 2021 | +| Rated maximum TDP/socket in Watt | [35](https://ark.intel.com/content/www/us/en/ark/products/129948/intel-core-i7-8700t-processor-12m-cache-up-to-4-00-ghz.html#tab-blade-1-0-1) | [165](https://ark.intel.com/content/www/us/en/ark/products/198012/intel-core-i9-10920x-x-series-processor-19-25m-cache-3-50-ghz.html) | +| CPU Price/socket on June 21, 2021, USD
Prices may vary | [303](https://ark.intel.com/content/www/us/en/ark/products/129948/intel-core-i7-8700t-processor-12m-cache-up-to-4-00-ghz.html) | [700](https://ark.intel.com/content/www/us/en/ark/products/198012/intel-core-i9-10920x-x-series-processor-19-25m-cache-3-50-ghz.html) | + +**CPU Inference Engines (continue)** +| | 11th Gen Intel® Core™ i7-1185G7 | 11th Gen Intel® Core™ i7-11850HE | +| -------------------- | --------------------------------|----------------------------------| +| Motherboard | Intel Corporation
internal/Reference
Validation Platform | Intel Corporation
internal/Reference
Validation Platform | +| CPU | 11th Gen Intel® Core™ i7-1185G7 @ 3.00GHz | 11th Gen Intel® Core™ i7-11850HE @ 2.60GHz | +| Hyper Threading | ON | ON | +| Turbo Setting | ON | ON | +| Memory | 2 x 8 GB DDR4 3200MHz | 2 x 16 GB DDR4 3200MHz | +| Operating System | Ubuntu* 18.04 LTS | Ubuntu* 18.04.4 LTS | +| Kernel Version | 5.8.0-05-generic | 5.8.0-050800-generic | +| BIOS Vendor | Intel Corporation | Intel Corporation | +| BIOS Version | TGLSFWI1.R00.3425.
A00.2010162309 | TGLIFUI1.R00.4064.
A01.2102200132 | +| BIOS Release | October 16, 2020 | February 20, 2021 | +| BIOS Settings | Default Settings | Default Settings | +| Batch size | 1 | 1 | +| Precision | INT8 | INT8 | +| Number of concurrent inference requests |4 | 4 | +| Test Date | June 18, 2021 | June 18, 2021 | +| Rated maximum TDP/socket in Watt | [28](https://ark.intel.com/content/www/us/en/ark/products/208664/intel-core-i7-1185g7-processor-12m-cache-up-to-4-80-ghz-with-ipu.html) | [45](https://ark.intel.com/content/www/us/en/ark/products/213799/intel-core-i7-11850h-processor-24m-cache-up-to-4-80-ghz.html) | +| CPU Price/socket on June 21, 2021, USD
Prices may vary | [426](https://ark.intel.com/content/www/us/en/ark/products/208664/intel-core-i7-1185g7-processor-12m-cache-up-to-4-80-ghz-with-ipu.html) | [395](https://ark.intel.com/content/www/us/en/ark/products/213799/intel-core-i7-11850h-processor-24m-cache-up-to-4-80-ghz.html) | + +**CPU Inference Engines (continue)** + +| | Intel® Core™ i3-8100 | Intel® Core™ i5-8500 | Intel® Core™ i5-10500TE | +| -------------------- |----------------------------------- | ---------------------------------- | ----------------------------------- | +| Motherboard | GIGABYTE* Z390 UD | ASUS* PRIME Z370-A | GIGABYTE* Z490 AORUS PRO AX | +| CPU | Intel® Core™ i3-8100 CPU @ 3.60GHz | Intel® Core™ i5-8500 CPU @ 3.00GHz | Intel® Core™ i5-10500TE CPU @ 2.30GHz | +| Hyper Threading | OFF | OFF | ON | +| Turbo Setting | OFF | ON | ON | +| Memory | 4 x 8 GB DDR4 2400MHz | 2 x 16 GB DDR4 2666MHz | 2 x 16 GB DDR4 @ 2666MHz | +| Operating System | Ubuntu* 18.04 LTS | Ubuntu* 18.04 LTS | Ubuntu* 18.04 LTS | +| Kernel Version | 5.3.0-24-generic | 5.3.0-24-generic | 5.3.0-24-generic | +| BIOS Vendor | American Megatrends Inc.* | American Megatrends Inc.* | American Megatrends Inc.* | +| BIOS Version | F8 | 2401 | F3 | +| BIOS Release | May 24, 2019 | July 12, 2019 | March 25, 2020 | +| BIOS Settings | Select optimized default settings,
set OS type to "other",
save & exit | Select optimized default settings,
save & exit | Select optimized default settings,
set OS type to "other",
save & exit | +| Batch size | 1 | 1 | 1 | +| Precision | INT8 | INT8 | INT8 | +| Number of concurrent inference requests | 4 | 3 | 4 | +| Test Date | June 18, 2021 | June 18, 2021 | June 18, 2021 | +| Rated maximum TDP/socket in Watt | [65](https://ark.intel.com/content/www/us/en/ark/products/126688/intel-core-i3-8100-processor-6m-cache-3-60-ghz.html#tab-blade-1-0-1)| [65](https://ark.intel.com/content/www/us/en/ark/products/129939/intel-core-i5-8500-processor-9m-cache-up-to-4-10-ghz.html#tab-blade-1-0-1)| [35](https://ark.intel.com/content/www/us/en/ark/products/203891/intel-core-i5-10500te-processor-12m-cache-up-to-3-70-ghz.html) | +| CPU Price/socket on June 21, 2021, USD
Prices may vary | [117](https://ark.intel.com/content/www/us/en/ark/products/126688/intel-core-i3-8100-processor-6m-cache-3-60-ghz.html) | [192](https://ark.intel.com/content/www/us/en/ark/products/129939/intel-core-i5-8500-processor-9m-cache-up-to-4-10-ghz.html) | [195](https://ark.intel.com/content/www/us/en/ark/products/203891/intel-core-i5-10500te-processor-12m-cache-up-to-3-70-ghz.html) | **CPU Inference Engines (continue)** -| | Intel® Core™ i5-8500 | Intel® Core™ i5-10500TE | -| -------------------- | ---------------------------------- | ----------------------------------- | -| Motherboard | ASUS* PRIME Z370-A | GIGABYTE* Z490 AORUS PRO AX | -| CPU | Intel® Core™ i5-8500 CPU @ 3.00GHz | Intel® Core™ i5-10500TE CPU @ 2.30GHz | -| Hyper Threading | OFF | ON | -| Turbo Setting | ON | ON | -| Memory | 2 x 16 GB DDR4 2666MHz | 2 x 16 GB DDR4 @ 2666MHz | -| Operating System | Ubuntu* 18.04 LTS | Ubuntu* 18.04 LTS | -| Kernel Version | 5.3.0-24-generic | 5.3.0-24-generic | -| BIOS Vendor | American Megatrends Inc.* | American Megatrends Inc.* | -| BIOS Version | 2401 | F3 | -| BIOS Release | July 12, 2019 | March 25, 2020 | -| BIOS Settings | Select optimized default settings,
save & exit | Select optimized default settings,
set OS type to "other",
save & exit | -| Batch size | 1 | 1 | -| Precision | INT8 | INT8 | -| Number of concurrent inference requests | 3 | 4 | -| Test Date | March 15, 2021 | March 15, 2021 | -| Power dissipation, TDP in Watt | [65](https://ark.intel.com/content/www/us/en/ark/products/129939/intel-core-i5-8500-processor-9m-cache-up-to-4-10-ghz.html#tab-blade-1-0-1)| [35](https://ark.intel.com/content/www/us/en/ark/products/203891/intel-core-i5-10500te-processor-12m-cache-up-to-3-70-ghz.html) | -| CPU Price on Mach 15th, 2021, USD
Prices may vary | [192](https://ark.intel.com/content/www/us/en/ark/products/129939/intel-core-i5-8500-processor-9m-cache-up-to-4-10-ghz.html) | [195](https://ark.intel.com/content/www/us/en/ark/products/203891/intel-core-i5-10500te-processor-12m-cache-up-to-3-70-ghz.html) | - - -**CPU Inference Engines (continue)** - -| | Intel Atom® x5-E3940 | Intel Atom® x6425RE | Intel® Core™ i3-8100 | -| -------------------- | --------------------------------------|------------------------------- |----------------------------------- | -| Motherboard | | Intel Corporation /
ElkhartLake LPDDR4x T3 CRB | GIGABYTE* Z390 UD | -| CPU | Intel Atom® Processor E3940 @ 1.60GHz | Intel Atom® x6425RE
Processor @ 1.90GHz | Intel® Core™ i3-8100 CPU @ 3.60GHz | -| Hyper Threading | OFF | OFF | OFF | -| Turbo Setting | ON | ON | OFF | -| Memory | 1 x 8 GB DDR3 1600MHz | 2 x 4GB DDR4 3200 MHz | 4 x 8 GB DDR4 2400MHz | -| Operating System | Ubuntu* 18.04 LTS | Ubuntu* 18.04 LTS | Ubuntu* 18.04 LTS | -| Kernel Version | 5.3.0-24-generic | 5.8.0-050800-generic | 5.3.0-24-generic | -| BIOS Vendor | American Megatrends Inc.* | Intel Corporation | American Megatrends Inc.* | -| BIOS Version | 5.12 | EHLSFWI1.R00.2463.
A03.2011200425 | F8 | -| BIOS Release | September 6, 2017 | November 22, 2020 | May 24, 2019 | -| BIOS Settings | Default settings | Default settings | Select optimized default settings,
set OS type to "other",
save & exit | -| Batch size | 1 | 1 | 1 | -| Precision | INT8 | INT8 | INT8 | -| Number of concurrent inference requests | 4 | 4 | 4 | -| Test Date | March 15, 2021 | March 15, 2021 | March 15, 2021 | -| Power dissipation, TDP in Watt | [9.5](https://ark.intel.com/content/www/us/en/ark/products/96485/intel-atom-x5-e3940-processor-2m-cache-up-to-1-80-ghz.html) | [12](https://ark.intel.com/content/www/us/en/ark/products/207899/intel-atom-x6425re-processor-1-5m-cache-1-90-ghz.html) | [65](https://ark.intel.com/content/www/us/en/ark/products/126688/intel-core-i3-8100-processor-6m-cache-3-60-ghz.html#tab-blade-1-0-1)| -| CPU Price, USD
Prices may vary | [34](https://ark.intel.com/content/www/us/en/ark/products/96485/intel-atom-x5-e3940-processor-2m-cache-up-to-1-80-ghz.html) (on March 15th, 2021) | [59](https://ark.intel.com/content/www/us/en/ark/products/207899/intel-atom-x6425re-processor-1-5m-cache-1-90-ghz.html) (on March 26th, 2021) | [117](https://ark.intel.com/content/www/us/en/ark/products/126688/intel-core-i3-8100-processor-6m-cache-3-60-ghz.html) (on March 15th, 2021) | +| | Intel Atom® x5-E3940 | Intel Atom® x6425RE | Intel® Celeron® 6305E | +| -------------------- | --------------------------------------|------------------------------- |----------------------------------| +| Motherboard | Intel Corporation
internal/Reference
Validation Platform | Intel Corporation
internal/Reference
Validation Platform | Intel Corporation
internal/Reference
Validation Platform | +| CPU | Intel Atom® Processor E3940 @ 1.60GHz | Intel Atom® x6425RE
Processor @ 1.90GHz | Intel® Celeron®
6305E @ 1.80GHz | +| Hyper Threading | OFF | OFF | OFF | +| Turbo Setting | ON | ON | ON | +| Memory | 1 x 8 GB DDR3 1600MHz | 2 x 4GB DDR4 3200MHz | 2 x 8 GB DDR4 3200MHz | +| Operating System | Ubuntu* 18.04 LTS | Ubuntu* 18.04 LTS | Ubuntu 18.04.5 LTS | +| Kernel Version | 5.3.0-24-generic | 5.8.0-050800-generic | 5.8.0-050800-generic | +| BIOS Vendor | American Megatrends Inc.* | Intel Corporation | Intel Corporation | +| BIOS Version | 5.12 | EHLSFWI1.R00.2463.
A03.2011200425 | TGLIFUI1.R00.4064.A02.2102260133 | +| BIOS Release | September 6, 2017 | November 22, 2020 | February 26, 2021 | +| BIOS Settings | Default settings | Default settings | Default settings | +| Batch size | 1 | 1 | 1 | +| Precision | INT8 | INT8 | INT8 | +| Number of concurrent inference requests | 4 | 4 | 4| +| Test Date | June 18, 2021 | June 18, 2021 | June 18, 2021 | +| Rated maximum TDP/socket in Watt | [9.5](https://ark.intel.com/content/www/us/en/ark/products/96485/intel-atom-x5-e3940-processor-2m-cache-up-to-1-80-ghz.html) | [12](https://ark.intel.com/content/www/us/en/ark/products/207899/intel-atom-x6425re-processor-1-5m-cache-1-90-ghz.html) | [15](https://ark.intel.com/content/www/us/en/ark/products/208072/intel-celeron-6305e-processor-4m-cache-1-80-ghz.html)| +| CPU Price/socket on June 21, 2021, USD
Prices may vary | [34](https://ark.intel.com/content/www/us/en/ark/products/96485/intel-atom-x5-e3940-processor-2m-cache-up-to-1-80-ghz.html) | [59](https://ark.intel.com/content/www/us/en/ark/products/207899/intel-atom-x6425re-processor-1-5m-cache-1-90-ghz.html) |[107](https://ark.intel.com/content/www/us/en/ark/products/208072/intel-celeron-6305e-processor-4m-cache-1-80-ghz.html) | @@ -239,8 +264,8 @@ Testing by Intel done on: see test date for each HW platform below. | Batch size | 1 | 1 | | Precision | FP16 | FP16 | | Number of concurrent inference requests | 4 | 32 | -| Power dissipation, TDP in Watt | 2.5 | [30](https://www.arrow.com/en/products/mustang-v100-mx8-r10/iei-technology?gclid=Cj0KCQiA5bz-BRD-ARIsABjT4ng1v1apmxz3BVCPA-tdIsOwbEjTtqnmp_rQJGMfJ6Q2xTq6ADtf9OYaAhMUEALw_wcB) | -| CPU Price, USD
Prices may vary | [69](https://ark.intel.com/content/www/us/en/ark/products/140109/intel-neural-compute-stick-2.html) (from March 15, 2021) | [1180](https://www.arrow.com/en/products/mustang-v100-mx8-r10/iei-technology?gclid=Cj0KCQiA5bz-BRD-ARIsABjT4ng1v1apmxz3BVCPA-tdIsOwbEjTtqnmp_rQJGMfJ6Q2xTq6ADtf9OYaAhMUEALw_wcB) (from March 15, 2021) | +| Rated maximum TDP/socket in Watt | 2.5 | [30](https://www.arrow.com/en/products/mustang-v100-mx8-r10/iei-technology?gclid=Cj0KCQiA5bz-BRD-ARIsABjT4ng1v1apmxz3BVCPA-tdIsOwbEjTtqnmp_rQJGMfJ6Q2xTq6ADtf9OYaAhMUEALw_wcB) | +| CPU Price/socket on June 21, 2021, USD
Prices may vary | [69](https://ark.intel.com/content/www/us/en/ark/products/140109/intel-neural-compute-stick-2.html) | [425](https://www.arrow.com/en/products/mustang-v100-mx8-r10/iei-technology?gclid=Cj0KCQiA5bz-BRD-ARIsABjT4ng1v1apmxz3BVCPA-tdIsOwbEjTtqnmp_rQJGMfJ6Q2xTq6ADtf9OYaAhMUEALw_wcB) | | Host Computer | Intel® Core™ i7 | Intel® Core™ i5 | | Motherboard | ASUS* Z370-A II | Uzelinfo* / US-E1300 | | CPU | Intel® Core™ i7-8700 CPU @ 3.20GHz | Intel® Core™ i5-6600 CPU @ 3.30GHz | @@ -252,9 +277,9 @@ Testing by Intel done on: see test date for each HW platform below. | BIOS Vendor | American Megatrends Inc.* | American Megatrends Inc.* | | BIOS Version | 411 | 5.12 | | BIOS Release | September 21, 2018 | September 21, 2018 | -| Test Date | March 15, 2021 | March 15, 2021 | +| Test Date | June 18, 2021 | June 18, 2021 | -Please follow this link for more detailed configuration descriptions: [Configuration Details](https://docs.openvinotoolkit.org/resources/benchmark_files/system_configurations_2021.3.html) +Please follow this link for more detailed configuration descriptions: [Configuration Details](https://docs.openvinotoolkit.org/resources/benchmark_files/system_configurations_2021.4.html) \htmlonly