Move ngraph functions to new API (#19728)

* Moved ngraphFunctions to new API

* Fixed code style

* Fixed build function

* Fixed cpu unit tests

* Fixed code style

* Fixed transformation tests

* Fixed code style

* Fixed build

* Fixed LP tests

* Fixed build all for macOS

* Fixed more issues

* Fixed some func tests

* Try to fix CPU tests

* Revert incorrect change

* Try to fix tests

* Fixed merge conflicts

* Remove redundant headers

* Update src/tests/ngraph_helpers/ngraph_functions/src/non_max_suppression.cpp

Co-authored-by: Ilya Lavrenov <ilya.lavrenov@intel.com>

---------

Co-authored-by: Ilya Lavrenov <ilya.lavrenov@intel.com>
Authored by Ilya Churaev on 2023-09-14 10:57:23 +04:00; committed by GitHub
parent 5ba60f845e
commit 4df4ea9b31
94 changed files with 4617 additions and 3646 deletions
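
The diff below applies one mechanical pattern across the 94 files: legacy ngraph:: types and opset aliases become their ov:: equivalents, and monolithic opset headers are replaced by fine-grained openvino/op/*.hpp includes. A minimal before/after sketch of the pattern (illustrative only; this tiny model is not taken from the diff):

#include "openvino/core/model.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/result.hpp"

// New-API spelling of what used to be ngraph::opset1::* and ngraph::Function:
//   ngraph::opset1::Parameter -> ov::op::v0::Parameter
//   ngraph::opset1::Relu      -> ov::op::v0::Relu
//   ngraph::Function          -> ov::Model
std::shared_ptr<ov::Model> make_tiny_model() {
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 32, 32});
    auto relu = std::make_shared<ov::op::v0::Relu>(param);
    auto result = std::make_shared<ov::op::v0::Result>(relu);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
}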

View File

@ -87,28 +87,28 @@ public:
testValues.actual.dequantization2);
SimpleLowPrecisionTransformer transformer;
transformer.add<ov::pass::low_precision::MatMulTransformation, ov::opset1::MatMul>(testValues.params);
transformer.add<ov::pass::low_precision::MatMulTransformation, ov::op::v0::MatMul>(testValues.params);
transformer.transform(actualFunction);
referenceFunction = (testValues.expected.precisionBeforeOperation1 == ov::element::f32) &&
testValues.expected.result.empty()
? ngraph::builder::subgraph::MatMulFunction::getOriginal(
precision,
shapes.first,
testValues.actual.precisionBeforeDequantization1,
testValues.actual.dequantization1,
shapes.second,
testValues.actual.precisionBeforeDequantization2,
testValues.actual.dequantization2)
: ngraph::builder::subgraph::MatMulFunction::getReference(
precision,
shapes.first,
testValues.expected.precisionBeforeDequantization1,
testValues.expected.dequantization1,
shapes.second,
testValues.expected.precisionBeforeDequantization2,
testValues.expected.dequantization2,
testValues.expected.result);
referenceFunction =
(testValues.expected.precisionBeforeOperation1 == ov::element::f32) && testValues.expected.result.empty()
? ngraph::builder::subgraph::MatMulFunction::getOriginal(
precision,
shapes.first,
testValues.actual.precisionBeforeDequantization1,
testValues.actual.dequantization1,
shapes.second,
testValues.actual.precisionBeforeDequantization2,
testValues.actual.dequantization2)
: ngraph::builder::subgraph::MatMulFunction::getReference(
precision,
shapes.first,
testValues.expected.precisionBeforeDequantization1,
testValues.expected.dequantization1,
shapes.second,
testValues.expected.precisionBeforeDequantization2,
testValues.expected.dequantization2,
testValues.expected.result);
}
static std::string getTestCaseName(testing::TestParamInfo<MatMulTransformationParams> obj) {
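
The transformer registration change above (ov::opset1::MatMul to ov::op::v0::MatMul) is a pure spelling change: the opset namespaces are collections of type aliases onto the versioned op classes, so the registered key type is identical. A compile-time sanity check, as a sketch rather than part of the diff:

#include <type_traits>

#include "openvino/op/matmul.hpp"
#include "openvino/opsets/opset1.hpp"

static_assert(std::is_same<ov::opset1::MatMul, ov::op::v0::MatMul>::value,
              "opset1::MatMul is an alias of the versioned v0 MatMul");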

View File

@ -89,7 +89,7 @@ public:
testValues.actual.dequantizationOnWeights);
SimpleLowPrecisionTransformer transformer;
transformer.add<ov::pass::low_precision::MatMulTransformation, ov::opset1::MatMul>(testValues.params);
transformer.add<ov::pass::low_precision::MatMulTransformation, ov::op::v0::MatMul>(testValues.params);
transformer.transform(actualFunction);
referenceFunction =

View File

@ -9,6 +9,7 @@
#include "ie_common.h"
#include "ngraph_functions/builders.hpp"
#include "openvino/op/matmul.hpp"
#include "ov_ops/type_relaxed.hpp"
using namespace ov;

View File

@ -5,6 +5,8 @@
#include <common_test_utils/test_constants.hpp>
#include "execution_graph_tests/add_output.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/sigmoid.hpp"
#include "ngraph_functions/builders.hpp"
using namespace ngraph;

View File

@ -3,11 +3,13 @@
//
#include <vector>
#include "common_test_utils/test_enums.hpp"
#include "single_layer_tests/activation.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
using namespace ngraph::helpers;
using namespace ov::test::utils;
namespace {
// Common params
const std::vector<InferenceEngine::Precision> inputPrecisions = {
@ -26,7 +28,7 @@ const std::vector<InferenceEngine::Precision> intPrecisions = {
InferenceEngine::Precision::I32,
};
const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes = {
const std::map<ov::test::utils::ActivationTypes, std::vector<std::vector<float>>> activationTypes = {
{Sigmoid, {}},
{Tan, {}},
{Tanh, {}},

View File

@ -7,6 +7,7 @@
#include <shared_test_classes/base/ov_subgraph.hpp>
#include <ngraph_functions/builders.hpp>
#include <common_test_utils/ov_tensor_utils.hpp>
#include "ngraph_functions/utils/ngraph_helpers.hpp"
using namespace ov::test;
@ -125,4 +126,4 @@ protected:
TEST_F(CustomOpCPUTest, smoke_CustomOpInternalDynamismCPUTest) {
run();
}
} // namespace CPULayerTestsDefinitions
} // namespace CPULayerTestsDefinitions

View File

@ -10,6 +10,7 @@
#include "common_test_utils/file_utils.hpp"
#include "common_test_utils/ov_test_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/runtime/tensor.hpp"
namespace ov {

View File

@ -13,6 +13,8 @@
#include "common_test_utils/file_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/relu.hpp"
namespace ov {
namespace test {

View File

@ -7,6 +7,7 @@
#include "common_test_utils/ov_test_utils.hpp"
#include "common_test_utils/file_utils.hpp"
#include "openvino/core/model.hpp"
#include "openvino/op/relu.hpp"
namespace BehaviorTestsDefinitions {
class ExecutableNetworkBaseTest : public BehaviorTestsUtils::IEExecutableNetworkTestBase,

View File

@ -10,6 +10,7 @@
#include "common_test_utils/file_utils.hpp"
#include "common_test_utils/ov_test_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/runtime/tensor.hpp"
namespace ov {

View File

@ -10,6 +10,16 @@
#include "common_test_utils/test_assertions.hpp"
#include "common_test_utils/file_utils.hpp"
#include "common_test_utils/unicode_utils.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/gather.hpp"
#include "openvino/op/matmul.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/shape_of.hpp"
#include "openvino/util/file_util.hpp"
#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT
@ -86,7 +96,7 @@ class OVClassSetDevicePriorityConfigTest : public OVPluginTestBase,
protected:
std::string deviceName;
ov::AnyMap configuration;
std::shared_ptr<ngraph::Function> actualNetwork;
std::shared_ptr<ov::Model> actualNetwork;
public:
void SetUp() override {
@ -795,29 +805,29 @@ TEST_P(OVClassSeveralDevicesTestQueryNetwork, QueryNetworkActualSeveralDevicesNo
TEST_P(OVClassNetworkTestP, SetAffinityWithConstantBranches) {
ov::Core ie = createCoreWithTemplate();
std::shared_ptr<ngraph::Function> func;
std::shared_ptr<ov::Model> func;
{
ngraph::PartialShape shape({1, 84});
ngraph::element::Type type(ngraph::element::Type_t::f32);
auto param = std::make_shared<ngraph::opset6::Parameter>(type, shape);
auto matMulWeights = ngraph::opset6::Constant::create(ngraph::element::Type_t::f32, {10, 84}, {1});
auto shapeOf = std::make_shared<ngraph::opset6::ShapeOf>(matMulWeights);
auto gConst1 = ngraph::opset6::Constant::create(ngraph::element::Type_t::i32, {1}, {1});
auto gConst2 = ngraph::opset6::Constant::create(ngraph::element::Type_t::i64, {}, {0});
auto gather = std::make_shared<ngraph::opset6::Gather>(shapeOf, gConst1, gConst2);
auto concatConst = ngraph::opset6::Constant::create(ngraph::element::Type_t::i64, {1}, {1});
auto concat = std::make_shared<ngraph::opset6::Concat>(ngraph::NodeVector{concatConst, gather}, 0);
auto relu = std::make_shared<ngraph::opset6::Relu>(param);
auto reshape = std::make_shared<ngraph::opset6::Reshape>(relu, concat, false);
auto matMul = std::make_shared<ngraph::opset6::MatMul>(reshape, matMulWeights, false, true);
auto matMulBias = ngraph::opset6::Constant::create(ngraph::element::Type_t::f32, {1, 10}, {1});
auto addBias = std::make_shared<ngraph::opset6::Add>(matMul, matMulBias);
auto result = std::make_shared<ngraph::opset6::Result>(addBias);
ov::PartialShape shape({1, 84});
ov::element::Type type(ov::element::Type_t::f32);
auto param = std::make_shared<ov::op::v0::Parameter>(type, shape);
auto matMulWeights = ov::op::v0::Constant::create(ov::element::Type_t::f32, {10, 84}, {1});
auto shapeOf = std::make_shared<ov::op::v3::ShapeOf>(matMulWeights);
auto gConst1 = ov::op::v0::Constant::create(ov::element::Type_t::i32, {1}, {1});
auto gConst2 = ov::op::v0::Constant::create(ov::element::Type_t::i64, {}, {0});
auto gather = std::make_shared<ov::op::v1::Gather>(shapeOf, gConst1, gConst2);
auto concatConst = ov::op::v0::Constant::create(ov::element::Type_t::i64, {1}, {1});
auto concat = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{concatConst, gather}, 0);
auto relu = std::make_shared<ov::op::v0::Relu>(param);
auto reshape = std::make_shared<ov::op::v1::Reshape>(relu, concat, false);
auto matMul = std::make_shared<ov::op::v0::MatMul>(reshape, matMulWeights, false, true);
auto matMulBias = ov::op::v0::Constant::create(ov::element::Type_t::f32, {1, 10}, {1});
auto addBias = std::make_shared<ov::op::v1::Add>(matMul, matMulBias);
auto result = std::make_shared<ov::op::v0::Result>(addBias);
ngraph::ParameterVector params = {param};
ngraph::ResultVector results = {result};
ov::ParameterVector params = {param};
ov::ResultVector results = {result};
func = std::make_shared<ngraph::Function>(results, params);
func = std::make_shared<ov::Model>(results, params);
}
auto rl_map = ie.query_model(func, target_device);
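
Note that the rewrite above resolves each opset6 alias to the op version that opset6 actually maps to (ShapeOf stays at v3, Gather at v1) rather than to the newest available version, so the constructed graph is unchanged. A compile-time check of that mapping, as a sketch assuming the standard opset6 header:

#include <type_traits>

#include "openvino/op/gather.hpp"
#include "openvino/op/shape_of.hpp"
#include "openvino/opsets/opset6.hpp"

static_assert(std::is_same<ov::opset6::ShapeOf, ov::op::v3::ShapeOf>::value, "opset6 ShapeOf is v3");
static_assert(std::is_same<ov::opset6::Gather, ov::op::v1::Gather>::value, "opset6 Gather is v1");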

View File

@ -5,6 +5,16 @@
#pragma once
#include "base/ov_behavior_test_utils.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/gather.hpp"
#include "openvino/op/matmul.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/shape_of.hpp"
namespace ov {
namespace test {
@ -32,29 +42,29 @@ TEST_P(OVClassModelTestP, QueryModelWithKSO) {
TEST_P(OVClassQueryModelTest, QueryModelWithMatMul) {
ov::Core ie = createCoreWithTemplate();
std::shared_ptr<ngraph::Function> func;
std::shared_ptr<ov::Model> func;
{
ngraph::PartialShape shape({1, 84});
ngraph::element::Type type(ngraph::element::Type_t::f32);
auto param = std::make_shared<ngraph::opset6::Parameter>(type, shape);
auto matMulWeights = ngraph::opset6::Constant::create(ngraph::element::Type_t::f32, {10, 84}, {1});
auto shapeOf = std::make_shared<ngraph::opset6::ShapeOf>(matMulWeights);
auto gConst1 = ngraph::opset6::Constant::create(ngraph::element::Type_t::i32, {1}, {1});
auto gConst2 = ngraph::opset6::Constant::create(ngraph::element::Type_t::i64, {}, {0});
auto gather = std::make_shared<ngraph::opset6::Gather>(shapeOf, gConst1, gConst2);
auto concatConst = ngraph::opset6::Constant::create(ngraph::element::Type_t::i64, {1}, {1});
auto concat = std::make_shared<ngraph::opset6::Concat>(ngraph::NodeVector{concatConst, gather}, 0);
auto relu = std::make_shared<ngraph::opset6::Relu>(param);
auto reshape = std::make_shared<ngraph::opset6::Reshape>(relu, concat, false);
auto matMul = std::make_shared<ngraph::opset6::MatMul>(reshape, matMulWeights, false, true);
auto matMulBias = ngraph::opset6::Constant::create(ngraph::element::Type_t::f32, {1, 10}, {1});
auto addBias = std::make_shared<ngraph::opset6::Add>(matMul, matMulBias);
auto result = std::make_shared<ngraph::opset6::Result>(addBias);
ov::PartialShape shape({1, 84});
ov::element::Type type(ov::element::Type_t::f32);
auto param = std::make_shared<ov::op::v0::Parameter>(type, shape);
auto matMulWeights = ov::op::v0::Constant::create(ov::element::Type_t::f32, {10, 84}, {1});
auto shapeOf = std::make_shared<ov::op::v3::ShapeOf>(matMulWeights);
auto gConst1 = ov::op::v0::Constant::create(ov::element::Type_t::i32, {1}, {1});
auto gConst2 = ov::op::v0::Constant::create(ov::element::Type_t::i64, {}, {0});
auto gather = std::make_shared<ov::op::v1::Gather>(shapeOf, gConst1, gConst2);
auto concatConst = ov::op::v0::Constant::create(ov::element::Type_t::i64, {1}, {1});
auto concat = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{concatConst, gather}, 0);
auto relu = std::make_shared<ov::op::v0::Relu>(param);
auto reshape = std::make_shared<ov::op::v1::Reshape>(relu, concat, false);
auto matMul = std::make_shared<ov::op::v0::MatMul>(reshape, matMulWeights, false, true);
auto matMulBias = ov::op::v0::Constant::create(ov::element::Type_t::f32, {1, 10}, {1});
auto addBias = std::make_shared<ov::op::v1::Add>(matMul, matMulBias);
auto result = std::make_shared<ov::op::v0::Result>(addBias);
ngraph::ParameterVector params = {param};
ngraph::ResultVector results = {result};
ov::ParameterVector params = {param};
ov::ResultVector results = {result};
func = std::make_shared<ngraph::Function>(results, params);
func = std::make_shared<ov::Model>(results, params);
}
auto rl_map = ie.query_model(func, target_device);
@ -88,4 +98,4 @@ TEST_P(OVClassQueryModelTest, QueryModelWithInvalidDeviceIDThrows) {
} // namespace behavior
} // namespace test
} // namespace ov
} // namespace ov

View File

@ -2,30 +2,38 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/executable_network/locale.hpp"
#include <locale.h>
#include "behavior/executable_network/locale.hpp"
#include "functional_test_utils/summary/api_summary.hpp"
#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/gelu.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/swish.hpp"
namespace BehaviorTestsDefinitions {
inline std::shared_ptr<ngraph::Function> makeTestModel(std::vector<size_t> inputShape = {1, 1, 32, 32}) {
ngraph::Shape in_shape(inputShape);
auto et = ngraph::element::Type_t::f16;
auto in = std::make_shared<ngraph::opset1::Parameter>(et, in_shape);
auto gelu = std::make_shared<ngraph::opset7::Gelu>(in);
auto swish_const = ngraph::op::Constant::create(et, ngraph::Shape{}, {2.5f});
auto swish = std::make_shared<ngraph::opset4::Swish>(gelu, swish_const);
ngraph::Shape reluShape = swish->outputs()[0].get_tensor().get_shape();
std::vector<size_t> constShape2 = {1, ngraph::shape_size(reluShape)};
auto const2 = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{2}, constShape2);
auto reshape2 = std::make_shared<ngraph::opset1::Reshape>(swish, const2, false);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(reshape2)};
std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{in});
inline std::shared_ptr<ov::Model> makeTestModel(std::vector<size_t> inputShape = {1, 1, 32, 32}) {
ov::Shape in_shape(inputShape);
auto et = ov::element::Type_t::f16;
auto in = std::make_shared<ov::op::v0::Parameter>(et, in_shape);
auto gelu = std::make_shared<ov::op::v7::Gelu>(in);
auto swish_const = ov::op::v0::Constant::create(et, ov::Shape{}, {2.5f});
auto swish = std::make_shared<ov::op::v4::Swish>(gelu, swish_const);
ov::Shape reluShape = swish->outputs()[0].get_tensor().get_shape();
std::vector<size_t> constShape2 = {1, ov::shape_size(reluShape)};
auto const2 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, constShape2);
auto reshape2 = std::make_shared<ov::op::v1::Reshape>(swish, const2, false);
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(reshape2)};
std::shared_ptr<ov::Model> fnPtr = std::make_shared<ov::Model>(results, ov::ParameterVector{in});
return fnPtr;
}
std::string CustomLocaleTest::getTestCaseName(const testing::TestParamInfo<LocaleParams> &obj) {
std::string CustomLocaleTest::getTestCaseName(const testing::TestParamInfo<LocaleParams>& obj) {
std::ostringstream results;
std::string targetDevice, localeName;
std::tie(localeName, targetDevice) = obj.param;
@ -59,4 +67,4 @@ TEST_P(CustomLocaleTest, CanLoadNetworkWithCustomLocale) {
setlocale(LC_TIME, prev.c_str());
}
} // namespace BehaviorTestsDefinitions
} // namespace BehaviorTestsDefinitions

View File

@ -2,13 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <base/behavior_test_utils.hpp>
#include "behavior/infer_request/memory_states.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include <base/behavior_test_utils.hpp>
#include "blob_factory.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/sigmoid.hpp"
namespace BehaviorTestsDefinitions {
std::string InferRequestVariableStateTest::getTestCaseName(const testing::TestParamInfo<memoryStateParams> &obj) {
std::string InferRequestVariableStateTest::getTestCaseName(const testing::TestParamInfo<memoryStateParams>& obj) {
std::ostringstream result;
InferenceEngine::CNNNetwork net;
std::string targetDevice;
@ -17,7 +21,7 @@ std::string InferRequestVariableStateTest::getTestCaseName(const testing::TestPa
std::tie(net, statesToQuery, targetDevice, configuration) = obj.param;
result << "targetDevice=" << targetDevice;
if (!configuration.empty()) {
for (auto &configItem : configuration) {
for (auto& configItem : configuration) {
result << "_configItem=" << configItem.first << "_" << configItem.second << "_";
}
}
@ -75,10 +79,10 @@ TEST_P(InferRequestVariableStateTest, inferreq_smoke_VariableState_QueryState) {
auto states = inferReq.QueryState();
ASSERT_TRUE(states.size() == 2) << "Incorrect number of VariableStates";
for (auto &&state : states) {
for (auto&& state : states) {
auto name = state.GetName();
ASSERT_TRUE(std::find(statesToQuery.begin(), statesToQuery.end(), name) != statesToQuery.end())
<< "State " << name << "expected to be in memory states but it is not!";
<< "State " << name << "expected to be in memory states but it is not!";
}
}
@ -87,26 +91,26 @@ TEST_P(InferRequestVariableStateTest, inferreq_smoke_VariableState_SetState) {
auto inferReq = executableNet.CreateInferRequest();
const float new_state_val = 13.0f;
for (auto &&state : inferReq.QueryState()) {
for (auto&& state : inferReq.QueryState()) {
state.Reset();
auto state_val = state.GetState();
auto element_count = state_val->size();
float *new_state_data = new float[element_count];
float* new_state_data = new float[element_count];
for (int i = 0; i < element_count; i++) {
new_state_data[i] = new_state_val;
}
auto stateBlob = make_blob_with_precision(state_val->getTensorDesc());
stateBlob->allocate();
std::memcpy(stateBlob->buffer(), new_state_data, element_count * sizeof(float));
delete[]new_state_data;
delete[] new_state_data;
state.SetState(stateBlob);
}
for (auto &&state : inferReq.QueryState()) {
for (auto&& state : inferReq.QueryState()) {
auto lastState = state.GetState();
auto last_state_size = lastState->size();
auto last_state_data = lastState->cbuffer().as<float *>();
auto last_state_data = lastState->cbuffer().as<float*>();
ASSERT_TRUE(last_state_size != 0) << "State size should not be 0";
for (int i = 0; i < last_state_size; i++) {
EXPECT_NEAR(new_state_val, last_state_data[i], 1e-5);
@ -119,19 +123,19 @@ TEST_P(InferRequestVariableStateTest, inferreq_smoke_VariableState_Reset) {
auto inferReq = executableNet.CreateInferRequest();
const float new_state_val = 13.0f;
for (auto &&state : inferReq.QueryState()) {
for (auto&& state : inferReq.QueryState()) {
state.Reset();
auto state_val = state.GetState();
auto element_count = state_val->size();
float *new_state_data = new float[element_count];
float* new_state_data = new float[element_count];
for (int i = 0; i < element_count; i++) {
new_state_data[i] = new_state_val;
}
auto stateBlob = make_blob_with_precision(state_val->getTensorDesc());
stateBlob->allocate();
std::memcpy(stateBlob->buffer(), new_state_data, element_count * sizeof(float));
delete[]new_state_data;
delete[] new_state_data;
state.SetState(stateBlob);
}
@ -142,7 +146,7 @@ TEST_P(InferRequestVariableStateTest, inferreq_smoke_VariableState_Reset) {
for (int i = 0; i < states.size(); ++i) {
auto lastState = states[i].GetState();
auto last_state_size = lastState->size();
auto last_state_data = lastState->cbuffer().as<float *>();
auto last_state_data = lastState->cbuffer().as<float*>();
ASSERT_TRUE(last_state_size != 0) << "State size should not be 0";
if (i == 0) {
@ -163,22 +167,22 @@ TEST_P(InferRequestVariableStateTest, inferreq_smoke_VariableState_2infers_set)
auto inferReq2 = executableNet.CreateInferRequest();
const float new_state_val = 13.0f;
for (auto &&state : inferReq.QueryState()) {
for (auto&& state : inferReq.QueryState()) {
state.Reset();
auto state_val = state.GetState();
auto element_count = state_val->size();
float *new_state_data = new float[element_count];
float* new_state_data = new float[element_count];
for (int i = 0; i < element_count; i++) {
new_state_data[i] = new_state_val;
}
auto stateBlob = make_blob_with_precision(state_val->getTensorDesc());
stateBlob->allocate();
std::memcpy(stateBlob->buffer(), new_state_data, element_count * sizeof(float));
delete[]new_state_data;
delete[] new_state_data;
state.SetState(stateBlob);
}
for (auto &&state : inferReq2.QueryState()) {
for (auto&& state : inferReq2.QueryState()) {
state.Reset();
}
@ -187,7 +191,7 @@ TEST_P(InferRequestVariableStateTest, inferreq_smoke_VariableState_2infers_set)
for (int i = 0; i < states.size(); ++i) {
auto lastState = states[i].GetState();
auto last_state_size = lastState->size();
auto last_state_data = lastState->cbuffer().as<float *>();
auto last_state_data = lastState->cbuffer().as<float*>();
ASSERT_TRUE(last_state_size != 0) << "State size should not be 0";
@ -198,7 +202,7 @@ TEST_P(InferRequestVariableStateTest, inferreq_smoke_VariableState_2infers_set)
for (int i = 0; i < states2.size(); ++i) {
auto lastState = states2[i].GetState();
auto last_state_size = lastState->size();
auto last_state_data = lastState->cbuffer().as<float *>();
auto last_state_data = lastState->cbuffer().as<float*>();
ASSERT_TRUE(last_state_size != 0) << "State size should not be 0";
@ -215,8 +219,8 @@ TEST_P(InferRequestVariableStateTest, inferreq_smoke_VariableState_2infers) {
const float new_state_val = 13.0f;
// set the input data for the network
for (const auto &input : executableNet.GetInputsInfo()) {
const auto &info = input.second;
for (const auto& input : executableNet.GetInputsInfo()) {
const auto& info = input.second;
InferenceEngine::Blob::Ptr inBlob;
inBlob = make_blob_with_precision(info->getTensorDesc());
inBlob->allocate();
@ -225,23 +229,23 @@ TEST_P(InferRequestVariableStateTest, inferreq_smoke_VariableState_2infers) {
}
// initial state for 2nd infer request
for (auto &&state : inferReq2.QueryState()) {
for (auto&& state : inferReq2.QueryState()) {
auto state_val = state.GetState();
auto element_count = state_val->size();
float *new_state_data = new float[element_count];
float* new_state_data = new float[element_count];
for (int i = 0; i < element_count; i++) {
new_state_data[i] = new_state_val;
}
auto stateBlob = make_blob_with_precision(state_val->getTensorDesc());
stateBlob->allocate();
std::memcpy(stateBlob->buffer(), new_state_data, element_count * sizeof(float));
delete[]new_state_data;
delete[] new_state_data;
state.SetState(stateBlob);
}
// reset state for 1st infer request
for (auto &&state : inferReq.QueryState()) {
for (auto&& state : inferReq.QueryState()) {
state.Reset();
}
@ -257,20 +261,20 @@ TEST_P(InferRequestVariableStateTest, inferreq_smoke_VariableState_2infers) {
for (int i = 0; i < states.size(); ++i) {
auto lastState = states[i].GetState();
auto last_state_size = lastState->size();
auto last_state_data = lastState->cbuffer().as<float *>();
auto last_state_data = lastState->cbuffer().as<float*>();
ASSERT_TRUE(last_state_size != 0) << "State size should not be 0";
for (int j = 0; j < last_state_size; ++j) {
EXPECT_NEAR(0.0, last_state_data[j], 1e-5);
}
EXPECT_NEAR(0.0, last_state_data[j], 1e-5);
}
}
// check the output and state of 2nd request
for (int i = 0; i < states2.size(); ++i) {
auto lastState = states2[i].GetState();
auto last_state_size = lastState->size();
auto last_state_data = lastState->cbuffer().as<float *>();
auto last_state_data = lastState->cbuffer().as<float*>();
ASSERT_TRUE(last_state_size != 0) << "State size should not be 0";
@ -286,4 +290,4 @@ TEST_P(InferRequestQueryStateExceptionTest, inferreq_smoke_QueryState_ExceptionT
EXPECT_ANY_THROW(inferReq.QueryState());
}
} // namespace BehaviorTestsDefinitions
} // namespace BehaviorTestsDefinitions

View File

@ -3,8 +3,10 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/pass/constant_folding.hpp>
#include "common_test_utils/common_utils.hpp"
#include "openvino/op/gelu.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/pass/constant_folding.hpp"
#include "snippets/codegen_gelu.hpp"
#include "subgraph_simple.hpp"
#include "ngraph_functions/builders.hpp"
@ -48,21 +50,21 @@ namespace snippets {
init_input_shapes({inputShape0, inputShapes1});
auto input0 = std::make_shared<ngraph::opset1::Parameter>(netPrecision, inputDynamicShapes[0]);
auto input1 = std::make_shared<ngraph::opset1::Parameter>(netPrecision, inputDynamicShapes[1]);
auto add = std::make_shared<ngraph::opset1::Add>(input0, input1);
auto input0 = std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[0]);
auto input1 = std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[1]);
auto add = std::make_shared<ov::op::v1::Add>(input0, input1);
auto gelu = std::make_shared<ngraph::opset2::Gelu>(add);
auto result = std::make_shared<ngraph::opset1::Result>(gelu);
auto gelu = std::make_shared<ov::op::v0::Gelu>(add);
auto result = std::make_shared<ov::op::v0::Result>(gelu);
function = std::make_shared<ngraph::Function>(
ngraph::ResultVector{result},
ngraph::ParameterVector{input0, input1},
function = std::make_shared<ov::Model>(
ov::ResultVector{result},
ov::ParameterVector{input0, input1},
"CodegenGelu");
if (useSubgraph) {
ov::pass::InitNodeInfo().run_on_model(function);
ngraph::pass::ConstantFolding().run_on_model(function);
ov::pass::ConstantFolding().run_on_model(function);
}
if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,

View File

@ -4,18 +4,17 @@
#pragma once
#include "openvino/core/model.hpp"
#include "transformations/convert_precision.hpp"
#include "common_test_utils/test_common.hpp"
#include "functional_test_utils/ov_plugin_cache.hpp"
#include "functional_test_utils/summary/op_summary.hpp"
#include "openvino/core/model.hpp"
#include "transformations/convert_precision.hpp"
namespace ov {
namespace test {
using InputShape = std::pair<ov::PartialShape, std::vector<ov::Shape>>;
std::ostream& operator <<(std::ostream& os, const InputShape& inputShape);
std::ostream& operator<<(std::ostream& os, const InputShape& inputShape);
using ElementType = ov::element::Type_t;
using Config = ov::AnyMap;
@ -28,12 +27,12 @@ public:
virtual void query_model();
protected:
virtual void compare(const std::vector<ov::Tensor> &expected,
const std::vector<ov::Tensor> &actual);
virtual void compare(const std::vector<ov::Tensor>& expected, const std::vector<ov::Tensor>& actual);
virtual void configure_model();
virtual void compile_model();
virtual void init_ref_function(std::shared_ptr<ov::Model> &funcRef, const std::vector<ov::Shape>& targetInputStaticShapes);
virtual void init_ref_function(std::shared_ptr<ov::Model>& funcRef,
const std::vector<ov::Shape>& targetInputStaticShapes);
virtual void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes);
virtual void infer();
virtual void validate();
@ -75,17 +74,20 @@ protected:
friend void core_configuration(SubgraphBaseTest* test);
};
inline std::vector<InputShape> static_partial_shapes_to_test_representation(const std::vector<ov::PartialShape>& shapes) {
inline std::vector<InputShape> static_partial_shapes_to_test_representation(
const std::vector<ov::PartialShape>& shapes) {
std::vector<InputShape> result;
for (const auto& staticShape : shapes) {
if (staticShape.is_dynamic())
throw std::runtime_error("static_partial_shapes_to_test_representation can process only static partial shapes");
throw std::runtime_error(
"static_partial_shapes_to_test_representation can process only static partial shapes");
result.push_back({{staticShape}, {staticShape.get_shape()}});
}
return result;
}
inline std::vector<std::vector<InputShape>> static_shapes_to_test_representation(const std::vector<std::vector<ov::Shape>>& shapes) {
inline std::vector<std::vector<InputShape>> static_shapes_to_test_representation(
const std::vector<std::vector<ov::Shape>>& shapes) {
std::vector<std::vector<InputShape>> result;
for (const auto& staticShapes : shapes) {
std::vector<InputShape> tmp;

View File

@ -8,6 +8,8 @@
#include "common_test_utils/common_utils.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/softmax.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
namespace ov {
@ -67,10 +69,10 @@ protected:
params.push_back(std::make_shared<ov::op::v0::Parameter>(ngPrc, shape));
}
const auto paramOuts =
ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ov::op::v0::Parameter>(params));
const auto softMax = std::make_shared<SoftmaxOpType>(paramOuts.at(0), axis);
const ngraph::ResultVector results{std::make_shared<ngraph::opset8::Result>(softMax)};
const ngraph::ResultVector results{std::make_shared<ov::op::v0::Result>(softMax)};
// TODO: This workaround is needed as there is no full support for f16 type in the reference implementation
if (ngPrc == element::Type_t::f16) {
@ -83,8 +85,8 @@ protected:
} // namespace aux
using SoftMax1LayerTest = aux::SoftMaxLayerTestBase<size_t, ngraph::opset1::Softmax>;
using SoftMax8LayerTest = aux::SoftMaxLayerTestBase<int64_t, ngraph::opset8::Softmax>;
using SoftMax1LayerTest = aux::SoftMaxLayerTestBase<size_t, ov::op::v1::Softmax>;
using SoftMax8LayerTest = aux::SoftMaxLayerTestBase<int64_t, ov::op::v8::Softmax>;
using SoftMaxLayerTest = SoftMax1LayerTest;

View File

@ -39,10 +39,10 @@ void BatchNormLayerTest::SetUp() {
ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShapes))};
auto paramOuts = ngraph::helpers::convert2OutputVector(
ngraph::helpers::castOps2Nodes<ngraph::opset4::Parameter>(params));
ngraph::helpers::castOps2Nodes<ov::op::v0::Parameter>(params));
auto batchNorm = ngraph::builder::makeBatchNormInference(paramOuts[0], epsilon);
ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(batchNorm)};
ngraph::ResultVector results{std::make_shared<ov::op::v0::Result>(batchNorm)};
function = std::make_shared<ngraph::Function>(results, params, "BatchNormInference");
}

View File

@ -45,7 +45,7 @@ void ConversionLayerTest::SetUp() {
}
auto conversion = ngraph::builder::makeConversion(params.front(), targetPrc, conversionOpType);
ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(conversion)};
ngraph::ResultVector results{std::make_shared<ov::op::v0::Result>(conversion)};
function = std::make_shared<ngraph::Function>(results, params, "Conversion");
}
} // namespace LayerTestsDefinitions

View File

@ -8,6 +8,8 @@
#include <ngraph/opsets/opset1.hpp>
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/opsets/opset6.hpp>
#include "ngraph_functions/subgraph_builders.hpp"
#include "openvino/op/util/variable.hpp"
#include <openvino/op/util/assign_base.hpp>

View File

@ -285,7 +285,7 @@ std::shared_ptr<ngraph::opset1::FakeQuantize> makeFakeQuantize(
fqOnData.inputLowValues,
fqOnData.inputLowValues.empty());
if (fqOnData.addConverts) {
inputLowNode = ngraph::builder::makeConversion(inputLowNode, ov::element::f32, ngraph::helpers::ConversionTypes::CONVERT);
inputLowNode = ngraph::builder::makeConversion(inputLowNode, ov::element::f32, ov::test::utils::ConversionTypes::CONVERT);
}
inputHighNode = ngraph::builder::makeConstant(
@ -296,7 +296,7 @@ std::shared_ptr<ngraph::opset1::FakeQuantize> makeFakeQuantize(
fqOnData.inputHighValues,
fqOnData.inputHighValues.empty());
if (fqOnData.addConverts) {
inputHighNode = ngraph::builder::makeConversion(inputHighNode, ov::element::f32, ngraph::helpers::ConversionTypes::CONVERT);
inputHighNode = ngraph::builder::makeConversion(inputHighNode, ov::element::f32, ov::test::utils::ConversionTypes::CONVERT);
}
}
@ -308,7 +308,7 @@ std::shared_ptr<ngraph::opset1::FakeQuantize> makeFakeQuantize(
fqOnData.outputLowValues,
fqOnData.outputLowValues.empty());
if (fqOnData.addConverts) {
outputLowNode = ngraph::builder::makeConversion(outputLowNode, ov::element::f32, ngraph::helpers::ConversionTypes::CONVERT);
outputLowNode = ngraph::builder::makeConversion(outputLowNode, ov::element::f32, ov::test::utils::ConversionTypes::CONVERT);
}
auto outputHighNode = ngraph::builder::makeConstant(
@ -319,7 +319,7 @@ std::shared_ptr<ngraph::opset1::FakeQuantize> makeFakeQuantize(
fqOnData.outputHighValues,
fqOnData.outputHighValues.empty());
if (fqOnData.addConverts) {
outputHighNode = ngraph::builder::makeConversion(outputHighNode, ov::element::f32, ngraph::helpers::ConversionTypes::CONVERT);
outputHighNode = ngraph::builder::makeConversion(outputHighNode, ov::element::f32, ov::test::utils::ConversionTypes::CONVERT);
}
auto fq = std::make_shared<ngraph::opset1::FakeQuantize>(input, inputLowNode, inputHighNode, outputLowNode, outputHighNode, fqOnData.quantizationLevel);

View File

@ -15,7 +15,6 @@
#include "lpt_ngraph_functions/common/builders.hpp"
#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"
#include "lpt_ngraph_functions/common/dequantization_operations.hpp"
#include "lpt_ngraph_functions/common/builders.hpp"
namespace ngraph {
namespace builder {

View File

@ -21,7 +21,8 @@ addIeTarget(
openvino::reference
interpreter_backend
openvino::runtime::dev
ADD_CPPLINT
common_test_utils
ADD_CLANG_FORMAT
DEVELOPER_PACKAGE
tests
)

View File

@ -4,88 +4,92 @@
#pragma once
#include <vector>
#include <memory>
#include <vector>
#include <ngraph_functions/utils/ngraph_helpers.hpp>
#include <ngraph/pass/graph_rewrite.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include "openvino/core/type/element_type.hpp"
#include "openvino/core/type/element_type_traits.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/pass/graph_rewrite.hpp"
#include "openvino/pass/pattern/matcher.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
namespace ngraph {
namespace pass {
template<ngraph::element::Type_t from, ngraph::element::Type_t to>
class ConvertConstantsPrecision : public MatcherPass {
template <ov::element::Type_t from, ov::element::Type_t to>
class ConvertConstantsPrecision : public ov::pass::MatcherPass {
public:
ConvertConstantsPrecision() {
auto constant =
std::make_shared<ngraph::op::Constant>(element::f32, Shape{1}, std::vector<float>{0});
auto constant = std::make_shared<ov::op::v0::Constant>(element::f32, ov::Shape{1}, std::vector<float>{0});
ngraph::matcher_pass_callback callback = [](pattern::Matcher &m) {
auto constant = std::dynamic_pointer_cast<ngraph::op::Constant>(m.get_match_root());
ov::matcher_pass_callback callback = [](ov::pass::pattern::Matcher& m) {
auto constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(m.get_match_root());
if (!constant) {
return false;
}
if (constant->get_element_type() == ngraph::element::Type(from)) {
auto data = constant->cast_vector<typename ngraph::helpers::nGraphTypesTrait<to>::value_type>();
auto new_const = std::make_shared<ngraph::op::Constant>(to, constant->get_shape(), data);
if (constant->get_element_type() == ov::element::Type(from)) {
auto data = constant->cast_vector<typename ov::element_type_traits<to>::value_type>();
auto new_const = std::make_shared<ov::op::v0::Constant>(to, constant->get_shape(), data);
new_const->set_friendly_name(constant->get_friendly_name());
ngraph::replace_node(constant, new_const);
ov::replace_node(constant, new_const);
return true;
}
return false;
};
auto m = std::make_shared<ngraph::pattern::Matcher>(constant, "ConvertConstantsPrecision");
auto m = std::make_shared<ov::pass::pattern::Matcher>(constant, "ConvertConstantsPrecision");
register_matcher(m, callback);
}
};
template<ngraph::element::Type_t from, ngraph::element::Type_t to>
class ConvertParametersPrecision : public MatcherPass {
template <ov::element::Type_t from, ov::element::Type_t to>
class ConvertParametersPrecision : public ov::pass::MatcherPass {
public:
ConvertParametersPrecision() {
auto constant = std::make_shared<ngraph::op::Parameter>(to, Shape{1});
auto constant = std::make_shared<ov::op::v0::Parameter>(to, Shape{1});
ngraph::matcher_pass_callback callback = [](pattern::Matcher &m) {
auto parameter = std::dynamic_pointer_cast<ngraph::op::Parameter>(m.get_match_root());
if (parameter && parameter->get_element_type() == ngraph::element::Type(from)) {
ov::matcher_pass_callback callback = [](ov::pass::pattern::Matcher& m) {
auto parameter = std::dynamic_pointer_cast<ov::op::v0::Parameter>(m.get_match_root());
if (parameter && parameter->get_element_type() == ov::element::Type(from)) {
parameter->set_element_type(to);
return true;
}
return false;
};
auto m = std::make_shared<ngraph::pattern::Matcher>(constant, "ConvertParametersPrecision");
auto m = std::make_shared<ov::pass::pattern::Matcher>(constant, "ConvertParametersPrecision");
register_matcher(m, callback);
}
};
template<ngraph::element::Type_t from, ngraph::element::Type_t to>
class ConvertConvertLayerOutputPrecision : public MatcherPass {
template <ov::element::Type_t from, ov::element::Type_t to>
class ConvertConvertLayerOutputPrecision : public ov::pass::MatcherPass {
public:
ConvertConvertLayerOutputPrecision() {
auto convert = ngraph::pattern::wrap_type<opset1::Convert>();
ngraph::matcher_pass_callback callback = [](pattern::Matcher &m) {
auto convert = std::dynamic_pointer_cast<ngraph::op::Convert>(m.get_match_root());
auto convert = ov::pass::pattern::wrap_type<ov::op::v0::Convert>();
ov::matcher_pass_callback callback = [](ov::pass::pattern::Matcher& m) {
auto convert = std::dynamic_pointer_cast<ov::op::v0::Convert>(m.get_match_root());
if (!convert) {
return false;
}
if (convert->get_convert_element_type() == ngraph::element::Type(from)) {
if (convert->get_convert_element_type() == ov::element::Type(from)) {
convert->set_convert_element_type(to);
return true;
}
return false;
};
auto m = std::make_shared<ngraph::pattern::Matcher>(convert, "ConvertConvertLayerPrecision");
auto m = std::make_shared<ov::pass::pattern::Matcher>(convert, "ConvertConvertLayerPrecision");
register_matcher(m, callback);
}
};
template<ngraph::element::Type_t from, ngraph::element::Type_t to>
template <ov::element::Type_t from, ov::element::Type_t to>
class ConvertPrecision : public ov::pass::GraphRewrite {
public:
ConvertPrecision() {
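
Invocation of these passes is untouched by the migration; they still register with the standard pass manager. A hypothetical usage sketch (the helper name and the f16-to-f32 choice are illustrative, not part of this header):

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/pass/manager.hpp"

// Hypothetical helper: fold all f16 Constants, Parameters and Converts to
// f32 using the templated GraphRewrite declared above.
void convert_f16_to_f32(const std::shared_ptr<ov::Model>& model) {
    ov::pass::Manager manager;
    manager.register_pass<ngraph::pass::ConvertPrecision<ov::element::f16, ov::element::f32>>();
    manager.run_passes(model);
}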

View File

@ -4,7 +4,12 @@
#pragma once
#include "openvino/core/model.hpp"
#include "openvino/core/preprocess/pre_post_process.hpp"
#include "openvino/op/abs.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"
namespace ov {
namespace builder {
@ -15,9 +20,11 @@ struct preprocess_func {
preprocess_func(const std::function<std::shared_ptr<Model>()>& f,
const std::string& name,
float acc,
const std::vector<Shape>& shapes = {}):
m_function(f), m_name(name), m_accuracy(acc), m_shapes(shapes) {
}
const std::vector<Shape>& shapes = {})
: m_function(f),
m_name(name),
m_accuracy(acc),
m_shapes(shapes) {}
std::function<std::shared_ptr<Model>()> m_function = nullptr;
std::string m_name = {};
float m_accuracy = 0.01f;
@ -26,11 +33,9 @@ struct preprocess_func {
inline std::vector<preprocess_func> generic_preprocess_functions();
/// -------- Functions ---------------
inline std::shared_ptr<Model> create_preprocess_1input(element::Type type,
const PartialShape& shape) {
inline std::shared_ptr<Model> create_preprocess_1input(element::Type type, const PartialShape& shape) {
auto data1 = std::make_shared<op::v0::Parameter>(type, shape);
data1->set_friendly_name("input1");
data1->output(0).get_tensor().set_names({"input1"});
@ -47,8 +52,7 @@ inline std::shared_ptr<Model> create_preprocess_1input(element::Type type,
return std::make_shared<Model>(ResultVector{res}, ParameterVector{data1});
}
inline std::shared_ptr<Model> create_preprocess_2inputs(element::Type type,
const PartialShape& shape) {
inline std::shared_ptr<Model> create_preprocess_2inputs(element::Type type, const PartialShape& shape) {
auto data1 = std::make_shared<op::v0::Parameter>(type, shape);
data1->set_friendly_name("input1");
data1->output(0).get_tensor().set_names({"input1"});
@ -185,15 +189,17 @@ inline std::shared_ptr<Model> multiple_ops() {
auto p1 = std::move(p);
p = std::move(p1);
p.input().tensor().set_element_type(element::f32).set_layout("?CHW");
p.input().preprocess().mean(1.f)
.scale(2.f)
.mean({1.1f, 2.2f, 3.3f})
.scale({2.f, 3.f, 4.f})
.custom([](const Output<Node>& node) {
auto abs = std::make_shared<op::v0::Abs>(node);
abs->set_friendly_name(node.get_node_shared_ptr()->get_friendly_name() + "/abs");
return abs;
});
p.input()
.preprocess()
.mean(1.f)
.scale(2.f)
.mean({1.1f, 2.2f, 3.3f})
.scale({2.f, 3.f, 4.f})
.custom([](const Output<Node>& node) {
auto abs = std::make_shared<op::v0::Abs>(node);
abs->set_friendly_name(node.get_node_shared_ptr()->get_friendly_name() + "/abs");
return abs;
});
p.input().preprocess().convert_element_type(element::u8);
function = p.build();
return function;
@ -347,15 +353,17 @@ inline std::shared_ptr<Model> cvt_color_nv12_cvt_layout_resize() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{1, 3, 10, 10});
auto p = PrePostProcessor(function);
p.input().tensor()
.set_color_format(ColorFormat::NV12_TWO_PLANES)
.set_element_type(element::u8)
.set_spatial_static_shape(20, 20);
p.input().preprocess()
.convert_color(ColorFormat::RGB)
.convert_layout()
.convert_element_type(element::f32)
.resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input()
.tensor()
.set_color_format(ColorFormat::NV12_TWO_PLANES)
.set_element_type(element::u8)
.set_spatial_static_shape(20, 20);
p.input()
.preprocess()
.convert_color(ColorFormat::RGB)
.convert_layout()
.convert_element_type(element::f32)
.resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().model().set_layout("NCHW");
function = p.build();
return function;
@ -432,39 +440,39 @@ inline std::shared_ptr<Model> crop_dynamic() {
}
inline std::vector<preprocess_func> generic_preprocess_functions() {
return std::vector<preprocess_func> {
preprocess_func(mean_only, "mean_only", 0.01f),
preprocess_func(scale_only, "scale_only", 0.01f),
preprocess_func(mean_scale, "mean_scale", 0.01f),
preprocess_func(scale_mean, "scale_mean", 0.01f),
preprocess_func(mean_vector, "mean_vector", 0.01f),
preprocess_func(scale_vector, "scale_vector", 0.01f),
preprocess_func(convert_element_type_and_mean, "convert_element_type_and_mean", 0.01f),
preprocess_func(tensor_element_type_and_mean, "tensor_element_type_and_mean", 0.01f),
preprocess_func(custom_preprocessing, "custom_preprocessing", 0.01f),
preprocess_func(multiple_ops, "multiple_ops", 0.01f),
preprocess_func(two_inputs_basic, "two_inputs_basic", 0.01f),
preprocess_func(two_inputs_trivial, "two_inputs_trivial", 0.01f),
preprocess_func(reuse_network_layout, "reuse_network_layout", 0.01f),
preprocess_func(tensor_layout, "tensor_layout", 0.01f),
preprocess_func(resize_linear, "resize_linear", 0.01f),
preprocess_func(resize_nearest, "resize_nearest", 0.01f),
preprocess_func(resize_linear_nhwc, "resize_linear_nhwc", 0.01f),
preprocess_func(resize_cubic, "resize_cubic", 0.01f),
preprocess_func(resize_dynamic, "resize_dynamic", 0.01f, { Shape {1, 3, 223, 323} }),
preprocess_func(crop_basic, "crop_basic", 0.000001f),
preprocess_func(crop_negative, "crop_negative", 0.000001f),
preprocess_func(crop_dynamic, "crop_dynamic", 0.000001f, { Shape {1, 3, 123, 123} }),
preprocess_func(convert_layout_by_dims, "convert_layout_by_dims", 0.01f),
preprocess_func(convert_layout_hwc_to_nchw, "convert_layout_hwc_to_nchw", 0.01f),
preprocess_func(resize_and_convert_layout, "resize_and_convert_layout", 0.01f),
preprocess_func(resize_and_convert_layout_i8, "resize_and_convert_layout_i8", 0.01f),
preprocess_func(cvt_color_nv12_to_rgb_single_plane, "cvt_color_nv12_to_rgb_single_plane", 1.f),
preprocess_func(cvt_color_nv12_to_bgr_two_planes, "cvt_color_nv12_to_bgr_two_planes", 1.f),
preprocess_func(cvt_color_nv12_cvt_layout_resize, "cvt_color_nv12_cvt_layout_resize", 1.f),
preprocess_func(cvt_color_i420_to_rgb_single_plane, "cvt_color_i420_to_rgb_single_plane", 1.f),
preprocess_func(cvt_color_i420_to_bgr_three_planes, "cvt_color_i420_to_bgr_three_planes", 1.f),
preprocess_func(cvt_color_bgrx_to_bgr, "cvt_color_bgrx_to_bgr", 0.01f),
return std::vector<preprocess_func>{
preprocess_func(mean_only, "mean_only", 0.01f),
preprocess_func(scale_only, "scale_only", 0.01f),
preprocess_func(mean_scale, "mean_scale", 0.01f),
preprocess_func(scale_mean, "scale_mean", 0.01f),
preprocess_func(mean_vector, "mean_vector", 0.01f),
preprocess_func(scale_vector, "scale_vector", 0.01f),
preprocess_func(convert_element_type_and_mean, "convert_element_type_and_mean", 0.01f),
preprocess_func(tensor_element_type_and_mean, "tensor_element_type_and_mean", 0.01f),
preprocess_func(custom_preprocessing, "custom_preprocessing", 0.01f),
preprocess_func(multiple_ops, "multiple_ops", 0.01f),
preprocess_func(two_inputs_basic, "two_inputs_basic", 0.01f),
preprocess_func(two_inputs_trivial, "two_inputs_trivial", 0.01f),
preprocess_func(reuse_network_layout, "reuse_network_layout", 0.01f),
preprocess_func(tensor_layout, "tensor_layout", 0.01f),
preprocess_func(resize_linear, "resize_linear", 0.01f),
preprocess_func(resize_nearest, "resize_nearest", 0.01f),
preprocess_func(resize_linear_nhwc, "resize_linear_nhwc", 0.01f),
preprocess_func(resize_cubic, "resize_cubic", 0.01f),
preprocess_func(resize_dynamic, "resize_dynamic", 0.01f, {Shape{1, 3, 223, 323}}),
preprocess_func(crop_basic, "crop_basic", 0.000001f),
preprocess_func(crop_negative, "crop_negative", 0.000001f),
preprocess_func(crop_dynamic, "crop_dynamic", 0.000001f, {Shape{1, 3, 123, 123}}),
preprocess_func(convert_layout_by_dims, "convert_layout_by_dims", 0.01f),
preprocess_func(convert_layout_hwc_to_nchw, "convert_layout_hwc_to_nchw", 0.01f),
preprocess_func(resize_and_convert_layout, "resize_and_convert_layout", 0.01f),
preprocess_func(resize_and_convert_layout_i8, "resize_and_convert_layout_i8", 0.01f),
preprocess_func(cvt_color_nv12_to_rgb_single_plane, "cvt_color_nv12_to_rgb_single_plane", 1.f),
preprocess_func(cvt_color_nv12_to_bgr_two_planes, "cvt_color_nv12_to_bgr_two_planes", 1.f),
preprocess_func(cvt_color_nv12_cvt_layout_resize, "cvt_color_nv12_cvt_layout_resize", 1.f),
preprocess_func(cvt_color_i420_to_rgb_single_plane, "cvt_color_i420_to_rgb_single_plane", 1.f),
preprocess_func(cvt_color_i420_to_bgr_three_planes, "cvt_color_i420_to_bgr_three_planes", 1.f),
preprocess_func(cvt_color_bgrx_to_bgr, "cvt_color_bgrx_to_bgr", 0.01f),
};
}
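
Each preprocess_func above bundles a model factory with a test name, an accuracy tolerance, and optional input shapes. A hedged consumption sketch; the driver below is an assumption about how a test might use the table, not code from this header:

// Hypothetical driver over the table above; the real harness, device
// selection and reference comparison live in the calling test.
void smoke_run_all() {
    for (const auto& f : generic_preprocess_functions()) {
        std::shared_ptr<ov::Model> model = f.m_function();
        // compile `model`, infer, and compare outputs within f.m_accuracy;
        // reshape to f.m_shapes first when the list is non-empty.
        (void)model;
    }
}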

View File

@ -4,882 +4,99 @@
#pragma once
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "openvino/core/model.hpp"
namespace ngraph {
namespace builder {
namespace subgraph {
inline std::shared_ptr<ngraph::Function> makeConvPoolRelu(std::vector<size_t> inputShape = {1, 1, 32, 32},
ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) {
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
params.front()->set_friendly_name("Param_1");
params.front()->output(0).get_tensor().set_names({"data"});
std::vector<size_t> constShape = {inputShape[0], inputShape[2], inputShape[1], inputShape[3]};
auto const1 = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{4}, constShape);
const1->set_friendly_name("Const_1");
const1->output(0).get_tensor().set_names({"const1"});
auto reshape1 = std::make_shared<ngraph::opset1::Reshape>(params.front(), const1, false);
reshape1->set_friendly_name("Reshape_1");
reshape1->output(0).get_tensor().set_names({"reshape1"});
auto conv1 = ngraph::builder::makeConvolution(reshape1, ngPrc, {1, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 4);
conv1->set_friendly_name("Conv_1");
conv1->output(0).get_tensor().set_names({"conv"});
std::vector<size_t> stride{1, 1}, padB{0, 0}, padE = padB, kernel{1, 2};
auto pool1 = std::make_shared<ngraph::opset1::MaxPool>(conv1, stride, padB, padE, kernel,
ngraph::op::RoundingType::FLOOR,
ngraph::op::PadType::EXPLICIT);
pool1->output(0).get_tensor().set_names({"pool"});
pool1->set_friendly_name("Pool_1");
auto relu1 = std::make_shared<ngraph::opset1::Relu>(pool1);
relu1->set_friendly_name("Relu_1");
relu1->output(0).get_tensor().set_names({"relu"});
ngraph::Shape reluShape = relu1->outputs()[0].get_tensor().get_shape();
std::vector<size_t> constShape2 = {1, ngraph::shape_size(reluShape)};
auto const2 = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{2}, constShape2);
const2->output(0).get_tensor().set_names({"const2"});
const2->set_friendly_name("Const_2");
auto reshape2 = std::make_shared<ngraph::opset1::Reshape>(relu1, const2, false);
reshape2->output(0).get_tensor().set_names({"reshape2"});
reshape2->set_friendly_name("Reshape_2");
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(reshape2)};
std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(results, params);
return fnPtr;
}
std::shared_ptr<ov::Model> makeConvPoolRelu(std::vector<size_t> inputShape = {1, 1, 32, 32},
ov::element::Type_t ngPrc = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeConvPoolReluNoReshapes(std::vector<size_t> inputShape = {1, 1, 32, 32},
ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) {
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
params.front()->set_friendly_name("Param_1");
params.front()->output(0).get_tensor().set_names({"data"});
auto conv1 = ngraph::builder::makeConvolution(params.front(), ngPrc, {1, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 4);
conv1->set_friendly_name("Conv_1");
conv1->output(0).get_tensor().set_names({"conv"});
std::vector<size_t> stride{1, 1}, padB{0, 0}, padE = padB, kernel{1, 2};
auto pool1 = std::make_shared<ngraph::opset1::MaxPool>(conv1, stride, padB, padE, kernel,
ngraph::op::RoundingType::FLOOR,
ngraph::op::PadType::EXPLICIT);
pool1->output(0).get_tensor().set_names({"pool"});
pool1->set_friendly_name("Pool_1");
auto relu1 = std::make_shared<ngraph::opset1::Relu>(pool1);
relu1->set_friendly_name("Relu_1");
relu1->output(0).get_tensor().set_names({"relu"});
ngraph::Shape reluShape = relu1->outputs()[0].get_tensor().get_shape();
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(relu1)};
std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(results, params);
return fnPtr;
}
std::shared_ptr<ov::Model> makeConvPoolReluNoReshapes(std::vector<size_t> inputShape = {1, 1, 32, 32},
ov::element::Type_t ngPrc = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeConvPool2Relu2(std::vector<size_t> inputShape = {1, 1, 32, 32},
ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) {
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
params.front()->set_friendly_name("Param_1");
params.front()->output(0).get_tensor().set_names({"data"});
std::vector<size_t> constShape = {inputShape[0], inputShape[2], inputShape[1], inputShape[3]};
auto const1 = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{4}, constShape);
const1->set_friendly_name("Const_1");
const1->output(0).get_tensor().set_names({"const1"});
auto reshape1 = std::make_shared<ngraph::opset1::Reshape>(params.front(), const1, false);
reshape1->set_friendly_name("Reshape_1");
reshape1->output(0).get_tensor().set_names({"reshape1"});
auto conv1 = ngraph::builder::makeConvolution(reshape1, ngPrc, {1, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 4);
conv1->set_friendly_name("Conv_1");
conv1->output(0).get_tensor().set_names({"conv"});
std::vector<size_t> stride{1, 1}, padB{0, 0}, padE = padB, kernel{1, 2};
std::shared_ptr<ov::Model> makeConvPool2Relu2(std::vector<size_t> inputShape = {1, 1, 32, 32},
ov::element::Type_t ngPrc = ov::element::Type_t::f32);
ngraph::ResultVector results;
{
auto pool1 = std::make_shared<ngraph::opset1::MaxPool>(conv1, stride, padB, padE, kernel,
ngraph::op::RoundingType::FLOOR,
ngraph::op::PadType::EXPLICIT);
pool1->output(0).get_tensor().set_names({"pool_0"});
pool1->set_friendly_name("Pool_1_0");
auto relu1 = std::make_shared<ngraph::opset1::Relu>(pool1);
relu1->set_friendly_name("Relu_1_0");
relu1->output(0).get_tensor().set_names({"relu_0"});
ngraph::Shape reluShape = relu1->outputs()[0].get_tensor().get_shape();
std::vector<size_t> constShape2 = {1, ngraph::shape_size(reluShape)};
auto const2 = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{2}, constShape2);
const2->output(0).get_tensor().set_names({"const2_0"});
const2->set_friendly_name("Const_2_0");
auto reshape2 = std::make_shared<ngraph::opset1::Reshape>(relu1, const2, false);
reshape2->output(0).get_tensor().set_names({"reshape2_0"});
reshape2->set_friendly_name("Reshape_2_0");
results.push_back(std::make_shared<ngraph::opset1::Result>(reshape2));
}
{
auto pool1 = std::make_shared<ngraph::opset1::MaxPool>(conv1, stride, padB, padE, kernel,
ngraph::op::RoundingType::FLOOR,
ngraph::op::PadType::EXPLICIT);
pool1->output(0).get_tensor().set_names({"pool_1"});
pool1->set_friendly_name("Pool_1_1");
auto relu1 = std::make_shared<ngraph::opset1::Relu>(pool1);
relu1->set_friendly_name("Relu_1_1");
relu1->output(0).get_tensor().set_names({"relu_1"});
ngraph::Shape reluShape = relu1->outputs()[0].get_tensor().get_shape();
std::vector<size_t> constShape2 = {1, ngraph::shape_size(reluShape)};
auto const2 = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{2}, constShape2);
const2->output(0).get_tensor().set_names({"const2_1"});
const2->set_friendly_name("Const_2_1");
auto reshape2 = std::make_shared<ngraph::opset1::Reshape>(relu1, const2, false);
reshape2->output(0).get_tensor().set_names({"reshape2_1"});
reshape2->set_friendly_name("Reshape_2_1");
results.push_back(std::make_shared<ngraph::opset1::Result>(reshape2));
}
std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(results, params);
return fnPtr;
}
std::shared_ptr<ov::Model> makeConvPoolReluNonZero(std::vector<size_t> inputShape = {1, 1, 32, 32},
ov::element::Type_t ngPrc = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeConvPoolReluNonZero(std::vector<size_t> inputShape = {1, 1, 32, 32},
ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) {
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
params.front()->set_friendly_name("Param_1");
params.front()->output(0).get_tensor().set_names({"data"});
auto conv1 = ngraph::builder::makeConvolution(params.front(), ngPrc, {1, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 4);
conv1->set_friendly_name("Conv_1");
conv1->output(0).get_tensor().set_names({"conv"});
std::vector<size_t> stride{1, 1}, padB{0, 0}, padE = padB, kernel{1, 2};
auto pool1 = std::make_shared<ngraph::opset1::MaxPool>(conv1, stride, padB, padE, kernel,
ngraph::op::RoundingType::FLOOR,
ngraph::op::PadType::EXPLICIT);
pool1->output(0).get_tensor().set_names({"pool"});
pool1->set_friendly_name("Pool_1");
auto relu1 = std::make_shared<ngraph::opset1::Relu>(pool1);
relu1->set_friendly_name("Relu_1");
relu1->output(0).get_tensor().set_names({"relu"});
auto nonZero = std::make_shared<ngraph::op::NonZero>(relu1);
nonZero->set_friendly_name("nonZero_1");
nonZero->output(0).get_tensor().set_names({"nonZero"});
auto gatherIndices = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
ngraph::Shape{1},
std::vector<int64_t>{0});
gatherIndices->set_friendly_name("gatherIndices_1");
gatherIndices->output(0).get_tensor().set_names({"gatherIndices"});
auto gatherAxis = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
ngraph::Shape{1},
std::vector<int64_t>{1});
gatherAxis->set_friendly_name("gatherAxis_1");
gatherAxis->output(0).get_tensor().set_names({"gatherAxis"});
auto gather = std::make_shared<ngraph::opset1::Gather>(nonZero->output(0), gatherIndices, gatherAxis);
gather->set_friendly_name("gather_1");
gather->output(0).get_tensor().set_names({"gather"});
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(gather)};
std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(results, params);
return fnPtr;
}
std::shared_ptr<ov::Model> makeSplitConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
ov::element::Type_t ngPrc = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeSplitConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) {
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
params.front()->set_friendly_name("Param_1");
params.front()->get_output_tensor(0).set_names({"input_tensor"});
auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1);
auto conv1 = ngraph::builder::makeConvolution(split->output(0), ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu1 = std::make_shared<ngraph::opset1::Relu>(conv1);
auto conv2 = ngraph::builder::makeConvolution(split->output(1), ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu2 = std::make_shared<ngraph::opset1::Relu>(conv2);
auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 1);
concat->get_output_tensor(0).set_names({"concat_tensor"});
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(concat)};
std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(results, params);
fnPtr->set_friendly_name("SplitConvConcat");
return fnPtr;
}
std::shared_ptr<ov::Model> makeKSOFunction(std::vector<size_t> inputShape = {1, 4, 20, 20},
                                           ov::element::Type_t ngPrc = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeKSOFunction(std::vector<size_t> inputShape = {1, 4, 20, 20},
ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) {
ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape)),
std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
auto shapeOf = std::make_shared<ngraph::opset4::ShapeOf>(params[0]);
auto convert = std::make_shared<ngraph::opset4::Convert>(shapeOf, ngPrc);
auto newShape = ngraph::builder::makeConstant<int64_t>(ngraph::element::i64, {4}, {1, 4, 1, 1});
auto reshape = std::make_shared<ngraph::opset4::Reshape>(convert, newShape, false);
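    // The input shape itself is turned into data here: ShapeOf -> Convert -> Reshape yields a {1, 4, 1, 1}
    // tensor that is later broadcast-added to the convolution branch.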
auto conv1 = ngraph::builder::makeConvolution(params[1], ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 4);
auto relu1 = std::make_shared<ngraph::opset4::Relu>(conv1);
auto add = std::make_shared<ngraph::opset4::Add>(relu1, reshape);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(add)};
std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(results, params);
fnPtr->set_friendly_name("KSOFunction");
return fnPtr;
}
std::shared_ptr<ov::Model> makeSplitMultiConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
                                                    ov::element::Type_t ngPrc = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeSplitMultiConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) {
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
params.front()->set_friendly_name("Param_1");
params.front()->get_output_tensor(0).set_names({ "input_tensor" });
auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1);
auto conv1_0 = ngraph::builder::makeConvolution(split->output(0), ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu1_0 = std::make_shared<ngraph::opset1::Relu>(conv1_0);
auto conv1_1 = ngraph::builder::makeConvolution(relu1_0, ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu1_1 = std::make_shared<ngraph::opset1::Relu>(conv1_1);
auto conv1_2 = ngraph::builder::makeConvolution(relu1_1, ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu1_2 = std::make_shared<ngraph::opset1::Relu>(conv1_2);
auto conv1_3 = ngraph::builder::makeConvolution(relu1_2, ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu1_3 = std::make_shared<ngraph::opset1::Relu>(conv1_3);
auto conv1_4 = ngraph::builder::makeConvolution(relu1_2, ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu1_4 = std::make_shared<ngraph::opset1::Relu>(conv1_4);
auto conv2_0 = ngraph::builder::makeConvolution(split->output(1), ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu2_0 = std::make_shared<ngraph::opset1::Relu>(conv2_0);
auto conv2_1 = ngraph::builder::makeConvolution(relu2_0, ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu2_1 = std::make_shared<ngraph::opset1::Relu>(conv2_1);
auto conv2_2 = ngraph::builder::makeConvolution(relu2_1, ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu2_2 = std::make_shared<ngraph::opset1::Relu>(conv2_2);
auto conv2_3 = ngraph::builder::makeConvolution(relu2_2, ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu2_3 = std::make_shared<ngraph::opset1::Relu>(conv2_3);
auto conv2_4 = ngraph::builder::makeConvolution(relu2_2, ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu2_4 = std::make_shared<ngraph::opset1::Relu>(conv2_4);
auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu1_4->output(0), relu2_4->output(0)}, 1);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(concat)};
std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(results, params);
fnPtr->set_friendly_name("SplitMultiConvConcat");
return fnPtr;
}
std::shared_ptr<ov::Model> makeTIwithLSTMcell(ov::element::Type_t ngPRC = ov::element::Type_t::f32,
                                              size_t N = 32,   // Batch size
                                              size_t L = 10,   // Sequence length
                                              size_t I = 8,    // Input size
                                              size_t H = 32);  // Hidden size
inline std::shared_ptr<ngraph::Function> makeTIwithLSTMcell(
ngraph::element::Type_t ngPRC = ngraph::element::Type_t::f32,
size_t N = 32, // Batch size
size_t L = 10, // Sequence length
size_t I = 8, // Input size
size_t H = 32) { // Hidden size
auto SENT = std::make_shared<ngraph::opset1::Parameter>(ngPRC, ngraph::Shape{N, L, I});
auto H_init = std::make_shared<ngraph::opset1::Parameter>(ngPRC, ngraph::Shape{N, 1, H});
auto C_init = std::make_shared<ngraph::opset1::Parameter>(ngPRC, ngraph::Shape{N, 1, H});
auto H_t = std::make_shared<ngraph::opset1::Parameter>(ngPRC, ngraph::Shape{N, 1, H});
auto C_t = std::make_shared<ngraph::opset1::Parameter>(ngPRC, ngraph::Shape{N, 1, H});
// Body
auto X = std::make_shared<ngraph::opset1::Parameter>(ngPRC, ngraph::Shape{N, 1, I});
std::vector<uint64_t> dataW(4 * H * I, 0);
auto W_body = std::make_shared<ngraph::opset1::Constant>(ngPRC, ngraph::Shape{4 * H, I}, dataW);
std::vector<uint64_t> dataR(4 * H * H, 0);
auto R_body = std::make_shared<ngraph::opset1::Constant>(ngPRC, ngraph::Shape{4 * H, H}, dataR);
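    // Zero-filled weight ({4 * H, I}) and recurrence ({4 * H, H}) constants pack the four LSTM gates.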
std::vector<uint64_t> inShape = {N, H};
auto constantH = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64, ngraph::Shape{2}, inShape);
inShape = {N, I};
auto constantX = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64, ngraph::Shape{2}, inShape);
auto LSTM_cell =
std::make_shared<ngraph::opset4::LSTMCell>(std::make_shared<ngraph::opset1::Reshape>(X, constantX, false),
std::make_shared<ngraph::opset1::Reshape>(H_t, constantH, false),
std::make_shared<ngraph::opset1::Reshape>(C_t, constantH, false),
W_body,
R_body,
H);
inShape = {N, 1, H};
auto constantHo = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{3}, inShape);
auto H_o = std::make_shared<ngraph::opset1::Reshape>(LSTM_cell->output(0), constantHo, false);
auto C_o = std::make_shared<ngraph::opset1::Reshape>(LSTM_cell->output(1), constantHo, false);
auto body = std::make_shared<ngraph::Function>(
ngraph::OutputVector{H_o, C_o}, ngraph::ParameterVector{X, H_t, C_t});
auto tensor_iterator = std::make_shared<ngraph::op::TensorIterator>();
tensor_iterator->set_body(body);
// start=0, stride=1, part_size=1, end=39, axis=1
tensor_iterator->set_sliced_input(X, SENT, 0, 1, 1, -1, 1);
// H_t is Hinit on the first iteration, Ho after that
tensor_iterator->set_merged_input(H_t, H_init, H_o);
tensor_iterator->set_merged_input(C_t, C_init, C_o);
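    // C_t likewise starts as C_init and receives C_o on subsequent iterations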
// Output 0 is last Ho, result 0 of body
auto out0 = tensor_iterator->get_iter_value(H_o, -1);
// Output 1 is last Co, result 1 of body
auto out1 = tensor_iterator->get_iter_value(C_o, -1);
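    // With the defaults (N=32, L=10, I=8, H=32) the TensorIterator runs 10 iterations over {32, 1, 8} slices,
    // and both outputs hold the {32, 1, 32} state of the last iteration.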
auto results = ngraph::ResultVector{std::make_shared<ngraph::opset1::Result>(out0),
std::make_shared<ngraph::opset1::Result>(out1)};
auto fn_ptr = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{SENT, H_init, C_init});
fn_ptr->set_friendly_name("TIwithLSTMcell");
return fn_ptr;
}
std::shared_ptr<ov::Model> makeSingleConv(std::vector<size_t> inputShape = {1, 3, 24, 24},
                                          ov::element::Type_t type = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeSingleConv(std::vector<size_t> inputShape = {1, 3, 24, 24},
ngraph::element::Type_t type = ngraph::element::Type_t::f32) {
auto param0 = std::make_shared<ngraph::opset1::Parameter>(type, ngraph::Shape(inputShape));
auto conv1 = ngraph::builder::makeConvolution(param0, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 4);
auto result = std::make_shared<ngraph::opset1::Result>(conv1);
auto fn_ptr = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param0});
fn_ptr->set_friendly_name("SingleConv");
return fn_ptr;
}
std::shared_ptr<ov::Model> makeDetectionOutput(ov::element::Type_t type = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeDetectionOutput(ngraph::element::Type_t type = ngraph::element::Type_t::f32) {
const auto& data = std::make_shared<ngraph::opset1::Parameter>(type, ngraph::Shape{1, 4, 10, 10});
const auto& constant_0 = std::make_shared<ngraph::opset1::Constant>(type, ngraph::Shape{1, 1, 1, 1});
const auto& mul_0 = std::make_shared<ngraph::opset1::Multiply>(data, constant_0);
const auto& filters = std::make_shared<ngraph::opset1::Constant>(type, ngraph::Shape{1, 4, 1, 1});
const auto& conv = std::make_shared<ngraph::opset1::Convolution>(
mul_0, filters, ngraph::Strides{1, 1}, ngraph::CoordinateDiff{0, 0}, ngraph::CoordinateDiff{0, 0}, ngraph::Strides{1, 1});
const auto& box_logits_reshape = std::make_shared<ngraph::opset1::Constant>(
ngraph::element::i64, ngraph::Shape{2}, std::vector<int64_t>{0, -1});
const auto& box_logits = std::make_shared<ngraph::opset1::Reshape>(conv, box_logits_reshape, true);
const auto& four_times = std::make_shared<ngraph::opset1::Tile>(box_logits, std::make_shared<ngraph::opset1::Constant>(
ngraph::element::i64, ngraph::Shape{2}, std::vector<int64_t>{1, 4}));
const auto& third_input_reshape = std::make_shared<ngraph::opset1::Constant>(
ngraph::element::i64, ngraph::Shape{3}, std::vector<int64_t>{0, 1, -1});
const auto& third_input = std::make_shared<ngraph::opset1::Reshape>(four_times, third_input_reshape, true);
ngraph::op::DetectionOutput::Attributes attr;
attr.num_classes = 4;
attr.background_label_id = 0;
attr.top_k = 75;
attr.variance_encoded_in_target = true;
attr.keep_top_k = {50};
attr.code_type = std::string{"caffe.PriorBoxParameter.CORNER"};
attr.share_location = true;
attr.nms_threshold = 0.5f;
attr.confidence_threshold = 0.5f;
attr.clip_after_nms = false;
attr.clip_before_nms = false;
attr.decrease_label_id = false;
attr.normalized = true;
attr.input_height = 1;
attr.input_width = 1;
attr.objectness_score = 0.4f;
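    // keep_top_k = {50} caps the detections kept per image; variance_encoded_in_target = true means
    // box variances are already folded into the targets and are not decoded separately.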
const auto& detection = std::make_shared<ngraph::opset1::DetectionOutput>(four_times, four_times, third_input, attr);
const auto& convert = std::make_shared<ngraph::opset1::Convert>(detection, type);
return std::make_shared<ov::Model>(ov::NodeVector{convert}, ov::ParameterVector{data}, "SplitableDetectionOutput");
}
std::shared_ptr<ov::Model> makeMultiSingleConv(std::vector<size_t> inputShape = {1, 3, 24, 24},
                                               ov::element::Type type = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeMultiSingleConv(std::vector<size_t> inputShape = {1, 3, 24, 24},
                                                             ngraph::element::Type type = ngraph::element::Type_t::f32) {
auto param0 = std::make_shared<ngraph::opset1::Parameter>(type, ngraph::Shape(inputShape));
auto conv1 = ngraph::builder::makeConvolution(param0, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto conv2 = ngraph::builder::makeConvolution(conv1, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto conv3 = ngraph::builder::makeConvolution(conv2, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto conv4 = ngraph::builder::makeConvolution(conv3, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto conv5 = ngraph::builder::makeConvolution(conv4, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto conv6 = ngraph::builder::makeConvolution(conv5, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto conv7 = ngraph::builder::makeConvolution(conv6, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto conv8 = ngraph::builder::makeConvolution(conv7, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto conv9 = ngraph::builder::makeConvolution(conv8, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto conv10 = ngraph::builder::makeConvolution(conv9, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto result = std::make_shared<ngraph::opset1::Result>(conv10);
auto fn_ptr = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param0});
fn_ptr->set_friendly_name("MultiSingleConv");
return fn_ptr;
}
std::shared_ptr<ov::Model> make2InputSubtract(std::vector<size_t> inputShape = {1, 3, 24, 24},
                                              ov::element::Type_t type = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> make2InputSubtract(std::vector<size_t> inputShape = {1, 3, 24, 24},
                                                            ngraph::element::Type_t type = ngraph::element::Type_t::f32) {
auto param0 = std::make_shared<ngraph::opset1::Parameter>(type, ngraph::Shape(inputShape));
auto param1 = std::make_shared<ngraph::opset1::Parameter>(type, ngraph::Shape(inputShape));
auto subtract = std::make_shared<ngraph::opset1::Subtract>(param0, param1);
auto result = std::make_shared<ngraph::opset1::Result>(subtract);
auto fn_ptr = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param0, param1});
fn_ptr->set_friendly_name("TwoInputSubtract");
return fn_ptr;
}
std::shared_ptr<ov::Model> makeNestedBranchConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
                                                      ov::element::Type ngPrc = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeNestedBranchConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
                                                                    ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) {
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
auto relu0 = std::make_shared<ngraph::opset1::Relu>(params[0]);
auto conv1 = ngraph::builder::makeConvolution(relu0, ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu1 = std::make_shared<ngraph::opset1::Relu>(conv1);
auto conv2 = ngraph::builder::makeConvolution(relu0, ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 10);
auto relu2 = std::make_shared<ngraph::opset1::Relu>(conv2);
auto conv3 = ngraph::builder::makeConvolution(relu2, ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu3 = std::make_shared<ngraph::opset1::Relu>(conv3);
auto conv4 = ngraph::builder::makeConvolution(relu2, ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu4 = std::make_shared<ngraph::opset1::Relu>(conv4);
auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu3->output(0), relu4->output(0)}, 1);
auto concat1 = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu1->output(0), concat}, 1);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(concat1)};
std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(results, params);
fnPtr->set_friendly_name("NestedBranchConvConcat");
return fnPtr;
}
std::shared_ptr<ov::Model> makeNestedSplitConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
                                                     ov::element::Type ngPrc = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeNestedSplitConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
                                                                   ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) {
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1);
auto conv1 = ngraph::builder::makeConvolution(split->output(0), ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu1 = std::make_shared<ngraph::opset1::Relu>(conv1);
auto conv2 = ngraph::builder::makeConvolution(split->output(1), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 10);
auto relu2 = std::make_shared<ngraph::opset1::Relu>(conv2);
auto split2 = ngraph::builder::makeSplit(relu2, ngPrc, 2, 1);
auto conv3 = ngraph::builder::makeConvolution(split2->output(0), ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu3 = std::make_shared<ngraph::opset1::Relu>(conv3);
auto conv4 = ngraph::builder::makeConvolution(split2->output(1), ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu4 = std::make_shared<ngraph::opset1::Relu>(conv4);
auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu3->output(0), relu4->output(0)}, 1);
auto concat1 = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu1->output(0), concat}, 1);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(concat1)};
std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(results, params);
fnPtr->set_friendly_name("NestedSplitConvConcat");
return fnPtr;
}
std::shared_ptr<ov::Model> makeSplitConvConcatInputInBranch(std::vector<size_t> inputShape = {1, 4, 20, 20},
                                                            ov::element::Type ngPrc = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeSplitConvConcatInputInBranch(std::vector<size_t> inputShape = {1, 4, 20, 20},
                                                                          ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) {
ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape)),
std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1);
auto conv1 = ngraph::builder::makeConvolution(split->output(0), ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu1 = std::make_shared<ngraph::opset1::Relu>(conv1);
auto conv2 = ngraph::builder::makeConvolution(split->output(1), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu2 = std::make_shared<ngraph::opset1::Relu>(conv2);
auto conv4 = ngraph::builder::makeConvolution(params[1]->output(0), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu4 = std::make_shared<ngraph::opset1::Relu>(conv4);
auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu4->output(0), relu2->output(0)}, 1);
auto conv3 = ngraph::builder::makeConvolution(concat, ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5);
auto relu3 = std::make_shared<ngraph::opset1::Relu>(conv3);
auto concat1 = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu1->output(0), relu3->output(0)}, 1);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(concat1)};
std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(results, params);
fnPtr->set_friendly_name("SplitConvConcatInputInBranch");
return fnPtr;
}
std::shared_ptr<ov::Model> makeSplitConvConcatNestedInBranch(std::vector<size_t> inputShape = {1, 4, 20, 20},
                                                             ov::element::Type ngPrc = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeSplitConvConcatNestedInBranch(std::vector<size_t> inputShape = {1, 4, 20, 20},
                                                                           ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) {
ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape)),
std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
int localId = 0;
#define SET_NAME(node) node->set_friendly_name(#node + std::to_string(localId++));
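// Stringizes the variable name and appends a running counter, so nodes created inside the nested lambdas
// below still get unique friendly names.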
auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); SET_NAME(split);
auto conv1 = ngraph::builder::makeConvolution(split->output(0), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv1);
auto relu1 = std::make_shared<ngraph::opset1::Relu>(conv1); SET_NAME(relu1);
auto conv2 = ngraph::builder::makeConvolution(split->output(1), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv2);
auto relu2 = std::make_shared<ngraph::opset1::Relu>(conv2); SET_NAME(relu2);
auto nestedSubgraph = [&] {
auto split = ngraph::builder::makeSplit(params[1], ngPrc, 2, 1); SET_NAME(split);
auto conv1 = ngraph::builder::makeConvolution(split->output(0), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv1);
auto relu1 = std::make_shared<ngraph::opset1::Relu>(conv1); SET_NAME(relu1);
auto conv2 = ngraph::builder::makeConvolution(split->output(1), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 10); SET_NAME(conv2);
auto relu2 = std::make_shared<ngraph::opset1::Relu>(conv2); SET_NAME(relu2);
auto split2 = ngraph::builder::makeSplit(relu2, ngPrc, 2, 1); SET_NAME(split2);
auto conv3 = ngraph::builder::makeConvolution(split2->output(0), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv3);
auto relu3 = std::make_shared<ngraph::opset1::Relu>(conv3); SET_NAME(relu3);
auto conv4 = ngraph::builder::makeConvolution(split2->output(1), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv4);
auto relu4 = std::make_shared<ngraph::opset1::Relu>(conv4); SET_NAME(relu4);
auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu3->output(0), relu4->output(0)}, 1);
SET_NAME(concat);
auto concat1 = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu1->output(0), concat}, 1); SET_NAME(concat1);
auto conv5 = ngraph::builder::makeConvolution(concat1, ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv5);
auto relu5 = std::make_shared<ngraph::opset1::Relu>(conv5); SET_NAME(relu5);
return relu5;
}();
auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{nestedSubgraph->output(0), relu2->output(0)}, 1);
SET_NAME(concat);
auto conv3 = ngraph::builder::makeConvolution(concat, ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv3);
auto relu3 = std::make_shared<ngraph::opset1::Relu>(conv3); SET_NAME(relu3);
auto concat1 = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu1->output(0), relu3->output(0)}, 1); SET_NAME(concat1);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(concat1)};
std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(results, params);
fnPtr->set_friendly_name("SplitConvConcatNestedInBranch");
return fnPtr;
}
std::shared_ptr<ov::Model> makeSplitConvConcatNestedInBranchNestedOut(
    std::vector<size_t> inputShape = {1, 4, 20, 20},
    ov::element::Type ngPrc = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeSplitConvConcatNestedInBranchNestedOut(
    std::vector<size_t> inputShape = {1, 4, 20, 20},
    ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) {
ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape)),
std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
int localId = 0;
#define SET_NAME(node) node->set_friendly_name(#node + std::to_string(localId++));
auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); SET_NAME(split);
auto conv1 = ngraph::builder::makeConvolution(split->output(0), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv1);
auto relu1 = std::make_shared<ngraph::opset1::Relu>(conv1); SET_NAME(relu1);
auto conv2 = ngraph::builder::makeConvolution(split->output(1), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 10); SET_NAME(conv2);
auto relu2 = std::make_shared<ngraph::opset1::Relu>(conv2); SET_NAME(relu2);
auto split3 = ngraph::builder::makeSplit(relu2, ngPrc, 2, 1); SET_NAME(split3);
auto conv32 = ngraph::builder::makeConvolution(split3->output(1), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 10); SET_NAME(conv32);
auto relu32 = std::make_shared<ngraph::opset1::Relu>(conv32); SET_NAME(relu32);
auto nestedSubgraph = [&] {
auto split = ngraph::builder::makeSplit(params[1], ngPrc, 2, 1); SET_NAME(split);
auto conv1 = ngraph::builder::makeConvolution(split->output(0), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv1);
auto relu1 = std::make_shared<ngraph::opset1::Relu>(conv1); SET_NAME(relu1);
auto conv2 = ngraph::builder::makeConvolution(split->output(1), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 10); SET_NAME(conv2);
auto relu2 = std::make_shared<ngraph::opset1::Relu>(conv2); SET_NAME(relu2);
auto split2 = ngraph::builder::makeSplit(relu2, ngPrc, 2, 1); SET_NAME(split2);
auto conv3 = ngraph::builder::makeConvolution(split2->output(0), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv3);
auto relu3 = std::make_shared<ngraph::opset1::Relu>(conv3); SET_NAME(relu3);
auto conv4 = ngraph::builder::makeConvolution(split2->output(1), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv4);
auto relu4 = std::make_shared<ngraph::opset1::Relu>(conv4); SET_NAME(relu4);
auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu3->output(0), relu4->output(0)}, 1);
SET_NAME(concat);
auto concat1 = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu1->output(0), concat}, 1); SET_NAME(concat1);
auto conv5 = ngraph::builder::makeConvolution(concat1, ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv5);
auto relu5 = std::make_shared<ngraph::opset1::Relu>(conv5); SET_NAME(relu5);
return relu5;
}();
auto nestedSubgraph1 = [&] {
auto split = ngraph::builder::makeSplit(relu32, ngPrc, 2, 1); SET_NAME(split);
auto conv1 = ngraph::builder::makeConvolution(split->output(0), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv1);
auto relu1 = std::make_shared<ngraph::opset1::Relu>(conv1); SET_NAME(relu1);
auto conv2 = ngraph::builder::makeConvolution(split->output(1), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 10); SET_NAME(conv2);
auto relu2 = std::make_shared<ngraph::opset1::Relu>(conv2); SET_NAME(relu2);
auto split2 = ngraph::builder::makeSplit(relu2, ngPrc, 2, 1); SET_NAME(split2);
auto conv3 = ngraph::builder::makeConvolution(split2->output(0), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv3);
auto relu3 = std::make_shared<ngraph::opset1::Relu>(conv3); SET_NAME(relu3);
auto conv4 = ngraph::builder::makeConvolution(split2->output(1), ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv4);
auto relu4 = std::make_shared<ngraph::opset1::Relu>(conv4); SET_NAME(relu4);
auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu3->output(0), relu4->output(0)}, 1);
SET_NAME(concat);
auto concat1 = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu1->output(0), concat}, 1); SET_NAME(concat1);
auto conv5 = ngraph::builder::makeConvolution(concat1, ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv5);
auto relu5 = std::make_shared<ngraph::opset1::Relu>(conv5); SET_NAME(relu5);
return relu5;
}();
auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{nestedSubgraph->output(0), split3->output(0)}, 1);
SET_NAME(concat);
auto conv3 = ngraph::builder::makeConvolution(concat, ngPrc, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
ngraph::op::PadType::EXPLICIT, 5); SET_NAME(conv3);
auto relu3 = std::make_shared<ngraph::opset1::Relu>(conv3); SET_NAME(relu3);
auto concat1 = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu1->output(0), relu3->output(0)}, 1); SET_NAME(concat1);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(concat1), std::make_shared<ngraph::opset1::Result>(nestedSubgraph1)};
std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(results, params);
fnPtr->set_friendly_name("SplitConvConcatNestedInBranchNestedOut");
return fnPtr;
}
std::shared_ptr<ov::Model> makeConvBias(std::vector<size_t> inputShape = {1, 3, 24, 24},
                                        ov::element::Type type = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeConvBias(std::vector<size_t> inputShape = {1, 3, 24, 24},
                                                      ngraph::element::Type type = ngraph::element::Type_t::f32) {
ov::ParameterVector parameter {std::make_shared<ov::op::v0::Parameter>(type, ov::Shape(inputShape))};
parameter[0]->set_friendly_name("parameter");
auto weights = ngraph::opset1::Constant::create(type, ngraph::Shape{6, 3, 1, 1}, {1});
auto biases = ngraph::opset1::Constant::create(type, ngraph::Shape{6, 1, 1}, {1});
auto conv = std::make_shared<ngraph::opset1::Convolution>(parameter[0], weights, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 0}, ngraph::CoordinateDiff{0, 0}, ngraph::Strides{1, 1});
conv->set_friendly_name("conv");
auto add = std::make_shared<ngraph::opset1::Add>(conv, biases);
add->set_friendly_name("add");
auto result = std::make_shared<ngraph::opset1::Result>(add);
result->set_friendly_name("result");
std::shared_ptr<ngraph::Function> fn_ptr = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{parameter});
fn_ptr->set_friendly_name("ConvBias");
return fn_ptr;
}
std::shared_ptr<ov::Model> makeReadConcatSplitAssign(std::vector<size_t> inputShape = {1, 1, 2, 4},
                                                     ov::element::Type type = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeReadConcatSplitAssign(std::vector<size_t> inputShape = {1, 1, 2, 4},
                                                                   ngraph::element::Type type = ngraph::element::Type_t::f32) {
ov::ParameterVector parameter {std::make_shared<ov::op::v0::Parameter>(type, ov::Shape(inputShape))};
parameter[0]->set_friendly_name("parameter");
auto init_const = ngraph::op::Constant::create(type, inputShape, {0});
auto read = std::make_shared<ngraph::opset5::ReadValue>(init_const, "v0");
read->set_friendly_name("read");
std::vector<std::shared_ptr<ngraph::Node>> args = {parameter[0], read};
auto conc = std::make_shared<ngraph::op::Concat>(args, 3);
conc->set_friendly_name("concat");
auto res = std::make_shared<ngraph::op::Result>(conc);
res->set_friendly_name("result");
const auto axis = ngraph::op::Constant::create(element::i64, Shape{}, {3});
axis->set_friendly_name("axis");
auto crop = std::make_shared<ngraph::op::v1::Split>(conc, axis, 2);
crop->set_friendly_name("split");
auto assign = std::make_shared<ngraph::opset5::Assign>(crop, "v0");
assign->set_friendly_name("assign");
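    // ReadValue and Assign share variable "v0": the first Split half (the current input slice, given the
    // {1, 1, 2, 4} default shape) is written back, so the next inference reads this inference's input.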
std::shared_ptr<ngraph::Function> fn_ptr = std::make_shared<ngraph::Function>(ngraph::ResultVector({res}),
ngraph::SinkVector({assign}),
ngraph::ParameterVector{parameter});
fn_ptr->set_friendly_name("ReadConcatSplitAssign");
return fn_ptr;
}
std::shared_ptr<ov::Model> makeMatMulBias(std::vector<size_t> inputShape = {1, 3, 24, 24},
                                          ov::element::Type type = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeMatMulBias(std::vector<size_t> inputShape = { 1, 3, 24, 24 },
                                                        ngraph::element::Type type = ngraph::element::Type_t::f32) {
ov::ParameterVector parameter {std::make_shared<ov::op::v0::Parameter>(type, ov::Shape(inputShape))};
parameter[0]->set_friendly_name("parameter");
auto weights = ngraph::opset1::Constant::create(type, ngraph::Shape{ 24, 24 }, { 1 });
auto biases = ngraph::opset1::Constant::create(type, ngraph::Shape{ 1, 24 }, { 1 });
auto matmul = std::make_shared<opset1::MatMul>(parameter[0], weights);
matmul->set_friendly_name("matmul");
auto add = std::make_shared<opset1::Add>(matmul, biases);
add->set_friendly_name("add");
auto result = std::make_shared<ngraph::opset1::Result>(add);
result->set_friendly_name("result");
std::shared_ptr<ngraph::Function> fn_ptr = std::make_shared<ngraph::Function>(ngraph::ResultVector{ result }, ngraph::ParameterVector{ parameter });
fn_ptr->set_friendly_name("MatMulBias");
return fn_ptr;
}
std::shared_ptr<ov::Model> makeConvertTranspose(std::vector<size_t> inputShape = {1, 3, 24, 24},
                                                std::vector<size_t> inputOrder = {0, 1, 2, 3},
                                                ov::element::Type type = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeConvertTranspose(std::vector<size_t> inputShape = { 1, 3, 24, 24 },
                                                              std::vector<size_t> inputOrder = { 0, 1, 2, 3 },
                                                              ngraph::element::Type type = ngraph::element::Type_t::f32) {
ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(type, ov::Shape(inputShape))};
params.front()->set_friendly_name("Param_1");
params.front()->output(0).get_tensor().set_names({"data"});
const auto order = ngraph::op::Constant::create(element::i32, {inputOrder.size()}, inputOrder);
auto convert = std::make_shared<opset1::Convert>(params.front(), type);
convert->set_friendly_name("convert");
auto transpose = std::make_shared<opset1::Transpose>(convert, order);
transpose->set_friendly_name("transpose");
auto result = std::make_shared<ngraph::opset1::Result>(transpose);
result->set_friendly_name("result");
std::shared_ptr<ngraph::Function> fn_ptr = std::make_shared<ngraph::Function>(ngraph::ResultVector{ result }, ngraph::ParameterVector{ params });
fn_ptr->set_friendly_name("ConvertTranspose");
return fn_ptr;
}
std::shared_ptr<ov::Model> makeMultipleInputOutputReLU(std::vector<size_t> inputShape = {1, 1, 32, 32},
                                                       ov::element::Type_t type = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeMultipleInputOutputReLU(std::vector<size_t> inputShape = {1, 1, 32, 32},
                                                                     ngraph::element::Type_t type = ngraph::element::Type_t::f32) {
auto param1 = std::make_shared<ngraph::opset8::Parameter>(type, ngraph::Shape(inputShape));
param1->set_friendly_name("param1");
param1->output(0).get_tensor().set_names({"data1"});
auto param2 = std::make_shared<ngraph::opset8::Parameter>(type, ngraph::Shape(inputShape));
param2->set_friendly_name("param2");
param2->output(0).get_tensor().set_names({"data2"});
auto relu = std::make_shared<ngraph::opset8::Relu>(param1);
relu->set_friendly_name("relu_op");
relu->output(0).get_tensor().set_names({"relu"});
auto result1 = std::make_shared<ngraph::opset8::Result>(relu);
result1->set_friendly_name("result1");
auto concat = std::make_shared<ngraph::opset8::Concat>(OutputVector{relu, param2}, 1);
concat->set_friendly_name("concat_op");
concat->output(0).get_tensor().set_names({"concat"});
auto result2 = std::make_shared<ngraph::opset8::Result>(concat);
result2->set_friendly_name("result2");
auto fn_ptr = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2},
ngraph::ParameterVector{param1, param2});
fn_ptr->set_friendly_name("MultipleInputOutputReLU");
return fn_ptr;
}
std::shared_ptr<ov::Model> makeMultipleInputOutputDoubleConcat(std::vector<size_t> inputShape = {1, 1, 32, 32},
                                                               ov::element::Type_t type = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeMultipleInputOutputDoubleConcat(std::vector<size_t> inputShape = {1, 1, 32, 32},
                                                                             ngraph::element::Type_t type = ngraph::element::Type_t::f32) {
auto param1 = std::make_shared<ngraph::opset8::Parameter>(type, ngraph::Shape{inputShape});
param1->set_friendly_name("param1");
param1->output(0).get_tensor().set_names({"data1"});
auto param2 = std::make_shared<ngraph::opset8::Parameter>(type, ngraph::Shape(inputShape));
param2->set_friendly_name("param2");
param2->output(0).get_tensor().set_names({"data2"});
auto concat1 = std::make_shared<ngraph::opset8::Concat>(OutputVector{param1, param2}, 1);
concat1->set_friendly_name("concat_op1");
concat1->output(0).get_tensor().set_names({"concat1"});
auto result1 = std::make_shared<ngraph::opset8::Result>(concat1);
result1->set_friendly_name("result1");
auto concat2 = std::make_shared<ngraph::opset8::Concat>(OutputVector{concat1, param2}, 1);
concat2->set_friendly_name("concat_op2");
concat2->output(0).get_tensor().set_names({"concat2"});
auto result2 = std::make_shared<ngraph::opset8::Result>(concat2);
result2->set_friendly_name("result2");
auto fn_ptr = std::make_shared<ngraph::Function>(ngraph::ResultVector{ result1, result2 },
ngraph::ParameterVector{ param1, param2 });
fn_ptr->set_friendly_name("makeMultipleInputOutputDoubleConcat");
return fn_ptr;
}
std::shared_ptr<ov::Model> makeSingleConcatWithConstant(std::vector<size_t> inputShape = {1, 1, 2, 4},
                                                        ov::element::Type type = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeSingleConcatWithConstant(std::vector<size_t> inputShape = {1, 1, 2, 4},
                                                                      ngraph::element::Type type = ngraph::element::Type_t::f32) {
ov::ParameterVector parameter {std::make_shared<ov::op::v0::Parameter>(type, ov::Shape(inputShape))};
parameter[0]->set_friendly_name("Param_1");
parameter[0]->output(0).get_tensor().set_names({"data"});
auto init_const = ngraph::op::Constant::create(type, inputShape, {0});
std::vector<std::shared_ptr<ngraph::Node>> args = {parameter[0], init_const};
auto conc = std::make_shared<ngraph::op::Concat>(args, 3);
conc->set_friendly_name("concat");
auto res = std::make_shared<ngraph::op::Result>(conc);
res->set_friendly_name("result");
std::shared_ptr<ngraph::Function> fn_ptr = std::make_shared<ngraph::Function>(ngraph::ResultVector({res}),
ngraph::ParameterVector{parameter});
fn_ptr->set_friendly_name("SingleConcatWithConstant");
return fn_ptr;
}
std::shared_ptr<ov::Model> makeConcatWithParams(std::vector<size_t> inputShape = {1, 1, 32, 32},
                                                ov::element::Type_t type = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeConcatWithParams(std::vector<size_t> inputShape = {1, 1, 32, 32},
                                                              ngraph::element::Type_t type = ngraph::element::Type_t::f32) {
auto parameter1 = std::make_shared<ngraph::opset8::Parameter>(type, ngraph::Shape{inputShape});
parameter1->set_friendly_name("param1");
parameter1->output(0).get_tensor().set_names({"data1"});
auto parameter2 = std::make_shared<ngraph::opset8::Parameter>(type, ngraph::Shape{inputShape});
parameter2->set_friendly_name("param2");
parameter2->output(0).get_tensor().set_names({"data2"});
auto concat = std::make_shared<ngraph::opset8::Concat>(OutputVector{parameter1, parameter2}, 1);
concat->set_friendly_name("concat_op");
concat->output(0).get_tensor().set_names({"concat"});
auto result = std::make_shared<ngraph::opset8::Result>(concat);
result->set_friendly_name("result");
auto fn_ptr = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
ngraph::ParameterVector{parameter1, parameter2});
fn_ptr->set_friendly_name("SingleConcatWithParams");
return fn_ptr;
}
std::shared_ptr<ov::Model> makeSingleSplit(std::vector<size_t> inputShape = {1, 4, 32, 32},
                                           ov::element::Type_t type = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeSingleSplit(std::vector<size_t> inputShape = {1, 4, 32, 32},
                                                         ngraph::element::Type_t type = ngraph::element::Type_t::f32) {
auto param1 = std::make_shared<ngraph::opset8::Parameter>(type, ngraph::Shape{inputShape});
param1->set_friendly_name("param1");
param1->output(0).get_tensor().set_names({"data1"});
auto axis_node = ngraph::opset8::Constant::create(element::i64, Shape{}, {1});
auto split = std::make_shared<ngraph::opset8::Split>(param1, axis_node, 2);
split->set_friendly_name("split");
split->output(0).get_tensor().set_names({"tensor_split_1"});
split->output(1).get_tensor().set_names({"tensor_split_2"});
auto result1 = std::make_shared<ngraph::opset8::Result>(split->output(0));
result1->set_friendly_name("result1");
auto result2 = std::make_shared<ngraph::opset8::Result>(split->output(1));
result2->set_friendly_name("result2");
auto fn_ptr =
std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2}, ngraph::ParameterVector{param1});
fn_ptr->set_friendly_name("SingleSplit");
return fn_ptr;
}
std::shared_ptr<ov::Model> makeSplitConcat(std::vector<size_t> inputShape = {1, 4, 24, 24},
                                           ov::element::Type_t type = ov::element::Type_t::f32);
inline std::shared_ptr<ngraph::Function> makeSplitConcat(std::vector<size_t> inputShape = {1, 4, 24, 24},
                                                         ngraph::element::Type_t type = ngraph::element::Type_t::f32) {
auto param1 = std::make_shared<ngraph::opset8::Parameter>(type, ngraph::Shape{inputShape});
param1->set_friendly_name("Param1");
param1->output(0).get_tensor().set_names({"data1"});
auto axis_node = ngraph::opset8::Constant::create(element::i64, Shape{}, {1});
auto split = std::make_shared<ngraph::opset8::Split>(param1, axis_node, 2);
split->set_friendly_name("Split");
split->output(0).get_tensor().set_names({"tensor_split_1"});
split->output(1).get_tensor().set_names({"tensor_split_2"});
auto concat = std::make_shared<ngraph::opset8::Concat>(OutputVector{split->output(0), split->output(1)}, 1);
concat->set_friendly_name("Concat_op");
concat->output(0).get_tensor().set_names({"Concat"});
auto result = std::make_shared<ngraph::opset8::Result>(concat);
result->set_friendly_name("Result");
auto fn_ptr = std::make_shared<ngraph::Function>(ngraph::ResultVector{ result },
ngraph::ParameterVector{ param1 });
fn_ptr->set_friendly_name("SplitConcat");
return fn_ptr;
}
} // namespace subgraph
} // namespace builder

View File

@ -4,24 +4,22 @@
#pragma once
#include <vector>
#include <random>
#include <chrono>
#include <random>
#include <vector>
#include <ngraph/type/element_type.hpp>
#include "ngraph_helpers.hpp"
#include "openvino/core/type/element_type_traits.hpp"
namespace NGraphFunctions {
namespace Utils {
template<ngraph::element::Type_t dType>
std::vector<typename ngraph::helpers::nGraphTypesTrait<dType>::value_type> inline
generateVector(size_t vec_len,
typename ngraph::helpers::nGraphTypesTrait<dType>::value_type upTo = 10,
typename ngraph::helpers::nGraphTypesTrait<dType>::value_type startFrom = 1,
int32_t seed = 1) {
using dataType = typename ngraph::helpers::nGraphTypesTrait<dType>::value_type;
template <ov::element::Type_t dType>
std::vector<typename ov::element_type_traits<dType>::value_type> inline generateVector(
size_t vec_len,
typename ov::element_type_traits<dType>::value_type upTo = 10,
typename ov::element_type_traits<dType>::value_type startFrom = 1,
int32_t seed = 1) {
using dataType = typename ov::element_type_traits<dType>::value_type;
std::vector<dataType> res(vec_len);
std::mt19937 gen(seed);
@ -54,13 +52,12 @@ generateVector(size_t vec_len,
}
}
template<>
std::vector<ngraph::float16> inline
generateVector<ngraph::element::Type_t::f16>(size_t vec_len,
ngraph::float16 upTo,
ngraph::float16 startFrom,
int32_t seed) {
std::vector<ngraph::float16> res(vec_len);
template <>
std::vector<ov::float16> inline generateVector<ov::element::Type_t::f16>(size_t vec_len,
ov::float16 upTo,
ov::float16 startFrom,
int32_t seed) {
std::vector<ov::float16> res(vec_len);
std::mt19937 gen(seed);
// choose values within this range to avoid type overrun (e.g. in case of I8 precision)
std::uniform_real_distribution<float> dist(startFrom, upTo);
@ -68,36 +65,36 @@ generateVector<ngraph::element::Type_t::f16>(size_t vec_len,
res[0] = startFrom;
res[vec_len - 1] = upTo;
for (size_t i = 1; i < vec_len - 1; i++) {
res[i] = ngraph::float16(dist(gen));
res[i] = ov::float16(dist(gen));
}
return res;
}
template<>
std::vector<ngraph::bfloat16> inline
generateVector<ngraph::element::Type_t::bf16>(size_t vec_len,
ngraph::bfloat16 upTo,
ngraph::bfloat16 startFrom,
int32_t seed) {
std::vector<ngraph::bfloat16> res(vec_len);
template <>
std::vector<ov::bfloat16> inline generateVector<ov::element::Type_t::bf16>(size_t vec_len,
ov::bfloat16 upTo,
ov::bfloat16 startFrom,
int32_t seed) {
std::vector<ov::bfloat16> res(vec_len);
std::mt19937 gen(seed);
// choose values within this range to avoid type overrun (e.g. in case of I8 precision)
std::uniform_real_distribution<float> dist(startFrom, upTo);;
std::uniform_real_distribution<float> dist(startFrom, upTo);
;
// explicitly include the data range borders to avoid missing the corner values during data generation
res[0] = startFrom;
res[vec_len - 1] = upTo;
for (size_t i = 1; i < vec_len - 1; i++) {
res[i] = ngraph::bfloat16(dist(gen));
res[i] = ov::bfloat16(dist(gen));
}
return res;
}
template<typename fromType, typename toType>
std::vector<toType> castVector(const std::vector<fromType> &vec) {
template <typename fromType, typename toType>
std::vector<toType> castVector(const std::vector<fromType>& vec) {
std::vector<toType> resVec;
resVec.reserve(vec.size());
for (const auto &el : vec) {
for (const auto& el : vec) {
resVec.push_back(static_cast<toType>(el));
}
return resVec;

View File

@ -4,208 +4,92 @@
#pragma once
#ifdef _WIN32
#ifndef NOMINMAX
#define NOMINMAX
#endif
# ifndef NOMINMAX
# define NOMINMAX
# endif
#endif
#include <vector>
#include <memory>
#include <ngraph/opsets/opset1.hpp>
#include <ngraph/runtime/tensor.hpp>
#include <vector>
#include "common_test_utils/test_enums.hpp"
namespace ngraph {
namespace helpers {
template<ngraph::element::Type_t type>
struct nGraphTypesTrait {
};
template<>
struct nGraphTypesTrait<ngraph::element::Type_t::boolean> {
using value_type = bool;
};
template<>
struct nGraphTypesTrait<ngraph::element::Type_t::f64> {
using value_type = double;
};
template<>
struct nGraphTypesTrait<ngraph::element::Type_t::f32> {
using value_type = float;
};
template<>
struct nGraphTypesTrait<ngraph::element::Type_t::f16> {
using value_type = ngraph::float16;
};
template<>
struct nGraphTypesTrait<ngraph::element::Type_t::bf16> {
using value_type = ngraph::bfloat16;
};
template<>
struct nGraphTypesTrait<ngraph::element::Type_t::i8> {
using value_type = int8_t;
};
template<>
struct nGraphTypesTrait<ngraph::element::Type_t::i16> {
using value_type = int16_t;
};
template<>
struct nGraphTypesTrait<ngraph::element::Type_t::i32> {
using value_type = int32_t;
};
template<>
struct nGraphTypesTrait<ngraph::element::Type_t::i64> {
using value_type = int64_t;
};
template<>
struct nGraphTypesTrait<ngraph::element::Type_t::u8> {
using value_type = uint8_t;
};
template<>
struct nGraphTypesTrait<ngraph::element::Type_t::u16> {
using value_type = uint16_t;
};
template<>
struct nGraphTypesTrait<ngraph::element::Type_t::u32> {
using value_type = uint32_t;
};
template<>
struct nGraphTypesTrait<ngraph::element::Type_t::u64> {
using value_type = uint64_t;
};
enum PoolingTypes {
MAX,
AVG
};
enum ROIPoolingTypes {
ROI_MAX,
ROI_BILINEAR
};
enum ActivationTypes {
None,
Sigmoid,
Tanh,
Relu,
LeakyRelu,
Exp,
Log,
Sign,
Abs,
Gelu,
Clamp,
Negative,
Acos,
Acosh,
Asin,
Asinh,
Atan,
Atanh,
Cos,
Cosh,
Floor,
Sin,
Sinh,
Sqrt,
Tan,
Elu,
Erf,
HardSigmoid,
Selu,
Ceiling,
PReLu,
Mish,
HSwish,
SoftPlus,
Swish,
HSigmoid,
RoundHalfToEven,
RoundHalfAwayFromZero,
GeluErf,
GeluTanh,
SoftSign
};
enum EltwiseTypes {
ADD,
MULTIPLY,
SUBTRACT,
DIVIDE,
SQUARED_DIFF,
POWER,
FLOOR_MOD,
MOD,
ERF
};
enum ComparisonTypes {
EQUAL,
NOT_EQUAL,
IS_FINITE,
IS_INF,
IS_NAN,
LESS,
LESS_EQUAL,
GREATER,
GREATER_EQUAL
};
enum ConversionTypes {
CONVERT,
CONVERT_LIKE
};
enum LogicalTypes {
LOGICAL_AND,
LOGICAL_OR,
LOGICAL_XOR,
LOGICAL_NOT
};
enum SqueezeOpType {
SQUEEZE,
UNSQUEEZE
};
enum MinMaxOpType {
MINIMUM,
MAXIMUM
};
// clang-format off
using ov::test::utils::PoolingTypes;
using ov::test::utils::ROIPoolingTypes;
using ov::test::utils::ActivationTypes;
using ov::test::utils::ActivationTypes::None;
using ov::test::utils::ActivationTypes::Sigmoid;
using ov::test::utils::ActivationTypes::Tanh;
using ov::test::utils::ActivationTypes::Relu;
using ov::test::utils::ActivationTypes::LeakyRelu;
using ov::test::utils::ActivationTypes::Exp;
using ov::test::utils::ActivationTypes::Log;
using ov::test::utils::ActivationTypes::Sign;
using ov::test::utils::ActivationTypes::Abs;
using ov::test::utils::ActivationTypes::Gelu;
using ov::test::utils::ActivationTypes::Clamp;
using ov::test::utils::ActivationTypes::Negative;
using ov::test::utils::ActivationTypes::Acos;
using ov::test::utils::ActivationTypes::Acosh;
using ov::test::utils::ActivationTypes::Asin;
using ov::test::utils::ActivationTypes::Asinh;
using ov::test::utils::ActivationTypes::Atan;
using ov::test::utils::ActivationTypes::Atanh;
using ov::test::utils::ActivationTypes::Cos;
using ov::test::utils::ActivationTypes::Cosh;
using ov::test::utils::ActivationTypes::Floor;
using ov::test::utils::ActivationTypes::Sin;
using ov::test::utils::ActivationTypes::Sinh;
using ov::test::utils::ActivationTypes::Sqrt;
using ov::test::utils::ActivationTypes::Tan;
using ov::test::utils::ActivationTypes::Elu;
using ov::test::utils::ActivationTypes::Erf;
using ov::test::utils::ActivationTypes::HardSigmoid;
using ov::test::utils::ActivationTypes::Selu;
using ov::test::utils::ActivationTypes::Ceiling;
using ov::test::utils::ActivationTypes::PReLu;
using ov::test::utils::ActivationTypes::Mish;
using ov::test::utils::ActivationTypes::HSwish;
using ov::test::utils::ActivationTypes::SoftPlus;
using ov::test::utils::ActivationTypes::Swish;
using ov::test::utils::ActivationTypes::HSigmoid;
using ov::test::utils::ActivationTypes::RoundHalfToEven;
using ov::test::utils::ActivationTypes::RoundHalfAwayFromZero;
using ov::test::utils::ActivationTypes::GeluErf;
using ov::test::utils::ActivationTypes::GeluTanh;
using ov::test::utils::ActivationTypes::SoftSign;
using ov::test::utils::EltwiseTypes;
using ov::test::utils::EltwiseTypes::ADD;
using ov::test::utils::EltwiseTypes::MULTIPLY;
using ov::test::utils::EltwiseTypes::SUBTRACT;
using ov::test::utils::EltwiseTypes::DIVIDE;
using ov::test::utils::EltwiseTypes::SQUARED_DIFF;
using ov::test::utils::EltwiseTypes::POWER;
using ov::test::utils::EltwiseTypes::FLOOR_MOD;
using ov::test::utils::EltwiseTypes::MOD;
using ov::test::utils::EltwiseTypes::ERF;
using ov::test::utils::ComparisonTypes;
using ov::test::utils::ConversionTypes;
using ov::test::utils::LogicalTypes;
using ov::test::utils::SqueezeOpType;
using ov::test::utils::MinMaxOpType;
enum QuantizationGranularity {
Pertensor,
Perchannel
};
enum ReductionType {
Mean,
Max,
Min,
Prod,
Sum,
LogicalOr,
LogicalAnd,
L1,
L2
};
using ov::test::utils::ReductionType;
using ov::test::utils::DFTOpType;
using ov::test::utils::InputLayerType;
using ov::test::utils::PadMode;
enum class DFTOpType {
FORWARD,
INVERSE
};
enum class InputLayerType {
CONSTANT,
PARAMETER,
};
enum class PadMode {
CONSTANT,
EDGE,
REFLECT,
SYMMETRIC,
};
enum class TensorIteratorBody {
RNN,
@ -214,15 +98,7 @@ enum class TensorIteratorBody {
// CNN todo: implement
};
enum class SequenceTestsMode {
PURE_SEQ,
PURE_SEQ_RAND_SEQ_LEN_CONST,
PURE_SEQ_RAND_SEQ_LEN_PARAM,
CONVERT_TO_TI_MAX_SEQ_LEN_CONST,
CONVERT_TO_TI_MAX_SEQ_LEN_PARAM,
CONVERT_TO_TI_RAND_SEQ_LEN_CONST,
CONVERT_TO_TI_RAND_SEQ_LEN_PARAM,
};
using ov::test::utils::SequenceTestsMode;
enum class MemoryTransformation {
NONE,
@ -230,16 +106,14 @@ enum class MemoryTransformation {
LOW_LATENCY_V2_REGULAR_API,
LOW_LATENCY_V2_ORIGINAL_INIT
};
// clang-format on
std::ostream &operator<<(std::ostream &os, const ReductionType &m);
std::ostream &operator<<(std::ostream &os, const PadMode &m);
bool is_tensor_iterator_exist(const std::shared_ptr<ngraph::Function>& func);
bool is_tensor_iterator_exist(const std::shared_ptr<ngraph::Function> & func);
inline std::string quantizationGranularityToString(const QuantizationGranularity &granularity) {
inline std::string quantizationGranularityToString(const QuantizationGranularity& granularity) {
static std::map<QuantizationGranularity, std::string> names = {
{Pertensor, "Pertensor"},
{Perchannel, "Perchannel"},
{Pertensor, "Pertensor"},
{Perchannel, "Perchannel"},
};
auto i = names.find(granularity);
@ -249,29 +123,28 @@ inline std::string quantizationGranularityToString(const QuantizationGranularity
throw std::runtime_error("Unsupported QuantizationGranularity type");
}
inline std::ostream &operator<<(std::ostream &out, const QuantizationGranularity &granularity) {
inline std::ostream& operator<<(std::ostream& out, const QuantizationGranularity& granularity) {
return out << quantizationGranularityToString(granularity);
}
ngraph::OutputVector convert2OutputVector(const std::vector<std::shared_ptr<ngraph::Node>> &nodes);
ngraph::OutputVector convert2OutputVector(const std::vector<std::shared_ptr<ngraph::Node>>& nodes);
template<class opType>
inline ngraph::NodeVector castOps2Nodes(const std::vector<std::shared_ptr<opType>> &ops) {
template <class opType>
inline ngraph::NodeVector castOps2Nodes(const std::vector<std::shared_ptr<opType>>& ops) {
ngraph::NodeVector nodes;
for (const auto &op : ops) {
for (const auto& op : ops) {
nodes.push_back(std::dynamic_pointer_cast<ngraph::Node>(op));
}
return nodes;
}
std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>>
interpreterFunction(const std::shared_ptr<Function> &function,
const std::vector<std::vector<std::uint8_t>> &inputs,
const std::vector<ngraph::element::Type> &inputTypes = {});
std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> interpreterFunction(
const std::shared_ptr<Function>& function,
const std::vector<std::vector<std::uint8_t>>& inputs,
const std::vector<ngraph::element::Type>& inputTypes = {});
std::vector<ov::Tensor>
interpretFunction(const std::shared_ptr<Function> &function,
const std::map<std::shared_ptr<ov::Node>, ov::Tensor>& inputs);
std::vector<ov::Tensor> interpretFunction(const std::shared_ptr<Function>& function,
const std::map<std::shared_ptr<ov::Node>, ov::Tensor>& inputs);
//
// This function compares two nGraph functions and requires them to have exactly one output
@ -279,54 +152,30 @@ interpretFunction(const std::shared_ptr<Function> &function,
// Check number of inputs
// Check shapes of each Node
//
void CompareFunctions(const Function &actual, const Function &expected);
void CompareFunctions(const Function& actual, const Function& expected);
std::shared_ptr<Function> foldFunction(const std::shared_ptr<Function>& function,
const std::vector<std::vector<std::uint8_t>>& inputs,
const std::vector<ngraph::element::Type>& inputTypes = {});
std::shared_ptr<Function> foldFunction(const std::shared_ptr<Function> &function,
const std::vector<std::vector<std::uint8_t>> &inputs,
const std::vector<ngraph::element::Type> &inputTypes = {});
std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> getConstData(
const std::shared_ptr<Function>& function);
std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> getConstData(const std::shared_ptr<Function> &function);
std::shared_ptr<ngraph::Node> getNodeSharedPtr(const ngraph::NodeTypeInfo& type_info,
const ngraph::OutputVector& outputVector);
std::shared_ptr<ngraph::Node> getNodeSharedPtr(const ngraph::NodeTypeInfo &type_info,
const ngraph::OutputVector &outputVector);
std::vector<std::uint8_t> convertOutputPrecision(const std::vector<std::uint8_t> &output,
const element::Type_t &fromPrecision,
const element::Type_t &toPrecision,
std::vector<std::uint8_t> convertOutputPrecision(const std::vector<std::uint8_t>& output,
const element::Type_t& fromPrecision,
const element::Type_t& toPrecision,
const size_t elementsCount);
std::ostream& operator<<(std::ostream& os, TensorIteratorBody type);
std::ostream& operator<<(std::ostream& os, ngraph::helpers::InputLayerType type);
std::ostream& operator<<(std::ostream& os, MemoryTransformation type);
void resize_function(std::shared_ptr<ov::Model> function, const std::vector<ov::Shape>& targetInputStaticShapes);
using ov::test::utils::operator<<;
} // namespace helpers
} // namespace ngraph

View File

@ -2,142 +2,171 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <vector>
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "common_test_utils/test_enums.hpp"
#include "openvino/core/node.hpp"
#include "openvino/core/node_output.hpp"
#include "openvino/op/abs.hpp"
#include "openvino/op/acos.hpp"
#include "openvino/op/acosh.hpp"
#include "openvino/op/asin.hpp"
#include "openvino/op/asinh.hpp"
#include "openvino/op/atan.hpp"
#include "openvino/op/atanh.hpp"
#include "openvino/op/ceiling.hpp"
#include "openvino/op/clamp.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/cos.hpp"
#include "openvino/op/cosh.hpp"
#include "openvino/op/elu.hpp"
#include "openvino/op/erf.hpp"
#include "openvino/op/exp.hpp"
#include "openvino/op/floor.hpp"
#include "openvino/op/gelu.hpp"
#include "openvino/op/hard_sigmoid.hpp"
#include "openvino/op/hsigmoid.hpp"
#include "openvino/op/hswish.hpp"
#include "openvino/op/log.hpp"
#include "openvino/op/mish.hpp"
#include "openvino/op/negative.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/prelu.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/round.hpp"
#include "openvino/op/selu.hpp"
#include "openvino/op/sigmoid.hpp"
#include "openvino/op/sign.hpp"
#include "openvino/op/sin.hpp"
#include "openvino/op/sinh.hpp"
#include "openvino/op/softplus.hpp"
#include "openvino/op/softsign.hpp"
#include "openvino/op/sqrt.hpp"
#include "openvino/op/swish.hpp"
#include "openvino/op/tan.hpp"
#include "openvino/op/tanh.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeActivation(const ov::Output<Node>& in,
const element::Type& type,
ov::test::utils::ActivationTypes activationType,
std::vector<size_t> inShape,
std::vector<float> constantsValue) {
switch (activationType) {
case ov::test::utils::ActivationTypes::Sigmoid:
return std::make_shared<ov::op::v0::Sigmoid>(in);
case ov::test::utils::ActivationTypes::Tanh:
return std::make_shared<ov::op::v0::Tanh>(in);
case ov::test::utils::ActivationTypes::Relu:
return std::make_shared<ov::op::v0::Relu>(in);
case ov::test::utils::ActivationTypes::LeakyRelu: {
auto leaky_slope = std::make_shared<ov::op::v0::Constant>(ov::element::f32, inShape, constantsValue);
return std::make_shared<ov::op::v0::PRelu>(in, leaky_slope);
}
case ov::test::utils::ActivationTypes::Exp:
return std::make_shared<ov::op::v0::Exp>(in);
case ov::test::utils::ActivationTypes::Log:
return std::make_shared<ov::op::v0::Log>(in);
case ov::test::utils::ActivationTypes::Sign:
return std::make_shared<ov::op::v0::Sign>(in);
case ov::test::utils::ActivationTypes::Abs:
return std::make_shared<ov::op::v0::Abs>(in);
case ov::test::utils::ActivationTypes::Gelu:
return std::make_shared<ov::op::v0::Gelu>(in);
case ov::test::utils::ActivationTypes::Clamp:
return std::make_shared<ov::op::v0::Clamp>(in, constantsValue[0], constantsValue[1]);
case ov::test::utils::ActivationTypes::Negative:
return std::make_shared<ov::op::v0::Negative>(in);
case ov::test::utils::ActivationTypes::Acos:
return std::make_shared<ov::op::v0::Acos>(in);
case ov::test::utils::ActivationTypes::Acosh:
return std::make_shared<ov::op::v3::Acosh>(in);
case ov::test::utils::ActivationTypes::Asin:
return std::make_shared<ov::op::v0::Asin>(in);
case ov::test::utils::ActivationTypes::Asinh:
return std::make_shared<ov::op::v3::Asinh>(in);
case ov::test::utils::ActivationTypes::Atan:
return std::make_shared<ov::op::v0::Atan>(in);
case ov::test::utils::ActivationTypes::Atanh:
return std::make_shared<ov::op::v3::Atanh>(in);
case ov::test::utils::ActivationTypes::Cos:
return std::make_shared<ov::op::v0::Cos>(in);
case ov::test::utils::ActivationTypes::Cosh:
return std::make_shared<ov::op::v0::Cosh>(in);
case ov::test::utils::ActivationTypes::Floor:
return std::make_shared<ov::op::v0::Floor>(in);
case ov::test::utils::ActivationTypes::Sin:
return std::make_shared<ov::op::v0::Sin>(in);
case ov::test::utils::ActivationTypes::Sinh:
return std::make_shared<ov::op::v0::Sinh>(in);
case ov::test::utils::ActivationTypes::Sqrt:
return std::make_shared<ov::op::v0::Sqrt>(in);
case ov::test::utils::ActivationTypes::Tan:
return std::make_shared<ov::op::v0::Tan>(in);
case ov::test::utils::ActivationTypes::Elu:
return std::make_shared<ov::op::v0::Elu>(in, constantsValue[0]);
case ov::test::utils::ActivationTypes::Erf:
return std::make_shared<ov::op::v0::Erf>(in);
case ov::test::utils::ActivationTypes::HardSigmoid: {
auto hard_sigmoid_alpha = std::make_shared<ov::op::v0::Constant>(type, inShape, constantsValue[0]);
auto hard_sigmoid_beta = std::make_shared<ov::op::v0::Constant>(type, inShape, constantsValue[1]);
return std::make_shared<ov::op::v0::HardSigmoid>(in, hard_sigmoid_alpha, hard_sigmoid_beta);
}
case ov::test::utils::ActivationTypes::Selu: {
auto selu_alpha = std::make_shared<ov::op::v0::Constant>(type, inShape, constantsValue[0]);
auto selu_lambda = std::make_shared<ov::op::v0::Constant>(type, inShape, constantsValue[1]);
return std::make_shared<ov::op::v0::Selu>(in, selu_alpha, selu_lambda);
}
case ov::test::utils::ActivationTypes::Ceiling:
return std::make_shared<ov::op::v0::Ceiling>(in);
case ov::test::utils::ActivationTypes::PReLu: {
auto negative_slope = std::make_shared<ov::op::v0::Constant>(ov::element::f32, inShape, constantsValue);
return std::make_shared<ov::op::v0::PRelu>(in, negative_slope);
}
case ov::test::utils::ActivationTypes::Mish:
return std::make_shared<ov::op::v4::Mish>(in);
case ov::test::utils::ActivationTypes::HSwish:
return std::make_shared<ov::op::v4::HSwish>(in);
case ov::test::utils::ActivationTypes::SoftPlus:
return std::make_shared<ov::op::v4::SoftPlus>(in);
case ov::test::utils::ActivationTypes::Swish: {
auto beta = std::make_shared<ov::op::v0::Constant>(type, inShape, constantsValue[0]);
return std::make_shared<ov::op::v4::Swish>(in, beta);
}
case ov::test::utils::ActivationTypes::HSigmoid:
return std::make_shared<ov::op::v5::HSigmoid>(in);
case ov::test::utils::ActivationTypes::RoundHalfToEven:
return std::make_shared<ov::op::v5::Round>(in, ov::op::v5::Round::RoundMode::HALF_TO_EVEN);
case ov::test::utils::ActivationTypes::RoundHalfAwayFromZero:
return std::make_shared<ov::op::v5::Round>(in, ov::op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO);
case ov::test::utils::ActivationTypes::GeluErf:
return std::make_shared<ov::op::v7::Gelu>(in, ov::op::GeluApproximationMode::ERF);
case ov::test::utils::ActivationTypes::GeluTanh:
return std::make_shared<ov::op::v7::Gelu>(in, ov::op::GeluApproximationMode::TANH);
case ov::test::utils::ActivationTypes::SoftSign:
return std::make_shared<ov::op::v9::SoftSign>(in);
default:
throw std::runtime_error("Can't create layer for this activation type");
}
}
std::shared_ptr<ov::Node> makeActivation(const ov::ParameterVector& parameters,
const element::Type& type,
ov::test::utils::ActivationTypes activationType) {
switch (activationType) {
case ov::test::utils::ActivationTypes::LeakyRelu:
return std::make_shared<ov::op::v0::PRelu>(parameters[0], parameters[1]);
case ov::test::utils::ActivationTypes::HardSigmoid:
return std::make_shared<ov::op::v0::HardSigmoid>(parameters[0], parameters[1], parameters[2]);
case ov::test::utils::ActivationTypes::Selu:
return std::make_shared<ov::op::v0::Selu>(parameters[0], parameters[1], parameters[2]);
case ov::test::utils::ActivationTypes::PReLu:
return std::make_shared<ov::op::v0::PRelu>(parameters[0], parameters[1]);
default:
throw std::runtime_error("It is impossible to create layer for this activation type with input as parameter");
}
}
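For orientation, a minimal usage sketch of the migrated builder; the input shape, the Relu choice, and the helper name below are illustrative assumptions, not code from this change:

#include "common_test_utils/test_enums.hpp"
#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: the trailing shape/constant arguments are only consumed by
// parameterized activations (PRelu, Clamp, Selu, ...), so Relu passes empty vectors.
std::shared_ptr<ov::Node> makeReluSketch() {
    auto in = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 16, 16});
    return ngraph::builder::makeActivation(in, ov::element::f32, ov::test::utils::ActivationTypes::Relu, {}, {});
}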

View File

@ -2,12 +2,12 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "ov_ops/augru_cell.hpp"
#include "ov_ops/augru_sequence.hpp"
namespace ngraph {
@ -18,16 +18,16 @@ namespace builder {
* 0 1 2 3
* X init_hidden_state attention seq_length
* or,
 * 0                1                  2
* X init_hidden_state attention
*
*/
std::shared_ptr<ov::Node> makeAUGRU(const OutputVector& in,
const std::vector<ov::Shape>& constants,
std::size_t hidden_size,
bool make_sequence,
ov::op::RecurrentSequenceDirection direction,
ov::test::utils::SequenceTestsMode mode) {
std::vector<float> empty;
auto W = ngraph::builder::makeConstant(in[0].get_element_type(), constants[0], empty, true);
W->set_friendly_name("augru_w");
@ -43,33 +43,46 @@ std::shared_ptr<ov::Node> makeAUGRU(const OutputVector& in,
} else {
std::shared_ptr<Node> seq_lengths;
switch (mode) {
case ov::test::utils::SequenceTestsMode::PURE_SEQ:
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST: {
std::vector<float> lengths(in[0].get_partial_shape()[0].get_min_length(),
in[0].get_partial_shape()[1].get_min_length());
seq_lengths = ngraph::builder::makeConstant(element::i64, constants[3], lengths, false);
break;
}
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST:
case ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST: {
for (size_t i = 0; i <= in[0].get_shape().at(0); ++i) {
std::vector<float> lengths;
seq_lengths = ngraph::builder::makeConstant(element::i64,
constants[3],
lengths,
true,
static_cast<float>(in[0].get_shape()[1]),
0.f);
}
break;
}
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM:
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM:
case ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM: {
            // Seq_lengths must be provided as a Parameter node for these modes
if (in.size() < 4)
throw std::runtime_error("Incorrect number of inputs for creation of Sequence operation");
seq_lengths = in.at(3).get_node_shared_ptr();
break;
}
default:
throw std::runtime_error("Incorrect mode for creation of Sequence operation");
}
return std::make_shared<ov::op::internal::AUGRUSequence>(in[0],
in[1],
seq_lengths,
W,
R,
B,
in[2],
hidden_size);
}
}
}

View File

@ -2,29 +2,32 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/batch_norm.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeBatchNormInference(const ov::Output<Node>& data, double epsilon) {
auto ngPrc = data.get_element_type();
    size_t C = data.get_shape().at(1);
bool random = true;
std::vector<float> values(C);
auto gamma = ngraph::builder::makeConstant(ngPrc, ov::Shape{C}, values, random, 1.f, 0.f);
auto beta = ngraph::builder::makeConstant(ngPrc, ov::Shape{C}, values, random, 1.f, 0.f);
auto mean = ngraph::builder::makeConstant(ngPrc, ov::Shape{C}, values, random, 1.f, 0.f);
// Fill the vector for variance with positive values
std::default_random_engine gen;
std::uniform_real_distribution<float> dis(0.0, 10.0);
std::generate(values.begin(), values.end(), [&dis, &gen]() {
return dis(gen);
});
auto variance = ngraph::builder::makeConstant(ngPrc, ov::Shape{C}, values, !random);
return std::make_shared<ov::op::v5::BatchNormInference>(data, gamma, beta, mean, variance, epsilon);
}
} // namespace builder
} // namespace ngraph
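A usage sketch under an assumed NCHW input; the builder reads the channel count from dimension 1, so the data shape must be static. The helper name is hypothetical:

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: gamma/beta/mean are generated randomly inside the helper,
// and variance is filled with positive values as shown above.
std::shared_ptr<ov::Node> makeBatchNormSketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 224, 224});
    return ngraph::builder::makeBatchNormInference(data, 1e-5);
}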

View File

@ -2,24 +2,24 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/batch_to_space.hpp"
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeBatchToSpace(const ov::Output<Node>& in,
const element::Type& type,
const std::vector<int64_t>& blockShape,
const std::vector<int64_t>& cropsBegin,
const std::vector<int64_t>& cropsEnd) {
ov::Shape constShape = {in.get_partial_shape().size()};
auto blockShapeNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, blockShape.data());
auto cropsBeginNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, cropsBegin.data());
auto cropsEndNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, cropsEnd.data());
auto btsNode = std::make_shared<ov::op::v1::BatchToSpace>(in, blockShapeNode, cropsBeginNode, cropsEndNode);
return btsNode;
}
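A usage sketch with assumed shapes: the batch of 4 must be divisible by the product of the block shape (here 2x2), which folds it back into the spatial dimensions. Helper name and values are illustrative:

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: {4,1,2,2} -> BatchToSpace(block {1,1,2,2}) -> {1,1,4,4}, zero crops.
std::shared_ptr<ov::Node> makeBatchToSpaceSketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4, 1, 2, 2});
    return ngraph::builder::makeBatchToSpace(data, ov::element::f32, {1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0});
}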

View File

@ -2,8 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/binary_convolution.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/data_utils.hpp"
@ -11,22 +13,22 @@
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeBinaryConvolution(const Output<Node>& in,
const std::vector<size_t>& filterSize,
const std::vector<size_t>& strides,
const std::vector<ptrdiff_t>& padsBegin,
const std::vector<ptrdiff_t>& padsEnd,
const std::vector<size_t>& dilations,
const op::PadType& autoPad,
size_t numOutChannels,
float padValue,
const std::vector<int8_t>& filterWeihgts) {
auto shape = in.get_shape();
std::vector<size_t> filterWeightsShape = {numOutChannels, shape[1]};
filterWeightsShape.insert(filterWeightsShape.end(), filterSize.begin(), filterSize.end());
auto filterWeightsNode = std::make_shared<ov::op::v0::Constant>(element::u1, filterWeightsShape);
const size_t byteNum = (ov::shape_size(filterWeightsShape) + 7) / 8;
int8_t* buffer = const_cast<int8_t*>(filterWeightsNode->get_data_ptr<int8_t>());
if (filterWeihgts.size() == 0) {
std::vector<int8_t> weihgts = NGraphFunctions::Utils::generateVector<element::Type_t::i8>(byteNum);
for (size_t i = 0; i < byteNum; i++)
@ -35,8 +37,16 @@ std::shared_ptr<Node> makeBinaryConvolution(const Output<Node> &in,
for (size_t i = 0; i < byteNum; i++)
buffer[i] = filterWeihgts[i];
}
auto conv = std::make_shared<ov::op::v1::BinaryConvolution>(
in,
filterWeightsNode,
strides,
padsBegin,
padsEnd,
dilations,
ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT,
padValue,
autoPad);
return conv;
}
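A usage sketch with assumed attributes; passing an empty weights vector takes the random u1 payload path shown above. Helper name and shapes are illustrative:

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: 3x3 binary convolution, 8 output channels, explicit zero padding.
std::shared_ptr<ov::Node> makeBinaryConvSketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 16, 16});
    return ngraph::builder::makeBinaryConvolution(data, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
                                                  ov::op::PadType::EXPLICIT, 8, 0.f, {});
}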

View File

@ -2,27 +2,24 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/broadcast.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeBroadcast(const ov::Output<Node>& in,
const ov::Output<Node>& target_shape,
const ov::op::BroadcastType& mode,
const ov::AxisSet& axisSet) {
if (mode == ov::op::BroadcastType::NONE) {
auto axisSetConst = ov::op::v0::Constant::create(ov::element::i64, {axisSet.size()}, axisSet.to_vector());
return std::make_shared<ov::op::v3::Broadcast>(in, target_shape, axisSetConst, mode);
} else { // numpy/bidirectional modes
return std::make_shared<ov::op::v3::Broadcast>(in, target_shape, mode);
}
}
} // namespace builder
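A usage sketch; the target shape and mode are assumptions. NUMPY/BIDIRECTIONAL take the two-input Broadcast path, while NONE additionally materializes the axis set as a constant:

#include "ngraph_functions/builders.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: broadcast {1,3} to {2,3} with numpy semantics.
std::shared_ptr<ov::Node> makeBroadcastSketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3});
    auto target = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {2, 3});
    return ngraph::builder::makeBroadcast(data, target, ov::op::BroadcastType::NUMPY, {});
}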

View File

@ -3,37 +3,38 @@
//
#include <memory>
#include "common_test_utils/test_enums.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeComparison(const ov::Output<Node>& in0,
const ov::Output<Node>& in1,
ov::test::utils::ComparisonTypes comparisonType) {
switch (comparisonType) {
case ov::test::utils::ComparisonTypes::EQUAL:
return std::make_shared<ov::op::v1::Equal>(in0, in1);
case ov::test::utils::ComparisonTypes::NOT_EQUAL:
return std::make_shared<ov::op::v1::NotEqual>(in0, in1);
case ov::test::utils::ComparisonTypes::GREATER:
return std::make_shared<ov::op::v1::Greater>(in0, in1);
case ov::test::utils::ComparisonTypes::GREATER_EQUAL:
return std::make_shared<ov::op::v1::GreaterEqual>(in0, in1);
case ov::test::utils::ComparisonTypes::IS_FINITE:
return std::make_shared<ov::op::v10::IsFinite>(in0);
case ov::test::utils::ComparisonTypes::IS_INF:
return std::make_shared<ov::op::v10::IsInf>(in0);
case ov::test::utils::ComparisonTypes::IS_NAN:
return std::make_shared<ov::op::v10::IsNaN>(in0);
case ov::test::utils::ComparisonTypes::LESS:
return std::make_shared<ov::op::v1::Less>(in0, in1);
case ov::test::utils::ComparisonTypes::LESS_EQUAL:
return std::make_shared<ov::op::v1::LessEqual>(in0, in1);
default: {
throw std::runtime_error("Incorrect type of Comparison operation");
}
}
}
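A usage sketch; shapes and the EQUAL choice are assumptions. Note that the unary predicates (IS_FINITE, IS_INF, IS_NAN) ignore the second input:

#include "common_test_utils/test_enums.hpp"
#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: element-wise Equal over two f32 inputs.
std::shared_ptr<ov::Node> makeComparisonSketch() {
    auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4});
    auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4});
    return ngraph::builder::makeComparison(a, b, ov::test::utils::ComparisonTypes::EQUAL);
}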

View File

@ -2,16 +2,18 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/concat.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeConcat(const std::vector<ov::Output<Node>>& in, const int& axis) {
return std::make_shared<ov::op::v0::Concat>(in, axis);
}
} // namespace builder
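A usage sketch with assumed shapes, concatenating two inputs along axis 1:

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: {1,8} + {1,8} -> {1,16}.
std::shared_ptr<ov::Node> makeConcatSketch() {
    auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8});
    auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8});
    return ngraph::builder::makeConcat(ov::OutputVector{a, b}, 1);
}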

View File

@ -3,20 +3,24 @@
//
#include <memory>
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "common_test_utils/test_enums.hpp"
#include "openvino/core/node.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/convert_like.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeConversion(const ov::Output<Node>& in,
const element::Type& output_type,
const ov::test::utils::ConversionTypes& conversionType) {
if (conversionType == ov::test::utils::ConversionTypes::CONVERT) {
return std::make_shared<ov::op::v0::Convert>(in, output_type);
} else if (conversionType == ov::test::utils::ConversionTypes::CONVERT_LIKE) {
const auto like = std::make_shared<ov::op::v0::Constant>(output_type, ov::Shape{1});
return std::make_shared<ov::op::v1::ConvertLike>(in, like);
} else {
throw std::runtime_error("Incorrect type of Conversion operation");
}
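A usage sketch; the precisions are assumptions. CONVERT_LIKE would instead synthesize a scalar constant of the target type and attach a ConvertLike node:

#include "common_test_utils/test_enums.hpp"
#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: f32 -> i32 via Convert.
std::shared_ptr<ov::Node> makeConversionSketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4});
    return ngraph::builder::makeConversion(data, ov::element::i32, ov::test::utils::ConversionTypes::CONVERT);
}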

View File

@ -2,67 +2,70 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/convolution.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "openvino/op/add.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeConvolution(const ov::Output<Node>& in,
const element::Type& type,
const std::vector<size_t>& filterSize,
const std::vector<size_t>& strides,
const std::vector<ptrdiff_t>& padsBegin,
const std::vector<ptrdiff_t>& padsEnd,
const std::vector<size_t>& dilations,
const op::PadType& autoPad,
size_t numOutChannels,
bool addBiases,
const std::vector<float>& filterWeights,
const std::vector<float>& biasesWeights) {
bool randomFilterWeights = filterWeights.empty();
auto shape = in.get_partial_shape();
std::vector<size_t> filterWeightsShape = {numOutChannels, static_cast<size_t>(shape[1].get_length())};
filterWeightsShape.insert(filterWeightsShape.end(), filterSize.begin(), filterSize.end());
auto filterWeightsNode = makeConstant(type, filterWeightsShape, filterWeights, randomFilterWeights);
auto conv = std::make_shared<ov::op::v1::Convolution>(in,
filterWeightsNode,
strides,
padsBegin,
padsEnd,
dilations,
autoPad);
if (addBiases) {
bool randomBiases = biasesWeights.empty();
auto biasesWeightsNode = makeConstant(type, {1, numOutChannels, 1, 1}, biasesWeights, randomBiases);
auto add = std::make_shared<ov::op::v1::Add>(conv, biasesWeightsNode);
return add;
} else {
return conv;
}
}
std::shared_ptr<Node> makeConvolution(const ov::Output<Node>& in_data,
const ov::Output<Node>& in_weights,
const element::Type& type,
const std::vector<size_t>& filterSize,
const std::vector<size_t>& strides,
const std::vector<ptrdiff_t>& padsBegin,
const std::vector<ptrdiff_t>& padsEnd,
const std::vector<size_t>& dilations,
const op::PadType& autoPad,
size_t numOutChannels,
bool addBiases,
const std::vector<float>& biasesWeights) {
auto shape = in_data.get_partial_shape();
auto conv =
std::make_shared<ov::op::v1::Convolution>(in_data, in_weights, strides, padsBegin, padsEnd, dilations, autoPad);
if (addBiases) {
bool randomBiases = biasesWeights.empty();
auto biasesWeightsNode = makeConstant(type, {1, numOutChannels, 1, 1}, biasesWeights, randomBiases);
auto add = std::make_shared<ov::op::v1::Add>(conv, biasesWeightsNode);
return add;
} else {
return conv;

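A usage sketch of the first overload; shapes, attributes, and the helper name are illustrative assumptions. Empty weight vectors make the builder generate random constants, and addBiases=true appends the Add shown above:

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: 3x3 convolution, 8 output channels, random weights, fused bias.
std::shared_ptr<ov::Node> makeConvolutionSketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 16, 16});
    return ngraph::builder::makeConvolution(data, ov::element::f32, {3, 3}, {1, 1}, {0, 0}, {0, 0},
                                            {1, 1}, ov::op::PadType::EXPLICIT, 8, true, {}, {});
}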
View File

@ -2,94 +2,133 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/convolution.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeConvolutionBackpropData(const ov::Output<Node>& in,
const element::Type& type,
const std::vector<size_t>& filterSize,
const std::vector<size_t>& strides,
const std::vector<ptrdiff_t>& padsBegin,
const std::vector<ptrdiff_t>& padsEnd,
const std::vector<size_t>& dilations,
const op::PadType& autoPad,
size_t numOutChannels,
bool addBiases,
const std::vector<ptrdiff_t>& outputPadding,
const std::vector<float>& filterWeights,
const std::vector<float>& biasesWeights) {
bool randomFilterWeights = filterWeights.empty();
auto shape = in.get_partial_shape();
std::vector<size_t> filterWeightsShape = {static_cast<size_t>(shape[1].get_length()), numOutChannels};
filterWeightsShape.insert(filterWeightsShape.end(), filterSize.begin(), filterSize.end());
auto filterWeightsNode = makeConstant(type, filterWeightsShape, filterWeights, randomFilterWeights);
return makeConvolutionBackpropData(in,
filterWeightsNode,
type,
strides,
padsBegin,
padsEnd,
dilations,
autoPad,
addBiases,
outputPadding,
biasesWeights);
}
std::shared_ptr<Node> makeConvolutionBackpropData(const ov::Output<Node>& in,
const ov::Output<Node>& weights,
const element::Type& type,
const std::vector<size_t>& strides,
const std::vector<ptrdiff_t>& padsBegin,
const std::vector<ptrdiff_t>& padsEnd,
const std::vector<size_t>& dilations,
const op::PadType& autoPad,
bool addBiases,
const std::vector<ptrdiff_t>& outputPadding,
const std::vector<float>& biasesWeights) {
auto deconv = std::make_shared<ov::op::v1::ConvolutionBackpropData>(in,
weights,
strides,
padsBegin,
padsEnd,
dilations,
autoPad);
if (!outputPadding.empty()) {
deconv = std::make_shared<ov::op::v1::ConvolutionBackpropData>(in,
weights,
strides,
padsBegin,
padsEnd,
dilations,
autoPad,
outputPadding);
}
if (addBiases) {
bool randomBiases = biasesWeights.empty();
auto biasesWeightsNode = makeConstant(type, {}, biasesWeights, randomBiases);
auto add = std::make_shared<ov::op::v1::Add>(deconv, biasesWeightsNode);
return add;
} else {
return deconv;
}
}
std::shared_ptr<Node> makeConvolutionBackpropData(const ov::Output<Node>& in,
const ov::Output<Node>& outputShape,
const element::Type& type,
const std::vector<size_t>& filterSize,
const std::vector<size_t>& strides,
const std::vector<ptrdiff_t>& padsBegin,
const std::vector<ptrdiff_t>& padsEnd,
const std::vector<size_t>& dilations,
const op::PadType& autoPad,
size_t numOutChannels,
bool addBiases,
const std::vector<ptrdiff_t>& outputPadding,
const std::vector<float>& filterWeights,
const std::vector<float>& biasesWeights) {
bool randomFilterWeights = filterWeights.empty();
auto shape = in.get_partial_shape();
std::vector<size_t> filterWeightsShape = {static_cast<size_t>(shape[1].get_length()), numOutChannels};
filterWeightsShape.insert(filterWeightsShape.end(), filterSize.begin(), filterSize.end());
auto filterWeightsNode = makeConstant(type, filterWeightsShape, filterWeights, randomFilterWeights);
auto deconv = std::make_shared<ov::op::v1::ConvolutionBackpropData>(in,
filterWeightsNode,
outputShape,
strides,
padsBegin,
padsEnd,
dilations,
autoPad);
if (!outputPadding.empty()) {
deconv = std::make_shared<ov::op::v1::ConvolutionBackpropData>(in,
filterWeightsNode,
outputShape,
strides,
padsBegin,
padsEnd,
dilations,
autoPad,
outputPadding);
}
if (addBiases) {
bool randomBiases = biasesWeights.empty();
auto biasesWeightsNode = makeConstant(type, {}, biasesWeights, randomBiases);
auto add = std::make_shared<ov::op::v1::Add>(deconv, biasesWeightsNode);
return add;
} else {
return deconv;

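A usage sketch of the first deconvolution overload; all values are illustrative assumptions. Output padding is applied only when a non-empty vector is passed, matching the branch above:

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: 3x3 transposed convolution, 3 output channels, no bias.
std::shared_ptr<ov::Node> makeDeconvSketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8, 16, 16});
    return ngraph::builder::makeConvolutionBackpropData(data, ov::element::f32, {3, 3}, {1, 1}, {0, 0},
                                                        {0, 0}, {1, 1}, ov::op::PadType::EXPLICIT, 3,
                                                        false, {}, {}, {});
}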
View File

@ -2,17 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/ctc_greedy_decoder.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeCTCGreedyDecoder(const ov::Output<Node>& inputData, const bool mergeRepeated) {
auto inputDataShape = inputData.get_shape();
size_t T = inputDataShape[0];
size_t B = inputDataShape[1];
@ -30,7 +30,8 @@ std::shared_ptr<ngraph::Node> makeCTCGreedyDecoder(
auto sequenceMaskNode = makeConstant(inputData.get_element_type(), {T, B}, sequenceMaskData);
auto CTCGreedyDecoderNode =
std::make_shared<ov::op::v0::CTCGreedyDecoder>(inputData, sequenceMaskNode, mergeRepeated);
return CTCGreedyDecoderNode;
}
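A usage sketch; the shape is an assumption laid out as {T, B, C} to match the T/B extraction above, and the sequence mask is generated inside the helper:

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: T=10 timesteps, batch 2, 5 classes, merge repeated symbols.
std::shared_ptr<ov::Node> makeCTCGreedyDecoderSketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{10, 2, 5});
    return ngraph::builder::makeCTCGreedyDecoder(data, true);
}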

View File

@ -2,20 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/ctc_greedy_decoder_seq_len.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeCTCGreedyDecoderSeqLen(const ov::Output<Node>& inputData,
const ov::Output<Node>& sequenceLengthData,
int32_t blankIndex,
bool mergeRepeated,
const element::Type& idxPrecision) {
const auto blankIndexNode = [&] {
if (idxPrecision == element::i32) {
const auto blankIdxDataI32 = std::vector<int32_t>{blankIndex};
@ -27,19 +28,18 @@ std::shared_ptr<ngraph::Node> makeCTCGreedyDecoderSeqLen(
throw std::logic_error("Unsupported index precision");
}();
return std::make_shared<ov::op::v6::CTCGreedyDecoderSeqLen>(inputData,
sequenceLengthData,
blankIndexNode,
mergeRepeated,
idxPrecision,
idxPrecision);
}
std::shared_ptr<ov::Node> makeCTCGreedyDecoderSeqLen(const ov::Output<Node>& inputData,
int32_t blankIndex,
bool mergeRepeated,
const element::Type& idxPrecision) {
const auto sequenceLengthData = [&] {
const size_t N = inputData.get_shape().at(0);
const size_t T = inputData.get_shape().at(1);
@ -54,11 +54,7 @@ std::shared_ptr<ngraph::Node> makeCTCGreedyDecoderSeqLen(
throw std::logic_error("Unsupported index precision");
}();
return makeCTCGreedyDecoderSeqLen(inputData, sequenceLengthData, blankIndex, mergeRepeated, idxPrecision);
}
} // namespace builder
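A usage sketch of the two-argument overload, which synthesizes the sequence-length input; the {N, T, C} shape and blank index are assumptions:

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: batch 2, T=10, 5 classes; blank index = C - 1, i32 indices.
std::shared_ptr<ov::Node> makeCTCGreedyDecoderSeqLenSketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 10, 5});
    return ngraph::builder::makeCTCGreedyDecoderSeqLen(data, 4, true, ov::element::i32);
}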

View File

@ -2,25 +2,26 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/ctc_loss.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeCTCLoss(const ov::Output<Node>& logitsNode,
std::vector<int>& logitsLength,
std::vector<std::vector<int>>& labels,
std::vector<int>& labelsLength,
int blankIndex,
const element::Type& fType,
const element::Type& iType,
const bool preprocessCollapseRepeated,
const bool ctcMergeRepeated,
const bool unique) {
auto logitsShape = logitsNode.get_shape();
size_t N = logitsShape[0];
size_t T = logitsShape[1];
@ -34,8 +35,14 @@ std::shared_ptr<Node> makeCTCLoss(
auto labelsLengthNode = makeConstant(iType, {N}, labelsLength);
auto blankIndexNode = makeConstant<int>(iType, {}, {blankIndex});
auto ctcLossNode = std::make_shared<ov::op::v4::CTCLoss>(logitsNode,
logitsLengthNode,
labelsNode,
labelsLengthNode,
blankIndexNode,
preprocessCollapseRepeated,
ctcMergeRepeated,
unique);
return ctcLossNode;
}
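A usage sketch; the shapes, label padding to T entries per batch item, and actual label lengths are assumptions on my part:

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: N=2, T=10, C=5, blank index = C - 1. The length/label vectors
// are taken by non-const reference, so they must be lvalues.
std::shared_ptr<ov::Node> makeCTCLossSketch() {
    auto logits = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 10, 5});
    std::vector<int> logitsLength{10, 10};
    std::vector<std::vector<int>> labels{{0, 1, 2, 3, 0, 1, 2, 3, 0, 1},
                                         {1, 2, 3, 0, 1, 2, 3, 0, 1, 2}};
    std::vector<int> labelsLength{5, 5};
    return ngraph::builder::makeCTCLoss(logits, logitsLength, labels, labelsLength, 4,
                                        ov::element::f32, ov::element::i32, false, false, false);
}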

View File

@ -2,16 +2,18 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/cum_sum.hpp"
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeCumSum(const ov::Output<Node>& in,
const ov::Output<Node>& axis,
bool exclusive,
bool reverse) {
return std::make_shared<ov::op::v0::CumSum>(in, axis, exclusive, reverse);
}
} // namespace builder
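A usage sketch; the axis is supplied as a scalar constant, and the shape is an assumption:

#include "ngraph_functions/builders.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: cumulative sum along axis 1, inclusive, forward direction.
std::shared_ptr<ov::Node> makeCumSumSketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 5});
    auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {1});
    return ngraph::builder::makeCumSum(data, axis, false, false);
}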

View File

@ -7,10 +7,10 @@
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeDepthToSpace(const ov::Output<Node>& in,
ov::op::v0::DepthToSpace::DepthToSpaceMode mode,
size_t blockSize) {
auto dtsNode = std::make_shared<ov::op::v0::DepthToSpace>(in, mode, blockSize);
return dtsNode;
}
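A usage sketch; the channel count must be divisible by blockSize squared, and the shape below is an assumption:

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: {1,8,2,2} -> DepthToSpace(block 2, BLOCKS_FIRST) -> {1,2,4,4}.
std::shared_ptr<ov::Node> makeDepthToSpaceSketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8, 2, 2});
    return ngraph::builder::makeDepthToSpace(data, ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2);
}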

View File

@ -7,12 +7,17 @@
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeDetectionOutput(const ov::OutputVector& inputs,
const ov::op::v0::DetectionOutput::Attributes& attrs) {
if (inputs.size() == 3)
return std::make_shared<ov::op::v0::DetectionOutput>(inputs[0], inputs[1], inputs[2], attrs);
else if (inputs.size() == 5)
return std::make_shared<ov::op::v0::DetectionOutput>(inputs[0],
inputs[1],
inputs[2],
inputs[3],
inputs[4],
attrs);
else
throw std::runtime_error("DetectionOutput layer supports only 3 or 5 inputs");
}

View File

@ -2,39 +2,45 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/dft.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "openvino/op/idft.hpp"
namespace ngraph {
namespace builder {
namespace {
template <typename... Args>
std::shared_ptr<ov::Node> CallDftCtorWithArgs(const ov::test::utils::DFTOpType opType, Args&&... args) {
switch (opType) {
case ov::test::utils::DFTOpType::FORWARD:
return std::make_shared<ov::op::v7::DFT>(std::forward<Args>(args)...);
case ov::test::utils::DFTOpType::INVERSE:
return std::make_shared<ov::op::v7::IDFT>(std::forward<Args>(args)...);
default:
throw std::logic_error("Unsupported operation type");
}
}
} // namespace
std::shared_ptr<ov::Node> makeDFT(const ov::Output<Node>& dataNode,
const std::vector<int64_t>& axes,
const std::vector<int64_t>& signalSize,
const ov::test::utils::DFTOpType opType) {
auto axesNode =
std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{axes.size()}, axes)->output(0);
if (!signalSize.empty()) {
auto signalSizeNode =
std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{signalSize.size()}, signalSize)
->output(0);
return CallDftCtorWithArgs(opType, dataNode, axesNode, signalSizeNode);
}
return CallDftCtorWithArgs(opType, dataNode, axesNode);
}
} // namespace builder
} // namespace ngraph
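A usage sketch; the trailing dimension of 2 holds the real/imaginary pair expected by DFT, and an empty signal-size vector skips the optional third input, matching the branch above:

#include "common_test_utils/test_enums.hpp"
#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: forward DFT over axis 1 of a complex-valued {2,10} signal.
std::shared_ptr<ov::Node> makeDFTSketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 10, 2});
    return ngraph::builder::makeDFT(data, {1}, {}, ov::test::utils::DFTOpType::FORWARD);
}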

View File

@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/einsum.hpp"
#include <memory>
#include <string>
@ -10,11 +12,10 @@
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeEinsum(const OutputVector& inputs, const std::string& equation) {
std::shared_ptr<ov::Node> einsum = std::make_shared<ov::op::v7::Einsum>(inputs, equation);
return einsum;
}
} // namespace builder
} // namespace ngraph
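A usage sketch; the equation and shapes are assumptions (a plain matrix multiplication expressed in einsum notation):

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: {2,3} x {3,4} -> {2,4}.
std::shared_ptr<ov::Node> makeEinsumSketch() {
    auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3});
    auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{3, 4});
    return ngraph::builder::makeEinsum(ov::OutputVector{a, b}, "ab,bc->ac");
}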

View File

@ -3,38 +3,38 @@
//
#include <memory>
#include "common_test_utils/test_enums.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeEltwise(const ov::Output<Node>& in0,
const ov::Output<Node>& in1,
ov::test::utils::EltwiseTypes eltwiseType) {
switch (eltwiseType) {
case ov::test::utils::EltwiseTypes::ADD:
return std::make_shared<ov::op::v1::Add>(in0, in1);
case ov::test::utils::EltwiseTypes::SUBTRACT:
return std::make_shared<ov::op::v1::Subtract>(in0, in1);
case ov::test::utils::EltwiseTypes::MULTIPLY:
return std::make_shared<ov::op::v1::Multiply>(in0, in1);
case ov::test::utils::EltwiseTypes::DIVIDE:
return std::make_shared<ov::op::v1::Divide>(in0, in1);
case ov::test::utils::EltwiseTypes::SQUARED_DIFF:
return std::make_shared<ov::op::v0::SquaredDifference>(in0, in1);
case ov::test::utils::EltwiseTypes::POWER:
return std::make_shared<ov::op::v1::Power>(in0, in1);
case ov::test::utils::EltwiseTypes::FLOOR_MOD:
return std::make_shared<ov::op::v1::FloorMod>(in0, in1);
case ov::test::utils::EltwiseTypes::MOD:
return std::make_shared<ov::op::v1::Mod>(in0, in1);
case ov::test::utils::EltwiseTypes::ERF:
return std::make_shared<ov::op::v0::Erf>(in0);
default: {
throw std::runtime_error("Incorrect type of Eltwise operation");
}
}
}
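A usage sketch; the ADD choice and shapes are assumptions. Note that ERF is the one unary case in the switch and ignores the second input:

#include "common_test_utils/test_enums.hpp"
#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: element-wise Add over two f32 inputs.
std::shared_ptr<ov::Node> makeEltwiseSketch() {
    auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4});
    auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4});
    return ngraph::builder::makeEltwise(a, b, ov::test::utils::EltwiseTypes::ADD);
}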

View File

@ -2,44 +2,48 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "openvino/op/embeddingbag_offsets_sum.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeEmbeddingBagOffsetsSum(const element::Type& dataType,
const element::Type& indicesType,
const ov::Output<Node>& embTableNode,
const std::vector<size_t>& indices,
const std::vector<size_t>& offsets,
size_t default_index,
bool with_weights,
bool with_default_index) {
std::vector<size_t> i_shape = {indices.size()};
auto indicesNode = std::make_shared<ov::op::v0::Constant>(indicesType, i_shape, indices);
std::vector<size_t> o_shape = {offsets.size()};
auto offsetsNode = std::make_shared<ov::op::v0::Constant>(indicesType, o_shape, offsets);
std::shared_ptr<Node> embBag;
if (with_default_index) {
std::vector<size_t> d_shape = {};
auto defIdxNode = std::make_shared<ov::op::v0::Constant>(indicesType, d_shape, default_index);
if (with_weights) {
auto weightsNode = makeConstant<float>(dataType, {indices.size()}, {}, true);
embBag = std::make_shared<ov::op::v3::EmbeddingBagOffsetsSum>(embTableNode,
indicesNode,
offsetsNode,
defIdxNode,
weightsNode);
} else {
embBag = std::make_shared<ov::op::v3::EmbeddingBagOffsetsSum>(embTableNode,
indicesNode,
offsetsNode,
defIdxNode);
}
} else {
embBag = std::make_shared<ov::op::v3::EmbeddingBagOffsetsSum>(embTableNode, indicesNode, offsetsNode);
}
return embBag;
}
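A usage sketch of the simplest configuration (no weights, no default index); the table shape and index values are assumptions:

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical caller: 10x4 embedding table, two bags defined by offsets {0, 2}.
std::shared_ptr<ov::Node> makeEmbBagOffsetsSketch() {
    auto table = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{10, 4});
    return ngraph::builder::makeEmbeddingBagOffsetsSum(ov::element::f32, ov::element::i32, table,
                                                       {0, 2, 3, 4}, {0, 2}, 0, false, false);
}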

View File

@ -2,36 +2,34 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "openvino/op/embeddingbag_packedsum.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeEmbeddingBagPackedSum(const element::Type& dataType,
const element::Type& indicesType,
const ov::Output<Node>& embTableNode,
const std::vector<std::vector<size_t>>& indices,
bool with_weights) {
std::vector<size_t> i_shape({indices.size(), indices[0].size()});
size_t i_size = ov::shape_size(i_shape);
std::vector<size_t> i_values(i_size);
for (int i = 0; i < indices.size(); i++)
memcpy(i_values.data() + indices[0].size() * i, indices[i].data(), indices[0].size() * sizeof(size_t));
auto indicesNode = std::make_shared<ov::op::v0::Constant>(indicesType, i_shape, i_values);
std::shared_ptr<Node> embBag;
if (with_weights) {
auto weightsNode = makeConstant<float>(dataType, i_shape, {}, true);
embBag = std::make_shared<ov::op::v3::EmbeddingBagPackedSum>(embTableNode, indicesNode, weightsNode);
} else {
embBag = std::make_shared<opset3::EmbeddingBagPackedSum>(
embTableNode, indicesNode);
embBag = std::make_shared<ov::op::v3::EmbeddingBagPackedSum>(embTableNode, indicesNode);
}
return embBag;
}
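
A hedged usage sketch of the packed variant (not part of the diff; buildPackedSumExample and the shapes are illustrative). The nested indices vector is flattened into a single i32 Constant inside the helper:

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: two bags of two indices each, with random per-index weights.
    std::shared_ptr<ov::Node> buildPackedSumExample() {
        auto embTable = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{10, 4});
        return ngraph::builder::makeEmbeddingBagPackedSum(
            ov::element::f32, ov::element::i32, embTable, {{0, 2}, {1, 3}}, true);
    }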


@ -2,46 +2,56 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "openvino/op/embedding_segments_sum.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeEmbeddingSegmentsSum(
const element::Type& dataType,
const element::Type& indicesType,
const ngraph::Output<Node>& embTableNode,
const std::vector<size_t>& indices,
const std::vector<size_t>& segment_ids,
size_t num_segments,
size_t default_index,
bool with_weights,
bool with_default_index) {
std::shared_ptr<Node> makeEmbeddingSegmentsSum(const element::Type& dataType,
const element::Type& indicesType,
const ov::Output<Node>& embTableNode,
const std::vector<size_t>& indices,
const std::vector<size_t>& segment_ids,
size_t num_segments,
size_t default_index,
bool with_weights,
bool with_default_index) {
std::vector<size_t> i_shape = {indices.size()};
auto indicesNode = std::make_shared<ngraph::opset1::Constant>(indicesType, i_shape, indices);
auto indicesNode = std::make_shared<ov::op::v0::Constant>(indicesType, i_shape, indices);
std::vector<size_t> o_shape = {segment_ids.size()};
auto segmentIdNode = std::make_shared<ngraph::opset1::Constant>(indicesType, o_shape, segment_ids);
auto segmentIdNode = std::make_shared<ov::op::v0::Constant>(indicesType, o_shape, segment_ids);
std::vector<size_t> shape_0 = {};
auto segmentNumNode = std::make_shared<ngraph::opset1::Constant>(indicesType, shape_0, num_segments);
auto segmentNumNode = std::make_shared<ov::op::v0::Constant>(indicesType, shape_0, num_segments);
std::shared_ptr<Node> embBag;
if (with_default_index) {
auto defIdxNode = std::make_shared<ngraph::opset1::Constant>(indicesType, shape_0, default_index);
auto defIdxNode = std::make_shared<ov::op::v0::Constant>(indicesType, shape_0, default_index);
if (with_weights) {
auto weightsNode = makeConstant<float>(dataType, {indices.size()}, {}, true);
embBag = std::make_shared<opset3::EmbeddingSegmentsSum>(
embTableNode, indicesNode, segmentIdNode, segmentNumNode, defIdxNode, weightsNode);
embBag = std::make_shared<ov::op::v3::EmbeddingSegmentsSum>(embTableNode,
indicesNode,
segmentIdNode,
segmentNumNode,
defIdxNode,
weightsNode);
} else {
embBag = std::make_shared<opset3::EmbeddingSegmentsSum>(
embTableNode, indicesNode, segmentIdNode, segmentNumNode, defIdxNode);
embBag = std::make_shared<ov::op::v3::EmbeddingSegmentsSum>(embTableNode,
indicesNode,
segmentIdNode,
segmentNumNode,
defIdxNode);
}
} else {
embBag = std::make_shared<opset3::EmbeddingSegmentsSum>(
embTableNode, indicesNode, segmentIdNode, segmentNumNode);
embBag = std::make_shared<ov::op::v3::EmbeddingSegmentsSum>(embTableNode,
indicesNode,
segmentIdNode,
segmentNumNode);
}
return embBag;
}
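
And a sketch of the segments variant (again illustrative, not from the commit; buildSegmentsSumExample is a hypothetical name):

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: 4 indices mapped onto 3 segments, no weights,
    // with an explicit default_index of 0.
    std::shared_ptr<ov::Node> buildSegmentsSumExample() {
        auto embTable = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{10, 4});
        return ngraph::builder::makeEmbeddingSegmentsSum(
            ov::element::f32, ov::element::i32, embTable,
            {0, 2, 3, 4}, {0, 0, 2, 2}, 3, 0, false, true);
    }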


@ -2,47 +2,55 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "openvino/op/fake_quantize.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeFakeQuantize(const ngraph::Output<Node> &in,
const element::Type &constantType,
std::shared_ptr<Node> makeFakeQuantize(const ov::Output<Node>& in,
const element::Type& constantType,
std::size_t levels,
std::vector<size_t> constShapes,
const std::vector<float> &inputLowData,
const std::vector<float> &inputHighData,
const std::vector<float> &outputLowData,
const std::vector<float> &outputHighData) {
const std::vector<float>& inputLowData,
const std::vector<float>& inputHighData,
const std::vector<float>& outputLowData,
const std::vector<float>& outputHighData) {
auto inputLowNode = makeConstant(constantType, constShapes, inputLowData, inputLowData.empty());
auto inputHighNode = makeConstant(constantType, constShapes, inputHighData, inputHighData.empty());
auto outputLowNode = makeConstant(constantType, constShapes, outputLowData, outputLowData.empty());
auto outputHighNode = makeConstant(constantType, constShapes, outputHighData, outputHighData.empty());
auto fq = std::make_shared<ngraph::opset1::FakeQuantize>(in, inputLowNode, inputHighNode, outputLowNode, outputHighNode, levels);
auto fq = std::make_shared<ov::op::v0::FakeQuantize>(in,
inputLowNode,
inputHighNode,
outputLowNode,
outputHighNode,
levels);
return fq;
}
std::shared_ptr<ngraph::Node> makeFakeQuantize(const ngraph::Output<ngraph::Node> &in,
const ngraph::element::Type &type,
std::size_t levels,
std::vector<size_t> constShapes,
const int32_t seed) {
size_t constDataSize = ngraph::shape_size(constShapes);
std::shared_ptr<ov::Node> makeFakeQuantize(const ov::Output<ov::Node>& in,
const ov::element::Type& type,
std::size_t levels,
std::vector<size_t> constShapes,
const int32_t seed) {
size_t constDataSize = ov::shape_size(constShapes);
std::vector<float> inputLowData, inputHighData, outputLowData, outputHighData;
inputLowData = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(constDataSize, 10, 1, seed);
inputLowData = NGraphFunctions::Utils::generateVector<ov::element::Type_t::f32>(constDataSize, 10, 1, seed);
if (levels != 2) {
inputHighData = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(constDataSize, 10, 1, seed);
outputLowData = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(constDataSize, 10, 1, seed);
outputHighData = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(constDataSize, 10, 1, seed);
inputHighData = NGraphFunctions::Utils::generateVector<ov::element::Type_t::f32>(constDataSize, 10, 1, seed);
outputLowData = NGraphFunctions::Utils::generateVector<ov::element::Type_t::f32>(constDataSize, 10, 1, seed);
outputHighData = NGraphFunctions::Utils::generateVector<ov::element::Type_t::f32>(constDataSize, 10, 1, seed);
} else {
inputHighData = inputLowData;
outputLowData = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(constDataSize, 10, 1, seed);
outputHighData = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(constDataSize, 10, 1, seed);
outputLowData = NGraphFunctions::Utils::generateVector<ov::element::Type_t::f32>(constDataSize, 10, 1, seed);
outputHighData = NGraphFunctions::Utils::generateVector<ov::element::Type_t::f32>(constDataSize, 10, 1, seed);
for (int i = 0; i < constDataSize; i++) {
if (outputLowData[i] > outputHighData[i]) {
@ -69,12 +77,21 @@ std::shared_ptr<ngraph::Node> makeFakeQuantize(const ngraph::Output<ngraph::Node
outputHighData[i] += 1;
}
auto inputLowNode = ngraph::builder::makeConstant(type, constShapes, inputLowData, inputLowData.empty(), 10.f, 1.f, seed);
auto inputHighNode = ngraph::builder::makeConstant(type, constShapes, inputHighData, inputHighData.empty(), 10.f, 1.f, seed);
auto outputLowNode = ngraph::builder::makeConstant(type, constShapes, outputLowData, outputLowData.empty(), 10.f, 1.f, seed);
auto outputHighNode = ngraph::builder::makeConstant(type, constShapes, outputHighData, outputHighData.empty(), 10.f, 1.f, seed);
auto inputLowNode =
ngraph::builder::makeConstant(type, constShapes, inputLowData, inputLowData.empty(), 10.f, 1.f, seed);
auto inputHighNode =
ngraph::builder::makeConstant(type, constShapes, inputHighData, inputHighData.empty(), 10.f, 1.f, seed);
auto outputLowNode =
ngraph::builder::makeConstant(type, constShapes, outputLowData, outputLowData.empty(), 10.f, 1.f, seed);
auto outputHighNode =
ngraph::builder::makeConstant(type, constShapes, outputHighData, outputHighData.empty(), 10.f, 1.f, seed);
auto fq = std::make_shared<ngraph::opset1::FakeQuantize>(in, inputLowNode, inputHighNode, outputLowNode, outputHighNode, levels);
auto fq = std::make_shared<ov::op::v0::FakeQuantize>(in,
inputLowNode,
inputHighNode,
outputLowNode,
outputHighNode,
levels);
return fq;
}
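
A minimal sketch of the first overload (not part of the diff; buildFqExample and the ranges are illustrative). Passing empty data vectors would make the helper generate random constants instead:

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: per-tensor 256-level FakeQuantize with explicit ranges.
    std::shared_ptr<ov::Node> buildFqExample(const ov::Output<ov::Node>& in) {
        return ngraph::builder::makeFakeQuantize(in, ov::element::f32, 256, {1},
                                                 {0.f}, {2.55f}, {0.f}, {2.55f});
    }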


@ -6,15 +6,17 @@
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/matmul.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeFullyConnected(const ngraph::Output<Node>& in,
std::shared_ptr<Node> makeFullyConnected(const ov::Output<Node>& in,
const element::Type& type,
const size_t outputSize,
bool addBias,
const ngraph::Shape& weightsShape,
const ov::Shape& weightsShape,
const std::vector<float>& weights,
const std::vector<float>& biasWeights) {
auto shape = weightsShape;
@ -26,13 +28,13 @@ std::shared_ptr<Node> makeFullyConnected(const ngraph::Output<Node>& in,
bool randomWeights = weights.empty();
auto weightsNode = makeConstant(type, shape, weights, randomWeights);
auto fc = std::make_shared<ngraph::opset1::MatMul>(in, weightsNode, false, false);
auto fc = std::make_shared<ov::op::v0::MatMul>(in, weightsNode, false, false);
fc->set_friendly_name("FullyConnected");
if (addBias) {
bool randomBiasWeights = biasWeights.empty();
auto biasWeightsNode = makeConstant(type, {}, biasWeights, randomBiasWeights);
auto add = std::make_shared<ngraph::opset1::Add>(fc, biasWeightsNode);
auto add = std::make_shared<ov::op::v1::Add>(fc, biasWeightsNode);
return add;
} else {
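
A hedged sketch of the helper in use (not from the commit; buildFcExample is hypothetical, and an empty weightsShape is assumed to let the helper derive the weights shape, as the elided lines above suggest):

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: 16-unit fully connected layer with random weights and bias.
    std::shared_ptr<ov::Node> buildFcExample() {
        auto in = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8});
        return ngraph::builder::makeFullyConnected(in, ov::element::f32, 16,
                                                   /*addBias=*/true, /*weightsShape=*/{}, {}, {});
    }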


@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/gather_elements.hpp"
#include <memory>
#include <numeric>
#include <vector>
@ -11,23 +13,22 @@
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeGatherElements(
const Output<Node>& dataNode,
const Shape& indicesShape,
const element::Type& indicesType,
const int axis) {
std::shared_ptr<Node> makeGatherElements(const Output<Node>& dataNode,
const Shape& indicesShape,
const element::Type& indicesType,
const int axis) {
const auto& dataShape = dataNode.get_shape();
int posAxis = axis;
if (posAxis < 0)
posAxis += dataShape.size();
const auto axisDim = dataShape[posAxis];
const auto indicesSize = std::accumulate(begin(indicesShape), end(indicesShape),
1ull, std::multiplies<std::size_t>{});
const auto indicesSize =
std::accumulate(begin(indicesShape), end(indicesShape), 1ull, std::multiplies<std::size_t>{});
auto indicesValues = NGraphFunctions::Utils::generateVector<element::Type_t::i32>(indicesSize, axisDim - 1, 0);
auto indicesNode = opset5::Constant::create(indicesType, indicesShape, indicesValues);
auto indicesNode = ov::op::v0::Constant::create(indicesType, indicesShape, indicesValues);
auto gatherElNode = std::make_shared<op::v6::GatherElements>(dataNode, indicesNode, axis);
auto gatherElNode = std::make_shared<ov::op::v6::GatherElements>(dataNode, indicesNode, axis);
gatherElNode->set_friendly_name("GatherElements");
return gatherElNode;
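
A minimal usage sketch (illustrative only; buildGatherElementsExample is a hypothetical name). The indices Constant is generated inside the helper, bounded by the axis dimension:

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: gather along axis 1 of a statically shaped input.
    std::shared_ptr<ov::Node> buildGatherElementsExample() {
        auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{3, 7, 5});
        return ngraph::builder::makeGatherElements(data, ov::Shape{3, 7, 5}, ov::element::i32, 1);
    }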


@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/gather_nd.hpp"
#include <memory>
#include <numeric>
#include <vector>
@ -11,20 +13,20 @@
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeGatherND(
const ngraph::Output<Node>& dataNode,
const ngraph::Shape& indicesShape,
const element::Type& indicesType,
const std::size_t batchDims) {
std::shared_ptr<Node> makeGatherND(const ov::Output<Node>& dataNode,
const ov::Shape& indicesShape,
const element::Type& indicesType,
const std::size_t batchDims) {
const auto indices = [&] {
const auto& dataShape = dataNode.get_shape();
const auto indicesCount = std::accumulate(begin(indicesShape), prev(end(indicesShape)),
1ull, std::multiplies<std::size_t>{});
const auto indicesCount =
std::accumulate(begin(indicesShape), prev(end(indicesShape)), 1ull, std::multiplies<std::size_t>{});
const auto sliceRank = indicesShape.back();
const auto maxDim = *std::max_element(begin(dataShape), end(dataShape));
auto indicesValues = NGraphFunctions::Utils::generateVector<element::Type_t::i32>(indicesCount * sliceRank, maxDim, 0);
auto indicesValues =
NGraphFunctions::Utils::generateVector<element::Type_t::i32>(indicesCount * sliceRank, maxDim, 0);
auto indicesData = indicesValues.data();
for (int i = 0; i < indicesCount; i++) {
for (int dim = 0; dim < sliceRank; dim++) {
@ -32,29 +34,29 @@ std::shared_ptr<Node> makeGatherND(
indicesData++;
}
}
return opset5::Constant::create(indicesType, indicesShape, indicesValues);
return op::v0::Constant::create(indicesType, indicesShape, indicesValues);
}();
auto gatherNdNode = std::make_shared<opset5::GatherND>(dataNode, indices, batchDims);
auto gatherNdNode = std::make_shared<ov::op::v5::GatherND>(dataNode, indices, batchDims);
gatherNdNode->set_friendly_name("GatherND");
return gatherNdNode;
}
std::shared_ptr<Node> makeGatherND8(
const ngraph::Output<Node>& dataNode,
const ngraph::Shape& indicesShape,
const element::Type& indicesType,
const std::size_t batchDims) {
std::shared_ptr<Node> makeGatherND8(const ov::Output<Node>& dataNode,
const ov::Shape& indicesShape,
const element::Type& indicesType,
const std::size_t batchDims) {
const auto indices = [&] {
const auto& dataShape = dataNode.get_shape();
const auto indicesCount = std::accumulate(begin(indicesShape), prev(end(indicesShape)),
1ull, std::multiplies<std::size_t>{});
const auto indicesCount =
std::accumulate(begin(indicesShape), prev(end(indicesShape)), 1ull, std::multiplies<std::size_t>{});
const auto sliceRank = indicesShape.back();
const auto maxDim = *std::max_element(begin(dataShape), end(dataShape));
auto indicesValues = NGraphFunctions::Utils::generateVector<element::Type_t::i32>(indicesCount * sliceRank, maxDim, 0);
auto indicesValues =
NGraphFunctions::Utils::generateVector<element::Type_t::i32>(indicesCount * sliceRank, maxDim, 0);
auto indicesData = indicesValues.data();
for (int i = 0; i < indicesCount; i++) {
for (int dim = 0; dim < sliceRank; dim++) {
@ -62,10 +64,10 @@ std::shared_ptr<Node> makeGatherND8(
indicesData++;
}
}
return opset8::Constant::create(indicesType, indicesShape, indicesValues);
return op::v0::Constant::create(indicesType, indicesShape, indicesValues);
}();
auto gatherNdNode = std::make_shared<opset8::GatherND>(dataNode, indices, batchDims);
auto gatherNdNode = std::make_shared<ov::op::v8::GatherND>(dataNode, indices, batchDims);
gatherNdNode->set_friendly_name("GatherND");
return gatherNdNode;
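
A sketch of the v8 builder in use (not part of the diff; buildGatherNdExample and the shapes are illustrative). The trailing dimension of indicesShape is the slice rank:

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: v8 GatherND with batchDims = 0 and slice rank 2.
    std::shared_ptr<ov::Node> buildGatherNdExample() {
        auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4, 5, 6});
        return ngraph::builder::makeGatherND8(data, ov::Shape{3, 2}, ov::element::i32, 0);
    }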


@ -2,27 +2,29 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/group_conv.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeGroupConvolution(const ngraph::Output<Node> &in,
const element::Type &type,
const std::vector<size_t> &filterSize,
const std::vector<size_t> &strides,
const std::vector<ptrdiff_t> &padsBegin,
const std::vector<ptrdiff_t> &padsEnd,
const std::vector<size_t> &dilations,
const op::PadType &autoPad,
size_t numOutChannels,
size_t numGroups,
bool addBiases,
const std::vector<float> &filterWeights,
const std::vector<float> &biasesWeights) {
std::shared_ptr<Node> makeGroupConvolution(const ov::Output<Node>& in,
const element::Type& type,
const std::vector<size_t>& filterSize,
const std::vector<size_t>& strides,
const std::vector<ptrdiff_t>& padsBegin,
const std::vector<ptrdiff_t>& padsEnd,
const std::vector<size_t>& dilations,
const op::PadType& autoPad,
size_t numOutChannels,
size_t numGroups,
bool addBiases,
const std::vector<float>& filterWeights,
const std::vector<float>& biasesWeights) {
bool randomFilterWeights = filterWeights.empty();
auto shape = in.get_partial_shape();
std::vector<size_t> filterWeightsShape = {numOutChannels, static_cast<size_t>(shape[1].get_length())};
@ -34,24 +36,34 @@ std::shared_ptr<Node> makeGroupConvolution(const ngraph::Output<Node> &in,
filterWeightsShape.insert(filterWeightsShape.end(), filterSize.begin(), filterSize.end());
auto filterWeightsNode = makeConstant(type, filterWeightsShape, filterWeights, randomFilterWeights);
return makeGroupConvolution(in, filterWeightsNode, type, strides, padsBegin, padsEnd, dilations, autoPad, addBiases, biasesWeights);
return makeGroupConvolution(in,
filterWeightsNode,
type,
strides,
padsBegin,
padsEnd,
dilations,
autoPad,
addBiases,
biasesWeights);
}
std::shared_ptr<Node> makeGroupConvolution(const ngraph::Output<Node> &in,
const ngraph::Output<Node> &weights,
const element::Type &type,
const std::vector<size_t> &strides,
const std::vector<ptrdiff_t> &padsBegin,
const std::vector<ptrdiff_t> &padsEnd,
const std::vector<size_t> &dilations,
const op::PadType &autoPad,
std::shared_ptr<Node> makeGroupConvolution(const ov::Output<Node>& in,
const ov::Output<Node>& weights,
const element::Type& type,
const std::vector<size_t>& strides,
const std::vector<ptrdiff_t>& padsBegin,
const std::vector<ptrdiff_t>& padsEnd,
const std::vector<size_t>& dilations,
const op::PadType& autoPad,
bool addBiases,
const std::vector<float> &biasesWeights) {
auto conv = std::make_shared<opset1::GroupConvolution>(in, weights, strides, padsBegin, padsEnd, dilations, autoPad);
const std::vector<float>& biasesWeights) {
auto conv =
std::make_shared<ov::op::v1::GroupConvolution>(in, weights, strides, padsBegin, padsEnd, dilations, autoPad);
if (addBiases) {
bool randomBiases = biasesWeights.empty();
auto biasesWeightsNode = makeConstant(type, {}, biasesWeights, randomBiases);
auto add = std::make_shared<ngraph::opset1::Add>(conv, biasesWeightsNode);
auto add = std::make_shared<ov::op::v1::Add>(conv, biasesWeightsNode);
return add;
} else {
return conv;
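
A hedged usage sketch (illustrative, not from the commit; buildGroupConvExample is hypothetical):

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: 3x3 group convolution, 8 output channels in 2 groups,
    // random filter weights, no bias.
    std::shared_ptr<ov::Node> buildGroupConvExample() {
        auto in = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8, 16, 16});
        return ngraph::builder::makeGroupConvolution(in, ov::element::f32,
                                                     {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1},
                                                     ov::op::PadType::EXPLICIT, 8, 2,
                                                     /*addBiases=*/false, {}, {});
    }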


@ -2,28 +2,30 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/group_conv.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeGroupConvolutionBackpropData(const ngraph::Output<Node> &in,
const element::Type &type,
const std::vector<size_t> &filterSize,
const std::vector<size_t> &strides,
const std::vector<ptrdiff_t> &padsBegin,
const std::vector<ptrdiff_t> &padsEnd,
const std::vector<size_t> &dilations,
const op::PadType &autoPad,
std::shared_ptr<Node> makeGroupConvolutionBackpropData(const ov::Output<Node>& in,
const element::Type& type,
const std::vector<size_t>& filterSize,
const std::vector<size_t>& strides,
const std::vector<ptrdiff_t>& padsBegin,
const std::vector<ptrdiff_t>& padsEnd,
const std::vector<size_t>& dilations,
const op::PadType& autoPad,
size_t numOutChannels,
size_t numGroups,
bool addBiases,
const std::vector<ptrdiff_t> &outputPadding,
const std::vector<float> &filterWeights,
const std::vector<float> &biasesWeights) {
const std::vector<ptrdiff_t>& outputPadding,
const std::vector<float>& filterWeights,
const std::vector<float>& biasesWeights) {
bool randomFilterWeights = filterWeights.empty();
auto shape = in.get_partial_shape();
std::vector<size_t> filterWeightsShape = {static_cast<size_t>(shape[1].get_length()), numOutChannels};
@ -35,51 +37,73 @@ std::shared_ptr<Node> makeGroupConvolutionBackpropData(const ngraph::Output<Node
filterWeightsShape.insert(filterWeightsShape.end(), filterSize.begin(), filterSize.end());
auto filterWeightsNode = makeConstant(type, filterWeightsShape, filterWeights, randomFilterWeights);
return makeGroupConvolutionBackpropData(in, filterWeightsNode, type, strides, padsBegin, padsEnd, dilations, autoPad, addBiases,
outputPadding, biasesWeights);
return makeGroupConvolutionBackpropData(in,
filterWeightsNode,
type,
strides,
padsBegin,
padsEnd,
dilations,
autoPad,
addBiases,
outputPadding,
biasesWeights);
}
std::shared_ptr<Node> makeGroupConvolutionBackpropData(const ngraph::Output<Node> &in,
const ngraph::Output<Node> &weights,
const element::Type &type,
const std::vector<size_t> &strides,
const std::vector<ptrdiff_t> &padsBegin,
const std::vector<ptrdiff_t> &padsEnd,
const std::vector<size_t> &dilations,
const op::PadType &autoPad,
std::shared_ptr<Node> makeGroupConvolutionBackpropData(const ov::Output<Node>& in,
const ov::Output<Node>& weights,
const element::Type& type,
const std::vector<size_t>& strides,
const std::vector<ptrdiff_t>& padsBegin,
const std::vector<ptrdiff_t>& padsEnd,
const std::vector<size_t>& dilations,
const op::PadType& autoPad,
bool addBiases,
const std::vector<ptrdiff_t> &outputPadding,
const std::vector<float> &biasesWeights) {
auto deconv = std::make_shared<opset1::GroupConvolutionBackpropData>(in, weights, strides, padsBegin, padsEnd, dilations, autoPad);
const std::vector<ptrdiff_t>& outputPadding,
const std::vector<float>& biasesWeights) {
auto deconv = std::make_shared<ov::op::v1::GroupConvolutionBackpropData>(in,
weights,
strides,
padsBegin,
padsEnd,
dilations,
autoPad);
if (!outputPadding.empty()) {
deconv = std::make_shared<opset1::GroupConvolutionBackpropData>(in, weights, strides, padsBegin, padsEnd, dilations, autoPad, outputPadding);
deconv = std::make_shared<ov::op::v1::GroupConvolutionBackpropData>(in,
weights,
strides,
padsBegin,
padsEnd,
dilations,
autoPad,
outputPadding);
}
if (addBiases) {
bool randomBiases = biasesWeights.empty();
auto biasesWeightsNode = makeConstant(type, {}, biasesWeights, randomBiases);
auto add = std::make_shared<ngraph::opset1::Add>(deconv, biasesWeightsNode);
auto add = std::make_shared<ov::op::v1::Add>(deconv, biasesWeightsNode);
return add;
} else {
return deconv;
}
}
std::shared_ptr<Node> makeGroupConvolutionBackpropData(const ngraph::Output<Node> &in,
const ngraph::Output<Node> &outputShape,
const element::Type &type,
const std::vector<size_t> &filterSize,
const std::vector<size_t> &strides,
const std::vector<ptrdiff_t> &padsBegin,
const std::vector<ptrdiff_t> &padsEnd,
const std::vector<size_t> &dilations,
const op::PadType &autoPad,
std::shared_ptr<Node> makeGroupConvolutionBackpropData(const ov::Output<Node>& in,
const ov::Output<Node>& outputShape,
const element::Type& type,
const std::vector<size_t>& filterSize,
const std::vector<size_t>& strides,
const std::vector<ptrdiff_t>& padsBegin,
const std::vector<ptrdiff_t>& padsEnd,
const std::vector<size_t>& dilations,
const op::PadType& autoPad,
size_t numOutChannels,
size_t numGroups,
bool addBiases,
const std::vector<ptrdiff_t> &outputPadding,
const std::vector<float> &filterWeights,
const std::vector<float> &biasesWeights) {
const std::vector<ptrdiff_t>& outputPadding,
const std::vector<float>& filterWeights,
const std::vector<float>& biasesWeights) {
bool randomFilterWeights = filterWeights.empty();
auto shape = in.get_partial_shape();
std::vector<size_t> filterWeightsShape = {static_cast<size_t>(shape[1].get_length()), numOutChannels};
@ -91,17 +115,31 @@ std::shared_ptr<Node> makeGroupConvolutionBackpropData(const ngraph::Output<Node
filterWeightsShape.insert(filterWeightsShape.end(), filterSize.begin(), filterSize.end());
auto filterWeightsNode = makeConstant(type, filterWeightsShape, filterWeights, randomFilterWeights);
auto deconv = std::make_shared<opset1::GroupConvolutionBackpropData>(in, filterWeightsNode, outputShape, strides, padsBegin, padsEnd, dilations, autoPad);
auto deconv = std::make_shared<ov::op::v1::GroupConvolutionBackpropData>(in,
filterWeightsNode,
outputShape,
strides,
padsBegin,
padsEnd,
dilations,
autoPad);
if (!outputPadding.empty()) {
deconv = std::make_shared<opset1::GroupConvolutionBackpropData>(in, filterWeightsNode, outputShape, strides, padsBegin,
padsEnd, dilations, autoPad, outputPadding);
deconv = std::make_shared<ov::op::v1::GroupConvolutionBackpropData>(in,
filterWeightsNode,
outputShape,
strides,
padsBegin,
padsEnd,
dilations,
autoPad,
outputPadding);
}
if (addBiases) {
bool randomBiases = biasesWeights.empty();
auto biasesWeightsNode = makeConstant(type, {}, biasesWeights, randomBiases);
auto add = std::make_shared<ngraph::opset1::Add>(deconv, biasesWeightsNode);
auto add = std::make_shared<ov::op::v1::Add>(deconv, biasesWeightsNode);
return add;
} else {
return deconv;
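
A minimal sketch of the first overload (not part of the diff; buildGroupDeconvExample and the shapes are illustrative; the empty outputPadding keeps the seven-argument constructor path):

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: 2x-upsampling grouped deconvolution, no output padding, no bias.
    std::shared_ptr<ov::Node> buildGroupDeconvExample() {
        auto in = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8, 8, 8});
        return ngraph::builder::makeGroupConvolutionBackpropData(
            in, ov::element::f32, {2, 2}, {2, 2}, {0, 0}, {0, 0}, {1, 1},
            ov::op::PadType::EXPLICIT, 8, 2, /*addBiases=*/false,
            /*outputPadding=*/{}, {}, {});
    }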


@ -2,67 +2,105 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "openvino/op/gru_cell.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "openvino/op/gru_sequence.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeGRU(const OutputVector& in,
const std::vector<ov::Shape>& constants,
std::size_t hidden_size,
const std::vector<std::string>& activations,
const std::vector<float>& activations_alpha,
const std::vector<float>& activations_beta,
float clip,
bool linear_before_reset,
bool make_sequence,
ov::op::RecurrentSequenceDirection direction,
ngraph::helpers::SequenceTestsMode mode) {
const std::vector<ov::Shape>& constants,
std::size_t hidden_size,
const std::vector<std::string>& activations,
const std::vector<float>& activations_alpha,
const std::vector<float>& activations_beta,
float clip,
bool linear_before_reset,
bool make_sequence,
ov::op::RecurrentSequenceDirection direction,
ov::test::utils::SequenceTestsMode mode) {
std::vector<float> empty;
auto W = ngraph::builder::makeConstant(in[0].get_element_type(), constants[0], empty, true);
auto R = ngraph::builder::makeConstant(in[0].get_element_type(), constants[1], empty, true);
auto B = ngraph::builder::makeConstant(in[0].get_element_type(), constants[2], empty, true);
if (!make_sequence) {
return std::make_shared<ov::op::v3::GRUCell>(in[0], in[1], W, R, B, hidden_size, activations,
activations_alpha, activations_beta, clip,
linear_before_reset);
return std::make_shared<ov::op::v3::GRUCell>(in[0],
in[1],
W,
R,
B,
hidden_size,
activations,
activations_alpha,
activations_beta,
clip,
linear_before_reset);
} else {
if (in.size() > 2 && in[2].get_partial_shape().is_dynamic()) {
return std::make_shared<ov::op::v5::GRUSequence>(in[0], in[1], in[2], W, R, B, hidden_size, direction,
activations, activations_alpha, activations_beta, clip, linear_before_reset);
return std::make_shared<ov::op::v5::GRUSequence>(in[0],
in[1],
in[2],
W,
R,
B,
hidden_size,
direction,
activations,
activations_alpha,
activations_beta,
clip,
linear_before_reset);
} else {
std::shared_ptr<Node> seq_lengths;
switch (mode) {
case ngraph::helpers::SequenceTestsMode::PURE_SEQ:
case ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST: {
std::vector<float> lengths(in[0].get_partial_shape()[0].get_min_length(), in[0].get_partial_shape()[1].get_min_length());
seq_lengths = ngraph::builder::makeConstant(element::i64, constants[3], lengths, false);
break;
}
case ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST:
case ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST: {
for (size_t i = 0; i <= in[0].get_shape().at(0); ++i) {
std::vector<float> lengths;
seq_lengths = ngraph::builder::makeConstant(element::i64, constants[3], lengths, true,
static_cast<float>(in[0].get_shape()[1]), 0.f);
}
break;
}
case ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM:
case ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM:
case ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM: {
// Seq_lengths should be a Parameter node for these modes
seq_lengths = in.at(2).get_node_shared_ptr();
break;
}
default:
throw std::runtime_error("Incorrect mode for creation of Sequence operation");
case ov::test::utils::SequenceTestsMode::PURE_SEQ:
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST: {
std::vector<float> lengths(in[0].get_partial_shape()[0].get_min_length(),
in[0].get_partial_shape()[1].get_min_length());
seq_lengths = ngraph::builder::makeConstant(element::i64, constants[3], lengths, false);
break;
}
return std::make_shared<ov::op::v5::GRUSequence>(in[0], in[1], seq_lengths, W, R, B, hidden_size, direction,
activations, activations_alpha, activations_beta, clip, linear_before_reset);
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST:
case ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST: {
for (size_t i = 0; i <= in[0].get_shape().at(0); ++i) {
std::vector<float> lengths;
seq_lengths = ngraph::builder::makeConstant(element::i64,
constants[3],
lengths,
true,
static_cast<float>(in[0].get_shape()[1]),
0.f);
}
break;
}
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM:
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM:
case ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM: {
// Seq_lengths should be a Parameter node for these modes
seq_lengths = in.at(2).get_node_shared_ptr();
break;
}
default:
throw std::runtime_error("Incorrect mode for creation of Sequence operation");
}
return std::make_shared<ov::op::v5::GRUSequence>(in[0],
in[1],
seq_lengths,
W,
R,
B,
hidden_size,
direction,
activations,
activations_alpha,
activations_beta,
clip,
linear_before_reset);
}
}
}
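
A hedged sketch of the cell path (make_sequence = false), not part of the commit; buildGruCellExample and the shapes are illustrative. GRU has 3 gates, so W/R/B use 3 * hidden_size rows, and B is 3 * hidden_size when linear_before_reset is false:

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: a single GRUCell with hidden_size = 4, input_size = 3.
    std::shared_ptr<ov::Node> buildGruCellExample() {
        auto X = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3});
        auto H = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 4});
        std::vector<ov::Shape> constants = {{12, 3}, {12, 4}, {12}};
        return ngraph::builder::makeGRU({X, H}, constants, 4,
                                        {"sigmoid", "tanh"}, {}, {}, 0.f,
                                        /*linear_before_reset=*/false, /*make_sequence=*/false,
                                        ov::op::RecurrentSequenceDirection::FORWARD,
                                        ov::test::utils::SequenceTestsMode::PURE_SEQ);
    }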


@ -2,40 +2,41 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeInputLayer(const element::Type &type, ngraph::helpers::InputLayerType inputType,
const std::vector<size_t> &shape) {
std::shared_ptr<ngraph::Node> input;
std::shared_ptr<ov::Node> makeInputLayer(const element::Type& type,
ov::test::utils::InputLayerType inputType,
const std::vector<size_t>& shape) {
std::shared_ptr<ov::Node> input;
switch (inputType) {
case ngraph::helpers::InputLayerType::CONSTANT: {
input = ngraph::builder::makeConstant<float>(type, shape, {}, true);
break;
}
case ngraph::helpers::InputLayerType::PARAMETER:
input = std::make_shared<ov::op::v0::Parameter>(type, ov::Shape(shape));
break;
default:
throw std::runtime_error("Unsupported inputType");
case ov::test::utils::InputLayerType::CONSTANT: {
input = ngraph::builder::makeConstant<float>(type, shape, {}, true);
break;
}
case ov::test::utils::InputLayerType::PARAMETER:
input = std::make_shared<ov::op::v0::Parameter>(type, ov::Shape(shape));
break;
default:
throw std::runtime_error("Unsupported inputType");
}
return input;
}
std::shared_ptr<ngraph::Node> makeDynamicInputLayer(const element::Type &type, ngraph::helpers::InputLayerType inputType,
const PartialShape& shape) {
std::shared_ptr<ov::Node> makeDynamicInputLayer(const element::Type& type,
ov::test::utils::InputLayerType inputType,
const PartialShape& shape) {
if (shape.is_static()) {
return makeInputLayer(type, inputType, shape.get_shape());
}
if (inputType == ngraph::helpers::InputLayerType::PARAMETER) {
if (inputType == ov::test::utils::InputLayerType::PARAMETER) {
return std::make_shared<ov::op::v0::Parameter>(type, shape);
}
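
A minimal usage sketch (not from the diff; buildInputExample is a hypothetical name). The same call site yields either a random Constant or a Parameter depending on the mode:

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: input-layer kind selected by the test mode.
    std::shared_ptr<ov::Node> buildInputExample(ov::test::utils::InputLayerType t) {
        return ngraph::builder::makeInputLayer(ov::element::f32, t, {1, 3, 224, 224});
    }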


@ -3,44 +3,44 @@
//
#include <memory>
#include <ngraph/opsets/opset3.hpp>
#include "common_test_utils/test_enums.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeLogical(const ngraph::Output<Node> &in0,
const ngraph::Output<Node> &in1,
ngraph::helpers::LogicalTypes logicalType) {
std::shared_ptr<ov::Node> makeLogical(const ov::Output<Node>& in0,
const ov::Output<Node>& in1,
ov::test::utils::LogicalTypes logicalType) {
switch (logicalType) {
case ngraph::helpers::LogicalTypes::LOGICAL_AND:
return std::make_shared<ngraph::opset3::LogicalAnd>(in0, in1);
case ngraph::helpers::LogicalTypes::LOGICAL_OR:
return std::make_shared<ngraph::opset3::LogicalOr>(in0, in1);
case ngraph::helpers::LogicalTypes::LOGICAL_NOT:
return std::make_shared<ngraph::opset3::LogicalNot>(in0);
case ngraph::helpers::LogicalTypes::LOGICAL_XOR:
return std::make_shared<ngraph::opset3::LogicalXor>(in0, in1);
default: {
throw std::runtime_error("Incorrect type of Logical operation");
}
case ov::test::utils::LogicalTypes::LOGICAL_AND:
return std::make_shared<ov::op::v1::LogicalAnd>(in0, in1);
case ov::test::utils::LogicalTypes::LOGICAL_OR:
return std::make_shared<ov::op::v1::LogicalOr>(in0, in1);
case ov::test::utils::LogicalTypes::LOGICAL_NOT:
return std::make_shared<ov::op::v1::LogicalNot>(in0);
case ov::test::utils::LogicalTypes::LOGICAL_XOR:
return std::make_shared<ov::op::v1::LogicalXor>(in0, in1);
default: {
throw std::runtime_error("Incorrect type of Logical operation");
}
}
}
std::shared_ptr<ngraph::Node> makeLogical(const ngraph::ParameterVector& inputs,
ngraph::helpers::LogicalTypes logicalType) {
std::shared_ptr<ov::Node> makeLogical(const ov::ParameterVector& inputs, ov::test::utils::LogicalTypes logicalType) {
switch (logicalType) {
case ngraph::helpers::LogicalTypes::LOGICAL_AND:
return std::make_shared<ngraph::opset3::LogicalAnd>(inputs[0], inputs[1]);
case ngraph::helpers::LogicalTypes::LOGICAL_OR:
return std::make_shared<ngraph::opset3::LogicalOr>(inputs[0], inputs[1]);
case ngraph::helpers::LogicalTypes::LOGICAL_NOT:
return std::make_shared<ngraph::opset3::LogicalNot>(inputs[0]);
case ngraph::helpers::LogicalTypes::LOGICAL_XOR:
return std::make_shared<ngraph::opset3::LogicalXor>(inputs[0], inputs[1]);
default: {
throw std::runtime_error("Incorrect type of Logical operation");
}
case ov::test::utils::LogicalTypes::LOGICAL_AND:
return std::make_shared<ov::op::v1::LogicalAnd>(inputs[0], inputs[1]);
case ov::test::utils::LogicalTypes::LOGICAL_OR:
return std::make_shared<ov::op::v1::LogicalOr>(inputs[0], inputs[1]);
case ov::test::utils::LogicalTypes::LOGICAL_NOT:
return std::make_shared<ov::op::v1::LogicalNot>(inputs[0]);
case ov::test::utils::LogicalTypes::LOGICAL_XOR:
return std::make_shared<ov::op::v1::LogicalXor>(inputs[0], inputs[1]);
default: {
throw std::runtime_error("Incorrect type of Logical operation");
}
}
}
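
A brief sketch of the two-input overload (illustrative only; buildLogicalExample is hypothetical). Note that for LOGICAL_NOT the helper ignores its second input:

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: elementwise LogicalAnd over two boolean parameters.
    std::shared_ptr<ov::Node> buildLogicalExample() {
        auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::boolean, ov::Shape{2, 2});
        auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::boolean, ov::Shape{2, 2});
        return ngraph::builder::makeLogical(a, b, ov::test::utils::LogicalTypes::LOGICAL_AND);
    }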


@ -2,25 +2,26 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "openvino/op/lstm_sequence.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeLSTM(const std::vector<ov::Output<Node>>& in,
const std::vector<ov::Shape>& constants,
std::size_t hidden_size,
const std::vector<std::string>& activations,
const std::vector<float>& activations_alpha,
const std::vector<float>& activations_beta,
float clip,
bool make_sequence,
ov::op::RecurrentSequenceDirection direction,
ngraph::helpers::SequenceTestsMode mode,
float WRB_range) {
const std::vector<ov::Shape>& constants,
std::size_t hidden_size,
const std::vector<std::string>& activations,
const std::vector<float>& activations_alpha,
const std::vector<float>& activations_beta,
float clip,
bool make_sequence,
ov::op::RecurrentSequenceDirection direction,
ov::test::utils::SequenceTestsMode mode,
float WRB_range) {
std::vector<float> empty;
auto W = ngraph::builder::makeConstant(in[0].get_element_type(), constants[0], empty, true);
auto R = ngraph::builder::makeConstant(in[0].get_element_type(), constants[1], empty, true);
@ -32,42 +33,78 @@ std::shared_ptr<ov::Node> makeLSTM(const std::vector<ov::Output<Node>>& in,
B = ngraph::builder::makeConstant(in[0].get_element_type(), constants[2], empty, true, -WRB_range, WRB_range);
}
if (!make_sequence) {
return std::make_shared<ov::op::v4::LSTMCell>(in[0], in[1], in[2], W, R, B, hidden_size, activations,
activations_alpha, activations_beta, clip);
return std::make_shared<ov::op::v4::LSTMCell>(in[0],
in[1],
in[2],
W,
R,
B,
hidden_size,
activations,
activations_alpha,
activations_beta,
clip);
} else {
if (in.size() > 3 && in[3].get_partial_shape().is_dynamic()) {
return std::make_shared<ov::op::v5::LSTMSequence>(in[0], in[1], in[2], in[3], W, R, B, hidden_size, direction,
activations_alpha, activations_beta, activations, clip);
return std::make_shared<ov::op::v5::LSTMSequence>(in[0],
in[1],
in[2],
in[3],
W,
R,
B,
hidden_size,
direction,
activations_alpha,
activations_beta,
activations,
clip);
} else {
std::shared_ptr<Node> seq_lengths;
switch (mode) {
case ngraph::helpers::SequenceTestsMode::PURE_SEQ:
case ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST: {
std::vector<float> lengths(in[0].get_partial_shape()[0].get_min_length(), in[0].get_partial_shape()[1].get_min_length());
seq_lengths = ngraph::builder::makeConstant(element::i64, constants[3], lengths, false);
break;
}
case ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST:
case ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST: {
for (size_t i = 0; i <= in[0].get_shape().at(0); ++i) {
std::vector<float> lengths;
seq_lengths = ngraph::builder::makeConstant(element::i64, constants[3], lengths, true,
static_cast<float>(in[0].get_shape()[1]), 0.f);
}
break;
}
case ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM:
case ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM:
case ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM: {
// Seq_lengths should be a Parameter node for these modes
seq_lengths = in.at(3).get_node_shared_ptr();
break;
}
default:
throw std::runtime_error("Incorrect mode for creation of Sequence operation");
case ov::test::utils::SequenceTestsMode::PURE_SEQ:
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST: {
std::vector<float> lengths(in[0].get_partial_shape()[0].get_min_length(),
in[0].get_partial_shape()[1].get_min_length());
seq_lengths = ngraph::builder::makeConstant(element::i64, constants[3], lengths, false);
break;
}
return std::make_shared<ov::op::v5::LSTMSequence>(in[0], in[1], in[2], seq_lengths, W, R, B, hidden_size, direction,
activations_alpha, activations_beta, activations, clip);
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST:
case ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST: {
for (size_t i = 0; i <= in[0].get_shape().at(0); ++i) {
std::vector<float> lengths;
seq_lengths = ngraph::builder::makeConstant(element::i64,
constants[3],
lengths,
true,
static_cast<float>(in[0].get_shape()[1]),
0.f);
}
break;
}
case ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM:
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM:
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM: {
// Seq_lengths should be a Parameter node for these modes
seq_lengths = in.at(3).get_node_shared_ptr();
break;
}
default:
throw std::runtime_error("Incorrect mode for creation of Sequence operation");
}
return std::make_shared<ov::op::v5::LSTMSequence>(in[0],
in[1],
in[2],
seq_lengths,
W,
R,
B,
hidden_size,
direction,
activations_alpha,
activations_beta,
activations,
clip);
}
}
}
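
A hedged sketch of the cell path (make_sequence = false), not part of the commit; buildLstmCellExample and the shapes are illustrative. LSTM has 4 gates, so W/R/B use 4 * hidden_size rows; WRB_range = 0 keeps the default random-constant path:

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: a single LSTMCell with hidden_size = 4, input_size = 3.
    std::shared_ptr<ov::Node> buildLstmCellExample() {
        auto X = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3});
        auto H = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 4});
        auto C = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 4});
        std::vector<ov::Shape> constants = {{16, 3}, {16, 4}, {16}};
        return ngraph::builder::makeLSTM({X, H, C}, constants, 4,
                                         {"sigmoid", "tanh", "tanh"}, {}, {}, 0.f,
                                         /*make_sequence=*/false,
                                         ov::op::RecurrentSequenceDirection::FORWARD,
                                         ov::test::utils::SequenceTestsMode::PURE_SEQ,
                                         /*WRB_range=*/0.f);
    }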


@ -3,15 +3,13 @@
//
#include "ngraph_functions/builders.hpp"
#include "openvino/op/matmul.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeMatMul(const Output<Node>& A,
const Output<Node>& B,
bool transpose_a,
bool transpose_b) {
return std::make_shared<ngraph::opset3::MatMul>(A, B, transpose_a, transpose_b);
std::shared_ptr<Node> makeMatMul(const Output<Node>& A, const Output<Node>& B, bool transpose_a, bool transpose_b) {
return std::make_shared<ov::op::v0::MatMul>(A, B, transpose_a, transpose_b);
}
} // namespace builder
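
A minimal usage sketch (not from the diff; buildMatMulExample and the shapes are illustrative):

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: (2x3) x (3x4) MatMul; transpose flags pass straight
    // through to ov::op::v0::MatMul.
    std::shared_ptr<ov::Node> buildMatMulExample() {
        auto A = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3});
        auto B = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{3, 4});
        return ngraph::builder::makeMatMul(A, B, false, false);
    }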


@ -3,20 +3,22 @@
//
#include "ngraph_functions/builders.hpp"
#include "openvino/op/maximum.hpp"
#include "openvino/op/minimum.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeMinMax(const ngraph::Output<Node> &in1,
const ngraph::Output<Node> &in2,
ngraph::helpers::MinMaxOpType opType) {
std::shared_ptr<ov::Node> makeMinMax(const ov::Output<Node>& in1,
const ov::Output<Node>& in2,
ov::test::utils::MinMaxOpType opType) {
switch (opType) {
case ngraph::helpers::MinMaxOpType::MINIMUM:
return std::make_shared<ngraph::opset3::Minimum>(in1, in2);
case ngraph::helpers::MinMaxOpType::MAXIMUM:
return std::make_shared<ngraph::opset3::Maximum>(in1, in2);
default:
throw std::logic_error("Unsupported operation type");
case ov::test::utils::MinMaxOpType::MINIMUM:
return std::make_shared<ov::op::v1::Minimum>(in1, in2);
case ov::test::utils::MinMaxOpType::MAXIMUM:
return std::make_shared<ov::op::v1::Maximum>(in1, in2);
default:
throw std::logic_error("Unsupported operation type");
}
}
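
A brief sketch (illustrative only; buildMinMaxExample is a hypothetical name):

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: elementwise Maximum of two parameters.
    std::shared_ptr<ov::Node> buildMinMaxExample() {
        auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4});
        auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4});
        return ngraph::builder::makeMinMax(a, b, ov::test::utils::MinMaxOpType::MAXIMUM);
    }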


@ -2,19 +2,18 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/mvn.hpp"
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeMVN(const ngraph::Output<Node> &in,
bool acrossChannels,
bool normalizeVariance,
double eps) {
auto mvnNode = std::make_shared<ngraph::op::MVN>(in, acrossChannels, normalizeVariance, eps);
std::shared_ptr<ov::Node> makeMVN(const ov::Output<Node>& in, bool acrossChannels, bool normalizeVariance, double eps) {
auto mvnNode = std::make_shared<ov::op::v0::MVN>(in, acrossChannels, normalizeVariance, eps);
// Ngraph MVN implementation implicitly adds 0th dimension to reduction axes set which is not valid behavior
ngraph::AxisSet axes;
// OpenVINO MVN implementation implicitly adds 0th dimension to reduction axes set which is not valid behavior
ov::AxisSet axes;
const size_t startAxis = acrossChannels ? 1 : 2;
const size_t numOfDims = in.get_partial_shape().size();
for (size_t i = startAxis; i < numOfDims; i++)
@ -24,11 +23,11 @@ std::shared_ptr<ngraph::Node> makeMVN(const ngraph::Output<Node> &in,
return mvnNode;
}
std::shared_ptr<ngraph::Node> makeMVN(const ngraph::Output<Node> &in,
const ngraph::AxisSet &axes,
bool normalizeVariance,
double eps) {
auto mvnNode = std::make_shared<ngraph::op::MVN>(in, axes, normalizeVariance, eps);
std::shared_ptr<ov::Node> makeMVN(const ov::Output<Node>& in,
const ov::AxisSet& axes,
bool normalizeVariance,
double eps) {
auto mvnNode = std::make_shared<ov::op::v0::MVN>(in, axes, normalizeVariance, eps);
return mvnNode;
}
@ -38,10 +37,10 @@ std::shared_ptr<Node> makeMVN6(const Output<Node>& in,
bool normalizeVariance,
float eps,
std::string& epsMode) {
op::MVNEpsMode nEpsMode = op::MVNEpsMode::INSIDE_SQRT;
ov::op::MVNEpsMode nEpsMode = ov::op::MVNEpsMode::INSIDE_SQRT;
if (epsMode == "outside_sqrt")
nEpsMode = op::MVNEpsMode::OUTSIDE_SQRT;
auto mvnNode = std::make_shared<op::v6::MVN>(in, axesNode, normalizeVariance, eps, nEpsMode);
nEpsMode = ov::op::MVNEpsMode::OUTSIDE_SQRT;
auto mvnNode = std::make_shared<ov::op::v6::MVN>(in, axesNode, normalizeVariance, eps, nEpsMode);
return mvnNode;
}
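
A hedged sketch of the first overload (not part of the commit; buildMvnExample and the shape are illustrative). With acrossChannels = false the reduction axes start at dimension 2, per the loop above:

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: v0 MVN normalizing mean and variance over spatial dims only.
    std::shared_ptr<ov::Node> buildMvnExample() {
        auto in = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 8, 8});
        return ngraph::builder::makeMVN(in, /*acrossChannels=*/false,
                                        /*normalizeVariance=*/true, 1e-9);
    }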


@ -2,28 +2,30 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/non_max_suppression.hpp"
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
template <typename NmsOperation>
std::shared_ptr<ngraph::Node> makeNms(const ngraph::Output<Node>& boxes,
const ngraph::Output<Node>& scores,
const element::Type& maxBoxesPrec,
const element::Type& thrPrec,
const int32_t& maxOutBoxesPerClass,
const float& iouThr,
const float& scoreThr,
const float& softNmsSigma,
const bool& isCenter,
const bool& sortResDescend,
const ngraph::element::Type& outType) {
std::shared_ptr<ov::Node> makeNms(const ov::Output<Node>& boxes,
const ov::Output<Node>& scores,
const element::Type& maxBoxesPrec,
const element::Type& thrPrec,
const int32_t& maxOutBoxesPerClass,
const float& iouThr,
const float& scoreThr,
const float& softNmsSigma,
const bool& isCenter,
const bool& sortResDescend,
const ov::element::Type& outType) {
auto maxOutBoxesPerClassNode =
makeConstant(maxBoxesPrec, ngraph::Shape{}, std::vector<int32_t>{maxOutBoxesPerClass})->output(0);
auto iouThrNode = makeConstant(thrPrec, ngraph::Shape{}, std::vector<float>{iouThr})->output(0);
auto scoreThrNode = makeConstant(thrPrec, ngraph::Shape{}, std::vector<float>{scoreThr})->output(0);
auto softNmsSigmaNode = makeConstant(thrPrec, ngraph::Shape{}, std::vector<float>{softNmsSigma})->output(0);
makeConstant(maxBoxesPrec, ov::Shape{}, std::vector<int32_t>{maxOutBoxesPerClass})->output(0);
auto iouThrNode = makeConstant(thrPrec, ov::Shape{}, std::vector<float>{iouThr})->output(0);
auto scoreThrNode = makeConstant(thrPrec, ov::Shape{}, std::vector<float>{scoreThr})->output(0);
auto softNmsSigmaNode = makeConstant(thrPrec, ov::Shape{}, std::vector<float>{softNmsSigma})->output(0);
typename NmsOperation::BoxEncodingType boxEncodingType =
isCenter ? NmsOperation::BoxEncodingType::CENTER : NmsOperation::BoxEncodingType::CORNER;
@ -39,43 +41,43 @@ std::shared_ptr<ngraph::Node> makeNms(const ngraph::Output<Node>& boxes,
outType);
}
std::shared_ptr<ngraph::Node> makeNms(const ngraph::Output<Node>& boxes,
const ngraph::Output<Node>& scores,
const element::Type& maxBoxesPrec,
const element::Type& thrPrec,
const int32_t& maxOutBoxesPerClass,
const float& iouThr,
const float& scoreThr,
const float& softNmsSigma,
const bool isCenter,
const bool& sortResDescend,
const ngraph::element::Type& outType,
const NmsVersion nmsVersion) {
std::shared_ptr<ov::Node> makeNms(const ov::Output<Node>& boxes,
const ov::Output<Node>& scores,
const element::Type& maxBoxesPrec,
const element::Type& thrPrec,
const int32_t& maxOutBoxesPerClass,
const float& iouThr,
const float& scoreThr,
const float& softNmsSigma,
const bool isCenter,
const bool& sortResDescend,
const ov::element::Type& outType,
const NmsVersion nmsVersion) {
switch (nmsVersion) {
case NmsVersion::NmsVersion5:
return makeNms<opset5::NonMaxSuppression>(boxes,
scores,
maxBoxesPrec,
thrPrec,
maxOutBoxesPerClass,
iouThr,
scoreThr,
softNmsSigma,
isCenter,
sortResDescend,
outType);
return makeNms<ov::op::v5::NonMaxSuppression>(boxes,
scores,
maxBoxesPrec,
thrPrec,
maxOutBoxesPerClass,
iouThr,
scoreThr,
softNmsSigma,
isCenter,
sortResDescend,
outType);
default:
return makeNms<opset9::NonMaxSuppression>(boxes,
scores,
maxBoxesPrec,
thrPrec,
maxOutBoxesPerClass,
iouThr,
scoreThr,
softNmsSigma,
isCenter,
sortResDescend,
outType);
return makeNms<ov::op::v9::NonMaxSuppression>(boxes,
scores,
maxBoxesPrec,
thrPrec,
maxOutBoxesPerClass,
iouThr,
scoreThr,
softNmsSigma,
isCenter,
sortResDescend,
outType);
}
}
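
A hedged sketch of the dispatching overload (not from the diff; buildNmsExample and all shapes/thresholds are illustrative, and NmsVersion is assumed to be declared alongside the helper in builders.hpp):

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: v5 NonMaxSuppression over 10 corner-encoded boxes,
    // keeping at most 5 boxes per class.
    std::shared_ptr<ov::Node> buildNmsExample() {
        auto boxes = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 10, 4});
        auto scores = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1, 10});
        return ngraph::builder::makeNms(boxes, scores, ov::element::i32, ov::element::f32,
                                        5, 0.5f, 0.05f, 0.f, /*isCenter=*/false,
                                        /*sortResDescend=*/true, ov::element::i32,
                                        ngraph::builder::NmsVersion::NmsVersion5);
    }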


@ -2,19 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "openvino/op/normalize_l2.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeNormalizeL2(const ngraph::Output<Node>& data,
const std::vector<int64_t>& axes,
float eps,
ngraph::op::EpsMode epsMode) {
auto normAxes = std::make_shared<ngraph::opset4::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{axes.size()}, axes);
return std::make_shared<ngraph::opset4::NormalizeL2>(data, normAxes, eps, epsMode);
std::shared_ptr<ov::Node> makeNormalizeL2(const ov::Output<Node>& data,
const std::vector<int64_t>& axes,
float eps,
ov::op::EpsMode epsMode) {
auto normAxes = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{axes.size()}, axes);
return std::make_shared<ov::op::v0::NormalizeL2>(data, normAxes, eps, epsMode);
}
} // namespace builder
} // namespace ngraph
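
A minimal usage sketch (illustrative only; buildNormalizeExample is a hypothetical name):

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: L2 normalization across the channel axis.
    std::shared_ptr<ov::Node> buildNormalizeExample() {
        auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8, 4, 4});
        return ngraph::builder::makeNormalizeL2(data, {1}, 1e-8f, ov::op::EpsMode::ADD);
    }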


@ -2,24 +2,26 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/one_hot.hpp"
#include <memory>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeOneHot(const ngraph::Output<Node>& indices,
const element::Type& depth_type,
const int64_t& depth_val,
const element::Type& set_type,
const float& on_val,
const float& off_val,
const int64_t& axis) {
auto depth_const = std::make_shared<ngraph::op::Constant>(depth_type, ngraph::Shape{ }, depth_val);
auto on_value_const = std::make_shared<ngraph::op::Constant>(set_type, ngraph::Shape{ }, on_val);
auto off_value_const = std::make_shared<ngraph::op::Constant>(set_type, ngraph::Shape{ }, off_val);
return std::make_shared<ngraph::opset5::OneHot>(indices, depth_const, on_value_const, off_value_const, axis);
std::shared_ptr<ov::Node> makeOneHot(const ov::Output<Node>& indices,
const element::Type& depth_type,
const int64_t& depth_val,
const element::Type& set_type,
const float& on_val,
const float& off_val,
const int64_t& axis) {
auto depth_const = std::make_shared<ov::op::v0::Constant>(depth_type, ov::Shape{}, depth_val);
auto on_value_const = std::make_shared<ov::op::v0::Constant>(set_type, ov::Shape{}, on_val);
auto off_value_const = std::make_shared<ov::op::v0::Constant>(set_type, ov::Shape{}, off_val);
return std::make_shared<ov::op::v1::OneHot>(indices, depth_const, on_value_const, off_value_const, axis);
}
} // namespace builder
} // namespace ngraph
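
A brief sketch (not part of the diff; buildOneHotExample and the values are illustrative):

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: depth-10 one-hot over the last axis with f32 on/off values.
    std::shared_ptr<ov::Node> buildOneHotExample() {
        auto indices = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{4});
        return ngraph::builder::makeOneHot(indices, ov::element::i64, 10,
                                           ov::element::f32, 1.f, 0.f, -1);
    }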


@ -2,47 +2,48 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "openvino/op/pad.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makePad(const ngraph::Output<Node>& data,
const std::vector<int64_t>& padsBegin,
const std::vector<int64_t>& padsEnd,
float argPadValue,
ngraph::helpers::PadMode padMode,
const bool allow_negative_pad) {
ngraph::op::PadMode pad_mode;
std::shared_ptr<ov::Node> makePad(const ov::Output<Node>& data,
const std::vector<int64_t>& padsBegin,
const std::vector<int64_t>& padsEnd,
float argPadValue,
ov::test::utils::PadMode padMode,
const bool allow_negative_pad) {
ov::op::PadMode pad_mode;
switch (padMode) {
case ngraph::helpers::PadMode::CONSTANT:
pad_mode = ngraph::op::PadMode::CONSTANT;
case ov::test::utils::PadMode::CONSTANT:
pad_mode = ov::op::PadMode::CONSTANT;
break;
case ngraph::helpers::PadMode::EDGE:
pad_mode = ngraph::op::PadMode::EDGE;
case ov::test::utils::PadMode::EDGE:
pad_mode = ov::op::PadMode::EDGE;
break;
case ngraph::helpers::PadMode::REFLECT:
pad_mode = ngraph::op::PadMode::REFLECT;
case ov::test::utils::PadMode::REFLECT:
pad_mode = ov::op::PadMode::REFLECT;
break;
case ngraph::helpers::PadMode::SYMMETRIC:
pad_mode = ngraph::op::PadMode::SYMMETRIC;
case ov::test::utils::PadMode::SYMMETRIC:
pad_mode = ov::op::PadMode::SYMMETRIC;
break;
default:
throw std::runtime_error("Can't create layer for this pad mode");
}
auto pads_begin = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64,
ngraph::Shape{padsBegin.size()}, padsBegin.data());
auto pads_end = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64,
ngraph::Shape{padsEnd.size()}, padsEnd.data());
auto arg_pad_value = std::make_shared<ngraph::opset3::Constant>(data.get_element_type(), ngraph::Shape{}, &argPadValue);
auto pads_begin =
std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{padsBegin.size()}, padsBegin.data());
auto pads_end = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{padsEnd.size()}, padsEnd.data());
auto arg_pad_value = std::make_shared<ov::op::v0::Constant>(data.get_element_type(), ov::Shape{}, &argPadValue);
if (allow_negative_pad) {
return std::make_shared<ov::op::v12::Pad>(data, pads_begin, pads_end, arg_pad_value, pad_mode);
} else {
return std::make_shared<ngraph::opset3::Pad>(data, pads_begin, pads_end, arg_pad_value, pad_mode);
return std::make_shared<ov::op::v1::Pad>(data, pads_begin, pads_end, arg_pad_value, pad_mode);
}
}
@ -50,21 +51,21 @@ std::shared_ptr<ov::Node> makePad(const ov::Output<Node>& in,
const ov::Output<Node>& beginNode,
const ov::Output<Node>& endNode,
const ov::Output<Node>& valueNode,
ngraph::helpers::PadMode padMode,
ov::test::utils::PadMode padMode,
const bool allow_negative_pad) {
ngraph::op::PadMode pad_mode;
ov::op::PadMode pad_mode;
switch (padMode) {
case ngraph::helpers::PadMode::CONSTANT:
pad_mode = ngraph::op::PadMode::CONSTANT;
case ov::test::utils::PadMode::CONSTANT:
pad_mode = ov::op::PadMode::CONSTANT;
break;
case ngraph::helpers::PadMode::EDGE:
pad_mode = ngraph::op::PadMode::EDGE;
case ov::test::utils::PadMode::EDGE:
pad_mode = ov::op::PadMode::EDGE;
break;
case ngraph::helpers::PadMode::REFLECT:
pad_mode = ngraph::op::PadMode::REFLECT;
case ov::test::utils::PadMode::REFLECT:
pad_mode = ov::op::PadMode::REFLECT;
break;
case ngraph::helpers::PadMode::SYMMETRIC:
pad_mode = ngraph::op::PadMode::SYMMETRIC;
case ov::test::utils::PadMode::SYMMETRIC:
pad_mode = ov::op::PadMode::SYMMETRIC;
break;
default:
throw std::runtime_error("Can't create layer for this pad mode");
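
A hedged sketch of the first overload (illustrative only; buildPadExample is a hypothetical name). With allow_negative_pad = false the helper takes the v1::Pad branch shown above:

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: constant padding of one element on each side of a 2D tensor.
    std::shared_ptr<ov::Node> buildPadExample() {
        auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4, 4});
        return ngraph::builder::makePad(data, {1, 1}, {1, 1}, 0.f,
                                        ov::test::utils::PadMode::CONSTANT,
                                        /*allow_negative_pad=*/false);
    }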


@ -2,52 +2,65 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "openvino/op/avg_pool.hpp"
#include "openvino/op/max_pool.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makePooling(const ngraph::Output<Node> &in,
const std::vector<size_t> &strides,
const std::vector<size_t> &padsBegin,
const std::vector<size_t> &padsEnd,
const std::vector<size_t> &kernel,
const op::RoundingType &roundingType,
const op::PadType &padType,
std::shared_ptr<Node> makePooling(const ov::Output<Node>& in,
const std::vector<size_t>& strides,
const std::vector<size_t>& padsBegin,
const std::vector<size_t>& padsEnd,
const std::vector<size_t>& kernel,
const op::RoundingType& roundingType,
const op::PadType& padType,
bool excludePad,
const ngraph::helpers::PoolingTypes &poolType) {
std::shared_ptr<ngraph::Node> pooling;
const ov::test::utils::PoolingTypes& poolType) {
std::shared_ptr<ov::Node> pooling;
switch (poolType) {
case ngraph::helpers::PoolingTypes::MAX:
pooling = std::make_shared<ngraph::opset3::MaxPool>(in, strides, padsBegin, padsEnd, kernel, roundingType,
padType);
case ov::test::utils::PoolingTypes::MAX:
pooling = std::make_shared<ov::op::v1::MaxPool>(in, strides, padsBegin, padsEnd, kernel, roundingType, padType);
break;
case ngraph::helpers::PoolingTypes::AVG:
pooling = std::make_shared<ngraph::opset3::AvgPool>(in, strides, padsBegin, padsEnd, kernel,
excludePad,
roundingType, padType);
break;
break;
case ov::test::utils::PoolingTypes::AVG:
pooling = std::make_shared<ov::op::v1::AvgPool>(in,
strides,
padsBegin,
padsEnd,
kernel,
excludePad,
roundingType,
padType);
break;
}
return pooling;
}
std::shared_ptr<Node> makeMaxPoolingV8(const ngraph::Output<Node> &in,
const std::vector<size_t> &strides,
const std::vector<size_t> &dilation,
const std::vector<size_t> &padsBegin,
const std::vector<size_t> &padsEnd,
const std::vector<size_t> &kernel,
const op::RoundingType &roundingType,
const op::PadType &padType,
const ov::element::Type &indexElementType,
std::shared_ptr<Node> makeMaxPoolingV8(const ov::Output<Node>& in,
const std::vector<size_t>& strides,
const std::vector<size_t>& dilation,
const std::vector<size_t>& padsBegin,
const std::vector<size_t>& padsEnd,
const std::vector<size_t>& kernel,
const op::RoundingType& roundingType,
const op::PadType& padType,
const ov::element::Type& indexElementType,
const int64_t axis) {
std::shared_ptr<ngraph::Node> pooling = std::make_shared<ngraph::opset8::MaxPool>(in, strides, dilation, padsBegin, padsEnd,
kernel, roundingType, padType,
indexElementType, axis);
std::shared_ptr<ov::Node> pooling = std::make_shared<ov::op::v8::MaxPool>(in,
strides,
dilation,
padsBegin,
padsEnd,
kernel,
roundingType,
padType,
indexElementType,
axis);
return pooling;
}
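
A minimal usage sketch (not from the commit; buildPoolingExample and the shapes are illustrative):

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: 2x2 max pooling with stride 2 and no padding.
    std::shared_ptr<ov::Node> buildPoolingExample() {
        auto in = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 8, 8});
        return ngraph::builder::makePooling(in, {2, 2}, {0, 0}, {0, 0}, {2, 2},
                                            ov::op::RoundingType::FLOOR,
                                            ov::op::PadType::EXPLICIT, /*excludePad=*/false,
                                            ov::test::utils::PoolingTypes::MAX);
    }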


@ -4,10 +4,12 @@
#pragma once
#include <ngraph/ngraph.hpp>
#include <ngraph/ops.hpp>
#include <algorithm>
#include <cassert>
#include <cctype>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <initializer_list>
#include <iterator>
@ -24,9 +26,3 @@
#include <unordered_set>
#include <utility>
#include <vector>
#include <cassert>
#include <cctype>
#include <cmath>
#include <cstdlib>
#include <cstring>


@ -2,33 +2,35 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "openvino/op/proposal.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeProposal(const ngraph::Output<Node> &class_probs,
const ngraph::Output<Node> &class_logits,
std::shared_ptr<Node> makeProposal(const ov::Output<Node>& class_probs,
const ov::Output<Node>& class_logits,
const std::vector<float>& image_info,
const element::Type &type,
const element::Type& type,
size_t base_size,
size_t pre_nms_topn,
size_t post_nms_topn,
float nms_thresh,
size_t feat_stride,
size_t min_size,
const std::vector<float> &ratio,
const std::vector<float> &scale,
const std::vector<float>& ratio,
const std::vector<float>& scale,
bool clip_before_nms,
bool clip_after_nms,
bool normalize,
float box_size_scale,
float box_coordinate_scale,
std::string framework) {
ngraph::op::ProposalAttrs attrs;
ov::op::v4::Proposal::Attributes attrs;
attrs.base_size = base_size;
attrs.pre_nms_topn = pre_nms_topn;
attrs.post_nms_topn = post_nms_topn;
@ -45,9 +47,9 @@ std::shared_ptr<Node> makeProposal(const ngraph::Output<Node> &class_probs,
attrs.framework = framework;
attrs.infer_probs = true;
auto image_shape = makeConstant(ngraph::element::Type_t::f32, {3}, image_info);
auto image_shape = makeConstant(ov::element::Type_t::f32, {3}, image_info);
return std::make_shared<opset4::Proposal>(class_probs, class_logits, image_shape, attrs);
return std::make_shared<ov::op::v4::Proposal>(class_probs, class_logits, image_shape, attrs);
}
} // namespace builder
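
A hedged sketch with a minimal attribute set (not part of the diff; buildProposalExample and every value below are illustrative). image_info is {height, width, scale} and becomes a 3-element f32 Constant inside the helper:

    #include "ngraph_functions/builders.hpp"

    // Hypothetical example: v4 Proposal over externally supplied probs/logits.
    std::shared_ptr<ov::Node> buildProposalExample(const ov::Output<ov::Node>& probs,
                                                   const ov::Output<ov::Node>& logits) {
        return ngraph::builder::makeProposal(probs, logits, {224.f, 224.f, 1.f},
                                             ov::element::f32,
                                             /*base_size=*/16, /*pre_nms_topn=*/100,
                                             /*post_nms_topn=*/50, /*nms_thresh=*/0.7f,
                                             /*feat_stride=*/16, /*min_size=*/1,
                                             /*ratio=*/{1.f}, /*scale=*/{1.f},
                                             false, false, false, 1.f, 1.f, "");
    }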


@ -2,39 +2,47 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <memory>
#include "openvino/op/rdft.hpp"
#include "ngraph_functions/builders.hpp"
#include <memory>
#include <vector>
#include "common_test_utils/test_enums.hpp"
#include "openvino/core/node.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/irdft.hpp"
namespace ngraph {
namespace builder {
namespace {
template <typename ...Args>
std::shared_ptr<ngraph::Node> CallDftCtorWithArgs(const ngraph::helpers::DFTOpType opType, Args&&... args) {
switch (opType) {
case ngraph::helpers::DFTOpType::FORWARD:
return std::make_shared<ngraph::op::v9::RDFT>(std::forward<Args>(args)...);
case ngraph::helpers::DFTOpType::INVERSE:
return std::make_shared<ngraph::op::v9::IRDFT>(std::forward<Args>(args)...);
default:
throw std::logic_error("Unsupported operation type");
}
template <typename... Args>
std::shared_ptr<ov::Node> CallDftCtorWithArgs(const ov::test::utils::DFTOpType opType, Args&&... args) {
switch (opType) {
case ov::test::utils::DFTOpType::FORWARD:
return std::make_shared<ov::op::v9::RDFT>(std::forward<Args>(args)...);
case ov::test::utils::DFTOpType::INVERSE:
return std::make_shared<ov::op::v9::IRDFT>(std::forward<Args>(args)...);
default:
throw std::logic_error("Unsupported operation type");
}
} // namespace
}
} // namespace
std::shared_ptr<ngraph::Node> makeRDFT(const ngraph::Output<Node> &dataNode,
const std::vector<int64_t> &axes,
const std::vector<int64_t> &signalSize,
const ngraph::helpers::DFTOpType opType) {
auto axesNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{axes.size()}, axes)->output(0);
std::shared_ptr<ov::Node> makeRDFT(const ov::Output<Node>& dataNode,
const std::vector<int64_t>& axes,
const std::vector<int64_t>& signalSize,
const ov::test::utils::DFTOpType opType) {
auto axesNode =
std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{axes.size()}, axes)->output(0);
if (!signalSize.empty()) {
auto signalSizeNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{signalSize.size()}, signalSize)->output(0);
auto signalSizeNode =
std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{signalSize.size()}, signalSize)
->output(0);
return CallDftCtorWithArgs(opType, dataNode, axesNode, signalSizeNode);
}
return CallDftCtorWithArgs(opType, dataNode, axesNode);
}
} // namespace builder
} // namespace ngraph
} // namespace builder
} // namespace ngraph
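
A sketch of the migrated RDFT builder in use (shapes are illustrative, not from this PR); an empty signalSize selects the two-input constructor branch above:

#include <memory>

#include "common_test_utils/test_enums.hpp"
#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Node> rdft_example() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{10, 16});
    // Forward transform over axis 1; empty signalSize skips the third input.
    return ngraph::builder::makeRDFT(data, {1}, {}, ov::test::utils::DFTOpType::FORWARD);
}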

View File

@ -2,38 +2,48 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "common_test_utils/test_enums.hpp"
#include "openvino/core/node.hpp"
#include "openvino/op/reduce_l1.hpp"
#include "openvino/op/reduce_l2.hpp"
#include "openvino/op/reduce_logical_and.hpp"
#include "openvino/op/reduce_logical_or.hpp"
#include "openvino/op/reduce_max.hpp"
#include "openvino/op/reduce_mean.hpp"
#include "openvino/op/reduce_min.hpp"
#include "openvino/op/reduce_prod.hpp"
#include "openvino/op/reduce_sum.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeReduce(const ngraph::Output<Node>& data,
const ngraph::Output<Node>& axes,
bool keepDims,
ngraph::helpers::ReductionType reductionType) {
std::shared_ptr<ov::Node> makeReduce(const ov::Output<ov::Node>& data,
const ov::Output<ov::Node>& axes,
bool keepDims,
ov::test::utils::ReductionType reductionType) {
switch (reductionType) {
case helpers::Mean:
return std::make_shared<ngraph::opset4::ReduceMean>(data, axes, keepDims);
case helpers::Max:
return std::make_shared<ngraph::opset4::ReduceMax>(data, axes, keepDims);
case helpers::Min:
return std::make_shared<ngraph::opset4::ReduceMin>(data, axes, keepDims);
case helpers::Prod:
return std::make_shared<ngraph::opset4::ReduceProd>(data, axes, keepDims);
case helpers::Sum:
return std::make_shared<ngraph::opset4::ReduceSum>(data, axes, keepDims);
case helpers::LogicalOr:
return std::make_shared<ngraph::opset4::ReduceLogicalOr>(data, axes, keepDims);
case helpers::LogicalAnd:
return std::make_shared<ngraph::opset4::ReduceLogicalAnd>(data, axes, keepDims);
case helpers::L1:
return std::make_shared<ngraph::opset4::ReduceL1>(data, axes, keepDims);
case helpers::L2:
return std::make_shared<ngraph::opset4::ReduceL2>(data, axes, keepDims);
default:
throw std::runtime_error("Can't create layer for this reduction type");
case ov::test::utils::Mean:
return std::make_shared<ov::op::v1::ReduceMean>(data, axes, keepDims);
case ov::test::utils::Max:
return std::make_shared<ov::op::v1::ReduceMax>(data, axes, keepDims);
case ov::test::utils::Min:
return std::make_shared<ov::op::v1::ReduceMin>(data, axes, keepDims);
case ov::test::utils::Prod:
return std::make_shared<ov::op::v1::ReduceProd>(data, axes, keepDims);
case ov::test::utils::Sum:
return std::make_shared<ov::op::v1::ReduceSum>(data, axes, keepDims);
case ov::test::utils::LogicalOr:
return std::make_shared<ov::op::v1::ReduceLogicalOr>(data, axes, keepDims);
case ov::test::utils::LogicalAnd:
return std::make_shared<ov::op::v1::ReduceLogicalAnd>(data, axes, keepDims);
case ov::test::utils::L1:
return std::make_shared<ov::op::v4::ReduceL1>(data, axes, keepDims);
case ov::test::utils::L2:
return std::make_shared<ov::op::v4::ReduceL2>(data, axes, keepDims);
default:
throw std::runtime_error("Can't create layer for this reduction type");
}
}
} // namespace builder
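
A usage sketch for the migrated makeReduce (illustrative shapes and axes, not taken from this commit):

#include <memory>
#include <vector>

#include "ngraph_functions/builders.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Node> reduce_mean_example() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 8, 8});
    auto axes = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{2}, std::vector<int64_t>{2, 3});
    // keepDims = true keeps the reduced axes as size-1 dims -> output {1, 3, 1, 1}.
    return ngraph::builder::makeReduce(data, axes, true, ov::test::utils::Mean);
}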

View File

@ -2,65 +2,101 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "openvino/op/rnn_cell.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "openvino/op/rnn_sequence.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeRNN(const OutputVector& in,
const std::vector<ov::Shape>& constants,
std::size_t hidden_size,
const std::vector<std::string>& activations,
const std::vector<float>& activations_alpha,
const std::vector<float>& activations_beta,
float clip,
bool make_sequence,
ov::op::RecurrentSequenceDirection direction,
ngraph::helpers::SequenceTestsMode mode) {
const std::vector<ov::Shape>& constants,
std::size_t hidden_size,
const std::vector<std::string>& activations,
const std::vector<float>& activations_alpha,
const std::vector<float>& activations_beta,
float clip,
bool make_sequence,
ov::op::RecurrentSequenceDirection direction,
ov::test::utils::SequenceTestsMode mode) {
std::vector<float> empty;
auto W = ngraph::builder::makeConstant(in[0].get_element_type(), constants[0], empty, true);
auto R = ngraph::builder::makeConstant(in[0].get_element_type(), constants[1], empty, true);
auto B = ngraph::builder::makeConstant(in[0].get_element_type(), constants[2], empty, true);
if (!make_sequence) {
return std::make_shared<ov::op::v0::RNNCell>(in[0], in[1], W, R, B, hidden_size, activations,
activations_alpha, activations_beta, clip);
return std::make_shared<ov::op::v0::RNNCell>(in[0],
in[1],
W,
R,
B,
hidden_size,
activations,
activations_alpha,
activations_beta,
clip);
} else {
if (in.size() > 2 && in[2].get_partial_shape().is_dynamic()) {
return std::make_shared<ov::op::v5::RNNSequence>(in[0], in[1], in[2], W, R, B, hidden_size, direction,
activations, activations_alpha, activations_beta, clip);
return std::make_shared<ov::op::v5::RNNSequence>(in[0],
in[1],
in[2],
W,
R,
B,
hidden_size,
direction,
activations,
activations_alpha,
activations_beta,
clip);
} else {
std::shared_ptr<Node> seq_lengths;
switch (mode) {
case ngraph::helpers::SequenceTestsMode::PURE_SEQ:
case ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST: {
std::vector<float> lengths(in[0].get_partial_shape()[0].get_min_length(), in[0].get_partial_shape()[1].get_min_length());
seq_lengths = ngraph::builder::makeConstant(element::i64, constants[3], lengths, false);
break;
}
case ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST:
case ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST: {
for (size_t i = 0; i <= in[0].get_shape().at(0); ++i) {
std::vector<float> lengths;
seq_lengths = ngraph::builder::makeConstant(element::i64, constants[3], lengths, true,
static_cast<float>(in[0].get_shape()[1]), 0.f);
}
break;
}
case ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM:
case ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM:
case ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM: {
// Seq_lengths should be a Parameter node for these modes
seq_lengths = in.at(2).get_node_shared_ptr();
break;
}
default:
throw std::runtime_error("Incorrect mode for creation of Sequence operation");
case ov::test::utils::SequenceTestsMode::PURE_SEQ:
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST: {
std::vector<float> lengths(in[0].get_partial_shape()[0].get_min_length(),
in[0].get_partial_shape()[1].get_min_length());
seq_lengths = ngraph::builder::makeConstant(element::i64, constants[3], lengths, false);
break;
}
return std::make_shared<ov::op::v5::RNNSequence>(in[0], in[1], seq_lengths, W, R, B, hidden_size, direction,
activations, activations_alpha, activations_beta, clip);
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST:
case ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST: {
for (size_t i = 0; i <= in[0].get_shape().at(0); ++i) {
std::vector<float> lengths;
seq_lengths = ngraph::builder::makeConstant(element::i64,
constants[3],
lengths,
true,
static_cast<float>(in[0].get_shape()[1]),
0.f);
}
break;
}
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM:
case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM:
case ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM: {
// Seq_lengths should be a Parameter node for these modes
seq_lengths = in.at(2).get_node_shared_ptr();
break;
}
default:
throw std::runtime_error("Incorrect mode for creation of Sequence operation");
}
return std::make_shared<ov::op::v5::RNNSequence>(in[0],
in[1],
seq_lengths,
W,
R,
B,
hidden_size,
direction,
activations,
activations_alpha,
activations_beta,
clip);
}
}
}
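
A sketch of the cell branch of the migrated makeRNN (all sizes and the weight-shape layout below are illustrative assumptions; make_sequence = false takes the v0::RNNCell path shown above, so only the first three entries of `constants` are consumed):

#include <memory>
#include <vector>

#include "common_test_utils/test_enums.hpp"
#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Node> rnn_cell_example() {
    const size_t batch = 2, input_size = 8, hidden_size = 16;
    auto X = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{batch, input_size});
    auto H = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{batch, hidden_size});
    // constants[0..2] give the shapes of the randomly generated W, R and B;
    // a fourth entry would describe seq_lengths in the sequence case.
    std::vector<ov::Shape> constants = {{hidden_size, input_size},
                                        {hidden_size, hidden_size},
                                        {hidden_size}};
    return ngraph::builder::makeRNN(ov::OutputVector{X, H}, constants, hidden_size,
                                    {"tanh"}, {}, {}, 0.f,
                                    false,  // make_sequence = false -> v0::RNNCell
                                    ov::op::RecurrentSequenceDirection::FORWARD,
                                    ov::test::utils::SequenceTestsMode::PURE_SEQ);
}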

View File

@ -2,26 +2,29 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <memory>
#include "openvino/op/roi_pooling.hpp"
#include "ngraph_functions/builders.hpp"
#include <memory>
#include <vector>
#include "common_test_utils/test_enums.hpp"
#include "openvino/core/node.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeROIPooling(const Output<Node>& input,
const Output<Node>& coords,
const Shape& output_size,
const float spatial_scale,
const ngraph::helpers::ROIPoolingTypes& roi_pool_type) {
std::shared_ptr<ov::Node> makeROIPooling(const ov::Output<ov::Node>& input,
const ov::Output<ov::Node>& coords,
const ov::Shape& output_size,
const float spatial_scale,
const ov::test::utils::ROIPoolingTypes& roi_pool_type) {
switch (roi_pool_type) {
case helpers::ROIPoolingTypes::ROI_MAX:
return std::make_shared<ngraph::opset3::ROIPooling>(input, coords, output_size, spatial_scale, "max");
case helpers::ROIPoolingTypes::ROI_BILINEAR:
return std::make_shared<ngraph::opset3::ROIPooling>(input, coords, output_size, spatial_scale, "bilinear");
default:
throw std::runtime_error("Incorrect type of ROIPooling operation");
case ov::test::utils::ROIPoolingTypes::ROI_MAX:
return std::make_shared<ov::op::v0::ROIPooling>(input, coords, output_size, spatial_scale, "max");
case ov::test::utils::ROIPoolingTypes::ROI_BILINEAR:
return std::make_shared<ov::op::v0::ROIPooling>(input, coords, output_size, spatial_scale, "bilinear");
default:
throw std::runtime_error("Incorrect type of ROIPooling operation");
}
}
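
A usage sketch for the migrated makeROIPooling (shapes illustrative; each ROI row follows the [batch_id, x1, y1, x2, y2] convention of the v0 op):

#include <memory>

#include "common_test_utils/test_enums.hpp"
#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Node> roi_pooling_example() {
    auto feat = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 8, 8});
    auto rois = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4, 5});
    return ngraph::builder::makeROIPooling(feat, rois, ov::Shape{2, 2}, 1.f,
                                           ov::test::utils::ROIPoolingTypes::ROI_MAX);
}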

View File

@ -2,15 +2,19 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_functions/builders.hpp"
#include "openvino/op/roll.hpp"
#include <memory>
#include "openvino/core/node.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeRoll(const ngraph::Output<Node> &in,
const ngraph::Output<Node> &shift,
const ngraph::Output<Node> &axes) {
return std::make_shared<ngraph::op::v7::Roll>(in, shift, axes);
std::shared_ptr<ov::Node> makeRoll(const ov::Output<Node>& in,
const ov::Output<Node>& shift,
const ov::Output<Node>& axes) {
return std::make_shared<ov::op::v7::Roll>(in, shift, axes);
}
} // namespace builder

View File

@ -2,18 +2,24 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_functions/builders.hpp"
#include "openvino/op/scatter_nd_update.hpp"
#include <memory>
#include "openvino/core/node.hpp"
#include "openvino/core/node_output.hpp"
#include "openvino/op/constant.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeScatterNDUpdate(const ngraph::Output<Node> &in,
const element::Type& indicesType,
const std::vector<size_t>& indicesShape,
const std::vector<size_t>& indices,
const ngraph::Output<Node> &update) {
auto indicesNode = std::make_shared<ngraph::opset1::Constant>(indicesType, indicesShape, indices);
auto dtsNode = std::make_shared<ngraph::opset4::ScatterNDUpdate>(in, indicesNode, update);
std::shared_ptr<ov::Node> makeScatterNDUpdate(const ov::Output<ov::Node>& in,
const ov::element::Type& indicesType,
const std::vector<size_t>& indicesShape,
const std::vector<size_t>& indices,
const ov::Output<Node>& update) {
auto indicesNode = std::make_shared<ov::op::v0::Constant>(indicesType, indicesShape, indices);
auto dtsNode = std::make_shared<ov::op::v3::ScatterNDUpdate>(in, indicesNode, update);
return dtsNode;
}
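
A sketch of the migrated makeScatterNDUpdate (illustrative data; with indicesShape {2, 1} the two rank-1 index tuples select rows of `data` to replace):

#include <memory>

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Node> scatter_nd_update_example() {
    auto data   = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4, 4});
    auto update = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 4});
    // Rows 0 and 2 of `data` are replaced by the two rows of `update`.
    return ngraph::builder::makeScatterNDUpdate(data, ov::element::i32, {2, 1}, {0, 2}, update);
}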

View File

@ -2,21 +2,24 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/scatter_elements_update.hpp"
#include "ngraph_functions/builders.hpp"
#include "openvino/op/constant.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeScatterElementsUpdate(const ngraph::Output<Node> &in,
const element::Type& indicesType,
const std::vector<size_t>& indicesShape,
const std::vector<size_t>& indices,
const ngraph::Output<Node> &update,
int axis) {
auto indicesNode = std::make_shared<ngraph::opset1::Constant>(indicesType, indicesShape, indices);
auto axis_node = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i32, ngraph::Shape{},
std::vector<int>{axis});
auto dtsNode = std::make_shared<ngraph::opset3::ScatterElementsUpdate>(in, indicesNode, update, axis_node);
std::shared_ptr<ov::Node> makeScatterElementsUpdate(const ov::Output<Node>& in,
const element::Type& indicesType,
const std::vector<size_t>& indicesShape,
const std::vector<size_t>& indices,
const ov::Output<Node>& update,
int axis) {
auto indicesNode = std::make_shared<ov::op::v0::Constant>(indicesType, indicesShape, indices);
auto axis_node =
std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i32, ov::Shape{}, std::vector<int>{axis});
auto dtsNode = std::make_shared<ov::op::v3::ScatterElementsUpdate>(in, indicesNode, update, axis_node);
return dtsNode;
}

View File

@ -2,21 +2,24 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/scatter_update.hpp"
#include "ngraph_functions/builders.hpp"
#include "openvino/op/constant.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeScatterUpdate(const ngraph::Output<Node> &in,
const element::Type& indicesType,
const std::vector<size_t> &indicesShape,
const std::vector<int64_t> &indices,
const ngraph::Output<Node> &update,
int64_t axis) {
auto indicesNode = std::make_shared<ngraph::opset1::Constant>(indicesType, indicesShape, indices);
auto axis_node = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{},
std::vector<int64_t>{axis});
auto dtsNode = std::make_shared<ngraph::opset3::ScatterUpdate>(in, indicesNode, update, axis_node);
std::shared_ptr<ov::Node> makeScatterUpdate(const ov::Output<Node>& in,
const element::Type& indicesType,
const std::vector<size_t>& indicesShape,
const std::vector<int64_t>& indices,
const ov::Output<Node>& update,
int64_t axis) {
auto indicesNode = std::make_shared<ov::op::v0::Constant>(indicesType, indicesShape, indices);
auto axis_node =
std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector<int64_t>{axis});
auto dtsNode = std::make_shared<ov::op::v3::ScatterUpdate>(in, indicesNode, update, axis_node);
return dtsNode;
}

View File

@ -2,14 +2,16 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/select.hpp"
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeSelect(std::vector<ngraph::Output<Node>> &in,
const ngraph::op::AutoBroadcastSpec& auto_broadcast) {
auto selectNode = std::make_shared<ngraph::opset1::Select>(in[0], in[1], in[2], auto_broadcast);
std::shared_ptr<ov::Node> makeSelect(std::vector<ov::Output<Node>>& in,
const ov::op::AutoBroadcastSpec& auto_broadcast) {
auto selectNode = std::make_shared<ov::op::v1::Select>(in[0], in[1], in[2], auto_broadcast);
return selectNode;
}

View File

@ -2,18 +2,18 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "openvino/op/shuffle_channels.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<Node> makeShuffleChannels(const ngraph::Output<Node> &in,
int axis,
int group) {
return std::make_shared<ngraph::opset3::ShuffleChannels>(in, axis, group);
std::shared_ptr<Node> makeShuffleChannels(const ov::Output<Node>& in, int axis, int group) {
return std::make_shared<ov::op::v0::ShuffleChannels>(in, axis, group);
}
} // namespace builder

View File

@ -2,23 +2,24 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/space_to_batch.hpp"
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeSpaceToBatch(const ngraph::Output<Node> &in,
const element::Type &type,
const std::vector<int64_t> &blockShape,
const std::vector<int64_t> &padsBegin,
const std::vector<int64_t> &padsEnd) {
ngraph::Shape constShape = {in.get_partial_shape().size()};
auto blockShapeNode = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64, constShape,
blockShape.data());
auto padsBeginNode = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64, constShape,
padsBegin.data());
auto padsEndNode = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64, constShape, padsEnd.data());
auto stbNode = std::make_shared<ngraph::opset2::SpaceToBatch>(in, blockShapeNode, padsBeginNode, padsEndNode);
std::shared_ptr<ov::Node> makeSpaceToBatch(const ov::Output<Node>& in,
const element::Type& type,
const std::vector<int64_t>& blockShape,
const std::vector<int64_t>& padsBegin,
const std::vector<int64_t>& padsEnd) {
ov::Shape constShape = {in.get_partial_shape().size()};
auto blockShapeNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, blockShape.data());
auto padsBeginNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, padsBegin.data());
auto padsEndNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, padsEnd.data());
auto stbNode = std::make_shared<ov::op::v1::SpaceToBatch>(in, blockShapeNode, padsBeginNode, padsEndNode);
return stbNode;
}

View File

@ -7,10 +7,10 @@
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeSpaceToDepth(const ngraph::Output<Node> &in,
ngraph::opset3::SpaceToDepth::SpaceToDepthMode mode,
size_t blockSize) {
auto dtsNode = std::make_shared<ngraph::opset3::SpaceToDepth>(in, mode, blockSize);
std::shared_ptr<ov::Node> makeSpaceToDepth(const ov::Output<Node>& in,
ov::op::v0::SpaceToDepth::SpaceToDepthMode mode,
size_t blockSize) {
auto dtsNode = std::make_shared<ov::op::v0::SpaceToDepth>(in, mode, blockSize);
return dtsNode;
}

View File

@ -2,20 +2,22 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "openvino/op/split.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeSplit(const ngraph::Output<Node> &in,
const element::Type &type,
size_t numSplits,
int64_t axis) {
auto splitAxisOp = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{},
std::vector<int64_t>{axis});
auto splitNode = std::make_shared<ngraph::opset1::Split>(in, splitAxisOp, numSplits);
std::shared_ptr<ov::Node> makeSplit(const ov::Output<Node>& in,
const element::Type& type,
size_t numSplits,
int64_t axis) {
auto splitAxisOp =
std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector<int64_t>{axis});
auto splitNode = std::make_shared<ov::op::v1::Split>(in, splitAxisOp, numSplits);
return splitNode;
}
} // namespace builder
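
A usage sketch for the migrated makeSplit (illustrative shape):

#include <memory>

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Node> split_example() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 6, 10});
    // Splits axis 1 into three equal {1, 2, 10} outputs.
    return ngraph::builder::makeSplit(data, ov::element::f32, 3, 1);
}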

View File

@ -2,26 +2,29 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "openvino/op/squeeze.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "openvino/op/unsqueeze.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeSqueezeUnsqueeze(const ngraph::Output<Node> &in,
const element::Type &type,
const std::vector<int> &squeeze_indices,
ngraph::helpers::SqueezeOpType opType) {
auto constant = std::make_shared<ngraph::opset1::Constant>(type, ngraph::Shape{squeeze_indices.size()}, squeeze_indices);
std::shared_ptr<ov::Node> makeSqueezeUnsqueeze(const ov::Output<Node>& in,
const element::Type& type,
const std::vector<int>& squeeze_indices,
ov::test::utils::SqueezeOpType opType) {
auto constant = std::make_shared<ov::op::v0::Constant>(type, ov::Shape{squeeze_indices.size()}, squeeze_indices);
switch (opType) {
case ngraph::helpers::SqueezeOpType::SQUEEZE:
return std::make_shared<ngraph::opset1::Squeeze>(in, constant);
case ngraph::helpers::SqueezeOpType::UNSQUEEZE:
return std::make_shared<ngraph::opset1::Unsqueeze>(in, constant);
default:
throw std::logic_error("Unsupported operation type");
case ov::test::utils::SqueezeOpType::SQUEEZE:
return std::make_shared<ov::op::v0::Squeeze>(in, constant);
case ov::test::utils::SqueezeOpType::UNSQUEEZE:
return std::make_shared<ov::op::v0::Unsqueeze>(in, constant);
default:
throw std::logic_error("Unsupported operation type");
}
}
} // namespace builder
} // namespace ngraph
} // namespace builder
} // namespace ngraph
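
A sketch of the migrated makeSqueezeUnsqueeze (illustrative shape and axes; `type` is the element type of the axes constant):

#include <memory>

#include "common_test_utils/test_enums.hpp"
#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Node> squeeze_example() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 1, 8});
    // Squeezing axes 0 and 2 yields a {3, 8} output.
    return ngraph::builder::makeSqueezeUnsqueeze(data, ov::element::i64, {0, 2},
                                                 ov::test::utils::SqueezeOpType::SQUEEZE);
}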

View File

@ -2,50 +2,67 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/strided_slice.hpp"
#include "ngraph_functions/builders.hpp"
#include "openvino/op/slice.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ov::Node> makeStridedSlice(const ov::Output<Node> &in,
const std::vector<int64_t> &begin,
const std::vector<int64_t> &end,
const std::vector<int64_t> &stride,
const element::Type &type,
const std::vector<int64_t> &begin_mask,
const std::vector<int64_t> &end_mask,
const std::vector<int64_t> &new_axis_mask,
const std::vector<int64_t> &shrink_mask,
const std::vector<int64_t> &ellipsis_mask) {
std::shared_ptr<ov::Node> makeStridedSlice(const ov::Output<Node>& in,
const std::vector<int64_t>& begin,
const std::vector<int64_t>& end,
const std::vector<int64_t>& stride,
const element::Type& type,
const std::vector<int64_t>& begin_mask,
const std::vector<int64_t>& end_mask,
const std::vector<int64_t>& new_axis_mask,
const std::vector<int64_t>& shrink_mask,
const std::vector<int64_t>& ellipsis_mask) {
ov::Shape constShape = {begin.size()};
auto beginNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, begin.data());
auto endNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, end.data());
auto strideNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, stride.data());
auto ssNode = std::make_shared<ov::op::v1::StridedSlice>(in, beginNode, endNode, strideNode, begin_mask, end_mask,
new_axis_mask, shrink_mask, ellipsis_mask);
auto ssNode = std::make_shared<ov::op::v1::StridedSlice>(in,
beginNode,
endNode,
strideNode,
begin_mask,
end_mask,
new_axis_mask,
shrink_mask,
ellipsis_mask);
return ssNode;
}
std::shared_ptr<ov::Node> makeStridedSlice(const ov::Output<Node> &in,
const ov::Output<Node> &beginNode,
const ov::Output<Node> &endNode,
const ov::Output<Node> &strideNode,
const element::Type &type,
const std::vector<int64_t> &begin_mask,
const std::vector<int64_t> &end_mask,
const std::vector<int64_t> &new_axis_mask,
const std::vector<int64_t> &shrink_mask,
const std::vector<int64_t> &ellipsis_mask) {
auto ssNode = std::make_shared<ov::op::v1::StridedSlice>(in, beginNode, endNode, strideNode, begin_mask, end_mask,
new_axis_mask, shrink_mask, ellipsis_mask);
std::shared_ptr<ov::Node> makeStridedSlice(const ov::Output<Node>& in,
const ov::Output<Node>& beginNode,
const ov::Output<Node>& endNode,
const ov::Output<Node>& strideNode,
const element::Type& type,
const std::vector<int64_t>& begin_mask,
const std::vector<int64_t>& end_mask,
const std::vector<int64_t>& new_axis_mask,
const std::vector<int64_t>& shrink_mask,
const std::vector<int64_t>& ellipsis_mask) {
auto ssNode = std::make_shared<ov::op::v1::StridedSlice>(in,
beginNode,
endNode,
strideNode,
begin_mask,
end_mask,
new_axis_mask,
shrink_mask,
ellipsis_mask);
return ssNode;
}
std::shared_ptr<ov::Node> makeSlice(const ov::Output<Node> &in,
const std::vector<int64_t> &begin,
const std::vector<int64_t> &end,
const std::vector<int64_t> &stride,
const std::vector<int64_t> &axes,
const element::Type &type) {
std::shared_ptr<ov::Node> makeSlice(const ov::Output<Node>& in,
const std::vector<int64_t>& begin,
const std::vector<int64_t>& end,
const std::vector<int64_t>& stride,
const std::vector<int64_t>& axes,
const element::Type& type) {
ov::Shape constShape = {begin.size()};
auto beginNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, begin.data());
auto endNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, end.data());
@ -58,18 +75,18 @@ std::shared_ptr<ov::Node> makeSlice(const ov::Output<Node> &in,
}
}
std::shared_ptr<ov::Node> makeSlice(const ov::Output<Node> &in,
const ov::Output<Node> &begin,
const ov::Output<Node> &end,
const ov::Output<Node> &stride,
const ov::Output<Node> &axes) {
std::shared_ptr<ov::Node> makeSlice(const ov::Output<Node>& in,
const ov::Output<Node>& begin,
const ov::Output<Node>& end,
const ov::Output<Node>& stride,
const ov::Output<Node>& axes) {
return std::make_shared<ov::op::v8::Slice>(in, begin, end, stride, axes);
}
std::shared_ptr<ov::Node> makeSlice(const ov::Output<Node> &in,
const ov::Output<Node> &begin,
const ov::Output<Node> &end,
const ov::Output<Node> &stride) {
std::shared_ptr<ov::Node> makeSlice(const ov::Output<Node>& in,
const ov::Output<Node>& begin,
const ov::Output<Node>& end,
const ov::Output<Node>& stride) {
return std::make_shared<ov::op::v8::Slice>(in, begin, end, stride);
}
} // namespace builder
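
A sketch of the vector overload of the migrated makeSlice (illustrative values):

#include <memory>

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Node> slice_example() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 32, 32});
    // Takes elements 0..15 of axis 2 with step 2 -> output shape {1, 3, 8, 32}.
    return ngraph::builder::makeSlice(data, {0}, {16}, {2}, {2}, ov::element::i64);
}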

File diff suppressed because it is too large

View File

@ -2,15 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/tile.hpp"
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeTile(const ngraph::Output<Node>& in,
const std::vector<int64_t>& repeats) {
auto repeatsNode = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64, std::vector<size_t>{repeats.size()}, repeats);
auto tileNode = std::make_shared<ngraph::opset1::Tile>(in, repeatsNode);
std::shared_ptr<ov::Node> makeTile(const ov::Output<Node>& in, const std::vector<int64_t>& repeats) {
auto repeatsNode =
std::make_shared<ov::op::v0::Constant>(ov::element::i64, std::vector<size_t>{repeats.size()}, repeats);
auto tileNode = std::make_shared<ov::op::v0::Tile>(in, repeatsNode);
return tileNode;
}

View File

@ -2,22 +2,22 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "openvino/op/variadic_split.hpp"
#include <memory>
#include <vector>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeVariadicSplit(const ngraph::Output<Node> &in,
const std::vector<size_t> numSplits,
int64_t axis) {
auto splitAxisOp = std::make_shared<ngraph::opset3::Constant>(element::i64, ngraph::Shape{},
std::vector<int64_t>{axis});
auto numSplit = std::make_shared<ngraph::opset3::Constant>(element::u64, ngraph::Shape{numSplits.size()},
numSplits);
auto VariadicSplitNode = std::make_shared<ngraph::opset3::VariadicSplit>(in, splitAxisOp, numSplit);
return VariadicSplitNode;
}
std::shared_ptr<ov::Node> makeVariadicSplit(const ov::Output<Node>& in,
const std::vector<size_t> numSplits,
int64_t axis) {
auto splitAxisOp = std::make_shared<ov::op::v0::Constant>(element::i64, ov::Shape{}, std::vector<int64_t>{axis});
auto numSplit = std::make_shared<ov::op::v0::Constant>(element::u64, ov::Shape{numSplits.size()}, numSplits);
auto VariadicSplitNode = std::make_shared<ov::op::v1::VariadicSplit>(in, splitAxisOp, numSplit);
return VariadicSplitNode;
}
} // namespace builder
} // namespace ngraph
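
A usage sketch for the migrated makeVariadicSplit (illustrative shape; the split lengths must sum to the size of the split axis):

#include <memory>

#include "ngraph_functions/builders.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Node> variadic_split_example() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 6, 10});
    // Splits axis 1 into chunks of 1, 2 and 3 channels.
    return ngraph::builder::makeVariadicSplit(data, {1, 2, 3}, 1);
}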

View File

@ -4,8 +4,8 @@
#include "subgraph_softmax.hpp"
#include "common_test_utils/data_utils.hpp"
#include <snippets/op/subgraph.hpp>
#include "ngraph_functions/builders.hpp"
#include <snippets/op/subgraph.hpp>
namespace ov {
namespace test {
@ -26,27 +26,27 @@ std::shared_ptr<ov::Model> AddSoftmaxFunction::initOriginal() const {
}
std::shared_ptr<ov::Model> TransposeSoftmaxFunction::initOriginal() const {
const auto transpose0Param = std::make_shared<ngraph::opset1::Parameter>(precision, input_shapes[0]);
const auto transpose0Const = ngraph::builder::makeConstant(ngraph::element::i64, ov::Shape{m_order.size()}, m_order);
const auto transpose0Param = std::make_shared<ov::opset1::Parameter>(precision, input_shapes[0]);
const auto transpose0Const = ngraph::builder::makeConstant(ov::element::i64, ov::Shape{m_order.size()}, m_order);
const auto transpose2 = std::make_shared<ov::op::v1::Transpose>(transpose0Param, transpose0Const);
const auto softMax = std::make_shared<ngraph::opset8::Softmax>(transpose2, m_axis);
const auto softMax = std::make_shared<ov::op::v8::Softmax>(transpose2, m_axis);
return std::make_shared<ov::Model>(ov::NodeVector{softMax}, ov::ParameterVector {transpose0Param}, "softmax_transpose");
}
std::shared_ptr<ov::Model> TransposeSoftmaxEltwiseFunction::initOriginal() const {
const auto transpose0Param = std::make_shared<ngraph::opset1::Parameter>(precision, input_shapes[0]);
const auto transpose0Const = ngraph::builder::makeConstant(ngraph::element::i64, ov::Shape{m_order.size()},
const auto transpose0Param = std::make_shared<ov::opset1::Parameter>(precision, input_shapes[0]);
const auto transpose0Const = ngraph::builder::makeConstant(ov::element::i64, ov::Shape{m_order.size()},
m_order);
const auto transpose2 = std::make_shared<ov::op::v1::Transpose>(transpose0Param, transpose0Const);
const auto mulConst = ngraph::builder::makeConstant(ngraph::element::f32, transpose2->get_shape(),
const auto mulConst = ngraph::builder::makeConstant(ov::element::f32, transpose2->get_shape(),
std::vector<float>{}, true);
const auto mul = std::make_shared<ngraph::opset1::Multiply>(transpose2, mulConst);
const auto softMax = std::make_shared<ngraph::opset8::Softmax>(mul, m_axis);
const auto hswish = std::make_shared<ngraph::opset6::HSwish>(softMax);
const auto mul = std::make_shared<ov::op::v1::Multiply>(transpose2, mulConst);
const auto softMax = std::make_shared<ov::op::v8::Softmax>(mul, m_axis);
const auto hswish = std::make_shared<ov::op::v4::HSwish>(softMax);
return std::make_shared<ov::Model>(ov::NodeVector{hswish}, ov::ParameterVector{transpose0Param},
"softmax_transpose");
}
} // namespace snippets
} // namespace test
} // namespace ov
} // namespace ov

View File

@ -4,23 +4,139 @@
#pragma once
#include <memory>
#include <ostream>
#include "openvino/core/model.hpp"
#include "openvino/op/interpolate.hpp"
#include "openvino/op/matrix_nms.hpp"
#include "openvino/op/util/attr_types.hpp"
#include "openvino/op/util/multiclass_nms_base.hpp"
namespace ov {
namespace test {
namespace utils {
enum class ComparisonTypes { EQUAL, NOT_EQUAL, IS_FINITE, IS_INF, IS_NAN, LESS, LESS_EQUAL, GREATER, GREATER_EQUAL };
// clang-format off
enum ComparisonTypes {
EQUAL,
NOT_EQUAL,
IS_FINITE,
IS_INF,
IS_NAN,
LESS,
LESS_EQUAL,
GREATER,
GREATER_EQUAL
};
enum class ConversionTypes { CONVERT, CONVERT_LIKE };
enum ConversionTypes {
CONVERT,
CONVERT_LIKE
};
enum class ReductionType { Mean, Max, Min, Prod, Sum, LogicalOr, LogicalAnd, L1, L2 };
enum ReductionType {
Mean,
Max,
Min,
Prod,
Sum,
LogicalOr,
LogicalAnd,
L1,
L2
};
enum EltwiseTypes {
ADD,
MULTIPLY,
SUBTRACT,
DIVIDE,
SQUARED_DIFF,
POWER,
FLOOR_MOD,
MOD,
ERF
};
enum SqueezeOpType {
SQUEEZE,
UNSQUEEZE
};
enum class InputLayerType {
CONSTANT,
PARAMETER,
};
enum LogicalTypes {
LOGICAL_AND,
LOGICAL_OR,
LOGICAL_XOR,
LOGICAL_NOT
};
enum ActivationTypes {
None,
Sigmoid,
Tanh,
Relu,
LeakyRelu,
Exp,
Log,
Sign,
Abs,
Gelu,
Clamp,
Negative,
Acos,
Acosh,
Asin,
Asinh,
Atan,
Atanh,
Cos,
Cosh,
Floor,
Sin,
Sinh,
Sqrt,
Tan,
Elu,
Erf,
HardSigmoid,
Selu,
Ceiling,
PReLu,
Mish,
HSwish,
SoftPlus,
Swish,
HSigmoid,
RoundHalfToEven,
RoundHalfAwayFromZero,
GeluErf,
GeluTanh,
SoftSign
};
enum MinMaxOpType {
MINIMUM,
MAXIMUM
};
enum PoolingTypes {
MAX,
AVG
};
enum ROIPoolingTypes {
ROI_MAX,
ROI_BILINEAR
};
using ov::op::PadMode;
enum class SequenceTestsMode {
PURE_SEQ,
PURE_SEQ_RAND_SEQ_LEN_CONST,
@ -31,15 +147,40 @@ enum class SequenceTestsMode {
CONVERT_TO_TI_RAND_SEQ_LEN_PARAM,
};
std::ostream& operator<<(std::ostream& os, const ComparisonTypes type);
enum class DFTOpType {
FORWARD,
INVERSE
};
std::ostream& operator<<(std::ostream& os, const ConversionTypes type);
// clang-format on
std::ostream& operator<<(std::ostream& os, const ReductionType type);
std::ostream& operator<<(std::ostream& os, const ReductionType& m);
std::ostream& operator<<(std::ostream& os, const InputLayerType type);
std::ostream& operator<<(std::ostream& os, ov::test::utils::EltwiseTypes type);
std::ostream& operator<<(std::ostream& os, const SequenceTestsMode type);
std::ostream& operator<<(std::ostream& os, ov::test::utils::SqueezeOpType type);
std::ostream& operator<<(std::ostream& os, ov::test::utils::InputLayerType type);
std::ostream& operator<<(std::ostream& os, ov::test::utils::ComparisonTypes type);
std::ostream& operator<<(std::ostream& os, ov::test::utils::LogicalTypes type);
std::ostream& operator<<(std::ostream& os, ov::op::v4::Interpolate::InterpolateMode type);
std::ostream& operator<<(std::ostream& os, ov::op::v4::Interpolate::CoordinateTransformMode type);
std::ostream& operator<<(std::ostream& os, ov::op::v4::Interpolate::NearestMode type);
std::ostream& operator<<(std::ostream& os, ov::op::v4::Interpolate::ShapeCalcMode type);
std::ostream& operator<<(std::ostream& os, SequenceTestsMode type);
std::ostream& operator<<(std::ostream& os, ov::op::util::MulticlassNmsBase::SortResultType type);
std::ostream& operator<<(std::ostream& os, ov::op::v8::MatrixNms::SortResultType type);
std::ostream& operator<<(std::ostream& os, ov::op::v8::MatrixNms::DecayFunction type);
} // namespace utils
} // namespace test

View File

@ -8,95 +8,94 @@ namespace ov {
namespace test {
namespace utils {
std::ostream& operator<<(std::ostream& os, const ComparisonTypes type) {
switch (type) {
case ComparisonTypes::EQUAL:
os << "Equal";
break;
case ComparisonTypes::NOT_EQUAL:
os << "NotEqual";
break;
case ComparisonTypes::GREATER:
os << "Greater";
break;
case ComparisonTypes::GREATER_EQUAL:
os << "GreaterEqual";
break;
case ComparisonTypes::IS_FINITE:
os << "IsFinite";
break;
case ComparisonTypes::IS_INF:
os << "IsInf";
break;
case ComparisonTypes::IS_NAN:
os << "IsNaN";
break;
case ComparisonTypes::LESS:
os << "Less";
break;
case ComparisonTypes::LESS_EQUAL:
os << "LessEqual";
break;
default:
throw std::runtime_error("NOT_SUPPORTED_COMPARISON_TYPE");
}
return os;
}
std::ostream& operator<<(std::ostream& os, const ConversionTypes type) {
switch (type) {
case ConversionTypes::CONVERT:
os << "Convert";
break;
case ConversionTypes::CONVERT_LIKE:
os << "ConvertLike";
break;
default:
throw std::runtime_error("NOT_SUPPORTED_CONVERSION_TYPE");
}
return os;
}
std::ostream& operator<<(std::ostream& os, const ReductionType type) {
switch (type) {
case ReductionType::Mean:
std::ostream& operator<<(std::ostream& os, const ov::test::utils::ReductionType& m) {
switch (m) {
case ov::test::utils::Mean:
os << "Mean";
break;
case ReductionType::Max:
case ov::test::utils::Max:
os << "Max";
break;
case ReductionType::Min:
case ov::test::utils::Min:
os << "Min";
break;
case ReductionType::Prod:
case ov::test::utils::Prod:
os << "Prod";
break;
case ReductionType::Sum:
case ov::test::utils::Sum:
os << "Sum";
break;
case ReductionType::LogicalOr:
case ov::test::utils::LogicalOr:
os << "LogicalOr";
break;
case ReductionType::LogicalAnd:
case ov::test::utils::LogicalAnd:
os << "LogicalAnd";
break;
case ReductionType::L1:
case ov::test::utils::L1:
os << "ReduceL1";
break;
case ReductionType::L2:
case ov::test::utils::L2:
os << "ReduceL2";
break;
default:
throw std::runtime_error("NOT_SUPPORTED_REDUCTION_TYPE");
}
return os;
}
std::ostream& operator<<(std::ostream& os, const InputLayerType type) {
std::ostream& operator<<(std::ostream& os, const ov::test::utils::EltwiseTypes type) {
switch (type) {
case InputLayerType::CONSTANT:
case ov::test::utils::EltwiseTypes::SUBTRACT:
os << "Sub";
break;
case ov::test::utils::EltwiseTypes::MULTIPLY:
os << "Prod";
break;
case ov::test::utils::EltwiseTypes::ADD:
os << "Sum";
break;
case ov::test::utils::EltwiseTypes::DIVIDE:
os << "Div";
break;
case ov::test::utils::EltwiseTypes::SQUARED_DIFF:
os << "SqDiff";
break;
case ov::test::utils::EltwiseTypes::POWER:
os << "Pow";
break;
case ov::test::utils::EltwiseTypes::FLOOR_MOD:
os << "FloorMod";
break;
case ov::test::utils::EltwiseTypes::MOD:
os << "Mod";
break;
case ov::test::utils::EltwiseTypes::ERF:
os << "Erf";
break;
default:
throw std::runtime_error("NOT_SUPPORTED_OP_TYPE");
}
return os;
}
std::ostream& operator<<(std::ostream& os, ov::test::utils::SqueezeOpType type) {
switch (type) {
case ov::test::utils::SqueezeOpType::SQUEEZE:
os << "Squeeze";
break;
case ov::test::utils::SqueezeOpType::UNSQUEEZE:
os << "Unsqueeze";
break;
default:
throw std::runtime_error("NOT_SUPPORTED_OP_TYPE");
}
return os;
}
std::ostream& operator<<(std::ostream& os, ov::test::utils::InputLayerType type) {
switch (type) {
case ov::test::utils::InputLayerType::CONSTANT:
os << "CONSTANT";
break;
case InputLayerType::PARAMETER:
case ov::test::utils::InputLayerType::PARAMETER:
os << "PARAMETER";
break;
default:
@ -105,7 +104,148 @@ std::ostream& operator<<(std::ostream& os, const InputLayerType type) {
return os;
}
std::ostream& operator<<(std::ostream& os, const SequenceTestsMode type) {
std::ostream& operator<<(std::ostream& os, ov::test::utils::ComparisonTypes type) {
switch (type) {
case ov::test::utils::ComparisonTypes::EQUAL:
os << "Equal";
break;
case ov::test::utils::ComparisonTypes::NOT_EQUAL:
os << "NotEqual";
break;
case ov::test::utils::ComparisonTypes::GREATER:
os << "Greater";
break;
case ov::test::utils::ComparisonTypes::GREATER_EQUAL:
os << "GreaterEqual";
break;
case ov::test::utils::ComparisonTypes::IS_FINITE:
os << "IsFinite";
break;
case ov::test::utils::ComparisonTypes::IS_INF:
os << "IsInf";
break;
case ov::test::utils::ComparisonTypes::IS_NAN:
os << "IsNaN";
break;
case ov::test::utils::ComparisonTypes::LESS:
os << "Less";
break;
case ov::test::utils::ComparisonTypes::LESS_EQUAL:
os << "LessEqual";
break;
default:
throw std::runtime_error("NOT_SUPPORTED_OP_TYPE");
}
return os;
}
std::ostream& operator<<(std::ostream& os, ov::test::utils::LogicalTypes type) {
switch (type) {
case ov::test::utils::LogicalTypes::LOGICAL_AND:
os << "LogicalAnd";
break;
case ov::test::utils::LogicalTypes::LOGICAL_OR:
os << "LogicalOr";
break;
case ov::test::utils::LogicalTypes::LOGICAL_NOT:
os << "LogicalNot";
break;
case ov::test::utils::LogicalTypes::LOGICAL_XOR:
os << "LogicalXor";
break;
default:
throw std::runtime_error("NOT_SUPPORTED_OP_TYPE");
}
return os;
}
std::ostream& operator<<(std::ostream& os, ov::op::v11::Interpolate::InterpolateMode type) {
switch (type) {
case ov::op::v11::Interpolate::InterpolateMode::CUBIC:
os << "cubic";
break;
case ov::op::v11::Interpolate::InterpolateMode::LINEAR:
os << "linear";
break;
case ov::op::v11::Interpolate::InterpolateMode::LINEAR_ONNX:
os << "linear_onnx";
break;
case ov::op::v11::Interpolate::InterpolateMode::NEAREST:
os << "nearest";
break;
case ov::op::v11::Interpolate::InterpolateMode::BILINEAR_PILLOW:
os << "bilinear_pillow";
break;
case ov::op::v11::Interpolate::InterpolateMode::BICUBIC_PILLOW:
os << "bicubic_pillow";
break;
default:
throw std::runtime_error("NOT_SUPPORTED_OP_TYPE");
}
return os;
}
std::ostream& operator<<(std::ostream& os, ov::op::v11::Interpolate::CoordinateTransformMode type) {
switch (type) {
case ov::op::v11::Interpolate::CoordinateTransformMode::ALIGN_CORNERS:
os << "align_corners";
break;
case ov::op::v11::Interpolate::CoordinateTransformMode::ASYMMETRIC:
os << "asymmetric";
break;
case ov::op::v11::Interpolate::CoordinateTransformMode::HALF_PIXEL:
os << "half_pixel";
break;
case ov::op::v11::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL:
os << "pytorch_half_pixel";
break;
case ov::op::v11::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN:
os << "tf_half_pixel_for_nn";
break;
default:
throw std::runtime_error("NOT_SUPPORTED_OP_TYPE");
}
return os;
}
std::ostream& operator<<(std::ostream& os, ov::op::v11::Interpolate::NearestMode type) {
switch (type) {
case ov::op::v11::Interpolate::NearestMode::CEIL:
os << "ceil";
break;
case ov::op::v11::Interpolate::NearestMode::ROUND_PREFER_CEIL:
os << "round_prefer_ceil";
break;
case ov::op::v11::Interpolate::NearestMode::FLOOR:
os << "floor";
break;
case ov::op::v11::Interpolate::NearestMode::ROUND_PREFER_FLOOR:
os << "round_prefer_floor";
break;
case ov::op::v11::Interpolate::NearestMode::SIMPLE:
os << "simple";
break;
default:
throw std::runtime_error("NOT_SUPPORTED_OP_TYPE");
}
return os;
}
std::ostream& operator<<(std::ostream& os, ov::op::v11::Interpolate::ShapeCalcMode type) {
switch (type) {
case ov::op::v11::Interpolate::ShapeCalcMode::SCALES:
os << "scales";
break;
case ov::op::v11::Interpolate::ShapeCalcMode::SIZES:
os << "sizes";
break;
default:
throw std::runtime_error("NOT_SUPPORTED_OP_TYPE");
}
return os;
}
std::ostream& operator<<(std::ostream& os, SequenceTestsMode type) {
switch (type) {
case SequenceTestsMode::PURE_SEQ:
os << "PURE_SEQ";
@ -133,6 +273,55 @@ std::ostream& operator<<(std::ostream& os, const SequenceTestsMode type) {
}
return os;
}
std::ostream& operator<<(std::ostream& os, op::v8::MatrixNms::SortResultType type) {
switch (type) {
case op::v8::MatrixNms::SortResultType::CLASSID:
os << "CLASSID";
break;
case op::v8::MatrixNms::SortResultType::SCORE:
os << "SCORE";
break;
case op::v8::MatrixNms::SortResultType::NONE:
os << "NONE";
break;
default:
throw std::runtime_error("NOT_SUPPORTED_TYPE");
}
return os;
}
std::ostream& operator<<(std::ostream& os, ov::op::util::MulticlassNmsBase::SortResultType type) {
switch (type) {
case op::util::MulticlassNmsBase::SortResultType::CLASSID:
os << "CLASSID";
break;
case op::util::MulticlassNmsBase::SortResultType::SCORE:
os << "SCORE";
break;
case op::util::MulticlassNmsBase::SortResultType::NONE:
os << "NONE";
break;
default:
throw std::runtime_error("NOT_SUPPORTED_TYPE");
}
return os;
}
std::ostream& operator<<(std::ostream& os, op::v8::MatrixNms::DecayFunction type) {
switch (type) {
case op::v8::MatrixNms::DecayFunction::GAUSSIAN:
os << "GAUSSIAN";
break;
case op::v8::MatrixNms::DecayFunction::LINEAR:
os << "LINEAR";
break;
default:
throw std::runtime_error("NOT_SUPPORTED_TYPE");
}
return os;
}
} // namespace utils
} // namespace test
} // namespace ov
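
These stream operators are what test suites use when composing parameterized test-case names from the enum values; a small sketch (the function name test_name_example is a hypothetical illustration):

#include <sstream>
#include <string>

#include "common_test_utils/test_enums.hpp"

// Sketch: the operator<< overloads above turn enum values into readable
// fragments of a gtest case name.
std::string test_name_example() {
    std::ostringstream name;
    name << "reduce=" << ov::test::utils::Mean
         << "_mode=" << ov::test::utils::SequenceTestsMode::PURE_SEQ;
    return name.str();  // "reduce=Mean_mode=PURE_SEQ"
}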