Subgraph builders refactor (#21373)

* Refactor subgraph builders

* Refactor make_conv_pool_relu builder

* Refactor make_conv_pool_relu_no_reshapes builder

* Refactor make_conv_pool2_relu2 and make_conv_pool_relu_non_zero

* Refactor make_split_conv_concat builders

* Refactor make_kso_function

* Refactor make_split_multi_conv_concat

* Refactor make_ti_with_lstm_cell

* Refactor make_single_conv

* Refactor make_detection_output

* Refactor make_multi_single_conv

* Refactor make_2_input_subtract

* Refactor make_nested_branch_conv_concat

* Refactor make_nested_split_conv_concat

* Refactor make_conv_bias

* Refactor make_read_concat_split_assign

* Refactor make_matmul_bias

* Refactor make_convert_transpose

* Refactor make_multiple_input_output_double_concat
Oleg Pipikin 2023-12-13 10:46:31 +01:00 committed by GitHub
parent 47939684df
commit 3fb60dc41c
122 changed files with 2538 additions and 1378 deletions
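Nearly every hunk below follows the same pattern: the monolithic "ov_models/subgraph_builders.hpp" include is replaced by a dedicated per-builder header under "common_test_utils/subgraph_builders/", and the camelCase ngraph::builder::subgraph::make* call is replaced by the snake_case ov::test::utils::make_* helper. A minimal before/after sketch of that pattern (shape and element type copied from the conv_pool_relu hunks below, not a new API):

// Before: camelCase builder from the monolithic legacy header
#include "ov_models/subgraph_builders.hpp"
auto model = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, 227, 227}, ov::element::Type_t::f32);

// After: per-builder header and snake_case helper in ov::test::utils
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
auto model = ov::test::utils::make_conv_pool_relu({1, 3, 227, 227}, ov::element::Type_t::f32);

The new helpers still return std::shared_ptr<ov::Model>, so call sites that serialize, compile, or wrap the model (e.g. in PrePostProcessor or CNNNetwork) stay otherwise unchanged.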

View File

@ -6,6 +6,8 @@
#include <common_test_utils/file_utils.hpp>
#include "common_test_utils/subgraph_builders/conv_pool_relu_no_reshapes.hpp"
namespace TestDataHelpers {
const std::string model_bin_name = "test_model.bin";
@ -15,7 +17,7 @@ const std::string model_exported_name = "test_exported_model.blob";
void generate_test_model() {
ov::pass::Manager manager;
manager.register_pass<ov::pass::Serialize>(model_xml_name, model_bin_name);
auto function = ngraph::builder::subgraph::makeConvPoolReluNoReshapes({1, 3, 227, 227});
auto function = ov::test::utils::make_conv_pool_relu_no_reshapes({1, 3, 227, 227});
manager.run_passes(function);
}

View File

@ -11,6 +11,7 @@
#include <string>
#include "common_test_utils/ov_test_utils.hpp"
#include "common_test_utils/subgraph_builders/detection_output.hpp"
#include "openvino/core/dimension_tracker.hpp"
#include "openvino/core/model.hpp"
#include "openvino/opsets/opset1.hpp"
@ -19,6 +20,7 @@
#include "transformations/common_optimizations/divide_fusion.hpp"
#include "transformations/init_node_info.hpp"
#include "transformations/utils/utils.hpp"
using namespace ov;
using namespace testing;
@ -307,7 +309,7 @@ TEST(TransformationTests, AutoBatch_FindBatch_NegativeTracking) {
}
TEST(TransformationTests, AutoBatch_FindBatch_AutoBatch_LabelPropagation_DO_detachment) {
auto f = ngraph::builder::subgraph::makeDetectionOutput();
auto f = ov::test::utils::make_detection_output();
auto& data = f->get_parameters()[0];
ov::pass::Manager m;

View File

@ -14,6 +14,7 @@
#include <vector>
#include "common_test_utils/file_utils.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
#include "ie_plugin_config.hpp"
#include "openvino/core/any.hpp"
#include "openvino/core/except.hpp"
@ -32,7 +33,6 @@
#include "openvino/runtime/iplugin.hpp"
#include "openvino/runtime/iremote_context.hpp"
#include "openvino/runtime/properties.hpp"
#include "ov_models/subgraph_builders.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_iasync_infer_request.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp"
@ -220,7 +220,7 @@ public:
ov::pass::Manager manager;
manager.register_pass<ov::pass::Serialize>(modelName, weightsName);
manager.run_passes(ngraph::builder::subgraph::makeConvPoolRelu({1, 3, 227, 227}, ov::element::Type_t::f32));
manager.run_passes(ov::test::utils::make_conv_pool_relu({1, 3, 227, 227}, ov::element::Type_t::f32));
}
void TearDown() override {

View File

@ -12,6 +12,9 @@
#include "common_test_utils/test_common.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ov_models/subgraph_builders.hpp"
#include "common_test_utils/subgraph_builders/single_conv.hpp"
#include "common_test_utils/subgraph_builders/detection_output.hpp"
#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
using namespace ::testing;
using namespace InferenceEngine;
@ -29,7 +32,7 @@ class AutoBatching_Test : public BehaviorTestsUtils::IEPluginTestBase,
std::tie(target_device, use_get_blob, num_streams, num_requests, num_batch) = this->GetParam();
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
fn_ptrs = {ngraph::builder::subgraph::makeSingleConv(), ngraph::builder::subgraph::makeMultiSingleConv()};
fn_ptrs = {ov::test::utils::make_single_conv(), ov::test::utils::make_multi_single_conv()};
};
public:
@ -144,7 +147,7 @@ public:
std::tie(target_device, use_get_blob, num_streams, num_requests, num_batch) = this->GetParam();
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
fn_ptrs = {ngraph::builder::subgraph::makeDetectionOutput(), ngraph::builder::subgraph::makeDetectionOutput()};
fn_ptrs = {ov::test::utils::make_detection_output(), ov::test::utils::make_detection_output()};
};
static std::string getTestCaseName(const testing::TestParamInfo<AutoBatchTwoNetsParams>& obj) {

View File

@ -13,6 +13,7 @@
#include "openvino/runtime/threading/immediate_executor.hpp"
#include "transformations/utils/utils.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp"
#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
using ::testing::_;
using ::testing::AnyNumber;
@ -115,7 +116,7 @@ public:
std::tie(m_batch_size, m_element_type, m_infer_interval) = this->GetParam();
m_terminate = false;
std::vector<size_t> inputShape = {1, 3, 24, 24};
m_model = ngraph::builder::subgraph::makeMultiSingleConv(inputShape, m_element_type);
m_model = ov::test::utils::make_multi_single_conv(inputShape, m_element_type);
prepare_input(m_model, m_batch_size);

View File

@ -10,6 +10,7 @@
#include "openvino/core/dimension_tracker.hpp"
#include "openvino/runtime/threading/immediate_executor.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp"
#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
using ::testing::_;
using ::testing::AnyNumber;
@ -83,7 +84,7 @@ public:
void SetUp() override {
std::tie(m_batch_size, m_infer_request_num) = this->GetParam();
m_model = ngraph::builder::subgraph::makeMultiSingleConv();
m_model = ov::test::utils::make_multi_single_conv();
m_core = std::shared_ptr<NiceMock<ov::MockICore>>(new NiceMock<ov::MockICore>());
m_auto_batch_plugin =

View File

@ -9,6 +9,7 @@
#include "ov_models/subgraph_builders.hpp"
#include "openvino/core/dimension_tracker.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp"
#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
using ::testing::_;
using ::testing::AnyNumber;
@ -67,7 +68,7 @@ public:
void SetUp() override {
std::tie(m_properity_name, m_throw_exception) = this->GetParam();
m_model = ngraph::builder::subgraph::makeMultiSingleConv();
m_model = ov::test::utils::make_multi_single_conv();
m_core = std::shared_ptr<NiceMock<ov::MockICore>>(new NiceMock<ov::MockICore>());
m_plugin =
std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>>(new NiceMock<MockAutoBatchInferencePlugin>());

View File

@ -9,6 +9,7 @@
#include "ov_models/subgraph_builders.hpp"
#include "openvino/core/dimension_tracker.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp"
#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
using ::testing::_;
using ::testing::AnyNumber;
@ -48,7 +49,7 @@ public:
}
void SetUp() override {
m_model = ngraph::builder::subgraph::makeMultiSingleConv();
m_model = ov::test::utils::make_multi_single_conv();
m_core = std::shared_ptr<NiceMock<ov::MockICore>>(new NiceMock<ov::MockICore>());
m_plugin =
std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>>(new NiceMock<MockAutoBatchInferencePlugin>());

View File

@ -9,6 +9,7 @@
#include "ov_models/subgraph_builders.hpp"
#include "openvino/core/dimension_tracker.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp"
#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
using ::testing::_;
using ::testing::AnyNumber;
@ -70,7 +71,7 @@ public:
void SetUp() override {
std::tie(m_properities, m_throw_exception) = this->GetParam();
m_model = ngraph::builder::subgraph::makeMultiSingleConv();
m_model = ov::test::utils::make_multi_single_conv();
m_core = std::shared_ptr<NiceMock<ov::MockICore>>(new NiceMock<ov::MockICore>());
m_plugin =
std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>>(new NiceMock<MockAutoBatchInferencePlugin>());

View File

@ -9,6 +9,8 @@
#include "ov_models/subgraph_builders.hpp"
#include "openvino/core/dimension_tracker.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu_non_zero.hpp"
#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
using ::testing::_;
using ::testing::AnyNumber;
@ -118,17 +120,17 @@ public:
};
TEST_P(PluginCompileModelTest, PluginCompileModelTestCase) {
m_model = ngraph::builder::subgraph::makeMultiSingleConv();
m_model = ov::test::utils::make_multi_single_conv();
ASSERT_NO_THROW(m_plugin->compile_model(m_model, m_plugin_properities));
}
TEST_P(PluginCompileModelTest, PluginCompileModelWithRemoteContextTestCase) {
m_model = ngraph::builder::subgraph::makeMultiSingleConv();
m_model = ov::test::utils::make_multi_single_conv();
ASSERT_NO_THROW(m_plugin->compile_model(m_model, m_plugin_properities, m_remote_context));
}
TEST_P(PluginCompileModelTest, PluginCompileModelBatchedModelTestCase) {
m_model = ngraph::builder::subgraph::makeConvPoolReluNonZero({1, 1, 32, 32});
m_model = ov::test::utils::make_conv_pool_relu_non_zero({1, 1, 32, 32});
auto batch = ov::Dimension(5);
ov::DimensionTracker::set_label(batch, 11);
auto p_shape = ov::PartialShape{batch, 1, 32, 32};
@ -137,7 +139,7 @@ TEST_P(PluginCompileModelTest, PluginCompileModelBatchedModelTestCase) {
}
TEST_P(PluginCompileModelTest, PluginCompileModelBatchedModelWithRemoteContextTestCase) {
m_model = ngraph::builder::subgraph::makeConvPoolReluNonZero({1, 1, 32, 32});
m_model = ov::test::utils::make_conv_pool_relu_non_zero({1, 1, 32, 32});
auto batch = ov::Dimension(5);
ov::DimensionTracker::set_label(batch, 11);
auto p_shape = ov::PartialShape{batch, 1, 32, 32};

View File

@ -8,6 +8,7 @@
#include "mock_common.hpp"
#include "ov_models/subgraph_builders.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp"
#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
using ::testing::_;
using ::testing::AnyNumber;
@ -60,7 +61,7 @@ public:
void SetUp() override {
std::tie(m_properties, m_throw_exception) = this->GetParam();
m_model = ngraph::builder::subgraph::makeMultiSingleConv();
m_model = ov::test::utils::make_multi_single_conv();
m_core = std::shared_ptr<NiceMock<ov::MockICore>>(new NiceMock<ov::MockICore>());
m_plugin =
std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>>(new NiceMock<MockAutoBatchInferencePlugin>());

View File

@ -13,6 +13,7 @@
#include "openvino/runtime/threading/immediate_executor.hpp"
#include "transformations/utils/utils.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp"
#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
using ::testing::_;
using ::testing::AnyNumber;
@ -97,7 +98,7 @@ public:
void SetUp() override {
std::tie(m_batch_size, m_element_type) = this->GetParam();
std::vector<size_t> inputShape = {1, 3, 24, 24};
m_model = ngraph::builder::subgraph::makeMultiSingleConv(inputShape, m_element_type);
m_model = ov::test::utils::make_multi_single_conv(inputShape, m_element_type);
m_core = std::shared_ptr<NiceMock<ov::MockICore>>(new NiceMock<ov::MockICore>());
m_auto_batch_plugin =

View File

@ -7,7 +7,7 @@
#include "openvino/runtime/core.hpp"
#include "openvino/runtime/compiled_model.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "ov_models/subgraph_builders.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
class OVClassConfigTestCPU : public ::testing::Test {
public:
@ -16,6 +16,6 @@ public:
void SetUp() override {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
model = ngraph::builder::subgraph::makeConvPoolRelu();
model = ov::test::utils::make_conv_pool_relu();
}
};

View File

@ -21,6 +21,8 @@
#include <transformations/opset_conversions/convert_opset2_to_opset1.hpp>
#include <transformations/opset_conversions/convert_opset3_to_opset2.hpp>
#include "common_test_utils/subgraph_builders/conv_bias.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
#include "ov_models/subgraph_builders.hpp"
#include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp"
@ -60,7 +62,7 @@ void transformNetwork(InferenceEngine::CNNNetwork& clonedNetwork, bool keep_cons
TEST(KeepConstantInputsTests, ConvertConvolutionPoolReluNetworkWithTrue) {
std::shared_ptr<ov::Model> f_ptr;
f_ptr = ngraph::builder::subgraph::makeConvPoolRelu();
f_ptr = ov::test::utils::make_conv_pool_relu();
InferenceEngine::CNNNetwork network(f_ptr), originalNetwork = network;
transformNetwork(originalNetwork, true);
ASSERT_EQ(numberOfInputsForLayerInCNNNetwork(originalNetwork, "Convolution"), 2);
@ -68,7 +70,7 @@ TEST(KeepConstantInputsTests, ConvertConvolutionPoolReluNetworkWithTrue) {
TEST(KeepConstantInputsTests, ConvertConvolutionPoolReluNetworkWithFalse) {
std::shared_ptr<ov::Model> f_ptr;
f_ptr = ngraph::builder::subgraph::makeConvPoolRelu();
f_ptr = ov::test::utils::make_conv_pool_relu();
InferenceEngine::CNNNetwork network(f_ptr), originalNetwork = network;
transformNetwork(originalNetwork, false);
ASSERT_EQ(numberOfInputsForLayerInCNNNetwork(originalNetwork, "Convolution"), 1);
@ -76,7 +78,7 @@ TEST(KeepConstantInputsTests, ConvertConvolutionPoolReluNetworkWithFalse) {
TEST(KeepConstantInputsTests, ConvertConvolutionBiasNetworkWithTrue) {
std::shared_ptr<ov::Model> f_ptr;
f_ptr = ngraph::builder::subgraph::makeConvBias();
f_ptr = ov::test::utils::make_conv_bias();
InferenceEngine::CNNNetwork network(f_ptr), originalNetwork = network;
transformNetwork(originalNetwork, true);
ASSERT_EQ(numberOfInputsForLayerInCNNNetwork(originalNetwork, "Convolution"), 3);
@ -84,7 +86,7 @@ TEST(KeepConstantInputsTests, ConvertConvolutionBiasNetworkWithTrue) {
TEST(KeepConstantInputsTests, ConvertConvolutionBiasNetworkWithFalse) {
std::shared_ptr<ov::Model> f_ptr;
f_ptr = ngraph::builder::subgraph::makeConvBias();
f_ptr = ov::test::utils::make_conv_bias();
InferenceEngine::CNNNetwork network(f_ptr), originalNetwork = network;
transformNetwork(originalNetwork, false);
ASSERT_EQ(numberOfInputsForLayerInCNNNetwork(originalNetwork, "Convolution"), 1);

View File

@ -6,6 +6,8 @@
#include <vector>
#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"
using namespace ov::test::behavior;
namespace {
@ -14,7 +16,7 @@ const std::vector<ov::AnyMap> configs = {{}};
INSTANTIATE_TEST_SUITE_P(
smoke_BehaviorTests,
OVInferRequestDynamicTests,
::testing::Combine(::testing::Values(ngraph::builder::subgraph::makeSplitConvConcat()),
::testing::Combine(::testing::Values(ov::test::utils::make_split_conv_concat()),
::testing::Values(std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>{
{{1, 4, 20, 20}, {1, 10, 18, 18}},
{{2, 4, 20, 20}, {2, 10, 18, 18}}}),

View File

@ -10,6 +10,8 @@
#include "ov_models/subgraph_builders.hpp"
#include "transformations/utils/utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/read_concat_split_assign.hpp"
namespace {
typedef std::tuple<
@ -88,7 +90,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPU_BehaviorTests, InferRequestIOPrecision,
TEST(TensorTest, smoke_canSetShapeForPreallocatedTensor) {
auto core = ov::Core();
using namespace ov::preprocess;
auto p = PrePostProcessor(ngraph::builder::subgraph::makeSplitMultiConvConcat());
auto p = PrePostProcessor(ov::test::utils::make_split_multi_conv_concat());
p.input().tensor().set_element_type(ov::element::i8);
p.input().preprocess().convert_element_type(ov::element::f32);
@ -135,7 +137,7 @@ TEST(TensorTest, smoke_canSetScalarTensor) {
TEST(TensorTest, smoke_canSetTensorForDynamicInput) {
auto core = ov::Core();
using namespace ov::preprocess;
auto p = PrePostProcessor(ngraph::builder::subgraph::makeSplitMultiConvConcat());
auto p = PrePostProcessor(ov::test::utils::make_split_multi_conv_concat());
p.input().tensor().set_element_type(ov::element::i8);
p.input().preprocess().convert_element_type(ov::element::f32);
@ -172,7 +174,7 @@ TEST(TensorTest, smoke_canSetTensorForDynamicInput) {
TEST(TensorTest, smoke_canReallocateDeviceInputForHostTensor) {
auto ov = ov::Core();
using namespace ov::preprocess;
auto p = PrePostProcessor(ngraph::builder::subgraph::makeSplitMultiConvConcat());
auto p = PrePostProcessor(ov::test::utils::make_split_multi_conv_concat());
p.input().tensor().set_element_type(ov::element::i8);
p.input().preprocess().convert_element_type(ov::element::f32);
auto function = p.build();
@ -196,7 +198,7 @@ TEST(VariablesTest, smoke_canSetStateTensor) {
const ov::Shape virable_shape = {1, 3, 2, 4};
const ov::Shape input_shape = {1, 3, 2, 4};
const ov::element::Type et = ov::element::f16;
auto model = ngraph::builder::subgraph::makeReadConcatSplitAssign(input_shape, et);
auto model = ov::test::utils::make_read_concat_split_assign(input_shape, et);
auto compiled_model = ov.compile_model(model, ov::test::utils::DEVICE_GPU);
auto request = compiled_model.create_infer_request();

View File

@ -4,6 +4,7 @@
#include "base/ov_behavior_test_utils.hpp"
#include "openvino/runtime/core.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
namespace {
using params = std::tuple<ov::element::Type, ov::element::Type>;
@ -54,7 +55,7 @@ TEST(ExecutionModeTest, SetCompileGetInferPrecisionAndExecMode) {
ov::Core core;
core.set_property(ov::test::utils::DEVICE_GPU, ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE));
auto model = ngraph::builder::subgraph::makeConvPoolRelu();
auto model = ov::test::utils::make_conv_pool_relu();
{
auto compiled_model = core.compile_model(model, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32));
ASSERT_EQ(ov::hint::ExecutionMode::PERFORMANCE, compiled_model.get_property(ov::hint::execution_mode));

View File

@ -17,6 +17,10 @@
#include "openvino/runtime/infer_request.hpp"
#include "openvino/runtime/compiled_model.hpp"
#include "functional_test_utils/ov_plugin_cache.hpp"
#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/ti_with_lstm_cell.hpp"
#include "common_test_utils/subgraph_builders/detection_output.hpp"
#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
namespace {
using ConcurrencyTestParams = std::tuple<size_t, // number of streams
@ -26,9 +30,9 @@ class OVConcurrencyTest : public ov::test::TestsCommon,
public testing::WithParamInterface<ConcurrencyTestParams> {
void SetUp() override {
std::tie(num_streams, num_requests) = this->GetParam();
fn_ptrs = {ngraph::builder::subgraph::makeSplitMultiConvConcat(),
ngraph::builder::subgraph::makeMultiSingleConv(),
ngraph::builder::subgraph::makeTIwithLSTMcell()};
fn_ptrs = {ov::test::utils::make_split_multi_conv_concat(),
ov::test::utils::make_multi_single_conv(),
ov::test::utils::make_ti_with_lstm_cell()};
};
public:
static std::string getTestCaseName(const testing::TestParamInfo<ConcurrencyTestParams>& obj) {
@ -144,7 +148,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_RemoteTensor, OVConcurrencyTest,
TEST(canSwapTensorsBetweenInferRequests, inputs) {
std::vector<ov::Tensor> ref;
std::vector<ov::Tensor> input_tensors;
auto fn = ngraph::builder::subgraph::makeSplitMultiConvConcat();
auto fn = ov::test::utils::make_split_multi_conv_concat();
auto core = ov::test::utils::PluginCache::get().core();
auto compiled_model = core->compile_model(fn, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32));
@ -218,7 +222,7 @@ TEST(canSwapTensorsBetweenInferRequests, inputs) {
}
TEST(smoke_InferRequestDeviceMemoryAllocation, usmHostIsNotChanged) {
auto fn = ngraph::builder::subgraph::makeDetectionOutput(ov::element::f32);
auto fn = ov::test::utils::make_detection_output(ov::element::f32);
auto core = ov::test::utils::PluginCache::get().core();
auto compiled_model = core->compile_model(fn, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32));
@ -257,7 +261,7 @@ TEST(smoke_InferRequestDeviceMemoryAllocation, usmHostIsNotChanged) {
}
TEST(smoke_InferRequestDeviceMemoryAllocation, canSetSystemHostTensor) {
auto fn = ngraph::builder::subgraph::makeDetectionOutput(ov::element::f32);
auto fn = ov::test::utils::make_detection_output(ov::element::f32);
auto core = ov::test::utils::PluginCache::get().core();
auto compiled_model = core->compile_model(fn, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32));
@ -283,7 +287,7 @@ TEST(canSwapTensorsBetweenInferRequests, outputs) {
std::vector<ov::Tensor> ref;
std::vector<ov::Tensor> input_tensors;
std::vector<ov::Tensor> output_tensors;
auto fn = ngraph::builder::subgraph::makeSplitMultiConvConcat();
auto fn = ov::test::utils::make_split_multi_conv_concat();
auto core = ov::test::utils::PluginCache::get().core();
auto compiled_model = core->compile_model(fn, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32));

View File

@ -9,6 +9,7 @@
#include "openvino/runtime/core.hpp"
#include "ov_models/subgraph_builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp"
namespace {
using ov::test::InputShape;
@ -69,8 +70,8 @@ protected:
init_input_shapes(input_shape);
//TODO: think how we can switch between several input topologies in the future
// function = ngraph::builder::subgraph::makeSplitConvConcat(input_shape.front().first.get_min_shape(), model_type);
function = ngraph::builder::subgraph::makeSplitMultiConvConcat(input_shape.front().first.get_min_shape(), model_type);
// function = ov::test::utils::make_split_conv_concat(input_shape.front().first.get_min_shape(), model_type);
function = ov::test::utils::make_split_multi_conv_concat(input_shape.front().first.get_min_shape(), model_type);
// make topology dynamic
std::map<std::string, ov::PartialShape> dynShape;

View File

@ -18,6 +18,8 @@
#include "base/ov_behavior_test_utils.hpp"
#include "ov_models/subgraph_builders.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
using namespace ::testing;
using namespace InferenceEngine;
@ -578,8 +580,8 @@ class TwoNets_Test : public ov::test::TestsCommon,
public testing::WithParamInterface<TwoNetsParams> {
void SetUp() override {
std::tie(num_streams, num_requests) = this->GetParam();
fn_ptrs = {ngraph::builder::subgraph::makeSplitMultiConvConcat(),
ngraph::builder::subgraph::makeMultiSingleConv()};
fn_ptrs = {ov::test::utils::make_split_multi_conv_concat(),
ov::test::utils::make_multi_single_conv()};
};
public:
static std::string getTestCaseName(const testing::TestParamInfo<TwoNetsParams>& obj) {

View File

@ -14,6 +14,7 @@
#include "common_test_utils/file_utils.hpp"
#include "ov_models/subgraph_builders.hpp"
#include "openvino/core/preprocess/pre_post_process.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
#ifdef _WIN32
#ifdef ENABLE_DX11
@ -147,7 +148,7 @@ struct DX11CachedTexture_Test : DX11RemoteCtx_Test {
GTEST_SKIP();
#endif
// inference using remote blob with batch
auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, texture_description.Height, texture_description.Width});
auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({1, 3, texture_description.Height, texture_description.Width});
ov::Core core;
ov::intel_gpu::ocl::D3DContext context(core, device_ptr);

View File

@ -11,13 +11,16 @@
#include "common_test_utils/ov_tensor_utils.hpp"
#include "base/ov_behavior_test_utils.hpp"
#include "ov_models/subgraph_builders.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/convert_transpose.hpp"
class OVRemoteTensor_Test : public ov::test::TestsCommon {
protected:
std::shared_ptr<ov::Model> fn_ptr;
void SetUp() override {
fn_ptr = ngraph::builder::subgraph::makeSplitMultiConvConcat();
fn_ptr = ov::test::utils::make_split_multi_conv_concat();
}
};
@ -739,7 +742,7 @@ protected:
public:
void SetUp() override {
fn_ptr = ngraph::builder::subgraph::makeSplitMultiConvConcat();
fn_ptr = ov::test::utils::make_split_multi_conv_concat();
deviceName = ov::test::utils::DEVICE_GPU;
auto with_auto_batching = this->GetParam();
if (with_auto_batching) {
@ -1277,7 +1280,7 @@ TEST_F(OVRemoteTensor_Test, NV12toGray) {
// ------------------------------------------------------
// inference using remote tensor
auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({1, feature, height, width});
auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({1, feature, height, width});
using namespace ov::preprocess;
@ -1325,7 +1328,7 @@ TEST_F(OVRemoteTensor_Test, NV12toGray) {
// ------------------------------------------------------
// regular inference
auto fn_ptr_regular = ngraph::builder::subgraph::makeConvPoolRelu({1, feature, height, width});
auto fn_ptr_regular = ov::test::utils::make_conv_pool_relu({1, feature, height, width});
auto p_reg = PrePostProcessor(fn_ptr_regular);
p_reg.input().tensor().set_element_type(ov::element::f32)
@ -1368,7 +1371,7 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_ConvertTranspose) {
// ------------------------------------------------------
// inference using remote tensor
auto fn_ptr_remote = ngraph::builder::subgraph::makeConvertTranspose({1, 3, height, width});
auto fn_ptr_remote = ov::test::utils::make_convert_transpose({1, 3, height, width});
using namespace ov::preprocess;
auto p = PrePostProcessor(fn_ptr_remote);
@ -1435,7 +1438,7 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_ConvertTranspose) {
// ------------------------------------------------------
// regular inference
auto fn_ptr_regular = ngraph::builder::subgraph::makeConvertTranspose({1, 3, height, width});
auto fn_ptr_regular = ov::test::utils::make_convert_transpose({1, 3, height, width});
using namespace ov::preprocess;
auto p_reg = PrePostProcessor(fn_ptr_regular);
@ -1478,7 +1481,7 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_single_plane) {
// ------------------------------------------------------
// inference using remote tensor
auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
using namespace ov::preprocess;
auto p = PrePostProcessor(fn_ptr_remote);
@ -1526,7 +1529,7 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_single_plane) {
// ------------------------------------------------------
// regular inference
auto fn_ptr_regular = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
auto fn_ptr_regular = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
using namespace ov::preprocess;
auto p_reg = PrePostProcessor(fn_ptr_regular);
@ -1569,7 +1572,7 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_two_planes) {
// ------------------------------------------------------
// inference using remote tensor
auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
using namespace ov::preprocess;
auto p = PrePostProcessor(fn_ptr_remote);
@ -1634,7 +1637,7 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_two_planes) {
// ------------------------------------------------------
// regular inference
auto fn_ptr_regular = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
auto fn_ptr_regular = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
using namespace ov::preprocess;
auto p_reg = PrePostProcessor(fn_ptr_regular);
@ -1676,7 +1679,7 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_buffer) {
auto ie = ov::Core();
auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
using namespace ov::preprocess;
auto p = PrePostProcessor(fn_ptr_remote);
@ -1781,7 +1784,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_image_single_plane) {
// ------------------------------------------------------
// inference using remote tensor
auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({num_batch, 3, height, width});
auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({num_batch, 3, height, width});
using namespace ov::preprocess;
auto p = PrePostProcessor(fn_ptr_remote);
@ -1839,7 +1842,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_image_single_plane) {
// ------------------------------------------------------
// regular inference
auto fn_ptr_regular = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
auto fn_ptr_regular = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
using namespace ov::preprocess;
auto p_reg = PrePostProcessor(fn_ptr_regular);
@ -1889,7 +1892,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_image_two_planes) {
// ------------------------------------------------------
// inference using remote tensor
auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({num_batch, 3, height, width});
auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({num_batch, 3, height, width});
using namespace ov::preprocess;
auto p = PrePostProcessor(fn_ptr_remote);
@ -1963,7 +1966,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_image_two_planes) {
// ------------------------------------------------------
// regular inference
auto fn_ptr_regular = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
auto fn_ptr_regular = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
using namespace ov::preprocess;
auto p_reg = PrePostProcessor(fn_ptr_regular);
@ -2028,7 +2031,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toGray) {
// ------------------------------------------------------
// inference using remote tensor
auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({num_batch, feature, height, width});
auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({num_batch, feature, height, width});
using namespace ov::preprocess;
@ -2086,7 +2089,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toGray) {
// ------------------------------------------------------
// regular inference
auto fn_ptr_regular = ngraph::builder::subgraph::makeConvPoolRelu({1, 1, height, width});
auto fn_ptr_regular = ov::test::utils::make_conv_pool_relu({1, 1, height, width});
auto p_reg = PrePostProcessor(fn_ptr_regular);
p_reg.input().tensor().set_element_type(ov::element::f32)
@ -2133,7 +2136,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_buffer) {
// ------------------------------------------------------
// inference using remote tensor
auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({num_batch, 3, height, width});
auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({num_batch, 3, height, width});
using namespace ov::preprocess;
auto p = PrePostProcessor(fn_ptr_remote);
@ -2235,7 +2238,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_buffer) {
// ------------------------------------------------------
// regular inference
auto fn_ptr_regular = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
auto fn_ptr_regular = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
using namespace ov::preprocess;
auto p_reg = PrePostProcessor(fn_ptr_regular);
@ -2369,7 +2372,7 @@ TEST(OVRemoteContextGPU, smoke_RemoteContextCaching) {
const auto gpuDeviceFirst = gpuDevices[0];
const auto gpuDeviceSecond = gpuDevices[1];
auto model = ngraph::builder::subgraph::makeConvertTranspose();
auto model = ov::test::utils::make_convert_transpose();
auto compiledModelFirst = core.compile_model(model, gpuDeviceFirst);
auto compiledModelSecond = core.compile_model(model, gpuDeviceSecond);
@ -2410,7 +2413,7 @@ TEST(OVRemoteContextGPU, smoke_RemoteContextSingleDevice) {
check_contexts_are_same(default_ctx, core.get_default_context(ov::test::utils::DEVICE_GPU));
// Ensure compiled model uses default context too
auto model = ngraph::builder::subgraph::makeConvertTranspose();
auto model = ov::test::utils::make_convert_transpose();
auto compiled_model = core.compile_model(model, ov::test::utils::DEVICE_GPU);
check_contexts_are_same(default_ctx, compiled_model.get_context());
ASSERT_EQ(2, compiled_model.get_property(ov::streams::num));

View File

@ -3,7 +3,12 @@
//
#include <behavior/plugin/core_threading.hpp>
#include <remote_tensor_tests/helpers.hpp>
#include "remote_tensor_tests/helpers.hpp"
#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/single_conv.hpp"
#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
#include "common_test_utils/subgraph_builders/2_input_subtract.hpp"
using namespace InferenceEngine;
using namespace InferenceEngine::gpu;
@ -26,11 +31,11 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetwork_RemoteContext) {
std::atomic<unsigned int> counter{0u};
std::vector<InferenceEngine::CNNNetwork> networks;
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::make2InputSubtract()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeMultiSingleConv()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSingleConv()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitConvConcat()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitMultiConvConcat()));
networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_2_input_subtract()));
networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_multi_single_conv()));
networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_single_conv()));
networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_split_conv_concat()));
networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_split_multi_conv_concat()));
auto ocl_instance = std::make_shared<OpenCL>();
ie.SetConfig(config, target_device);

View File

@ -6,6 +6,8 @@
#include <vector>
#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"
using namespace ov::test::behavior;
namespace {
@ -17,7 +19,7 @@ const std::vector<ov::AnyMap> HeteroConfigs = {{ov::device::priorities(ov::test:
INSTANTIATE_TEST_SUITE_P(
smoke_BehaviorTests,
OVInferRequestDynamicTests,
::testing::Combine(::testing::Values(ngraph::builder::subgraph::makeSplitConvConcat()),
::testing::Combine(::testing::Values(ov::test::utils::make_split_conv_concat()),
::testing::Values(std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>{
{{1, 4, 20, 20}, {1, 10, 18, 18}},
{{2, 4, 20, 20}, {2, 10, 18, 18}}}),
@ -28,7 +30,7 @@ INSTANTIATE_TEST_SUITE_P(
INSTANTIATE_TEST_SUITE_P(
smoke_Hetero_BehaviorTests,
OVInferRequestDynamicTests,
::testing::Combine(::testing::Values(ngraph::builder::subgraph::makeSplitConvConcat()),
::testing::Combine(::testing::Values(ov::test::utils::make_split_conv_concat()),
::testing::Values(std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>{
{{1, 4, 20, 20}, {1, 10, 18, 18}},
{{2, 4, 20, 20}, {2, 10, 18, 18}}}),

View File

@ -5,8 +5,9 @@
#include <vector>
#include "behavior/plugin/hetero_synthetic.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu_non_zero.hpp"
#include "ov_models/builders.hpp"
#include "ov_models/subgraph_builders.hpp"
namespace {
using namespace HeteroTests;
@ -21,7 +22,7 @@ INSTANTIATE_TEST_SUITE_P(
{"TEMPLATE1", "openvino_template_plugin"}}),
::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::withMajorNodesFunctions(
[] {
return ngraph::builder::subgraph::makeConvPool2Relu2();
return ov::test::utils::make_conv_pool2_relu2();
},
{"Conv_1"},
true))),
@ -45,7 +46,7 @@ INSTANTIATE_TEST_SUITE_P(
static std::vector<std::function<std::shared_ptr<ov::Model>()>> dynamicBuilders = {
[] {
return ngraph::builder::subgraph::makeConvPoolReluNonZero();
return ov::test::utils::make_conv_pool_relu_non_zero();
},
};

View File

@ -10,6 +10,7 @@
#include "common_test_utils/file_utils.hpp"
#include "openvino/util/file_util.hpp"
#include "functional_test_utils/summary/api_summary.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
namespace BehaviorTestsUtils {
@ -174,7 +175,7 @@ protected:
std::tie(netPrecision, target_device, configuration) = this->GetParam();
SKIP_IF_CURRENT_TEST_IS_DISABLED()
APIBaseTest::SetUp();
function = ngraph::builder::subgraph::makeConvPoolRelu();
function = ov::test::utils::make_conv_pool_relu();
}
void TearDown() override {
if (!configuration.empty()) {

View File

@ -10,6 +10,7 @@
#include "common_test_utils/test_constants.hpp"
#include "ov_models/subgraph_builders.hpp"
#include "openvino/util/common_util.hpp"
#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp"
using namespace ::testing;
@ -31,7 +32,7 @@ class MultiDevice_Test : public ov::test::TestsCommon, public testing::WithParam
std::vector<DeviceName> deviceNameList;
std::tie(deviceNameList, _properties) = this->GetParam();
device_names = getDeviceStringWithMulti(deviceNameList);
fn_ptr = ngraph::builder::subgraph::makeSplitMultiConvConcat();
fn_ptr = ov::test::utils::make_split_multi_conv_concat();
}
public:
@ -61,7 +62,7 @@ class MultiDevice_SupportTest : public ov::test::TestsCommon, public testing::Wi
std::vector<DeviceName> deviceNameList;
std::tie(deviceNameList, expected_status, _properties) = this->GetParam();
device_names = getDeviceStringWithMulti(deviceNameList);
fn_ptr = ngraph::builder::subgraph::makeSplitMultiConvConcat();
fn_ptr = ov::test::utils::make_split_multi_conv_concat();
}
public:

View File

@ -27,6 +27,11 @@
#include "functional_test_utils/blob_utils.hpp"
#include "functional_test_utils/summary/api_summary.hpp"
#include "openvino/util/file_util.hpp"
#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/kso_func.hpp"
#include "common_test_utils/subgraph_builders/single_concat_with_constant.hpp"
#include "common_test_utils/subgraph_builders/concat_with_params.hpp"
#include "common_test_utils/subgraph_builders/split_concat.hpp"
namespace ov {
namespace test {
@ -34,7 +39,7 @@ namespace behavior {
inline std::shared_ptr<ov::Model> getDefaultNGraphFunctionForTheDevice(std::vector<size_t> inputShape = {1, 2, 32, 32},
ov::element::Type_t ngPrc = ov::element::Type_t::f32) {
return ngraph::builder::subgraph::makeSplitConcat(inputShape, ngPrc);
return ov::test::utils::make_split_concat(inputShape, ngPrc);
}
inline bool sw_plugin_in_target_device(std::string targetDevice) {
@ -175,13 +180,13 @@ public:
void SetUp() {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
// Generic network
actualNetwork = ngraph::builder::subgraph::makeSplitConcat();
actualNetwork = ov::test::utils::make_split_concat();
// Quite simple network
simpleNetwork = ngraph::builder::subgraph::makeSingleConcatWithConstant();
simpleNetwork = ov::test::utils::make_single_concat_with_constant();
// Multinput to substruct network
multinputNetwork = ngraph::builder::subgraph::makeConcatWithParams();
multinputNetwork = ov::test::utils::make_concat_with_params();
// Network with KSO
ksoNetwork = ngraph::builder::subgraph::makeKSOFunction();
ksoNetwork = ov::test::utils::make_kso_function();
}
virtual void setHeteroNetworkAffinity(const std::string &targetDevice) {
@ -240,7 +245,7 @@ public:
std::tie(target_device, configuration) = GetParam();
SKIP_IF_CURRENT_TEST_IS_DISABLED();
APIBaseTest::SetUp();
actualNetwork = ngraph::builder::subgraph::makeSplitConvConcat();
actualNetwork = ov::test::utils::make_split_conv_concat();
}
};

View File

@ -14,6 +14,12 @@
#include "functional_test_utils/plugin_cache.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/runtime/tensor.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
#include "common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp"
#include "common_test_utils/subgraph_builders/single_concat_with_constant.hpp"
#include "common_test_utils/subgraph_builders/concat_with_params.hpp"
#include "common_test_utils/subgraph_builders/single_split.hpp"
#include "common_test_utils/subgraph_builders/split_concat.hpp"
namespace ov {
namespace test {
@ -241,7 +247,7 @@ TEST_P(OVCompiledModelBaseTest, canCompileModelwithBrace) {
TEST(OVCompiledModelBaseTest, canCompileModelToDefaultDevice) {
std::shared_ptr<ov::Core> core = utils::PluginCache::get().core();
std::shared_ptr<ov::Model> function = ngraph::builder::subgraph::makeSingleConcatWithConstant();
std::shared_ptr<ov::Model> function = ov::test::utils::make_single_concat_with_constant();
EXPECT_NO_THROW(auto execNet = core->compile_model(function));
}
@ -280,12 +286,12 @@ TEST_P(OVCompiledModelBaseTest, CanCreateTwoCompiledModelsAndCheckRuntimeModel)
TEST_P(OVCompiledModelBaseTest, pluginDoesNotChangeOriginalNetwork) {
// compare 2 networks
auto referenceNetwork = ngraph::builder::subgraph::makeConvPoolRelu();
auto referenceNetwork = ov::test::utils::make_conv_pool_relu();
compare_functions(function, referenceNetwork);
}
TEST_P(OVCompiledModelBaseTest, CanSetInputPrecisionForNetwork) {
std::shared_ptr<ov::Model> model = ngraph::builder::subgraph::makeSingleConcatWithConstant();
std::shared_ptr<ov::Model> model = ov::test::utils::make_single_concat_with_constant();
ov::Core core = createCoreWithTemplate();
auto ppp = ov::preprocess::PrePostProcessor(model);
ov::preprocess::InputInfo& input = ppp.input();
@ -296,7 +302,7 @@ TEST_P(OVCompiledModelBaseTest, CanSetInputPrecisionForNetwork) {
}
TEST_P(OVCompiledModelBaseTest, CanSetOutputPrecisionForNetwork) {
std::shared_ptr<ov::Model> model = ngraph::builder::subgraph::makeSingleConcatWithConstant();
std::shared_ptr<ov::Model> model = ov::test::utils::make_single_concat_with_constant();
ov::Core core = createCoreWithTemplate();
auto ppp = ov::preprocess::PrePostProcessor(model);
ov::preprocess::OutputInfo& output = ppp.output();
@ -451,7 +457,7 @@ TEST_P(OVCompiledModelBaseTestOptional, CheckExecGraphInfoAfterExecution) {
TEST_P(OVCompiledModelBaseTest, getInputFromFunctionWithSingleInput) {
ov::CompiledModel execNet;
function = ngraph::builder::subgraph::makeSplitConcat();
function = ov::test::utils::make_split_concat();
execNet = core->compile_model(function, target_device, configuration);
EXPECT_EQ(function->inputs().size(), 1);
@ -465,7 +471,7 @@ TEST_P(OVCompiledModelBaseTest, getInputFromFunctionWithSingleInput) {
TEST_P(OVCompiledModelBaseTest, getOutputFromFunctionWithSingleInput) {
ov::CompiledModel execNet;
function = ngraph::builder::subgraph::makeSplitConcat();
function = ov::test::utils::make_split_concat();
execNet = core->compile_model(function, target_device, configuration);
EXPECT_EQ(function->outputs().size(), 1);
@ -479,7 +485,7 @@ TEST_P(OVCompiledModelBaseTest, getOutputFromFunctionWithSingleInput) {
TEST_P(OVCompiledModelBaseTest, getInputsFromFunctionWithSeveralInputs) {
ov::CompiledModel execNet;
function = ngraph::builder::subgraph::makeConcatWithParams();
function = ov::test::utils::make_concat_with_params();
execNet = core->compile_model(function, target_device, configuration);
EXPECT_EQ(function->inputs().size(), 2);
@ -500,7 +506,7 @@ TEST_P(OVCompiledModelBaseTest, getInputsFromFunctionWithSeveralInputs) {
TEST_P(OVCompiledModelBaseTest, getOutputsFromFunctionWithSeveralOutputs) {
ov::CompiledModel execNet;
function = ngraph::builder::subgraph::makeMultipleInputOutputDoubleConcat();
function = ov::test::utils::make_multiple_input_output_double_concat();
execNet = core->compile_model(function, target_device, configuration);
EXPECT_EQ(function->outputs().size(), 2);
@ -521,7 +527,7 @@ TEST_P(OVCompiledModelBaseTest, getOutputsFromFunctionWithSeveralOutputs) {
TEST_P(OVCompiledModelBaseTest, getOutputsFromSplitFunctionWithSeveralOutputs) {
ov::CompiledModel execNet;
function = ngraph::builder::subgraph::makeSingleSplit();
function = ov::test::utils::make_single_split();
execNet = core->compile_model(function, target_device, configuration);
EXPECT_EQ(function->outputs().size(), 2);

View File

@ -11,6 +11,7 @@
#include "common_test_utils/test_assertions.hpp"
#include "common_test_utils/file_utils.hpp"
#include "common_test_utils/unicode_utils.hpp"
#include "common_test_utils/subgraph_builders/single_conv.hpp"
namespace ov {
namespace test {
@ -118,7 +119,7 @@ public:
std::tie(target_device, configuration) = GetParam();
SKIP_IF_CURRENT_TEST_IS_DISABLED();
APIBaseTest::SetUp();
simpleNetwork = ngraph::builder::subgraph::makeSingleConv();
simpleNetwork = ov::test::utils::make_single_conv();
}
};

View File

@ -8,6 +8,7 @@
#include "common_test_utils/file_utils.hpp"
#include "openvino/core/model.hpp"
#include "openvino/op/relu.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
namespace BehaviorTestsDefinitions {
class ExecutableNetworkBaseTest : public BehaviorTestsUtils::IEExecutableNetworkTestBase,
@ -292,7 +293,7 @@ TEST_P(ExecutableNetworkBaseTest, canExport) {
TEST_P(ExecutableNetworkBaseTest, pluginDoesNotChangeOriginalNetwork) {
// compare 2 networks
auto referenceNetwork = ngraph::builder::subgraph::makeConvPoolRelu();
auto referenceNetwork = ov::test::utils::make_conv_pool_relu();
compare_functions(cnnNet.getFunction(), referenceNetwork);
}
@ -303,7 +304,7 @@ protected:
std::tie(netPrecision, target_device, configuration) = this->GetParam();
SKIP_IF_CURRENT_TEST_IS_DISABLED()
APIBaseTest::SetUp();
function = ngraph::builder::subgraph::makeConvPoolRelu();
function = ov::test::utils::make_conv_pool_relu();
}
void TearDown() override {
if (!configuration.empty()) {

View File

@ -9,6 +9,7 @@
#include "base/behavior_test_utils.hpp"
#include "shared_test_classes/subgraph/basic_lstm.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
namespace BehaviorTestsDefinitions {
using InferRequestIOBBlobTest = BehaviorTestsUtils::InferRequestTests;
@ -455,7 +456,7 @@ public:
void SetUp() override {
std::tie(layout, target_device, configuration) = this->GetParam();
SKIP_IF_CURRENT_TEST_IS_DISABLED()
function = ngraph::builder::subgraph::makeConvPoolRelu();
function = ov::test::utils::make_conv_pool_relu();
cnnNet = InferenceEngine::CNNNetwork(function);
execNet = ie->LoadNetwork(cnnNet, target_device, configuration);
}

View File

@ -8,6 +8,7 @@
#include "base/behavior_test_utils.hpp"
#include "common_test_utils/common_utils.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
namespace BehaviorTestsDefinitions {
@ -42,8 +43,7 @@ public:
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
APIBaseTest::SetUp();
std::shared_ptr<ngraph::Function> function = ngraph::builder::subgraph::makeConvPoolRelu(
{4, 3, 6, 8}, ngraph::element::Type_t::u8);
std::shared_ptr<ngraph::Function> function = ov::test::utils::make_conv_pool_relu({4, 3, 6, 8}, ov::element::u8);
InferenceEngine::CNNNetwork cnnNetwork(function);
executableNetwork = ie->LoadNetwork(cnnNetwork, target_device, config);
}

View File

@ -13,6 +13,7 @@
#include "common_test_utils/file_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp"
namespace ov {
namespace test {
@ -76,7 +77,7 @@ TEST_P(OVExecGraphImportExportTest, importExportedFunction) {
ov::CompiledModel execNet;
// Create simple function
function = ngraph::builder::subgraph::makeMultipleInputOutputDoubleConcat({1, 2, 24, 24}, elementType);
function = ov::test::utils::make_multiple_input_output_double_concat({1, 2, 24, 24}, elementType);
execNet = core->compile_model(function, target_device, configuration);
std::stringstream strm;
@ -337,7 +338,7 @@ TEST_P(OVExecGraphImportExportTest, importExportedIENetwork) {
InferenceEngine::ExecutableNetwork execNet;
// Create simple function
function = ngraph::builder::subgraph::makeMultipleInputOutputDoubleConcat({1, 2, 24, 24}, elementType);
function = ov::test::utils::make_multiple_input_output_double_concat({1, 2, 24, 24}, elementType);
execNet = ie->LoadNetwork(InferenceEngine::CNNNetwork(function), target_device, any_copy(configuration));
@ -471,7 +472,7 @@ TEST_P(OVExecGraphImportExportTest, ieImportExportedFunction) {
ov::CompiledModel execNet;
// Create simple function
function = ngraph::builder::subgraph::makeMultipleInputOutputDoubleConcat({1, 2, 24, 24}, elementType);
function = ov::test::utils::make_multiple_input_output_double_concat({1, 2, 24, 24}, elementType);
execNet = core->compile_model(function, target_device, configuration);
std::stringstream strm;

View File

@ -12,6 +12,12 @@
#include "functional_test_utils/plugin_cache.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/runtime/tensor.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
#include "common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp"
#include "common_test_utils/subgraph_builders/single_concat_with_constant.hpp"
#include "common_test_utils/subgraph_builders/concat_with_params.hpp"
#include "common_test_utils/subgraph_builders/single_split.hpp"
#include "common_test_utils/subgraph_builders/split_concat.hpp"
namespace ov {
namespace test {
@ -142,7 +148,7 @@ TEST_P(OVExecutableNetworkBaseTest, canLoadNetworkFromMemory) {
TEST(OVExecutableNetworkBaseTest, smoke_LoadNetworkToDefaultDeviceNoThrow) {
std::shared_ptr<ov::Core> core = utils::PluginCache::get().core();
std::shared_ptr<ov::Model> function = ngraph::builder::subgraph::makeSingleConcatWithConstant();
std::shared_ptr<ov::Model> function = ov::test::utils::make_single_concat_with_constant();
EXPECT_NO_THROW(auto execNet = core->compile_model(function));
}
@ -412,14 +418,14 @@ TEST_P(OVExecutableNetworkBaseTest, canExport) {
TEST_P(OVExecutableNetworkBaseTest, pluginDoesNotChangeOriginalNetwork) {
// compare 2 networks
auto referenceNetwork = ngraph::builder::subgraph::makeConvPoolRelu();
auto referenceNetwork = ov::test::utils::make_conv_pool_relu();
compare_functions(function, referenceNetwork);
}
TEST_P(OVExecutableNetworkBaseTest, getInputFromFunctionWithSingleInput) {
ov::CompiledModel execNet;
function = ngraph::builder::subgraph::makeSplitConcat();
function = ov::test::utils::make_split_concat();
execNet = core->compile_model(function, target_device, configuration);
EXPECT_EQ(function->inputs().size(), 1);
@ -433,7 +439,7 @@ TEST_P(OVExecutableNetworkBaseTest, getInputFromFunctionWithSingleInput) {
TEST_P(OVExecutableNetworkBaseTest, getOutputFromFunctionWithSingleInput) {
ov::CompiledModel execNet;
function = ngraph::builder::subgraph::makeSplitConcat();
function = ov::test::utils::make_split_concat();
execNet = core->compile_model(function, target_device, configuration);
EXPECT_EQ(function->outputs().size(), 1);
@ -447,7 +453,7 @@ TEST_P(OVExecutableNetworkBaseTest, getOutputFromFunctionWithSingleInput) {
TEST_P(OVExecutableNetworkBaseTest, getInputsFromFunctionWithSeveralInputs) {
ov::CompiledModel execNet;
function = ngraph::builder::subgraph::makeConcatWithParams();
function = ov::test::utils::make_concat_with_params();
execNet = core->compile_model(function, target_device, configuration);
EXPECT_EQ(function->inputs().size(), 2);
@ -468,7 +474,7 @@ TEST_P(OVExecutableNetworkBaseTest, getInputsFromFunctionWithSeveralInputs) {
TEST_P(OVExecutableNetworkBaseTest, getOutputsFromFunctionWithSeveralOutputs) {
ov::CompiledModel execNet;
function = ngraph::builder::subgraph::makeMultipleInputOutputDoubleConcat();
function = ov::test::utils::make_multiple_input_output_double_concat();
execNet = core->compile_model(function, target_device, configuration);
EXPECT_EQ(function->outputs().size(), 2);
@ -489,7 +495,7 @@ TEST_P(OVExecutableNetworkBaseTest, getOutputsFromFunctionWithSeveralOutputs) {
TEST_P(OVExecutableNetworkBaseTest, getOutputsFromSplitFunctionWithSeveralOutputs) {
ov::CompiledModel execNet;
function = ngraph::builder::subgraph::makeSingleSplit();
function = ov::test::utils::make_single_split();
execNet = core->compile_model(function, target_device, configuration);
EXPECT_EQ(function->outputs().size(), 2);

View File

@ -7,6 +7,7 @@
#include <gtest/gtest.h>
#include <base/ov_behavior_test_utils.hpp>
#include "common_test_utils/subgraph_builders/single_conv.hpp"
#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT
# include <iostream>
@ -43,7 +44,7 @@ public:
std::tie(target_device, configuration) = GetParam();
SKIP_IF_CURRENT_TEST_IS_DISABLED();
APIBaseTest::SetUp();
simpleNetwork = ngraph::builder::subgraph::makeSingleConv();
simpleNetwork = ov::test::utils::make_single_conv();
}
};
using OVClassExecutableNetworkGetMetricTest_DEVICE_PRIORITY = OVClassExecutableNetworkGetMetricTest_Priority;

View File

@ -21,6 +21,8 @@
#include "openvino/op/result.hpp"
#include "openvino/op/shape_of.hpp"
#include "openvino/util/file_util.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu_no_reshapes.hpp"
#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"
#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT
# include <iostream>
@ -103,7 +105,7 @@ public:
std::tie(target_device, configuration) = GetParam();
SKIP_IF_CURRENT_TEST_IS_DISABLED();
APIBaseTest::SetUp();
actualNetwork = ngraph::builder::subgraph::makeSplitConvConcat();
actualNetwork = ov::test::utils::make_split_conv_concat();
}
};
@ -221,7 +223,7 @@ TEST(OVClassBasicTest, smoke_createMockEngineConfigThrows) {
inline void generateModelFile() {
ov::pass::Manager manager;
manager.register_pass<ov::pass::Serialize>("test_model.xml", "test_model.bin");
auto function = ngraph::builder::subgraph::makeConvPoolReluNoReshapes({1, 3, 227, 227});
auto function = ov::test::utils::make_conv_pool_relu_no_reshapes({1, 3, 227, 227});
manager.run_passes(function);
}

View File

@ -14,6 +14,9 @@
#include "ov_models/subgraph_builders.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "base/behavior_test_utils.hpp"
#include "common_test_utils/subgraph_builders/single_conv.hpp"
#include "common_test_utils/subgraph_builders/detection_output.hpp"
#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
using namespace ::testing;
using namespace InferenceEngine;
@ -30,8 +33,8 @@ class AutoBatching_Test : public BehaviorTestsUtils::IEPluginTestBase,
public testing::WithParamInterface<AutoBatchTwoNetsParams> {
void SetUp() override {
std::tie(target_device, use_get_blob, num_streams, num_requests, num_batch) = this->GetParam();
fn_ptrs = {ngraph::builder::subgraph::makeSingleConv(),
ngraph::builder::subgraph::makeMultiSingleConv()};
fn_ptrs = {ov::test::utils::make_single_conv(),
ov::test::utils::make_multi_single_conv()};
};
public:
static std::string getTestCaseName(const testing::TestParamInfo<AutoBatchTwoNetsParams> &obj) {
@ -148,8 +151,8 @@ class AutoBatching_Test_DetectionOutput : public AutoBatching_Test {
public:
void SetUp() override {
std::tie(target_device, use_get_blob, num_streams, num_requests, num_batch) = this->GetParam();
fn_ptrs = {ngraph::builder::subgraph::makeDetectionOutput(),
ngraph::builder::subgraph::makeDetectionOutput()};
fn_ptrs = {ov::test::utils::make_detection_output(),
ov::test::utils::make_detection_output()};
};
static std::string getTestCaseName(const testing::TestParamInfo<AutoBatchTwoNetsParams> &obj) {

View File

@ -14,6 +14,7 @@
#include "common_test_utils/unicode_utils.hpp"
#include "openvino/util/common_util.hpp"
#include "base/behavior_test_utils.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
#include <ie_core.hpp>
#include <ie_common.h>
@ -80,7 +81,7 @@ protected:
std::tie(targetDevice, userConfig) = GetParam();
target_device = targetDevice;
APIBaseTest::SetUp();
function = ngraph::builder::subgraph::makeConvPoolRelu();
function = ov::test::utils::make_conv_pool_relu();
configuration = userConfig.first;
std::string ext = userConfig.second;
std::string::size_type pos = 0;

View File

@ -19,6 +19,7 @@
#include "common_test_utils/file_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "base/behavior_test_utils.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
namespace BehaviorTestsDefinitions {
@ -74,7 +75,7 @@ public:
target_device = this->GetParam();
SKIP_IF_CURRENT_TEST_IS_DISABLED()
APIBaseTest::SetUp();
function = ngraph::builder::subgraph::makeConvPoolRelu();
function = ov::test::utils::make_conv_pool_relu();
cnnNet = InferenceEngine::CNNNetwork(function);
}
};
@ -139,7 +140,7 @@ public:
std::tie(target_device, configuration) = this->GetParam();
SKIP_IF_CURRENT_TEST_IS_DISABLED();
APIBaseTest::SetUp();
function = ngraph::builder::subgraph::makeConvPoolRelu();
function = ov::test::utils::make_conv_pool_relu();
cnnNet = InferenceEngine::CNNNetwork(function);
}
@ -164,7 +165,7 @@ public:
std::tuple<std::string, std::string, InferenceEngine::Parameter> entry;
std::tie(target_device, entry) = this->GetParam();
std::tie(key, value, reference) = entry;
function = ngraph::builder::subgraph::makeConvPoolRelu();
function = ov::test::utils::make_conv_pool_relu();
cnnNet = InferenceEngine::CNNNetwork(function);
}
@ -185,7 +186,7 @@ public:
std::tie(target_device, key) = this->GetParam();
SKIP_IF_CURRENT_TEST_IS_DISABLED();
APIBaseTest::SetUp();
function = ngraph::builder::subgraph::makeConvPoolRelu();
function = ov::test::utils::make_conv_pool_relu();
cnnNet = InferenceEngine::CNNNetwork(function);
}
@ -231,7 +232,7 @@ public:
std::tie(target_device, configuration, loadNetWorkConfig) = this->GetParam();
SKIP_IF_CURRENT_TEST_IS_DISABLED();
APIBaseTest::SetUp();
function = ngraph::builder::subgraph::makeConvPoolRelu();
function = ov::test::utils::make_conv_pool_relu();
cnnNet = InferenceEngine::CNNNetwork(function);
}

View File

@ -13,6 +13,7 @@
#include "common_test_utils/file_utils.hpp"
#include "common_test_utils/unicode_utils.hpp"
#include "openvino/util/file_util.hpp"
#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"
#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT
#include <iostream>
@ -907,7 +908,7 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithBigDeviceIDThrows) {
TEST(IEClassBasicTest, smoke_LoadNetworkToDefaultDeviceNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
InferenceEngine::CNNNetwork actualCnnNetwork;
std::shared_ptr<ngraph::Function> actualNetwork = ngraph::builder::subgraph::makeSplitConvConcat();
std::shared_ptr<ngraph::Function> actualNetwork = ov::test::utils::make_split_conv_concat();
ASSERT_NO_THROW(actualCnnNetwork = InferenceEngine::CNNNetwork(actualNetwork));
InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate();
ASSERT_NO_THROW(ie.LoadNetwork(actualCnnNetwork));

View File

@ -16,6 +16,7 @@
#include <common_test_utils/test_assertions.hpp>
#include <common_test_utils/test_constants.hpp>
#include "base/behavior_test_utils.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
#include <gtest/gtest.h>
#include <thread>
@ -25,6 +26,11 @@
#include <fstream>
#include <functional_test_utils/skip_tests_config.hpp>
#include "base/ov_behavior_test_utils.hpp"
#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/single_conv.hpp"
#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
#include "common_test_utils/subgraph_builders/2_input_subtract.hpp"
using Device = std::string;
using Config = std::map<std::string, std::string>;
@ -150,7 +156,7 @@ TEST_P(CoreThreadingTests, smoke_GetMetric) {
// tested function: QueryNetwork
TEST_P(CoreThreadingTests, smoke_QueryNetwork) {
InferenceEngine::Core ie;
InferenceEngine::CNNNetwork network(ngraph::builder::subgraph::make2InputSubtract());
InferenceEngine::CNNNetwork network(ov::test::utils::make_2_input_subtract());
ie.SetConfig(config, target_device);
InferenceEngine::QueryNetworkResult refResult = ie.QueryNetwork(network, target_device);
@ -225,14 +231,14 @@ protected:
void SetupNetworks() {
if (modelClass == ModelClass::ConvPoolRelu) {
for (unsigned i = 0; i < numThreads; i++) {
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeConvPoolRelu()));
networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_conv_pool_relu()));
}
} else {
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::make2InputSubtract()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeMultiSingleConv()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSingleConv()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitConvConcat()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitMultiConvConcat()));
networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_2_input_subtract()));
networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_multi_single_conv()));
networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_single_conv()));
networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_split_conv_concat()));
networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_split_multi_conv_concat()));
}
}
};
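
The networks prepared above feed the threading scenarios; a rough standalone analogue of what such a test exercises, written against the ov::Core API with an assumed device name, could look like:

#include <memory>
#include <thread>
#include <vector>

#include "openvino/runtime/core.hpp"
#include "common_test_utils/subgraph_builders/single_conv.hpp"
#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"

// Sketch: compile several builder models concurrently (assumes "CPU" is available).
void compile_in_parallel() {
    std::vector<std::shared_ptr<ov::Model>> models = {
        ov::test::utils::make_split_conv_concat(),
        ov::test::utils::make_single_conv(),
    };
    std::vector<std::thread> workers;
    for (const auto& model : models) {
        workers.emplace_back([model] {
            ov::Core core;                     // each thread uses its own Core
            core.compile_model(model, "CPU");  // device is an assumption
        });
    }
    for (auto& w : workers) {
        w.join();
    }
}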

View File

@ -7,6 +7,7 @@
#include <cstdint>
#include "openvino/runtime/properties.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
#include <locale.h>
@ -74,7 +75,7 @@ void OVCompileModelGetExecutionDeviceTests::SetUp() {
std::tie(target_device, userConfig) = GetParam();
compileModelProperties = userConfig.first;
expectedDeviceName = userConfig.second;
model = ngraph::builder::subgraph::makeConvPoolRelu();
model = ov::test::utils::make_conv_pool_relu();
}
TEST_P(OVClassCompiledModelPropertiesTests, CanUseCache) {

View File

@ -11,6 +11,9 @@
#include "openvino/op/parameter.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/result.hpp"
#include "common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp"
#include "common_test_utils/subgraph_builders/single_split.hpp"
#include "common_test_utils/subgraph_builders/split_concat.hpp"
namespace ov {
namespace test {
@ -282,7 +285,7 @@ void OVInferRequestIOTensorSetPrecisionTest::SetUp() {
std::tie(element_type, target_device, config) = this->GetParam();
SKIP_IF_CURRENT_TEST_IS_DISABLED()
APIBaseTest::SetUp();
function = ngraph::builder::subgraph::makeSplitConcat();
function = ov::test::utils::make_split_concat();
execNet = core->compile_model(function, target_device, config);
req = execNet.create_infer_request();
}
@ -384,7 +387,7 @@ void OVInferRequestCheckTensorPrecision::TearDown() {
}
TEST_P(OVInferRequestCheckTensorPrecision, getInputFromFunctionWithSingleInput) {
model = ngraph::builder::subgraph::makeSplitConcat({1, 4, 24, 24}, element_type);
model = ov::test::utils::make_split_concat({1, 4, 24, 24}, element_type);
createInferRequest();
ov::Tensor tensor1, tensor2;
@ -400,7 +403,7 @@ TEST_P(OVInferRequestCheckTensorPrecision, getInputFromFunctionWithSingleInput)
}
TEST_P(OVInferRequestCheckTensorPrecision, getOutputFromFunctionWithSingleInput) {
model = ngraph::builder::subgraph::makeSplitConcat({1, 4, 24, 24}, element_type);
model = ov::test::utils::make_split_concat({1, 4, 24, 24}, element_type);
createInferRequest();
ov::Tensor tensor1, tensor2;
@ -416,7 +419,7 @@ TEST_P(OVInferRequestCheckTensorPrecision, getOutputFromFunctionWithSingleInput)
}
TEST_P(OVInferRequestCheckTensorPrecision, getInputsFromFunctionWithSeveralInputs) {
model = ngraph::builder::subgraph::makeMultipleInputOutputDoubleConcat({1, 1, 32, 32}, element_type);
model = ov::test::utils::make_multiple_input_output_double_concat({1, 1, 32, 32}, element_type);
createInferRequest();
ov::Tensor tensor1, tensor2;
@ -447,7 +450,7 @@ TEST_P(OVInferRequestCheckTensorPrecision, getInputsFromFunctionWithSeveralInput
}
TEST_P(OVInferRequestCheckTensorPrecision, getOutputsFromFunctionWithSeveralOutputs) {
model = ngraph::builder::subgraph::makeMultipleInputOutputDoubleConcat({1, 1, 32, 32}, element_type);
model = ov::test::utils::make_multiple_input_output_double_concat({1, 1, 32, 32}, element_type);
createInferRequest();
ov::Tensor tensor1, tensor2;
@ -478,7 +481,7 @@ TEST_P(OVInferRequestCheckTensorPrecision, getOutputsFromFunctionWithSeveralOutp
}
TEST_P(OVInferRequestCheckTensorPrecision, getOutputsFromSplitFunctionWithSeveralOutputs) {
model = ngraph::builder::subgraph::makeSingleSplit({1, 4, 24, 24}, element_type);
model = ov::test::utils::make_single_split({1, 4, 24, 24}, element_type);
createInferRequest();
ov::Tensor tensor1, tensor2;

View File

@ -5,6 +5,7 @@
#include "behavior/ov_infer_request/perf_counters.hpp"
#include "openvino/runtime/profiling_info.hpp"
#include "common_test_utils/subgraph_builders/concat_with_params.hpp"
namespace ov {
namespace test {
@ -13,7 +14,7 @@ void OVInferRequestPerfCountersTest::SetUp() {
std::tie(target_device, configuration) = this->GetParam();
SKIP_IF_CURRENT_TEST_IS_DISABLED()
APIBaseTest::SetUp();
function = ngraph::builder::subgraph::makeConcatWithParams();
function = ov::test::utils::make_concat_with_params();
configuration.insert(ov::enable_profiling(true));
execNet = core->compile_model(function, target_device, configuration);
req = execNet.create_infer_request();
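
With ov::enable_profiling(true) set, the per-node counters become available on the infer request; a minimal sketch of reading them back outside the fixture (the output format is arbitrary):

#include <iostream>

#include "openvino/runtime/infer_request.hpp"

// Sketch: run one inference and dump the per-node profiling counters.
void dump_profiling(ov::InferRequest& req) {
    req.infer();
    for (const auto& info : req.get_profiling_info()) {
        std::cout << info.node_name << " (" << info.node_type << "): "
                  << info.real_time.count() << " us\n";
    }
}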

View File

@ -13,12 +13,22 @@
#include "common_test_utils/file_utils.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "functional_test_utils/summary/api_summary.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
#include "ov_models/builders.hpp"
#include "ov_models/subgraph_builders.hpp"
#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
#include "openvino/core/node_vector.hpp"
#include "openvino/op/parameter.hpp"
#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/kso_func.hpp"
#include "common_test_utils/subgraph_builders/ti_with_lstm_cell.hpp"
#include "common_test_utils/subgraph_builders/single_conv.hpp"
#include "common_test_utils/subgraph_builders/2_input_subtract.hpp"
#include "common_test_utils/subgraph_builders/nested_split_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/conv_bias.hpp"
#include "common_test_utils/subgraph_builders/read_concat_split_assign.hpp"
#include "common_test_utils/subgraph_builders/matmul_bias.hpp"
#define GTEST_COUT std::cout << "[ ] [ INFO ] "
@ -77,37 +87,37 @@ std::vector<ovModelWithName> CompileModelCacheTestBase::getNumericTypeOnlyFuncti
res.push_back(ovModelWithName { simple_function_multiply, "SimpleFunctionMultiply"});
res.push_back(ovModelWithName { simple_function_relu, "SimpleFunctionRelu"});
res.push_back(ovModelWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeConvPoolRelu, {1, 1, 32, 32}),
inputShapeWrapper(ov::test::utils::make_conv_pool_relu, {1, 1, 32, 32}),
"ConvPoolRelu"});
res.push_back(ovModelWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcat, {1, 4, 20, 20}),
inputShapeWrapper(ov::test::utils::make_split_conv_concat, {1, 4, 20, 20}),
"SplitConvConcat"});
res.push_back(ovModelWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeKSOFunction, {1, 4, 20, 20}),
inputShapeWrapper(ov::test::utils::make_kso_function, {1, 4, 20, 20}),
"KSOFunction"});
res.push_back(ovModelWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeSingleConv, {1, 3, 24, 24}),
inputShapeWrapper(ov::test::utils::make_single_conv, {1, 3, 24, 24}),
"SingleConv"});
res.push_back(ovModelWithName {
inputShapeWrapper(ngraph::builder::subgraph::make2InputSubtract, {1, 3, 24, 24}),
inputShapeWrapper(ov::test::utils::make_2_input_subtract, {1, 3, 24, 24}),
"2InputSubtract"});
res.push_back(ovModelWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeNestedSplitConvConcat, {1, 4, 20, 20}),
inputShapeWrapper(ov::test::utils::make_nested_split_conv_concat, {1, 4, 20, 20}),
"NestedSplitConvConcat"});
res.push_back(ovModelWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcatInputInBranch, {1, 4, 20, 20}),
inputShapeWrapper(ov::test::utils::make_split_conv_concat_input_in_branch, {1, 4, 20, 20}),
"SplitConvConcatInputInBranch"});
res.push_back(ovModelWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcatNestedInBranch, {1, 4, 20, 20}),
inputShapeWrapper(ov::test::utils::make_split_conv_concat_nested_in_branch, {1, 4, 20, 20}),
"SplitConvConcatNestedInBranch"});
res.push_back(ovModelWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcatNestedInBranchNestedOut, {1, 4, 20, 20}),
inputShapeWrapper(ov::test::utils::make_split_conv_concat_nested_in_branch_nested_out, {1, 4, 20, 20}),
"SplitConvConcatNestedInBranchNestedOut"});
res.push_back(ovModelWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeConvBias, {1, 3, 24, 24}),
inputShapeWrapper(ov::test::utils::make_conv_bias, {1, 3, 24, 24}),
"ConvBias"});
res.push_back(ovModelWithName{
inputShapeWrapper(ngraph::builder::subgraph::makeMatMulBias, {1, 3, 24, 24}),
inputShapeWrapper(ov::test::utils::make_matmul_bias, {1, 3, 24, 24}),
"MatMulBias" });
return res;
}
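
inputShapeWrapper is an existing helper in this file that is not shown in the hunk; judging from its call sites it presumably binds a fixed input shape to a (shape, element type) builder and exposes the (type, batch) callable stored in ovModelWithName. A purely hypothetical stand-in, only to illustrate that adaptation (the real helper may differ, in particular in how it treats the batch argument):

#include <cstddef>
#include <functional>
#include <memory>

#include "openvino/core/model.hpp"

using ShapeTypeBuilder = std::function<std::shared_ptr<ov::Model>(ov::Shape, ov::element::Type)>;

// Hypothetical adapter: fix the shape, let the batch argument override the leading dimension.
inline std::function<std::shared_ptr<ov::Model>(ov::element::Type, std::size_t)>
input_shape_wrapper_sketch(ShapeTypeBuilder builder, ov::Shape shape) {
    return [builder, shape](ov::element::Type type, std::size_t batch) {
        auto local = shape;
        local[0] = batch;  // assumption: batch replaces the first dimension
        return builder(local, type);
    };
}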
@ -115,7 +125,7 @@ std::vector<ovModelWithName> CompileModelCacheTestBase::getNumericTypeOnlyFuncti
std::vector<ovModelWithName> CompileModelCacheTestBase::getAnyTypeOnlyFunctions() {
std::vector<ovModelWithName> res;
res.push_back(ovModelWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeReadConcatSplitAssign, {1, 1, 2, 4}),
inputShapeWrapper(ov::test::utils::make_read_concat_split_assign, {1, 1, 2, 4}),
"ReadConcatSplitAssign"});
return res;
}
@ -123,7 +133,7 @@ std::vector<ovModelWithName> CompileModelCacheTestBase::getAnyTypeOnlyFunctions(
std::vector<ovModelWithName> CompileModelCacheTestBase::getFloatingPointOnlyFunctions() {
std::vector<ovModelWithName> res;
res.push_back(ovModelWithName { [](ngraph::element::Type type, size_t batchSize) {
return ngraph::builder::subgraph::makeTIwithLSTMcell(type, batchSize);
return ov::test::utils::make_ti_with_lstm_cell(type, batchSize);
}, "TIwithLSTMcell1"});
return res;
}
@ -293,8 +303,7 @@ void CompileModelLoadFromFileTestBase::SetUp() {
core->set_property(ov::cache_dir());
ov::pass::Manager manager;
manager.register_pass<ov::pass::Serialize>(m_modelName, m_weightsName);
manager.run_passes(ngraph::builder::subgraph::makeConvPoolRelu(
{1, 3, 227, 227}, InferenceEngine::details::convertPrecision(InferenceEngine::Precision::FP32)));
manager.run_passes(ov::test::utils::make_conv_pool_relu({1, 3, 227, 227}, ov::element::f32));
}
void CompileModelLoadFromFileTestBase::TearDown() {
@ -376,9 +385,7 @@ void CompileModelLoadFromMemoryTestBase::SetUp() {
core->set_property(ov::cache_dir());
ov::pass::Manager manager;
manager.register_pass<ov::pass::Serialize>(m_modelName, m_weightsName);
manager.run_passes(ngraph::builder::subgraph::makeConvPoolRelu(
{1, 3, 227, 227},
InferenceEngine::details::convertPrecision(InferenceEngine::Precision::FP32)));
manager.run_passes(ov::test::utils::make_conv_pool_relu({1, 3, 227, 227}, ov::element::f32));
try {
std::ifstream model_file(m_modelName, std::ios::binary);
@ -500,7 +507,7 @@ std::string CompiledKernelsCacheTest::getTestCaseName(testing::TestParamInfo<com
}
void CompiledKernelsCacheTest::SetUp() {
function = ngraph::builder::subgraph::makeConvPoolRelu();
function = ov::test::utils::make_conv_pool_relu();
std::pair<ov::AnyMap, std::string> userConfig;
std::tie(targetDevice, userConfig) = GetParam();
target_device = targetDevice;

View File

@ -6,6 +6,7 @@
#include <ov_models/subgraph_builders.hpp>
#include "behavior/ov_plugin/life_time.hpp"
#include "common_test_utils/subgraph_builders/split_concat.hpp"
namespace ov {
namespace test {
@ -24,7 +25,7 @@ void OVHoldersTest::SetUp() {
if (deathTestStyle == "fast") {
::testing::GTEST_FLAG(death_test_style) = "threadsafe";
}
function = ngraph::builder::subgraph::makeSplitConcat();
function = ov::test::utils::make_split_concat();
}
void OVHoldersTest::TearDown() {
@ -139,7 +140,7 @@ void OVHoldersTestOnImportedNetwork::SetUp() {
if (deathTestStyle == "fast") {
::testing::GTEST_FLAG(death_test_style) = "threadsafe";
}
function = ngraph::builder::subgraph::makeSplitConcat();
function = ov::test::utils::make_split_concat();
}
void OVHoldersTestOnImportedNetwork::TearDown() {

View File

@ -2,9 +2,11 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <cstdint>
#include "behavior/ov_plugin/properties_tests.hpp"
#include "openvino/runtime/properties.hpp"
#include <cstdint>
#include "common_test_utils/subgraph_builders/split_concat.hpp"
namespace ov {
namespace test {
@ -27,7 +29,7 @@ void OVPropertiesTests::SetUp() {
std::tie(target_device, properties) = this->GetParam();
APIBaseTest::SetUp();
SKIP_IF_CURRENT_TEST_IS_DISABLED();
model = ngraph::builder::subgraph::makeSplitConcat();
model = ov::test::utils::make_split_concat();
}
void OVPropertiesTests::TearDown() {
@ -57,7 +59,7 @@ std::string OVSetPropComplieModleGetPropTests::getTestCaseName(testing::TestPara
void OVSetPropComplieModleGetPropTests::SetUp() {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
std::tie(target_device, properties, compileModelProperties) = this->GetParam();
model = ngraph::builder::subgraph::makeSplitConcat();
model = ov::test::utils::make_split_concat();
}
std::string OVPropertiesTestsWithCompileModelProps::getTestCaseName(testing::TestParamInfo<PropertiesParams> obj) {
@ -96,7 +98,7 @@ void OVPropertiesTestsWithCompileModelProps::SetUp() {
compileModelProperties = {{ CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , hw_device}};
}
model = ngraph::builder::subgraph::makeSplitConcat();
model = ov::test::utils::make_split_concat();
APIBaseTest::SetUp();
}

View File

@ -5,7 +5,7 @@
#include "behavior/ov_plugin/remote.hpp"
#include "transformations/utils/utils.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "ov_models/subgraph_builders.hpp"
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
namespace ov {
namespace test {
@ -45,7 +45,7 @@ void OVRemoteTest::SetUp() {
std::pair<ov::AnyMap, ov::AnyMap> param_pair;
std::tie(element_type, target_device, config, param_pair) = GetParam();
std::tie(context_parameters, tensor_parameters) = param_pair;
function = ngraph::builder::subgraph::makeConvPoolRelu({1, 1, 32, 32}, element_type);
function = ov::test::utils::make_conv_pool_relu({1, 1, 32, 32}, element_type);
exec_network = core.compile_model(function, target_device, config);
infer_request = exec_network.create_infer_request();
input = function->get_parameters().front();

View File

@ -10,6 +10,14 @@
#include "common_test_utils/file_utils.hpp"
#include "ov_models/builders.hpp"
#include "ov_models/subgraph_builders.hpp"
#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/kso_func.hpp"
#include "common_test_utils/subgraph_builders/ti_with_lstm_cell.hpp"
#include "common_test_utils/subgraph_builders/single_conv.hpp"
#include "common_test_utils/subgraph_builders/2_input_subtract.hpp"
#include "common_test_utils/subgraph_builders/nested_split_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/conv_bias.hpp"
#include "common_test_utils/subgraph_builders/matmul_bias.hpp"
using namespace InferenceEngine::details;
using namespace InferenceEngine;
@ -72,34 +80,34 @@ std::vector<nGraphFunctionWithName> LoadNetworkCacheTestBase::getNumericTypeOnly
inputShapeWrapper(ngraph::builder::subgraph::makeConvPoolRelu, {1, 1, 32, 32}),
"ConvPoolRelu"});
res.push_back(nGraphFunctionWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcat, {1, 4, 20, 20}),
inputShapeWrapper(ov::test::utils::make_split_conv_concat, {1, 4, 20, 20}),
"SplitConvConcat"});
res.push_back(nGraphFunctionWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeKSOFunction, {1, 4, 20, 20}),
inputShapeWrapper(ov::test::utils::make_kso_function, {1, 4, 20, 20}),
"KSOFunction"});
res.push_back(nGraphFunctionWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeSingleConv, {1, 3, 24, 24}),
inputShapeWrapper(ov::test::utils::make_single_conv, {1, 3, 24, 24}),
"SingleConv"});
res.push_back(nGraphFunctionWithName {
inputShapeWrapper(ngraph::builder::subgraph::make2InputSubtract, {1, 3, 24, 24}),
inputShapeWrapper(ov::test::utils::make_2_input_subtract, {1, 3, 24, 24}),
"2InputSubtract"});
res.push_back(nGraphFunctionWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeNestedSplitConvConcat, {1, 4, 20, 20}),
inputShapeWrapper(ov::test::utils::make_nested_split_conv_concat, {1, 4, 20, 20}),
"NestedSplitConvConcat"});
res.push_back(nGraphFunctionWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcatInputInBranch, {1, 4, 20, 20}),
inputShapeWrapper(ov::test::utils::make_split_conv_concat_input_in_branch, {1, 4, 20, 20}),
"SplitConvConcatInputInBranch"});
res.push_back(nGraphFunctionWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcatNestedInBranch, {1, 4, 20, 20}),
inputShapeWrapper(ov::test::utils::make_split_conv_concat_nested_in_branch, {1, 4, 20, 20}),
"SplitConvConcatNestedInBranch"});
res.push_back(nGraphFunctionWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcatNestedInBranchNestedOut, {1, 4, 20, 20}),
inputShapeWrapper(ov::test::utils::make_split_conv_concat_nested_in_branch_nested_out, {1, 4, 20, 20}),
"SplitConvConcatNestedInBranchNestedOut"});
res.push_back(nGraphFunctionWithName {
inputShapeWrapper(ngraph::builder::subgraph::makeConvBias, {1, 3, 24, 24}),
inputShapeWrapper(ov::test::utils::make_conv_bias, {1, 3, 24, 24}),
"ConvBias"});
res.push_back(nGraphFunctionWithName{
inputShapeWrapper(ngraph::builder::subgraph::makeMatMulBias, {1, 3, 24, 24}),
inputShapeWrapper(ov::test::utils::make_matmul_bias, {1, 3, 24, 24}),
"MatMulBias" });
return res;
}
@ -113,7 +121,7 @@ std::vector<nGraphFunctionWithName> LoadNetworkCacheTestBase::getAnyTypeOnlyFunc
std::vector<nGraphFunctionWithName> LoadNetworkCacheTestBase::getFloatingPointOnlyFunctions() {
std::vector<nGraphFunctionWithName> res;
res.push_back(nGraphFunctionWithName { [](ngraph::element::Type type, size_t batchSize) {
return ngraph::builder::subgraph::makeTIwithLSTMcell(type, batchSize);
return ov::test::utils::make_ti_with_lstm_cell(type, batchSize);
}, "TIwithLSTMcell1"});
return res;
}

View File

@ -10,15 +10,19 @@
#include "openvino/util/file_util.hpp"
#include <random>
#include "ie_algorithm.hpp"
#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/nested_branch_conv_concat.hpp"
#include "common_test_utils/subgraph_builders/nested_split_conv_concat.hpp"
namespace HeteroTests {
static std::vector<std::function<std::shared_ptr<ngraph::Function>()>> builders = {
[] {return ngraph::builder::subgraph::makeSplitMultiConvConcat();},
[] {return ngraph::builder::subgraph::makeNestedSplitConvConcat();},
[] {return ngraph::builder::subgraph::makeSplitConvConcatNestedInBranch();},
[] {return ngraph::builder::subgraph::makeSplitConvConcatNestedInBranchNestedOut();},
[] {return ngraph::builder::subgraph::makeNestedBranchConvConcat();},
[] {return ov::test::utils::make_split_multi_conv_concat();},
[] {return ov::test::utils::make_nested_split_conv_concat();},
[] {return ov::test::utils::make_split_conv_concat_nested_in_branch();},
[] {return ov::test::utils::make_split_conv_concat_nested_in_branch_nested_out();},
[] {return ov::test::utils::make_nested_branch_conv_concat();},
};
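
Each entry in the vector above is a parameterless factory returning a ready-to-use ov::Model; a small sketch of materializing every registered builder once (the reporting format is arbitrary, see the listing below):

#include <functional>
#include <iostream>
#include <memory>
#include <vector>

#include "openvino/core/model.hpp"

// Sketch: instantiate each builder and report how many ops the resulting model contains.
inline void dump_builder_sizes(const std::vector<std::function<std::shared_ptr<ov::Model>()>>& fns) {
    for (const auto& fn : fns) {
        auto model = fn();
        std::cout << model->get_friendly_name() << ": " << model->get_ops().size() << " ops\n";
    }
}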
std::vector<FunctionParameter> HeteroSyntheticTest::withMajorNodesFunctions(

View File

@ -4,7 +4,7 @@
#include <fstream>
#include <ov_models/subgraph_builders.hpp>
#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
#include <base/behavior_test_utils.hpp>
#include "behavior/plugin/life_time.hpp"
@ -31,7 +31,7 @@ namespace BehaviorTestsDefinitions {
std::tie(target_device, order) = this->GetParam();
APIBaseTest::SetUp();
SKIP_IF_CURRENT_TEST_IS_DISABLED();
function = ngraph::builder::subgraph::makeConvPoolRelu();
function = ov::test::utils::make_conv_pool_relu();
}
void release_order_test(std::vector<int> order, const std::string &target_device,
@ -109,7 +109,7 @@ namespace BehaviorTestsDefinitions {
void HoldersTestOnImportedNetwork::SetUp() {
target_device = this->GetParam();
APIBaseTest::SetUp();
function = ngraph::builder::subgraph::makeConvPoolRelu();
function = ov::test::utils::make_conv_pool_relu();
SKIP_IF_CURRENT_TEST_IS_DISABLED();
}

View File

@ -4,6 +4,7 @@
#include "behavior/plugin/stress_tests.hpp"
#include "ov_models/subgraph_builders.hpp"
#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"
namespace LayerTestsDefinitions {
@ -21,7 +22,7 @@ std::string MultipleAllocations::getTestCaseName(const testing::TestParamInfo<Mu
void MultipleAllocations::SetUp() {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
std::tie(targetDevice, m_allocationsCount) = this->GetParam();
function = ngraph::builder::subgraph::makeSplitConvConcat();
function = ov::test::utils::make_split_conv_concat();
}
TEST_P(MultipleAllocations, InferWorksCorrectAfterAllocations) {

View File

@ -16,9 +16,6 @@ namespace subgraph {
std::shared_ptr<ov::Model> makeConvPoolRelu(std::vector<size_t> inputShape = {1, 1, 32, 32},
ov::element::Type_t ngPrc = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeConvPoolReluNoReshapes(std::vector<size_t> inputShape = {1, 1, 32, 32},
ov::element::Type_t ngPrc = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeConvPool2Relu2(std::vector<size_t> inputShape = {1, 1, 32, 32},
ov::element::Type_t ngPrc = ov::element::Type_t::f32);
@ -31,73 +28,11 @@ std::shared_ptr<ov::Model> makeSplitConvConcat(std::vector<size_t> inputShape =
std::shared_ptr<ov::Model> makeKSOFunction(std::vector<size_t> inputShape = {1, 4, 20, 20},
ov::element::Type_t ngPrc = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeSplitMultiConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
ov::element::Type_t ngPrc = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeTIwithLSTMcell(ov::element::Type_t ngPRC = ov::element::Type_t::f32,
size_t N = 32, // Batch size
size_t L = 10, // Sequence length
size_t I = 8, // Input size
size_t H = 32); // Hidden size
std::shared_ptr<ov::Model> makeSingleConv(std::vector<size_t> inputShape = {1, 3, 24, 24},
ov::element::Type_t type = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeDetectionOutput(ov::element::Type_t type = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeMultiSingleConv(std::vector<size_t> inputShape = {1, 3, 24, 24},
ov::element::Type type = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> make2InputSubtract(std::vector<size_t> inputShape = {1, 3, 24, 24},
ov::element::Type_t type = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeNestedBranchConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
ov::element::Type ngPrc = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeNestedSplitConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
ov::element::Type ngPrc = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeSplitConvConcatInputInBranch(std::vector<size_t> inputShape = {1, 4, 20, 20},
ov::element::Type ngPrc = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeSplitConvConcatNestedInBranch(std::vector<size_t> inputShape = {1, 4, 20, 20},
ov::element::Type ngPrc = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeSplitConvConcatNestedInBranchNestedOut(
std::vector<size_t> inputShape = {1, 4, 20, 20},
ov::element::Type ngPrc = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeConvBias(std::vector<size_t> inputShape = {1, 3, 24, 24},
ov::element::Type type = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeReadConcatSplitAssign(std::vector<size_t> inputShape = {1, 1, 2, 4},
ov::element::Type type = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeMatMulBias(std::vector<size_t> inputShape = {1, 3, 24, 24},
ov::element::Type type = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeConvertTranspose(std::vector<size_t> inputShape = {1, 3, 24, 24},
std::vector<size_t> inputOrder = {0, 1, 2, 3},
ov::element::Type type = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeMultipleInputOutputReLU(std::vector<size_t> inputShape = {1, 1, 32, 32},
ov::element::Type_t type = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeMultipleInputOutputDoubleConcat(std::vector<size_t> inputShape = {1, 1, 32, 32},
ov::element::Type_t type = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeSingleConcatWithConstant(std::vector<size_t> inputShape = {1, 1, 2, 4},
ov::element::Type type = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeConcatWithParams(std::vector<size_t> inputShape = {1, 1, 32, 32},
ov::element::Type_t type = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeSingleSplit(std::vector<size_t> inputShape = {1, 4, 32, 32},
ov::element::Type_t type = ov::element::Type_t::f32);
std::shared_ptr<ov::Model> makeSplitConcat(std::vector<size_t> inputShape = {1, 4, 24, 24},
ov::element::Type_t type = ov::element::Type_t::f32);
} // namespace subgraph
} // namespace builder
} // namespace ngraph

File diff suppressed because it is too large.

View File

@ -1,6 +1,7 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_2_input_subtract(ov::Shape input_shape = {1, 3, 24, 24},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_concat_with_params(ov::Shape input_shape = {1, 1, 32, 32},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_conv_bias(ov::Shape input_shape = {1, 3, 24, 24},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,18 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_conv_pool_relu(ov::Shape input_shape = {1, 1, 32, 32},
ov::element::Type type = ov::element::f32);
std::shared_ptr<ov::Model> make_conv_pool2_relu2(ov::Shape input_shape = {1, 1, 32, 32},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov
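
A small usage sketch for these declarations; the shapes, element types, and device name are arbitrary choices, not mandated by the header:

#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
#include "openvino/runtime/core.hpp"

void build_and_compile_examples() {
    // Default 1x1x32x32 f32 model.
    auto model = ov::test::utils::make_conv_pool_relu();
    // Explicit shape and element type, as most call sites in this patch do.
    auto model_fp16 = ov::test::utils::make_conv_pool_relu({1, 3, 227, 227}, ov::element::f16);

    ov::Core core;
    core.compile_model(model, "CPU");  // device is an assumption
}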

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_conv_pool_relu_no_reshapes(ov::Shape input_shape = {1, 1, 32, 32},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,16 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_conv_pool_relu_non_zero(ov::Shape input_shape = {1, 1, 32, 32},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,16 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_convert_transpose(ov::Shape input_shape = {1, 3, 24, 24},
std::vector<size_t> input_order = {0, 1, 2, 3},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,14 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_detection_output(ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_kso_function(ov::Shape input_shape = {1, 4, 20, 20},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_matmul_bias(ov::Shape input_shape = {1, 3, 24, 24},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_multi_single_conv(ov::Shape input_shape = {1, 3, 24, 24},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_multiple_input_output_double_concat(ov::Shape input_shape = {1, 1, 32, 32},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_nested_branch_conv_concat(ov::Shape input_shape = {1, 4, 20, 20},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_nested_split_conv_concat(ov::Shape input_shape = {1, 4, 20, 20},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_read_concat_split_assign(ov::Shape input_shape = {1, 1, 2, 4},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_single_concat_with_constant(ov::Shape input_shape = {1, 1, 2, 4},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_single_conv(ov::Shape input_shape = {1, 3, 24, 24},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_single_split(ov::Shape input_shape = {1, 4, 32, 32},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_split_concat(ov::Shape input_shape = {1, 4, 24, 24},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,25 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_split_conv_concat(ov::Shape input_shape = {1, 4, 20, 20},
ov::element::Type type = ov::element::f32);
std::shared_ptr<ov::Model> make_split_conv_concat_input_in_branch(ov::Shape input_shape = {1, 4, 20, 20},
ov::element::Type type = ov::element::f32);
std::shared_ptr<ov::Model> make_split_conv_concat_nested_in_branch(ov::Shape input_shape = {1, 4, 20, 20},
ov::element::Type type = ov::element::f32);
std::shared_ptr<ov::Model> make_split_conv_concat_nested_in_branch_nested_out(
ov::Shape input_shape = {1, 4, 20, 20},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov
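
Judging by their names, these four builders differ only in how deeply the split/convolution branches are nested; a usage sketch with the declared default shapes:

#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"

void make_nesting_variants() {
    auto flat    = ov::test::utils::make_split_conv_concat();
    auto branch  = ov::test::utils::make_split_conv_concat_input_in_branch();
    auto nested  = ov::test::utils::make_split_conv_concat_nested_in_branch();
    auto deepest = ov::test::utils::make_split_conv_concat_nested_in_branch_nested_out({1, 4, 20, 20},
                                                                                       ov::element::f32);
}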

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_split_multi_conv_concat(ov::Shape input_shape = {1, 4, 20, 20},
ov::element::Type type = ov::element::f32);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,18 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/model.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_ti_with_lstm_cell(ov::element::Type type = ov::element::f32,
size_t N = 32, // Batch size
size_t L = 10, // Sequence length
size_t I = 8, // Input size
size_t H = 32); // Hidden size
} // namespace utils
} // namespace test
} // namespace ov
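
The defaulted size parameters map to the usual LSTM dimensions, so callers can shrink the model for fast tests; for example (the concrete values below are arbitrary):

#include "common_test_utils/subgraph_builders/ti_with_lstm_cell.hpp"

void make_small_lstm_loop() {
    // type, batch (N), sequence length (L), input size (I), hidden size (H)
    auto model = ov::test::utils::make_ti_with_lstm_cell(ov::element::f32, 1, 2, 8, 16);
}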

View File

@ -32,7 +32,7 @@ std::shared_ptr<ov::Node> make_convolution(const ov::Output<Node>& in,
if (!filter_weights.empty()) {
filter_weights_node = std::make_shared<ov::op::v0::Constant>(type, filter_weights_shape, filter_weights);
} else {
auto tensor = create_and_fill_tensor(type, filter_weights_shape);
auto tensor = create_and_fill_tensor(type, filter_weights_shape, 9, 1);
filter_weights_node = std::make_shared<ov::op::v0::Constant>(tensor);
}
@ -49,7 +49,7 @@ std::shared_ptr<ov::Node> make_convolution(const ov::Output<Node>& in,
biases_weights_node =
std::make_shared<ov::op::v0::Constant>(type, ov::Shape{1, num_out_channels, 1, 1}, biases_weights);
} else {
auto tensor = create_and_fill_tensor(type, ov::Shape{1, num_out_channels, 1, 1});
auto tensor = create_and_fill_tensor(type, ov::Shape{1, num_out_channels, 1, 1}, 9, 1);
biases_weights_node = std::make_shared<ov::op::v0::Constant>(tensor);
}
@ -86,7 +86,7 @@ std::shared_ptr<ov::Node> make_convolution(const ov::Output<Node>& in_data,
biases_weights_node =
std::make_shared<ov::op::v0::Constant>(type, ov::Shape{1, num_out_channels, 1, 1}, biases_weights);
} else {
auto tensor = create_and_fill_tensor(type, ov::Shape{1, num_out_channels, 1, 1});
auto tensor = create_and_fill_tensor(type, ov::Shape{1, num_out_channels, 1, 1}, 9, 1);
biases_weights_node = std::make_shared<ov::op::v0::Constant>(tensor);
}
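
The two extra arguments added to create_and_fill_tensor here presumably narrow the random fill; reading them as (range, start_from) puts the generated weights and biases roughly in [1, 10), keeping them away from zero. Under that assumption, an equivalent standalone call (the header path and the 1x8x1x1 shape are illustrative):

#include "common_test_utils/ov_tensor_utils.hpp"  // assumed location of create_and_fill_tensor

// Assumption: trailing parameters are (range, start_from).
ov::Tensor make_nonzero_weights() {
    return ov::test::utils::create_and_fill_tensor(ov::element::f32, ov::Shape{1, 8, 1, 1}, 9, 1);
}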

View File

@ -0,0 +1,27 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "common_test_utils/subgraph_builders/2_input_subtract.hpp"
#include "common_test_utils/node_builders/convolution.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/subtract.hpp"
namespace ov {
namespace test {
namespace utils {
std::shared_ptr<ov::Model> make_2_input_subtract(ov::Shape input_shape, ov::element::Type type) {
auto param0 = std::make_shared<ov::op::v0::Parameter>(type, input_shape);
auto param1 = std::make_shared<ov::op::v0::Parameter>(type, input_shape);
auto subtract = std::make_shared<ov::op::v1::Subtract>(param0, param1);
auto result = std::make_shared<ov::op::v0::Result>(subtract);
auto model = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param0, param1});
model->set_friendly_name("TwoInputSubtract");
return model;
}
} // namespace utils
} // namespace test
} // namespace ov
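
Since the builder produces two parameters and a single result, driving it end to end only needs two input tensors; a sketch, assuming a CPU plugin is available (input data is left uninitialized for brevity):

#include "common_test_utils/subgraph_builders/2_input_subtract.hpp"
#include "openvino/runtime/core.hpp"

void run_two_input_subtract() {
    auto model = ov::test::utils::make_2_input_subtract();  // 1x3x24x24, f32 by default

    ov::Core core;
    auto compiled = core.compile_model(model, "CPU");  // device is an assumption
    auto request = compiled.create_infer_request();

    ov::Tensor a(ov::element::f32, {1, 3, 24, 24});
    ov::Tensor b(ov::element::f32, {1, 3, 24, 24});
    request.set_input_tensor(0, a);
    request.set_input_tensor(1, b);
    request.infer();

    auto out = request.get_output_tensor();  // element-wise a - b
}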

Some files were not shown because too many files have changed in this diff.