diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp
index 2c0c28864b5..de8870c2125 100644
--- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp
+++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp
@@ -47,7 +47,7 @@ protected:
     Config configuration;
     std::shared_ptr<ov::Function> function, functionRefs = nullptr;
-    std::map<std::string, ov::runtime::Tensor> inputs;
+    std::map<std::shared_ptr<ov::Node>, ov::runtime::Tensor> inputs;
     std::vector<ov::PartialShape> inputDynamicShapes;
     std::vector<std::vector<ov::Shape>> targetStaticShapes;
     ElementType inType = ov::element::undefined, outType = ov::element::undefined;
diff --git a/inference-engine/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp b/inference-engine/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp
index d19fa75342d..a32ed9c77c2 100644
--- a/inference-engine/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp
+++ b/inference-engine/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp
@@ -155,11 +155,11 @@ void SubgraphBaseTest::compile_model() {
 void SubgraphBaseTest::generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) {
     inputs.clear();
-    const auto& params = function->inputs();
-    for (int i = 0; i < params.size(); ++i) {
-        const auto& param = params[i];
-        ov::runtime::Tensor tensor = ov::test::utils::create_and_fill_tensor(param.get_element_type(), targetInputStaticShapes[i]);
-        inputs.insert({param.get_any_name(), tensor});
+    const auto& funcInputs = function->inputs();
+    for (int i = 0; i < funcInputs.size(); ++i) {
+        const auto& funcInput = funcInputs[i];
+        ov::runtime::Tensor tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]);
+        inputs.insert({funcInput.get_node_shared_ptr(), tensor});
     }
 }
@@ -179,7 +179,7 @@ std::vector<ov::runtime::Tensor> SubgraphBaseTest::calculate_refs() {
 std::vector<ov::runtime::Tensor> SubgraphBaseTest::get_plugin_outputs() {
     auto outputs = std::vector<ov::runtime::Tensor>{};
     for (const auto& output : function->outputs()) {
-        outputs.push_back(inferRequest.get_tensor(output.get_any_name()));
+        outputs.push_back(inferRequest.get_tensor(output));
     }
     return outputs;
 }
diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp
index a57551544f9..b6883723696 100644
--- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp
+++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp
@@ -44,32 +44,32 @@ std::string EltwiseLayerTest::getTestCaseName(const testing::TestParamInfo<EltwiseTestParams>& obj) {
 void EltwiseLayerTest::generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) {
     inputs.clear();
     const auto opType = std::get<1>(GetParam());
-    const auto& params = function->inputs();
-    for (int i = 0; i < params.size(); ++i) {
-        const auto& param = params[i];
+    const auto& funcInputs = function->inputs();
+    for (int i = 0; i < funcInputs.size(); ++i) {
+        const auto& funcInput = funcInputs[i];
         ov::runtime::Tensor tensor;
-        bool isReal = param.get_element_type().is_real();
+        bool isReal = funcInput.get_element_type().is_real();
         switch (opType) {
             case ngraph::helpers::EltwiseTypes::POWER:
             case ngraph::helpers::EltwiseTypes::MOD:
             case ngraph::helpers::EltwiseTypes::FLOOR_MOD:
                 tensor = isReal ?
-                         ov::test::utils::create_and_fill_tensor(param.get_element_type(), targetInputStaticShapes[i], 2, 2, 128) :
-                         ov::test::utils::create_and_fill_tensor(param.get_element_type(), targetInputStaticShapes[i], 4, 2);
+                         ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 2, 2, 128) :
+                         ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 4, 2);
                 break;
             case ngraph::helpers::EltwiseTypes::DIVIDE:
                 tensor = isReal ?
-                         ov::test::utils::create_and_fill_tensor(param.get_element_type(), targetInputStaticShapes[i], 2, 2, 128) :
-                         ov::test::utils::create_and_fill_tensor(param.get_element_type(), targetInputStaticShapes[i], 100, 101);
+                         ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 2, 2, 128) :
+                         ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 100, 101);
                 break;
             case ngraph::helpers::EltwiseTypes::ERF:
-                tensor = ov::test::utils::create_and_fill_tensor(param.get_element_type(), targetInputStaticShapes[i], 6, -3);
+                tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 6, -3);
                 break;
             default:
-                tensor = ov::test::utils::create_and_fill_tensor(param.get_element_type(), targetInputStaticShapes[i]);
+                tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]);
                 break;
         }
-        inputs.insert({param.get_any_name(), tensor});
+        inputs.insert({funcInput.get_node_shared_ptr(), tensor});
     }
 }
diff --git a/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp
index ac59e122f29..76e4db937db 100644
--- a/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp
+++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp
@@ -271,7 +271,7 @@ std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>>
 std::vector<ov::runtime::Tensor> interpretFunction(const std::shared_ptr<Function> &function,
-                                                   const std::map<std::string, ov::runtime::Tensor>& inputs);
+                                                   const std::map<std::shared_ptr<ov::Node>, ov::runtime::Tensor>& inputs);
 
 //
 // This function compares two nGraph functions and requires them to have exactly one output
diff --git a/inference-engine/tests/ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp
index 3ea06093cc0..5d1c813a8b2 100644
--- a/inference-engine/tests/ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp
+++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp
@@ -145,38 +145,41 @@ std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>>
 }
 
 std::vector<ov::runtime::Tensor> interpretFunction(const std::shared_ptr<Function> &function,
-                                                   const std::map<std::string, ov::runtime::Tensor>& inputs) {
+                                                   const std::map<std::shared_ptr<ov::Node>, ov::runtime::Tensor>& inputs) {
     runtime::Backend::set_backend_shared_library_search_directory("");
     auto backend = runtime::Backend::create("INTERPRETER");
 
-    const auto &parameters = function->inputs();
-    const auto &parametersNumber = parameters.size();
+    const auto &funcInputs = function->inputs();
+    const auto &funcInputsNumber = funcInputs.size();
     const auto &inputsNumber = inputs.size();
-    NGRAPH_CHECK(parametersNumber == inputsNumber,
-                 "Got function (", function->get_friendly_name(), ") with ", parametersNumber, " parameters, but ",
+    NGRAPH_CHECK(funcInputsNumber == inputsNumber,
+                 "Got function (", function->get_friendly_name(), ") with ", funcInputsNumber, " parameters, but ",
                  inputsNumber, " input blobs");
 
     auto inputTensors = std::vector<std::shared_ptr<runtime::Tensor>>{};
-    for (size_t i = 0; i < parametersNumber; ++i) {
-        const auto &parameter = parameters[i];
-        const auto &parameterShape = parameter.get_shape();
-        const auto &parameterType = parameter.get_element_type();
-        const auto &parameterSize = shape_size(parameterShape) * parameterType.size();
+    for (size_t i = 0; i < funcInputsNumber; ++i) {
+        const auto &input = funcInputs[i];
+        const auto &inputShape = input.get_shape();
+        const auto &inputType = input.get_element_type();
+        const auto &inputSize = shape_size(inputShape) * inputType.size();
 
-        auto inputIt = inputs.find(parameter.get_any_name());
+        auto inputIt = std::find_if(inputs.begin(), inputs.end(),
+                                    [&input](std::pair<std::shared_ptr<ov::Node>, ov::runtime::Tensor> elem) {
+                                        return elem.first->get_friendly_name() == input.get_node_shared_ptr()->get_friendly_name();
+                                    });
         if (inputIt == inputs.end()) {
-            throw std::runtime_error("Parameter: " + parameter.get_any_name()+ " was not find in input parameters");
+            throw std::runtime_error("Parameter: " + input.get_node_shared_ptr()->get_friendly_name() + " was not found in input parameters");
         }
-        auto input = inputIt->second;
+        auto inputTensor = inputIt->second;
 
-        const auto &inputSize = input.get_byte_size();
-        NGRAPH_CHECK(parameterSize == inputSize,
-                     "Got parameter (", parameter.get_any_name(), ") of size ", parameterSize,
+        const auto &inputTensorSize = inputTensor.get_byte_size();
+        NGRAPH_CHECK(inputSize == inputTensorSize,
+                     "Got parameter (", input.get_node_shared_ptr()->get_friendly_name(), ") of size ", inputSize,
                      " bytes, but corresponding input ",
-                     " has ", inputSize, " bytes");
+                     " has ", inputTensorSize, " bytes");
 
-        auto tensor = backend->create_tensor(parameterType, parameterShape);
-        tensor->write(input.data(), parameterSize);
+        auto tensor = backend->create_tensor(inputType, inputShape);
+        tensor->write(inputTensor.data(), inputSize);
         inputTensors.push_back(tensor);
     }
@@ -902,14 +905,14 @@ std::ostream& operator<<(std::ostream & os, MemoryTransformation type) {
 void resize_function(std::shared_ptr<ov::Function> function, const std::vector<ov::Shape>& targetInputStaticShapes) {
-    auto params = function->get_parameters();
-    std::map<std::string, ov::PartialShape> shapes;
-    if (params.size() > targetInputStaticShapes.size()) {
-        throw std::runtime_error("targetInputStaticShapes.size() = " + std::to_string(targetInputStaticShapes.size()) + " != params.size() = "
-                                 + std::to_string(params.size()));
+    auto inputs = function->inputs();
+    std::map<ov::Output<ov::Node>, ov::PartialShape> shapes;
+    if (inputs.size() > targetInputStaticShapes.size()) {
+        throw std::runtime_error("targetInputStaticShapes.size() = " + std::to_string(targetInputStaticShapes.size()) + " != inputs.size() = "
+                                 + std::to_string(inputs.size()));
     }
-    for (size_t i = 0; i < params.size(); i++) {
-        shapes.insert({params[i]->get_output_tensor(0).get_any_name(), targetInputStaticShapes[i]});
+    for (size_t i = 0; i < inputs.size(); i++) {
+        shapes.insert({inputs[i], targetInputStaticShapes[i]});
     }
     function->reshape(shapes);
 }