From 209331d9dfff314dc6b496dac74dcc6cb0ae6f07 Mon Sep 17 00:00:00 2001
From: Zhang Yi
Date: Mon, 13 Jun 2022 20:25:59 +0800
Subject: [PATCH] Pr/8669 (#11840)

* Added tests

* Apply comments

* Update

* Apply comments

* Fixed remaining comments

* Use ov::test::SubgraphBaseTest

Co-authored-by: Egor Shulman
---
 .../interface/ie_iinfer_request_internal.cpp  |   4 +-
 .../skip_tests_config.cpp                     |   2 +
 .../skip_tests_config.cpp                     |   4 +
 .../skip_tests_config.cpp                     |   3 +
 .../infer_request_dynamic.hpp                 |   8 +-
 .../infer_request_dynamic.cpp                 | 135 +++++++++++++++++-
 .../behavior/ov_infer_request/io_tensor.cpp   |  42 ++++++
 7 files changed, 192 insertions(+), 6 deletions(-)

diff --git a/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp
index 072e7a22bab..fe2dc70d92f 100644
--- a/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp
+++ b/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp
@@ -119,7 +119,7 @@ void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& userBlob) {
     DataPtr foundOutput;
     const bool isInput = findInputAndOutputBlobByName(name, foundInput, foundOutput);
     const auto input = findInputByNodeName(name);
-    const auto output = findInputByNodeName(name);
+    const auto output = findOutputByNodeName(name);
 
     const bool compoundBlobPassed = userBlob->is<CompoundBlob>();
     const bool remoteBlobPassed = userBlob->is<RemoteBlob>();
@@ -130,7 +130,7 @@ void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& userBlob) {
         IE_THROW() << "Input data is empty. Input name: \'" << name << "\'";
     }
     const bool isInputDynamic = input && input->get_output_partial_shape(0).is_dynamic();
-    const bool isOutputDynamic = output && output->get_output_partial_shape(0).is_dynamic();
+    const bool isOutputDynamic = output && output->get_input_partial_shape(0).is_dynamic();
 
     size_t dataSize = userBlob->size();
     if (isInput) {
diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp
index 1eda6aff332..403957a9211 100644
--- a/src/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp
+++ b/src/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp
@@ -162,6 +162,8 @@ std::vector<std::string> disabledTestPatterns() {
         R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*KSOFunction.*)",
         R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*NonMaxSuppression.*)",
         R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*Nms.*)",
+        // Issue: 76980
+        R"(.*smoke_Auto_BehaviorTests.*InferDynamicNetwork/.*)",
         // enable after other plugins support nms9 as setup with nms5 in
         // tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp
         // is shared across plugins
diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp
index c912a24048a..6f6abb85623 100644
--- a/src/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp
+++ b/src/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp
@@ -87,5 +87,9 @@ std::vector<std::string> disabledTestPatterns() {
         R"(.*registerPluginsXMLUnicodePath.*)",
         // Not supported yet
         R"(.*CompileModelCacheTestBase.*)",
+        // Issue: 83014
+        R"(.*smoke_RemoteBlob.*canInferOnUserQueue.*)",
+        // Issue: CVS-76980
+        R"(.*smoke_Auto_BehaviorTests.*InferDynamicNetwork/.*)",
     };
 }
diff --git a/src/tests/functional/plugin/myriad/shared_tests_instances/skip_tests_config.cpp b/src/tests/functional/plugin/myriad/shared_tests_instances/skip_tests_config.cpp
index 3c63ab772b9..1e1c9daa39a 100644
--- a/src/tests/functional/plugin/myriad/shared_tests_instances/skip_tests_config.cpp
+++ b/src/tests/functional/plugin/myriad/shared_tests_instances/skip_tests_config.cpp
@@ -24,6 +24,9 @@ std::vector<std::string> disabledTestPatterns() {
         ".*InferDynamicNetworkWithSetTensor2times.*",
         ".*InferRequestDynamicTests.GetSameTensor2times.*",
         ".*InferRequestDynamicTests.InferDynamicNetworkWithSetTensor.*",
+        ".*InferRequestDynamicTests.InferDynamicNetworkSetOutputShapeBeforeInfer.*",
+        ".*InferRequestDynamicTests.InferDynamicNetworkSetUnexpectedOutputTensorBeforeInfer.*",
+        ".*InferRequestDynamicTests.InferDynamicNetworkSetOutputTensorPreAllocatedMemoryBeforeInfer.*",
         // TODO: Issue: 26268
         ".*ConcatLayerTest.*axis=0.*",
         // TODO: Issue 31197
diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp
index 8bb1ee51a08..b00276342ec 100644
--- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp
@@ -28,6 +28,7 @@
 #include "functional_test_utils/blob_utils.hpp"
 #include "ngraph_functions/subgraph_builders.hpp"
 #include "shared_test_classes/subgraph/basic_lstm.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
 
 // TODO [mandrono]: move current test case inside CPU plug-in and return the original tests
 
 namespace ov {
@@ -35,14 +36,14 @@ namespace test {
 namespace behavior {
 
 using OVInferRequestDynamicParams = std::tuple<
-        std::shared_ptr<Model>,                                            // ov Model
+        std::shared_ptr<Model>,                                             // ov Model
         std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>,  // input/expected output shapes per inference
         std::string,                                                       // Device name
-        ov::AnyMap                                                         // Config
+        ov::AnyMap                                                          // Config
 >;
 
 class OVInferRequestDynamicTests : public testing::WithParamInterface<OVInferRequestDynamicParams>,
-                                   public CommonTestUtils::TestsCommon {
+                                   virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<OVInferRequestDynamicParams> obj);
 
@@ -50,6 +51,7 @@ protected:
     void SetUp() override;
     void TearDown() override;
+    bool checkOutput(const ov::runtime::Tensor& in, const ov::runtime::Tensor& actual);
 
     std::shared_ptr<ov::Core> ie = utils::PluginCache::get().core();
     std::shared_ptr<ov::Model> function;
diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp
index 0910760614a..46913e9236b 100644
--- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp
+++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp
@@ -27,6 +27,7 @@
 #include "ngraph_functions/subgraph_builders.hpp"
 #include "shared_test_classes/subgraph/basic_lstm.hpp"
 #include "behavior/ov_infer_request/infer_request_dynamic.hpp"
+#include
 
 namespace ov {
 namespace test {
@@ -45,7 +46,7 @@ std::string OVInferRequestDynamicTests::getTestCaseName(testing::TestParamInfo<OVInferRequestDynamicParams> obj) {
@@ -57,6 +58,24 @@ void OVInferRequestDynamicTests::SetUp() {
     std::tie(function, inOutShapes, targetDevice, configuration) = this->GetParam();
 }
 
+bool OVInferRequestDynamicTests::checkOutput(const ov::runtime::Tensor& in, const ov::runtime::Tensor& actual) {
+    bool result = true;
+    auto net = ie->compile_model(function, CommonTestUtils::DEVICE_TEMPLATE);
+    ov::InferRequest req;
+    req = net.create_infer_request();
+    auto tensor = req.get_tensor(function->inputs().back().get_any_name());
+    tensor.set_shape(in.get_shape());
+    for (int i = 0; i < in.get_size(); i++) {
+        tensor.data<float>()[i] = in.data<float>()[i];
+    }
+    req.infer();
+    for (int i = 0; i < actual.get_size(); i++) {
+        if (fabs(req.get_output_tensor(0).data<float>()[i] - actual.data<float>()[i]) > std::numeric_limits<float>::epsilon())
+            return false;
+    }
+    return result;
+}
+
 void OVInferRequestDynamicTests::TearDown() {
     if (!configuration.empty()) {
         PluginCache::get().reset();
@@ -68,6 +87,111 @@ void OVInferRequestDynamicTests::TearDown() {
     }
     function.reset();
 }
 
+/*
+We have to check that we don't get a segmentation fault during
+inference if we set the same shape for the first two inferences and
+then a different one, for the case with upper bounds.
+
+Previously, this resulted in a segmentation fault for the CPU plugin.
+*/
+TEST_P(OVInferRequestDynamicTests, InferDynamicNetwork) {
+    std::vector<ov::Shape> vectorShapes{inOutShapes[0].first, inOutShapes[0].first, inOutShapes[1].first};
+    const std::string tensor_name = "input_tensor";
+    std::map<std::string, ov::PartialShape> shapes;
+    shapes[tensor_name] = { ov::Dimension(1, inOutShapes[1].first[0]),
+                            ov::Dimension(1, inOutShapes[1].first[1]),
+                            ov::Dimension(1, inOutShapes[1].first[2]),
+                            ov::Dimension(1, inOutShapes[1].first[3])
+    };
+    OV_ASSERT_NO_THROW(function->reshape(shapes));
+    // Load ov::Model to target plugins
+    auto execNet = ie->compile_model(function, targetDevice, configuration);
+    // Create InferRequest
+    ov::InferRequest req;
+    const std::string outputname = function->outputs().back().get_any_name();
+    for (auto& shape : vectorShapes) {
+        ov::runtime::Tensor inTensor = ov::test::utils::create_and_fill_tensor(element::f32, shape, 100, -50);
+        OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
+        OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", inTensor));
+        OV_ASSERT_NO_THROW(req.infer());
+        ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputname)));
+    }
+}
+
+TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetUnexpectedOutputTensorBeforeInfer) {
+    const std::string tensor_name = "input_tensor";
+    const ov::Shape refShape = inOutShapes[0].first;
+    const ov::Shape refOutShape = inOutShapes[0].second;
+    std::map<std::string, ov::PartialShape> shapes;
+    shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20};
+    OV_ASSERT_NO_THROW(function->reshape(shapes));
+    // Load ov::Model to target plugins
+    auto execNet = ie->compile_model(function, targetDevice, configuration);
+    // Create InferRequest
+    ov::InferRequest req;
+    ov::runtime::Tensor tensor, otensor;
+    const std::string outputname = function->outputs().back().get_any_name();
+    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
+    tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, 100, -50);
+    OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", tensor));
+    auto outShape = refOutShape;
+    outShape[0] += 1;
+    otensor = ov::test::utils::create_and_fill_tensor(element::f32, outShape, 100, 50);
+    OV_ASSERT_NO_THROW(req.set_tensor(outputname, otensor));
+    OV_ASSERT_NO_THROW(req.infer());
+    ASSERT_EQ(otensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputname)));
+}
+
+TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetOutputTensorPreAllocatedMemoryBeforeInfer) {
+    const std::string tensor_name = "input_tensor";
+    const ov::Shape refShape = inOutShapes[0].first;
+    const ov::Shape refOutShape = inOutShapes[0].second;
+    std::map<std::string, ov::PartialShape> shapes;
+    shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20};
+    OV_ASSERT_NO_THROW(function->reshape(shapes));
+    // Load ov::Model to target plugins
+    auto execNet = ie->compile_model(function, targetDevice, configuration);
+    // Create InferRequest
+    ov::InferRequest req;
+    ov::runtime::Tensor tensor;
+    const std::string outputname = function->outputs().back().get_any_name();
+    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
+    tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, 100, -50);
+    OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", tensor));
+    float ptr[5000];
+    ov::runtime::Tensor otensor(element::f32, refOutShape, ptr);
+    OV_ASSERT_NO_THROW(req.set_tensor(outputname, otensor));
+    OV_ASSERT_NO_THROW(req.infer());
+    ASSERT_EQ(req.get_tensor(outputname).data<float>(), ptr);
+    ASSERT_EQ(req.get_tensor(outputname).get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputname)));
+}
+
+TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetOutputShapeBeforeInfer) {
+    const std::string tensor_name = "input_tensor";
+    const ov::Shape refShape = inOutShapes[0].first;
+    const ov::Shape refOutShape = inOutShapes[0].second;
+    std::map<std::string, ov::PartialShape> shapes;
+    shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20};
+    OV_ASSERT_NO_THROW(function->reshape(shapes));
+    // Load ov::Model to target plugins
+    auto execNet = ie->compile_model(function, targetDevice, configuration);
+    // Create InferRequest
+    ov::InferRequest req;
+    ov::runtime::Tensor tensor, otensor;
+    const std::string outputname = function->outputs().back().get_any_name();
+    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
+    tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, 100, -50);
+    OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", tensor));
+    OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputname));
+    OV_ASSERT_NO_THROW(otensor.set_shape(refOutShape));
+    OV_ASSERT_NO_THROW(req.infer());
+    OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputname));
+    ASSERT_EQ(otensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputname)));
+}
+
 TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithoutSetShape) {
     const std::string tensor_name = "input_tensor";
     std::map<std::string, ov::PartialShape> shapes;
@@ -124,6 +248,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithGetTensor) {
     EXPECT_NE(0, otensor.get_size()); // output tensor is allocated after infer
     OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputname));
     ASSERT_EQ(otensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputname)));
 }
 
 TEST_P(OVInferRequestDynamicTests, InferUpperBoundNetworkWithGetTensor) {
@@ -151,6 +276,7 @@ TEST_P(OVInferRequestDynamicTests, InferUpperBoundNetworkWithGetTensor) {
     OV_ASSERT_NO_THROW(req.start_async());
     OV_ASSERT_NO_THROW(req.wait());
     ASSERT_EQ(otensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputname)));
 }
 
 TEST_P(OVInferRequestDynamicTests, InferFullyDynamicNetworkWithGetTensor) {
@@ -179,6 +305,7 @@ TEST_P(OVInferRequestDynamicTests, InferFullyDynamicNetworkWithGetTensor) {
     OV_ASSERT_NO_THROW(req.wait());
     OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputName));
     ASSERT_EQ(otensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
 }
 
 TEST_P(OVInferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetTensorLower) {
@@ -243,6 +370,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithGetTensor2times) {
     const std::string outputName = function->outputs().back().get_any_name();
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
     ASSERT_EQ(tensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
 
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
     OV_ASSERT_NO_THROW(tensor.set_shape(refShape2));
@@ -252,6 +380,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithGetTensor2times) {
     req.wait();
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
     ASSERT_EQ(tensor.get_shape(), refOutShape2);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
 }
 
 
@@ -295,6 +424,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithSetTensor) {
     const std::string outputName = function->outputs().back().get_any_name();
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
     ASSERT_EQ(tensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
 }
 
 TEST_P(OVInferRequestDynamicTests, InferFullyDynamicNetworkWithSetTensor) {
@@ -324,6 +454,7 @@ TEST_P(OVInferRequestDynamicTests, InferFullyDynamicNetworkWithSetTensor) {
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
     ASSERT_EQ(tensor.get_shape(), refOutShape);
     ASSERT_EQ(otensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
 }
 
 TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) {
@@ -350,6 +481,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) {
     OV_ASSERT_NO_THROW(req.wait());
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
     ASSERT_EQ(tensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
 
     tensor = ov::Tensor(ov::element::f32, refShape2);
     OV_ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor));
@@ -359,6 +491,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) {
     OV_ASSERT_NO_THROW(req.wait());
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
     ASSERT_EQ(tensor.get_shape(), refOutShape2);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
 }
 
 TEST_P(OVNotSupportRequestDynamicTests, InferDynamicNotSupported) {
diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp
index 3adfd5abe94..519ce6c2de0 100644
--- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp
+++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp
@@ -179,6 +179,48 @@ TEST_P(OVInferRequestIOTensorTest, canInferWithGetOut) {
     OV_ASSERT_NO_THROW(req.get_tensor(output));
 }
 
+TEST_P(OVInferRequestIOTensorTest, InferStaticNetworkSetInputTensor) {
+    const ov::Shape shape1 = {1, 1, 32, 32};
+    const ov::Shape shape2 = {1, 1, 40, 40};
+    std::map<std::string, ov::PartialShape> shapes;
+    shapes[function->inputs().back().get_any_name()] = shape1;
+    OV_ASSERT_NO_THROW(function->reshape(shapes));
+    // Load ov::Model to target plugins
+    std::shared_ptr<ov::Core> ie = utils::PluginCache::get().core();
+    auto execNet = ie->compile_model(function, targetDevice, configuration);
+    // Create InferRequest
+    ov::InferRequest req;
+    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
+    OV_ASSERT_NO_THROW(req.infer());
+    // Get input_tensor
+    ov::runtime::Tensor tensor;
+    OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
+    // Set shape
+    OV_ASSERT_NO_THROW(tensor.set_shape(shape2));
+    ASSERT_ANY_THROW(req.infer());
+}
+
+TEST_P(OVInferRequestIOTensorTest, InferStaticNetworkSetOutputTensor) {
+    const ov::Shape shape1 = {1, 1, 32, 32};
+    const ov::Shape shape2 = {1, 20};
+    std::map<std::string, ov::PartialShape> shapes;
+    shapes[function->inputs().back().get_any_name()] = shape1;
+    OV_ASSERT_NO_THROW(function->reshape(shapes));
+    // Load ov::Model to target plugins
+    std::shared_ptr<ov::Core> ie = utils::PluginCache::get().core();
+    auto execNet = ie->compile_model(function, targetDevice, configuration);
+    // Create InferRequest
+    ov::InferRequest req;
+    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
+    OV_ASSERT_NO_THROW(req.infer());
+    // Get output_tensor
+    ov::runtime::Tensor tensor;
+    OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->outputs().back().get_any_name()));
+    // Set shape
+    OV_ASSERT_NO_THROW(tensor.set_shape(shape2));
+    ASSERT_ANY_THROW(req.infer());
+}
+
 std::string OVInferRequestIOTensorSetPrecisionTest::getTestCaseName(const testing::TestParamInfo<OVInferRequestSetPrecisionParams>& obj) {
     element::Type type;
     std::string targetDevice;
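
Editor's note, not part of the patch: the scenario the new InferDynamicNetwork test guards against (re-inferring a bounded dynamic model with the same shape twice and then a different one) can be reproduced with the public OpenVINO 2.0 API roughly as in the sketch below. This is a minimal illustration under assumptions: the toy Parameter->Relu model stands in for the test's subgraph, "CPU" stands in for any target device, and no reference comparison is done; only the calls that exercised the crashing path are shown.

// sketch.cpp - minimal reproduction sketch (assumptions noted above)
#include <openvino/openvino.hpp>
#include <openvino/opsets/opset8.hpp>
#include <algorithm>
#include <memory>
#include <vector>

int main() {
    // Build a model whose batch dimension is dynamic with an upper bound,
    // mirroring the ov::Dimension(1, upper) reshape done in the test.
    auto param = std::make_shared<ov::opset8::Parameter>(
        ov::element::f32, ov::PartialShape{ov::Dimension(1, 8), 4, 20, 20});
    auto relu = std::make_shared<ov::opset8::Relu>(param);
    auto model = std::make_shared<ov::Model>(ov::OutputVector{relu},
                                             ov::ParameterVector{param});

    ov::Core core;
    auto compiled = core.compile_model(model, "CPU");
    auto request = compiled.create_infer_request();

    // Infer with the same concrete shape twice, then a different one;
    // the plugin must reallocate/reshape the output tensor each time.
    for (size_t batch : std::vector<size_t>{2, 2, 4}) {
        ov::Tensor input(ov::element::f32, ov::Shape{batch, 4, 20, 20});
        std::fill_n(input.data<float>(), input.get_size(), 1.0f);
        request.set_input_tensor(input);
        request.infer();
        ov::Tensor output = request.get_output_tensor(0);
        // output.get_shape() now equals {batch, 4, 20, 20}.
    }
    return 0;
}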