Pr/8669 (#11840)
* Added tests

* Apply comments

* Update

* Apply comments

* Fixed remaining comments

* Use ov::test::SubgraphBaseTest

Co-authored-by: Egor Shulman <egor.shulman@intel.com>
parent 8603acecba
commit 209331d9df
@@ -119,7 +119,7 @@ void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& userBlob)
     DataPtr foundOutput;
     const bool isInput = findInputAndOutputBlobByName(name, foundInput, foundOutput);
     const auto input = findInputByNodeName(name);
-    const auto output = findInputByNodeName(name);
+    const auto output = findOutputByNodeName(name);
 
     const bool compoundBlobPassed = userBlob->is<CompoundBlob>();
     const bool remoteBlobPassed = userBlob->is<RemoteBlob>();
@@ -130,7 +130,7 @@ void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& userBlob)
         IE_THROW() << "Input data is empty. Input name: \'" << name << "\'";
     }
     const bool isInputDynamic = input && input->get_output_partial_shape(0).is_dynamic();
-    const bool isOutputDynamic = output && output->get_output_partial_shape(0).is_dynamic();
+    const bool isOutputDynamic = output && output->get_input_partial_shape(0).is_dynamic();
 
     size_t dataSize = userBlob->size();
     if (isInput) {
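Both hunks fix one-line slips in `SetBlob`: the output node had been looked up with `findInputByNodeName`, and its dynamism had been probed on the wrong port. As a minimal stand-in sketch (hypothetical `ParameterLike`/`ResultLike` types, not the actual InferenceEngine classes), the asymmetry is that a model input produces its value on an output port, while a model output only consumes one on an input port:

```cpp
#include <cstddef>
#include <iostream>

struct PartialShapeLike {
    bool dynamic = false;
    bool is_dynamic() const { return dynamic; }
};

// Stand-in for a Parameter node: the value it produces lives on an output port.
struct ParameterLike {
    PartialShapeLike out{true};
    PartialShapeLike get_output_partial_shape(size_t) const { return out; }
};

// Stand-in for a Result node: it only consumes a value, so its shape sits on an input port.
struct ResultLike {
    PartialShapeLike in{false};
    PartialShapeLike get_input_partial_shape(size_t) const { return in; }
};

int main() {
    ParameterLike input;
    ResultLike output;
    // Mirrors the corrected checks in the patch above.
    const bool isInputDynamic  = input.get_output_partial_shape(0).is_dynamic();
    const bool isOutputDynamic = output.get_input_partial_shape(0).is_dynamic();
    std::cout << isInputDynamic << ' ' << isOutputDynamic << '\n';  // prints "1 0"
}
```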
@@ -162,6 +162,8 @@ std::vector<std::string> disabledTestPatterns() {
        R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*KSOFunction.*)",
        R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*NonMaxSuppression.*)",
        R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*Nms.*)",
+       // Issue: 76980
+       R"(.*smoke_Auto_BehaviorTests.*InferDynamicNetwork/.*)",
        // enable after other plugins support nms9 as setup with nms5 in
        // tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp
        // is shared across plugins
@@ -87,5 +87,9 @@ std::vector<std::string> disabledTestPatterns() {
        R"(.*registerPluginsXMLUnicodePath.*)",
        // Not supported yet
        R"(.*CompileModelCacheTestBase.*)",
+       // Issue: 83014
+       R"(.*smoke_RemoteBlob.*canInferOnUserQueue.*)",
+       // Issue: CVS-76980
+       R"(.*smoke_Auto_BehaviorTests.*InferDynamicNetwork/.*)",
    };
}
@@ -24,6 +24,9 @@ std::vector<std::string> disabledTestPatterns() {
        ".*InferDynamicNetworkWithSetTensor2times.*",
        ".*InferRequestDynamicTests.GetSameTensor2times.*",
        ".*InferRequestDynamicTests.InferDynamicNetworkWithSetTensor.*",
+       ".*InferRequestDynamicTests.InferDynamicNetworkSetOutputShapeBeforeInfer.*",
+       ".*InferRequestDynamicTests.InferDynamicNetworkSetUnexpectedOutputTensorBeforeInfer.*",
+       ".*InferRequestDynamicTests.InferDynamicNetworkSetOutputTensorPreAllocatedMemoryBeforeInfer.*",
        // TODO: Issue: 26268
        ".*ConcatLayerTest.*axis=0.*",
        // TODO: Issue 31197
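The three skip lists above are ordinary regular expressions matched against the full parameterized test name. A small self-contained sketch (assuming `std::regex`-style matching, which is what these raw-string patterns amount to) of how the new `InferDynamicNetwork` pattern filters a test:

```cpp
#include <iostream>
#include <regex>
#include <string>

int main() {
    // One of the patterns added above, verbatim.
    const std::regex disabled(R"(.*smoke_Auto_BehaviorTests.*InferDynamicNetwork/.*)");
    // Hypothetical full test name of the kind gtest produces for TEST_P suites.
    const std::string name =
        "smoke_Auto_BehaviorTests/OVInferRequestDynamicTests.InferDynamicNetwork/0";
    std::cout << (std::regex_match(name, disabled) ? "skipped" : "run") << '\n';  // "skipped"
}
```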
@@ -28,6 +28,7 @@
 #include "functional_test_utils/blob_utils.hpp"
 #include "ngraph_functions/subgraph_builders.hpp"
 #include "shared_test_classes/subgraph/basic_lstm.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
 
 // TODO [mandrono]: move current test case inside CPU plug-in and return the original tests
 namespace ov {
@@ -35,14 +36,14 @@ namespace test {
 namespace behavior {
 
 using OVInferRequestDynamicParams = std::tuple<
        std::shared_ptr<Model>,                                            // ov Model
        std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>,  // input/expected output shapes per inference
        std::string,                                                       // Device name
        ov::AnyMap                                                         // Config
 >;
 
 class OVInferRequestDynamicTests : public testing::WithParamInterface<OVInferRequestDynamicParams>,
-                                   public CommonTestUtils::TestsCommon {
+                                   virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<OVInferRequestDynamicParams> obj);
 
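The fixture's second base changes from `CommonTestUtils::TestsCommon` to `virtual public ov::test::SubgraphBaseTest`. A compilable stand-in (hypothetical class names, assuming the usual motivation for the `virtual` keyword) of what virtual inheritance buys: fixtures that combine several test bases sharing a common ancestor end up with one ancestor subobject instead of two ambiguous copies.

```cpp
#include <iostream>

struct TestsCommonLike {                                 // stand-in for a shared test base
    virtual ~TestsCommonLike() = default;
};
struct SubgraphBaseLike : virtual TestsCommonLike {};    // like ov::test::SubgraphBaseTest
struct AnotherMixinLike : virtual TestsCommonLike {};    // another base a fixture might add

// Like the patched fixture: with virtual inheritance there is exactly one
// TestsCommonLike subobject, so access through it is unambiguous.
struct DynamicFixtureLike : AnotherMixinLike, virtual SubgraphBaseLike {};

int main() {
    DynamicFixtureLike f;
    std::cout << "one shared base at " << static_cast<TestsCommonLike*>(&f) << '\n';
}
```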
@@ -50,6 +51,7 @@ protected:
     void SetUp() override;
 
     void TearDown() override;
+    bool checkOutput(const ov::runtime::Tensor& in, const ov::runtime::Tensor& actual);
 
     std::shared_ptr<ov::Core> ie = utils::PluginCache::get().core();
     std::shared_ptr<Model> function;
@@ -27,6 +27,7 @@
 #include "ngraph_functions/subgraph_builders.hpp"
 #include "shared_test_classes/subgraph/basic_lstm.hpp"
 #include "behavior/ov_infer_request/infer_request_dynamic.hpp"
+#include <common_test_utils/ov_tensor_utils.hpp>
 
 namespace ov {
 namespace test {
@@ -45,7 +46,7 @@ std::string OVInferRequestDynamicTests::getTestCaseName(testing::TestParamInfo<OVInferRequestDynamicParams> obj) {
         result << "(" << CommonTestUtils::vec2str(inOutShape.first) << "_" << CommonTestUtils::vec2str(inOutShape.second) << ")";
     }
     result << ")_";
-    result << "targetDevice=" << targetDevice;
+    result << "targetDevice=" << targetDevice << "_";
     if (!configuration.empty()) {
         for (auto& configItem : configuration) {
             result << "configItem=" << configItem.first << "_";
@@ -61,6 +62,24 @@ void OVInferRequestDynamicTests::SetUp() {
     std::tie(function, inOutShapes, targetDevice, configuration) = this->GetParam();
 }
 
+bool OVInferRequestDynamicTests::checkOutput(const ov::runtime::Tensor& in, const ov::runtime::Tensor& actual) {
+    bool result = true;
+    auto net = ie->compile_model(function, CommonTestUtils::DEVICE_TEMPLATE);
+    ov::InferRequest req;
+    req = net.create_infer_request();
+    auto tensor = req.get_tensor(function->inputs().back().get_any_name());
+    tensor.set_shape(in.get_shape());
+    for (int i = 0; i < in.get_size(); i++) {
+        tensor.data<float>()[i] = in.data<float>()[i];
+    }
+    req.infer();
+    for (int i = 0; i < actual.get_size(); i++) {
+        if (fabs(req.get_output_tensor(0).data<float>()[i] - actual.data<float>()[i]) > std::numeric_limits<float>::epsilon())
+            return false;
+    }
+    return result;
+}
+
 void OVInferRequestDynamicTests::TearDown() {
     if (!configuration.empty()) {
         PluginCache::get().reset();
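`checkOutput` recomputes the expected result on the reference TEMPLATE device and compares element-wise against `std::numeric_limits<float>::epsilon()`, which is the spacing of floats at 1.0, so the check effectively requires near-bit-identical outputs. For contrast, a hedged sketch of a relative tolerance (not what the commit uses) that would absorb ordinary accumulated rounding:

```cpp
#include <cmath>
#include <iostream>
#include <limits>

// Relative comparison: scales the allowed difference by the magnitude of the
// operands, with an absolute floor for values near zero.
bool nearly_equal(float a, float b,
                  float rel = 8 * std::numeric_limits<float>::epsilon()) {
    const float diff = std::fabs(a - b);
    if (diff < std::numeric_limits<float>::min())  // both effectively zero
        return true;
    return diff <= rel * std::fmax(std::fabs(a), std::fabs(b));
}

int main() {
    std::cout << nearly_equal(1000.0f, 1000.0001f) << '\n';  // 1: within relative tolerance
    std::cout << nearly_equal(1.0f, 1.1f) << '\n';           // 0: genuinely different
}
```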
@@ -68,6 +87,111 @@ void OVInferRequestDynamicTests::TearDown() {
     function.reset();
 }
 
+/*
+We have to check that we don't get a segmentation fault during
+inference if we set the first two times to the same shape and
+then a different one for the case with upper bounds.
+
+Previously, this resulted in a segmentation fault for the CPU plugin.
+*/
+TEST_P(OVInferRequestDynamicTests, InferDynamicNetwork) {
+    std::vector<ov::Shape> vectorShapes{inOutShapes[0].first, inOutShapes[0].first, inOutShapes[1].first};
+    const std::string tensor_name = "input_tensor";
+    std::map<std::string, ov::PartialShape> shapes;
+    shapes[tensor_name] = { ov::Dimension(1, inOutShapes[1].first[0]),
+                            ov::Dimension(1, inOutShapes[1].first[1]),
+                            ov::Dimension(1, inOutShapes[1].first[2]),
+                            ov::Dimension(1, inOutShapes[1].first[3])
+    };
+    OV_ASSERT_NO_THROW(function->reshape(shapes));
+    // Load ov::Model to target plugins
+    auto execNet = ie->compile_model(function, targetDevice, configuration);
+    // Create InferRequest
+    ov::InferRequest req;
+    const std::string outputname = function->outputs().back().get_any_name();
+    for (auto& shape : vectorShapes) {
+        ov::runtime::Tensor inTensor = ov::test::utils::create_and_fill_tensor(element::f32, shape, 100, -50);
+        OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
+        OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", inTensor));
+        OV_ASSERT_NO_THROW(req.infer());
+        ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputname)));
+    }
+}
+
+TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetUnexpectedOutputTensorBeforeInfer) {
+    const std::string tensor_name = "input_tensor";
+    const ov::Shape refShape = inOutShapes[0].first;
+    const ov::Shape refOutShape = inOutShapes[0].second;
+    std::map<std::string, ov::PartialShape> shapes;
+    shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20};
+    OV_ASSERT_NO_THROW(function->reshape(shapes));
+    // Load ov::Model to target plugins
+    auto execNet = ie->compile_model(function, targetDevice, configuration);
+    // Create InferRequest
+    ov::InferRequest req;
+    ov::runtime::Tensor tensor, otensor;
+    const std::string outputname = function->outputs().back().get_any_name();
+    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
+    tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, 100, -50);
+    OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", tensor));
+    auto outShape = refOutShape;
+    outShape[0] += 1;
+    otensor = ov::test::utils::create_and_fill_tensor(element::f32, outShape, 100, 50);
+    OV_ASSERT_NO_THROW(req.set_tensor(outputname, otensor));
+    OV_ASSERT_NO_THROW(req.infer());
+    ASSERT_EQ(otensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputname)));
+}
+
+TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetOutputTensorPreAllocatedMemoryBeforeInfer) {
+    const std::string tensor_name = "input_tensor";
+    const ov::Shape refShape = inOutShapes[0].first;
+    const ov::Shape refOutShape = inOutShapes[0].second;
+    std::map<std::string, ov::PartialShape> shapes;
+    shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20};
+    OV_ASSERT_NO_THROW(function->reshape(shapes));
+    // Load ov::Model to target plugins
+    auto execNet = ie->compile_model(function, targetDevice, configuration);
+    // Create InferRequest
+    ov::InferRequest req;
+    ov::runtime::Tensor tensor;
+    const std::string outputname = function->outputs().back().get_any_name();
+    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
+    tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, 100, -50);
+    OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", tensor));
+    float ptr[5000];
+    ov::runtime::Tensor otensor(element::f32, refOutShape, ptr);
+    OV_ASSERT_NO_THROW(req.set_tensor(outputname, otensor));
+    OV_ASSERT_NO_THROW(req.infer());
+    ASSERT_EQ(req.get_tensor(outputname).data<float>(), ptr);
+    ASSERT_EQ(req.get_tensor(outputname).get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputname)));
+}
+
+TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetOutputShapeBeforeInfer) {
+    const std::string tensor_name = "input_tensor";
+    const ov::Shape refShape = inOutShapes[0].first;
+    const ov::Shape refOutShape = inOutShapes[0].second;
+    std::map<std::string, ov::PartialShape> shapes;
+    shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20};
+    OV_ASSERT_NO_THROW(function->reshape(shapes));
+    // Load ov::Model to target plugins
+    auto execNet = ie->compile_model(function, targetDevice, configuration);
+    // Create InferRequest
+    ov::InferRequest req;
+    ov::runtime::Tensor tensor, otensor;
+    const std::string outputname = function->outputs().back().get_any_name();
+    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
+    tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, 100, -50);
+    OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", tensor));
+    OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputname));
+    OV_ASSERT_NO_THROW(otensor.set_shape(refOutShape));
+    OV_ASSERT_NO_THROW(req.infer());
+    OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputname));
+    ASSERT_EQ(otensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputname)));
+}
+
 TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithoutSetShape) {
     const std::string tensor_name = "input_tensor";
     std::map<std::string, ov::PartialShape> shapes;
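Of the four new tests, `InferDynamicNetworkSetOutputTensorPreAllocatedMemoryBeforeInfer` pins down a zero-copy contract: an output tensor constructed over a caller-owned buffer must still expose that same address after `infer()`. A stand-in sketch (hypothetical `TensorView`, not the `ov::runtime::Tensor` API) of the invariant the `ASSERT_EQ(..., ptr)` checks:

```cpp
#include <cassert>
#include <cstddef>

// Non-owning view over caller memory: data() must keep returning the original
// pointer, so results are written in place instead of into a reallocation.
struct TensorView {
    float* ptr;
    size_t size;
    float* data() const { return ptr; }
};

int main() {
    float buffer[5000];                // like `float ptr[5000]` in the test
    TensorView otensor{buffer, 5000};
    otensor.data()[0] = 42.0f;         // stand-in for the plugin writing output
    assert(otensor.data() == buffer);  // mirrors ASSERT_EQ(req.get_tensor(...).data<float>(), ptr)
    assert(buffer[0] == 42.0f);        // results landed in the caller's buffer
    return 0;
}
```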
@@ -124,6 +248,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithGetTensor) {
     EXPECT_NE(0, otensor.get_size()); // output tensor is allocated after infer
     OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputname));
     ASSERT_EQ(otensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputname)));
 }
 
 TEST_P(OVInferRequestDynamicTests, InferUpperBoundNetworkWithGetTensor) {
@@ -151,6 +276,7 @@ TEST_P(OVInferRequestDynamicTests, InferUpperBoundNetworkWithGetTensor) {
     OV_ASSERT_NO_THROW(req.start_async());
     OV_ASSERT_NO_THROW(req.wait());
     ASSERT_EQ(otensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputname)));
 }
 
 TEST_P(OVInferRequestDynamicTests, InferFullyDynamicNetworkWithGetTensor) {
@@ -179,6 +305,7 @@ TEST_P(OVInferRequestDynamicTests, InferFullyDynamicNetworkWithGetTensor) {
     OV_ASSERT_NO_THROW(req.wait());
     OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputName));
     ASSERT_EQ(otensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
 }
 
 TEST_P(OVInferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetTensorLower) {
@@ -243,6 +370,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithGetTensor2times) {
     const std::string outputName = function->outputs().back().get_any_name();
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
     ASSERT_EQ(tensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
 
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
     OV_ASSERT_NO_THROW(tensor.set_shape(refShape2));
@@ -252,6 +380,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithGetTensor2times) {
     req.wait();
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
     ASSERT_EQ(tensor.get_shape(), refOutShape2);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
 }
 
 
@@ -295,6 +424,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithSetTensor) {
     const std::string outputName = function->outputs().back().get_any_name();
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
     ASSERT_EQ(tensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
 }
 
 TEST_P(OVInferRequestDynamicTests, InferFullyDynamicNetworkWithSetTensor) {
@@ -324,6 +454,7 @@ TEST_P(OVInferRequestDynamicTests, InferFullyDynamicNetworkWithSetTensor) {
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
     ASSERT_EQ(tensor.get_shape(), refOutShape);
     ASSERT_EQ(otensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
 }
 
 TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) {
@@ -350,6 +481,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) {
     OV_ASSERT_NO_THROW(req.wait());
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
     ASSERT_EQ(tensor.get_shape(), refOutShape);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
 
     tensor = ov::Tensor(ov::element::f32, refShape2);
     OV_ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor));
@@ -359,6 +491,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) {
     OV_ASSERT_NO_THROW(req.wait());
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
     ASSERT_EQ(tensor.get_shape(), refOutShape2);
+    ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
 }
 
 TEST_P(OVNotSupportRequestDynamicTests, InferDynamicNotSupported) {
@@ -179,6 +179,48 @@ TEST_P(OVInferRequestIOTensorTest, canInferWithGetOut) {
     OV_ASSERT_NO_THROW(req.get_tensor(output));
 }
 
+TEST_P(OVInferRequestIOTensorTest, InferStaticNetworkSetInputTensor) {
+    const ov::Shape shape1 = {1, 1, 32, 32};
+    const ov::Shape shape2 = {1, 1, 40, 40};
+    std::map<std::string, ov::PartialShape> shapes;
+    shapes[function->inputs().back().get_any_name()] = shape1;
+    OV_ASSERT_NO_THROW(function->reshape(shapes));
+    // Load ov::Model to target plugins
+    std::shared_ptr<ov::Core> ie = utils::PluginCache::get().core();
+    auto execNet = ie->compile_model(function, targetDevice, configuration);
+    // Create InferRequest
+    ov::InferRequest req;
+    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
+    OV_ASSERT_NO_THROW(req.infer());
+    // Get input_tensor
+    ov::runtime::Tensor tensor;
+    OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
+    // Set shape
+    OV_ASSERT_NO_THROW(tensor.set_shape(shape2));
+    ASSERT_ANY_THROW(req.infer());
+}
+
+TEST_P(OVInferRequestIOTensorTest, InferStaticNetworkSetOutputTensor) {
+    const ov::Shape shape1 = {1, 1, 32, 32};
+    const ov::Shape shape2 = {1, 20};
+    std::map<std::string, ov::PartialShape> shapes;
+    shapes[function->inputs().back().get_any_name()] = shape1;
+    OV_ASSERT_NO_THROW(function->reshape(shapes));
+    // Load ov::Model to target plugins
+    std::shared_ptr<ov::Core> ie = utils::PluginCache::get().core();
+    auto execNet = ie->compile_model(function, targetDevice, configuration);
+    // Create InferRequest
+    ov::InferRequest req;
+    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
+    OV_ASSERT_NO_THROW(req.infer());
+    // Get output_tensor
+    ov::runtime::Tensor tensor;
+    OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->outputs().back().get_any_name()));
+    // Set shape
+    OV_ASSERT_NO_THROW(tensor.set_shape(shape2));
+    ASSERT_ANY_THROW(req.infer());
+}
+
 std::string OVInferRequestIOTensorSetPrecisionTest::getTestCaseName(const testing::TestParamInfo<OVInferRequestSetPrecisionParams>& obj) {
     element::Type type;
     std::string targetDevice;
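The two added `OVInferRequestIOTensorTest` cases assert the static-model counterpart of all this: once a model is compiled with a fixed shape, resizing an input or output tensor must make `infer()` throw rather than read or write out of bounds. A minimal stand-in (hypothetical names) of that validation:

```cpp
#include <iostream>
#include <stdexcept>
#include <vector>

using Shape = std::vector<size_t>;

// Stand-in for an infer request compiled against a static shape.
struct StaticRequest {
    Shape expected;
    void infer(const Shape& actual) const {
        if (actual != expected)
            throw std::runtime_error("tensor shape does not match the compiled model");
    }
};

int main() {
    StaticRequest req{{1, 1, 32, 32}};
    req.infer({1, 1, 32, 32});            // matches the compiled shape: OK
    try {
        req.infer({1, 1, 40, 40});        // resized tensor, like tensor.set_shape(shape2)
    } catch (const std::runtime_error&) {
        std::cout << "rejected, mirroring ASSERT_ANY_THROW(req.infer())\n";
    }
}
```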