[CPU] Fix input output tensor binding (#19589)

* Fix input output tensor binding plus test

* Clean up code
This commit is contained in:
Maksim Kutakov 2023-09-06 11:04:38 +02:00 committed by GitHub
parent 7a4e765600
commit 45cc4fdb33
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 110 additions and 18 deletions

View File

@ -214,6 +214,19 @@ static inline void changeEdgePtr(const EdgePtr &edge, InferenceEngine::Blob::Ptr
void InferRequestBase::changeDefaultPtr() {
const auto& inputNodesMap = graph->GetInputNodesMap();
const auto& outputNodesMap = graph->GetOutputNodesMap();
std::unordered_set<const void*> inputPtrs;
std::function<void(const EdgePtr &edge, InferenceEngine::Blob::Ptr blob)> changeInpPtr;
if (Graph::Status::ReadyDynamic == graph->getStatus()) {
changeInpPtr = [&inputPtrs](const EdgePtr &edge, InferenceEngine::Blob::Ptr blob) {
changeEdgePtr(edge, blob);
inputPtrs.insert(blob->buffer());
};
} else {
changeInpPtr = [](const EdgePtr &edge, InferenceEngine::Blob::Ptr blob) {
changeEdgePtr(edge, blob);
};
}
for (auto& it : externalPtr) {
auto input = inputNodesMap.find(it.first);
if (inputNodesMap.end() == input) {
@ -261,7 +274,7 @@ void InferRequestBase::changeDefaultPtr() {
if (!e)
IE_THROW() << "Node " << inputNodePtr->getName() << " contains empty child edge";
changeEdgePtr(e, it.second);
changeInpPtr(e, it.second);
}
}
}
@ -321,18 +334,9 @@ void InferRequestBase::changeDefaultPtr() {
OPENVINO_ASSERT(outputNodesMap.end() != output, "Node with name: ", name, " is absent in the outputNodesMap");
auto parentEdge = output->second->getParentEdgeAt(0);
//avoid cyclic memory use
auto parentNode = parentEdge->getParent();
const auto& parentNodeInpEdges = parentNode->getParentEdges();
std::unordered_set<const void*> parentInputPtrs(parentNodeInpEdges.size());
for (auto&& edge : parentNodeInpEdges) {
if (auto edgePtr = edge.lock()) {
parentInputPtrs.insert(edgePtr->getMemoryPtr()->getData());
}
}
auto&& controlBlock = controlBlockItr->second;
std::shared_ptr<IMemoryMngr> memMngr = parentInputPtrs.count(controlBlock.rawPtr()) ? // same memory is used on the input and output
std::shared_ptr<IMemoryMngr> memMngr = inputPtrs.count(controlBlock.rawPtr()) ? // same memory is used on the input and output
controlBlock.nextMemMngr() : // then swap internal buffer to avoid data corruption
controlBlock.currentMemMngr(); // else reuse the existing buffer

View File

@ -122,13 +122,13 @@ TEST_P(ConcatReshapeConcatSubgraphTest, CompareWithRefs) {
namespace {
const std::vector<std::vector<InputShape>> inputShapes = {
// {
// // {{dynamic shape}, {{static shape case1}, {static shape case2}, ...}
// {{2, 64}, {{2, 64}}}, // input 0
// {{2, 64}, {{2, 64}}}, // input 1
// {{2, 64}, {{2, 64}}}, // input 2
// {{2, 64}, {{2, 64}}} // input 3
// },
{
// {{dynamic shape}, {{static shape case1}, {static shape case2}, ...}
{{2, 64}, {{2, 64}}}, // input 0
{{2, 64}, {{2, 64}}}, // input 1
{{2, 64}, {{2, 64}}}, // input 2
{{2, 64}, {{2, 64}}} // input 3
},
{
// {{dynamic shape}, {{static shape case1}, {static shape case2}, ...}
{{2, -1}, {{2, 64}}}, // input 0

View File

@ -0,0 +1,88 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"
using namespace InferenceEngine;
using namespace ov::test;
/*This test runs the following subgraph:
Param_0 Param_1
\ |
\ Softmax
\ /
\ /
Concat
|
Softmax
|
Output_1
Output_1 -> Param_1
The main purpose of this test is to check the code path taken when the output tensor of one infer request is
reused as an input tensor of the next infer request.
*/
namespace SubgraphTestsDefinitions {
class InputOutputTensorReuse : public SubgraphBaseTest {
public:
    // Builds the Param_0/Param_1 -> Softmax -> Concat -> Softmax subgraph on CPU.
    // The third dimension of both inputs is dynamic so the output shape may change
    // between iterations when the output is fed back into Param_1.
    void SetUp() override {
        constexpr size_t softmax_axis = 1ul;
        constexpr int concat_axis = 2;
        targetDevice = ov::test::utils::DEVICE_CPU;
        auto precision = ov::element::f32;

        ov::ParameterVector input_params{
            std::make_shared<ov::op::v0::Parameter>(precision, ov::PartialShape{1, 32, -1, 16}),
            std::make_shared<ov::op::v0::Parameter>(precision, ov::PartialShape{1, 32, -1, 16})};
        input_params[0]->set_friendly_name("Param_0");
        input_params[1]->set_friendly_name("Param_1");

        auto input_softmax = std::make_shared<ov::op::v1::Softmax>(input_params[1], softmax_axis);
        auto concat =
            std::make_shared<ov::op::v0::Concat>(ov::NodeVector{input_params[0], input_softmax}, concat_axis);
        auto output_softmax = std::make_shared<ov::op::v1::Softmax>(concat, softmax_axis);

        ngraph::ResultVector results;
        for (size_t out_idx = 0; out_idx < output_softmax->get_output_size(); out_idx++)
            results.push_back(std::make_shared<ngraph::opset1::Result>(output_softmax->output(out_idx)));
        results.front()->set_friendly_name("Output_1");

        function = std::make_shared<ngraph::Function>(results, input_params, "InputOutputTensorReuseTest");
    }
};
// Runs one regular inference, then repeatedly binds the previous output tensor
// as the "Param_1" input of the next request and checks the results against the
// reference function. This exercises the in-place input/output memory binding
// path in the CPU plugin.
TEST_F(InputOutputTensorReuse, smoke_Input_Output_Binding) {
    compile_model();
    std::vector<ov::Shape> inputShapes = {{1, 32, 5, 16}, {1, 32, 1, 16}};
    init_ref_function(functionRefs, inputShapes);
    generate_inputs(inputShapes);
    validate();

    constexpr size_t num_iter = 10;
    for (size_t iter = 0; iter < num_iter; iter++) {
        // The output grows along the concat axis, so refresh the reference
        // function with the actual output shape before reusing the tensor.
        auto outputTensor = inferRequest.get_output_tensor(0);
        inputShapes.back() = outputTensor.get_shape();
        init_ref_function(functionRefs, inputShapes);

        auto param1_entry = std::find_if(inputs.begin(),
                                         inputs.end(),
                                         [](const std::pair<std::shared_ptr<ov::Node>, ov::Tensor>& item) {
                                             return item.first->get_friendly_name() == "Param_1";
                                         });
        ASSERT_NE(param1_entry, inputs.end());
        param1_entry->second = outputTensor;  // feed the previous output back in

        const auto& expectedOutputs = calculate_refs();
        for (const auto& input : inputs) {
            inferRequest.set_tensor(input.first, input.second);
        }
        inferRequest.infer();
        compare(expectedOutputs, {outputTensor});
    }
}
} // namespace SubgraphTestsDefinitions