Fixed evaluate for ov::Tensor (#11354)

* Fixed evaluate for ov::Tensor

* Fixed old ops with EvaluationContext
This commit is contained in:
Author: Ilya Churaev, 2022-03-31 07:47:49 +03:00 — committed by GitHub
parent 78285f9db4
commit 3e58ccbce7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 63 additions and 2 deletions

View File

@ -38,7 +38,7 @@ bool Identity::evaluate(ov::TensorVector& outputs, const ov::TensorVector& input
auto in = inputs[0];
auto out = outputs[0];
out.set_shape(in.get_shape());
memcpy(out.data(), in.data(), in.get_size());
memcpy(out.data(), in.data(), in.get_byte_size());
return true;
}

View File

@ -743,13 +743,15 @@ bool ov::Node::evaluate(ov::TensorVector& output_values, const ov::TensorVector&
bool ov::Node::evaluate(ov::TensorVector& output_values,
                        const ov::TensorVector& input_values,
                        const ov::EvaluationContext& evaluationContext) const {
    // First try the deprecated HostTensor-based overload: old ops may still
    // implement evaluate(HostTensorVector&, const HostTensorVector&, EvaluationContext).
    HostTensorVector output = create_tmp_tensors(output_values);
    HostTensorVector input = create_tmp_tensors(input_values);
    OPENVINO_SUPPRESS_DEPRECATED_START
    bool sts = evaluate(output, input, evaluationContext);
    OPENVINO_SUPPRESS_DEPRECATED_END
    update_output_tensors(output_values, output);
    // Fall back to the ov::Tensor-based evaluate when the op has no
    // EvaluationContext-aware implementation (sts == false).  The previous
    // unconditional `return sts;` made this fallback unreachable.
    return sts ? sts : evaluate(output_values, input_values);
}
bool ov::Node::evaluate_lower(ov::TensorVector& output_values) const {

View File

@ -8,6 +8,7 @@
#include "openvino/core/graph_util.hpp"
#include "openvino/core/op_extension.hpp"
#include "openvino/opsets/opset9.hpp"
#include "openvino/util/file_util.hpp"
#include "so_extension.hpp"
@ -36,3 +37,61 @@ TEST(extension, load_extension_and_cast) {
EXPECT_NE(nullptr, std::dynamic_pointer_cast<ov::BaseOpExtension>(extensions[0]));
extensions.clear();
}
namespace {
class DummyAdapter : public ov::AttributeVisitor {
public:
void on_adapter(const std::string& name, ov::ValueAccessor<void>& adapter) override {}
};
} // namespace
// Builds a one-op model ("Identity") from a dynamically loaded op extension and
// checks that Model::evaluate copies the input tensor to the output tensor.
TEST(extension, create_model_from_extension) {
    std::vector<ov::Extension::Ptr> so_extensions = ov::detail::load_extensions(get_extension_path());
    ASSERT_LE(1, so_extensions.size());
    std::vector<ov::Extension::Ptr> extensions;
    // Keep the shared objects alive for as long as the extensions are in use.
    std::vector<std::shared_ptr<void>> so;
    for (const auto& ext : so_extensions) {
        if (auto so_ext = std::dynamic_pointer_cast<ov::detail::SOExtension>(ext)) {
            extensions.emplace_back(so_ext->extension());
            so.emplace_back(so_ext->shared_object());
        }
    }
    so_extensions.clear();
    ASSERT_LE(1, extensions.size());
    auto op_extension = std::dynamic_pointer_cast<ov::BaseOpExtension>(extensions[0]);
    // ASSERT (not EXPECT): op_extension is dereferenced immediately below, so a
    // failure must abort the test instead of crashing it.
    ASSERT_NE(nullptr, op_extension);
    {
        // Create model to check evaluate for custom operation
        std::shared_ptr<ov::Model> model;
        {
            auto parameter = std::make_shared<ov::opset9::Parameter>(ov::element::i32, ov::Shape{1, 2, 2, 2});
            DummyAdapter visitor;
            auto outputs = op_extension->create(ov::OutputVector{parameter}, visitor);
            ASSERT_EQ(1, outputs.size());
            ASSERT_NE(nullptr, outputs[0].get_node());
            const std::string ref_name = "Identity";
            EXPECT_EQ(ref_name, outputs[0].get_node()->get_type_info().name);
            auto result = std::make_shared<ov::opset9::Result>(outputs[0]);
            model = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{parameter});
        }
        // Fills a tensor with start, start+1, ... so two tensors filled with
        // different `start` values are guaranteed to differ byte-wise.
        auto fill_tensor = [](ov::Tensor& tensor, int32_t start) {
            int32_t* data = tensor.data<int32_t>();
            for (size_t i = 0; i < tensor.get_size(); i++)
                data[i] = start + static_cast<int32_t>(i);
        };
        ov::TensorVector inputs;
        inputs.emplace_back(ov::Tensor(ov::element::i32, ov::Shape{1, 2, 2, 2}));
        fill_tensor(*inputs.begin(), 0);
        ov::TensorVector outputs;
        outputs.emplace_back(ov::Tensor(ov::element::i32, ov::Shape{1, 2, 2, 2}));
        // Pre-fill the output with a pattern that provably differs from the
        // input; the original test compared against uninitialized memory,
        // which made the pre-evaluate inequality check flaky.
        fill_tensor(*outputs.begin(), 1);
        EXPECT_NE(std::memcmp(inputs.begin()->data(), outputs.begin()->data(), inputs.begin()->get_byte_size()), 0);
        model->evaluate(outputs, inputs);
        EXPECT_EQ(std::memcmp(inputs.begin()->data(), outputs.begin()->data(), inputs.begin()->get_byte_size()), 0);
    }
    extensions.clear();
}