From f0421d94a6701f58ff83904fc4b897e2aa97f4aa Mon Sep 17 00:00:00 2001
From: Nikolay Shchegolev
Date: Fri, 8 Sep 2023 19:40:44 +0400
Subject: [PATCH] [CPU] Scalar is passed as a tensor with shape [1] in custom op evaluate. (#19521)

[CPU] Scalar is passed as a tensor with shape [1] in custom op evaluate.
---
 src/plugins/intel_cpu/src/nodes/reference.cpp |  10 +-
 .../subgraph_tests/src/custom_op_scalar.cpp   | 151 ++++++++++++++++++
 2 files changed, 157 insertions(+), 4 deletions(-)
 create mode 100644 src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_scalar.cpp

diff --git a/src/plugins/intel_cpu/src/nodes/reference.cpp b/src/plugins/intel_cpu/src/nodes/reference.cpp
index 490610b4f0c..1d830ff0728 100644
--- a/src/plugins/intel_cpu/src/nodes/reference.cpp
+++ b/src/plugins/intel_cpu/src/nodes/reference.cpp
@@ -123,8 +123,9 @@ ov::TensorVector Reference::prepareInputs() const {
     ov::TensorVector inputs;
     for (size_t i = 0; i < inputShapes.size(); i++) {
         void *srcDataPtr = getParentEdgesAtPort(i)[0]->getMemory().getData();
-        inputs.push_back(ov::Tensor(ngraphOp->get_input_element_type(i),
-                                    getParentEdgesAtPort(i)[0]->getMemory().getStaticDims(), srcDataPtr));
+        ov::Shape shape = ngraphOp->get_input_partial_shape(i).rank().get_length() == 0 ?
+                ov::Shape{} : getParentEdgesAtPort(i)[0]->getMemory().getStaticDims();
+        inputs.push_back(ov::Tensor(ngraphOp->get_input_element_type(i), shape, srcDataPtr));
     }
     return inputs;
 }
@@ -133,8 +134,9 @@ ov::TensorVector Reference::prepareOutputs() const {
     ov::TensorVector outputs;
     for (size_t i = 0; i < outputShapes.size(); i++) {
         void *dstDataPtr = getChildEdgesAtPort(i)[0]->getMemory().getData();
-        outputs.push_back(ov::Tensor(ngraphOp->get_output_element_type(i),
-                                     getChildEdgesAtPort(i)[0]->getMemory().getStaticDims(), dstDataPtr));
+        ov::Shape shape = ngraphOp->get_output_partial_shape(i).rank().get_length() == 0 ?
+                ov::Shape{} : getChildEdgesAtPort(i)[0]->getMemory().getStaticDims();
+        outputs.push_back(ov::Tensor(ngraphOp->get_output_element_type(i), shape, dstDataPtr));
     }
     return outputs;
 }
diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_scalar.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_scalar.cpp
new file mode 100644
index 00000000000..95b26d3b24c
--- /dev/null
+++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_scalar.cpp
@@ -0,0 +1,151 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include
+#include
+#include
+#include "test_utils/cpu_test_utils.hpp"
+
+using namespace CPUTestUtils;
+using namespace ov::test;
+
+namespace CPULayerTestsDefinitions {
+
+using CustomOpScalarCPUTestParams = std::tuple<ElementType, InputShape>;
+
+class CustomOpScalar : public ov::op::Op {
+public:
+    OPENVINO_OP("CustomOpScalar");
+
+    CustomOpScalar() = default;
+    CustomOpScalar(const ov::OutputVector& args) : Op(args) {
+        constructor_validate_and_infer_types();
+    }
+
+    void validate_and_infer_types() override {
+        const auto& inputs_count = input_values().size();
+        OPENVINO_ASSERT(inputs_count == 2,
+                        "Input count must be 2, Got: ",
+                        inputs_count);
+        OPENVINO_ASSERT(get_input_element_type(0) == ov::element::Type_t::u8,
+                        "The input must be u8.");
+        set_output_size(1);
+
+        auto inShape = get_input_partial_shape(0);
+
+        set_output_type(0, ov::element::Type_t::u8, inShape);
+    }
+
+    std::shared_ptr<ov::Node> clone_with_new_inputs(const ov::OutputVector& new_args) const override {
+        OPENVINO_ASSERT(new_args.size() == 2, "Incorrect number of new arguments: ", new_args.size(), ". 2 is expected.");
+
+        return std::make_shared<CustomOpScalar>(new_args);
+    }
+
+    bool visit_attributes(ov::AttributeVisitor& visitor) override {
+        return true;
+    }
+
+    bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override {
+        for (size_t i = 0llu; i < inputs.size(); i++) {
+            OPENVINO_ASSERT(inputs[i].get_shape().size() == get_input_partial_shape(i).rank().get_length(),
+                            "Invalid input shape rank: ", inputs[i].get_shape().size());
+        }
+        for (size_t i = 0llu; i < outputs.size(); i++) {
+            OPENVINO_ASSERT(outputs[i].get_shape().size() == get_output_partial_shape(i).rank().get_length(),
+                            "Invalid outputs shape rank: ", outputs[i].get_shape().size());
+        }
+
+        const auto& in_0 = inputs[0];
+        auto& out = outputs[0];
+
+        memcpy(out.data(), in_0.data(), out.get_byte_size());
+
+        return true;
+    }
+
+    bool evaluate(ov::TensorVector& output_values,
+                  const ov::TensorVector& input_values,
+                  const ov::EvaluationContext& evaluationContext) const override {
+        return evaluate(output_values, input_values);
+    }
+
+    bool has_evaluate() const override {
+        return true;
+    }
+};
+
+class CustomOpScalarCPUTest : public testing::WithParamInterface<CustomOpScalarCPUTestParams>,
+                              virtual public SubgraphBaseTest,
+                              public CPUTestsBase {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<CustomOpScalarCPUTestParams>& obj) {
+        ElementType inType;
+        InputShape inputShape;
+        std::tie(inType, inputShape) = obj.param;
+
+        std::ostringstream result;
+        result << "IS=" << inputShape << "_";
+        result << "Prc=" << inType;
+        return result.str();
+    }
+
+protected:
+    void SetUp() override {
+        targetDevice = utils::DEVICE_CPU;
+
+        ElementType inType;
+        InputShape inputShape;
+        std::tie(inType, inputShape) = this->GetParam();
+
+        init_input_shapes({inputShape});
+
+        auto in_0 = std::make_shared<ov::op::v0::Parameter>(inType, inputDynamicShapes[0]);
+        auto in_1 = std::make_shared<ov::op::v0::Constant>(inType, ov::Shape({}), std::vector<uint8_t>{0});
+        ov::OutputVector param_outs({in_0, in_1});
+        auto custom_op = std::make_shared<CustomOpScalar>(param_outs);
+
+        ov::ParameterVector input_params{in_0};
+        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(custom_op)};
+        function = std::make_shared<ov::Model>(results, input_params, "CustomOpScalar");
+    }
+
+    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
+        inputs.clear();
+        const auto& funcInputs = function->inputs();
+        for (size_t i = 0; i < funcInputs.size(); ++i) {
+            const auto& funcInput = funcInputs[i];
+            auto tensor = utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]);
+            inputs.insert({funcInput.get_node_shared_ptr(), tensor});
+        }
+    }
+
+    void compare(const std::vector<ov::Tensor>& expected, const std::vector<ov::Tensor>& actual) override {
+        ASSERT_EQ(expected.size(), actual.size());
+        ASSERT_EQ(expected.size(), function->get_results().size());
+        const auto& results = function->get_results();
+        for (size_t j = 0; j < results.size(); j++) {
+            const auto result = results[j];
+            for (size_t i = 0; i < result->get_input_size(); ++i) {
+                utils::compare(expected[j], actual[j], abs_threshold, rel_threshold);
+            }
+        }
+    }
+};
+
+TEST_P(CustomOpScalarCPUTest, CompareWithRefs) {
+    run();
+}
+
+const std::vector<InputShape> inputShapes = {
+    {{}, {{2, 3, 16}}},
+    {{}, {{}}}
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_CustomOp,
+                         CustomOpScalarCPUTest,
+                         ::testing::Combine(::testing::Values(ElementType::u8), ::testing::ValuesIn(inputShapes)),
+                         CustomOpScalarCPUTest::getTestCaseName);
+
+} // namespace CPULayerTestsDefinitions
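
A minimal standalone sketch (not part of the patch) of the behaviour the fix relies on, assuming only
OpenVINO's public ov::Tensor API: a tensor built with an empty ov::Shape{} over existing memory reports
rank 0 but still holds exactly one element, which is what a custom op's evaluate() now receives for
scalar inputs instead of a tensor of shape {1}.

    #include <openvino/runtime/tensor.hpp>
    #include <cassert>

    int main() {
        uint8_t data = 42;                                        // stand-in for the node's input memory
        ov::Tensor scalar(ov::element::u8, ov::Shape{}, &data);   // rank-0 view over the existing buffer
        assert(scalar.get_shape().size() == 0);                   // scalar: empty shape, not {1}
        assert(scalar.get_size() == 1);                           // still exactly one element
        assert(scalar.get_byte_size() == sizeof(uint8_t));        // one byte for u8
        return 0;
    }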