[Core] Add evaluate to CumSum core op (#14566)
* Add evaluate to CumSum core op * Use new API * Remove unneeded types * Revert evaluate in template plugin * Correct evaluate availability * Improve code * Add test Co-authored-by: Michal Lukaszewski <michal.lukaszewski@intel.com>
This commit is contained in:
parent
4dc44a5a26
commit
b9749a984d
@ -38,6 +38,9 @@ public:
|
||||
|
||||
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
|
||||
|
||||
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
|
||||
bool has_evaluate() const override;
|
||||
|
||||
bool visit_attributes(AttributeVisitor& visitor) override;
|
||||
void validate_and_infer_types() override;
|
||||
|
||||
|
@ -2,17 +2,16 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "ngraph/op/cum_sum.hpp"
|
||||
#include "openvino/op/cum_sum.hpp"
|
||||
|
||||
#include "itt.hpp"
|
||||
#include "ngraph/attribute_visitor.hpp"
|
||||
#include "ngraph/graph_util.hpp"
|
||||
#include "ngraph/op/broadcast.hpp"
|
||||
#include "ngraph/op/constant.hpp"
|
||||
#include "ngraph/runtime/reference/cum_sum.hpp"
|
||||
#include "openvino/core/attribute_visitor.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace ngraph;
|
||||
|
||||
namespace ov {
|
||||
op::v0::CumSum::CumSum(const Output<Node>& arg, const Output<Node>& axis, const bool exclusive, const bool reverse)
|
||||
: Op({arg, axis}),
|
||||
m_exclusive(exclusive),
|
||||
@ -21,7 +20,7 @@ op::v0::CumSum::CumSum(const Output<Node>& arg, const Output<Node>& axis, const
|
||||
}
|
||||
|
||||
op::v0::CumSum::CumSum(const Output<Node>& arg, const bool exclusive, const bool reverse)
|
||||
: Op({arg, op::v0::Constant::create(element::i32, ov::Shape{}, {0})}),
|
||||
: Op({arg, op::v0::Constant::create(element::i32, Shape{}, {0})}),
|
||||
m_exclusive(exclusive),
|
||||
m_reverse(reverse) {
|
||||
constructor_validate_and_infer_types();
|
||||
@ -58,8 +57,72 @@ shared_ptr<Node> op::v0::CumSum::clone_with_new_inputs(const OutputVector& new_a
|
||||
}
|
||||
}
|
||||
|
||||
OPENVINO_SUPPRESS_DEPRECATED_START
// Returns the default value for CumSum: a zero constant with the op's output
// element type and shape (zero is the identity element of the accumulation).
// Wrapped in suppression guards because make_constant_from_string is a
// deprecated ngraph helper.
shared_ptr<Node> op::v0::CumSum::get_default_value() const {
    return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
}
OPENVINO_SUPPRESS_DEPRECATED_END
|
||||
|
||||
namespace {
|
||||
template <element::Type_t DATA_ET, element::Type_t AXIS_ET>
|
||||
bool evaluate_cum_sum(TensorVector& outputs, const TensorVector& inputs, const bool exclusive, const bool reverse) {
|
||||
using data_t = fundamental_type_for<DATA_ET>;
|
||||
using axis_t = fundamental_type_for<AXIS_ET>;
|
||||
ngraph::runtime::reference::cumsum<data_t, axis_t>(inputs[0].data<data_t>(),
|
||||
inputs[1].data<axis_t>(),
|
||||
outputs[0].data<data_t>(),
|
||||
inputs[0].get_shape(),
|
||||
exclusive,
|
||||
reverse);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Expands to one switch case for data element type `a`: opens a profiling
// scope and returns the result of evaluate_cum_sum instantiated with that
// data type and the enclosing function's AXIS_ET template parameter.
#define CUM_SUM_TYPE_CASE(a, ...)                                          \
    case element::Type_t::a: {                                             \
        OV_OP_SCOPE(OV_PP_CAT4(evaluate_cum_sum, _, a, AXIS_ET));          \
        return evaluate_cum_sum<element::Type_t::a, AXIS_ET>(__VA_ARGS__); \
    }
|
||||
|
||||
// Dispatches on the data tensor's (inputs[0]) element type for a fixed axis
// type AXIS_ET. Only f32 data is handled here; any other data type returns
// false so the caller can fall back to another evaluator.
template <element::Type_t AXIS_ET>
bool evaluate(TensorVector& outputs, const TensorVector& inputs, const bool exclusive, const bool reverse) {
    switch (inputs[0].get_element_type()) {
        CUM_SUM_TYPE_CASE(f32, outputs, inputs, exclusive, reverse);
    default:
        return false;
    }
}
|
||||
|
||||
// Top-level dispatcher: switches on the axis tensor's (inputs[1]) element
// type — i32 or i64 — and forwards to the data-type dispatch above.
// NOTE(review): NGRAPH_TYPE_CASE presumably expands to a case that assigns
// the nested evaluate<...>() result into `rc` and breaks — confirm against
// the macro definition; `rc` must stay named exactly as the macro expects.
bool evaluate_cum_sum(TensorVector& outputs, const TensorVector& inputs, const bool exclusive, const bool reverse) {
    auto rc = true;
    switch (inputs[1].get_element_type()) {
        NGRAPH_TYPE_CASE(evaluate_cum_sum, i32, outputs, inputs, exclusive, reverse);
        NGRAPH_TYPE_CASE(evaluate_cum_sum, i64, outputs, inputs, exclusive, reverse);
    default:
        // Unsupported axis element type: report failure to the caller.
        rc = false;
        break;
    }
    return rc;
}
|
||||
} // namespace
|
||||
|
||||
// Evaluates CumSum on concrete tensors. Expects exactly two inputs
// (inputs[0] = data, inputs[1] = axis) and one output; violating either
// precondition triggers OPENVINO_ASSERT. Returns false when the element-type
// combination is not supported by the local dispatchers.
bool op::v0::CumSum::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
    OV_OP_SCOPE(v0_CumSum_evaluate);
    OPENVINO_ASSERT(inputs.size() == 2,
                    "Invalid size of inputs argument of evaluate method of CumSum operation. Provided: ",
                    inputs.size(),
                    ". Expected: 2");
    OPENVINO_ASSERT(outputs.size() == 1,
                    "Invalid size of outputs argument of evaluate method of CumSum operation. Provided: ",
                    outputs.size(),
                    ". Expected: 1");

    // Delegate to the anonymous-namespace dispatcher with this node's
    // exclusive/reverse attributes.
    return evaluate_cum_sum(outputs, inputs, is_exclusive(), is_reverse());
}
|
||||
|
||||
bool op::v0::CumSum::has_evaluate() const {
|
||||
OV_OP_SCOPE(v0_CumSum_has_evaluate);
|
||||
const auto& input_0_et = get_input_element_type(0);
|
||||
const auto& input_1_et = get_input_element_type(1);
|
||||
return input_0_et == element::f32 && (input_1_et == element::i32 || input_1_et == element::i64);
|
||||
}
|
||||
} // namespace ov
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include "ngraph/op/convert.hpp"
|
||||
#include "ngraph/op/cos.hpp"
|
||||
#include "ngraph/op/cosh.hpp"
|
||||
#include "ngraph/op/cum_sum.hpp"
|
||||
#include "ngraph/op/erf.hpp"
|
||||
#include "ngraph/op/exp.hpp"
|
||||
#include "ngraph/op/fake_quantize.hpp"
|
||||
@ -1889,3 +1890,35 @@ TEST(eval, evaluate_fake_quantize_dynamic_input) {
|
||||
EXPECT_THAT(read_vector<float>(result),
|
||||
Pointwise(FloatEq(), std::vector<float>{2.f, 2.6666667f, 2.6666667f, 3.3333333f, 3.3333333f, 4.f}));
|
||||
}
|
||||
|
||||
// CumSum v0 with default flags (inclusive, forward) accumulates each row of a
// 2x3 input along axis 1.
TEST(eval, evaluate_cum_sum_v0) {
    const auto param = make_shared<op::Parameter>(element::f32, Shape{2, 3});
    const auto axis_const = op::Constant::create<int32_t>(element::i32, Shape{1}, {1});
    const auto cum_sum = make_shared<op::v0::CumSum>(param, axis_const);
    const auto model = make_shared<ov::Model>(OutputVector{cum_sum}, ParameterVector{param});

    float in_vals[6] = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f};
    // Row-wise running sums of the input above.
    float expected_vals[6] = {1.f, 3.f, 6.f, 4.f, 9.f, 15.f};

    auto results = ov::TensorVector(1);
    ASSERT_TRUE(model->evaluate(results, {{ov::element::f32, {2, 3}, in_vals}}));
    EXPECT_EQ(results[0].get_element_type(), param->get_element_type());
    EXPECT_EQ(results[0].get_shape(), param->get_shape());
    EXPECT_EQ(memcmp(results[0].data(), expected_vals, sizeof(expected_vals)), 0);
}
|
||||
|
||||
// CumSum v0 with exclusive=true and reverse=true: each element becomes the
// sum of the elements strictly after it along axis 0.
TEST(eval, evaluate_cum_sum_v0_exclusive_reversed) {
    const auto param = make_shared<op::Parameter>(element::f32, Shape{5});
    const auto axis_const = op::Constant::create<int32_t>(element::i32, Shape{1}, {0});
    const auto cum_sum = make_shared<op::v0::CumSum>(param, axis_const, true, true);
    const auto model = make_shared<ov::Model>(OutputVector{cum_sum}, ParameterVector{param});

    float in_vals[5] = {1.f, 2.f, 3.f, 4.f, 5.f};
    // Reversed-exclusive running sums: suffix sums excluding the element itself.
    float expected_vals[5] = {14.f, 12.f, 9.f, 5.f, 0.f};

    auto results = ov::TensorVector(1);
    ASSERT_TRUE(model->evaluate(results, {{ov::element::f32, {5}, in_vals}}));
    EXPECT_EQ(results[0].get_element_type(), param->get_element_type());
    EXPECT_EQ(results[0].get_shape(), param->get_shape());
    EXPECT_EQ(memcmp(results[0].data(), expected_vals, sizeof(expected_vals)), 0);
}
|
||||
|
@ -591,7 +591,7 @@ bool call(const HostTensorVector& func_outputs,
|
||||
op->validate_and_infer_types();
|
||||
OPENVINO_SUPPRESS_DEPRECATED_START
|
||||
if (!op->evaluate(op_outputs, op_inputs)) {
|
||||
auto evaluates_map = ngraph::runtime::interpreter::get_evaluators_map();
|
||||
const auto& evaluates_map = ngraph::runtime::interpreter::get_evaluators_map();
|
||||
auto it = evaluates_map.find(op->get_type_info());
|
||||
if (!it->second(op, op_outputs, op_inputs)) {
|
||||
return false;
|
||||
|
Loading…
Reference in New Issue
Block a user