Enable Conditional Compilation for nGraph evaluate methods (#3666)

* Added CC macro to nGraph

* Add CC to evaluate methods

* Fixed tests

* Fixed comments

* Add private evaluates

* Fixed code style and names

* Fixed code style

* Fixed build
Ilya Churaev 2020-12-21 14:32:40 +03:00 committed by GitHub
parent 0b05653d7a
commit b2399ce0d9
114 changed files with 1723 additions and 1879 deletions
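
The whole commit applies one pattern: each `evaluate` body is wrapped in the new NGRAPH_OP_SCOPE macro, so that a selective build (SELECTIVE_BUILD) can compile the body out unless an analyzer run recorded the corresponding region, while a regular build pays only for an ITT profiling task. A minimal sketch of the shape of the change, taken from the Abs hunks below (class and namespace context elided):

// Before: unconditional ITT task, body always compiled in.
bool op::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Abs::evaluate");
    return absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

// After: the body is guarded by the v0_Abs_evaluate region; when a selective
// build excludes that region, the scoped statements contribute no code and the
// method falls through with rc == false.
bool op::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    bool rc = false;
    NGRAPH_OP_SCOPE(
        v0_Abs_evaluate,
        rc = absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0))));
    return rc;
}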


@ -58,7 +58,7 @@ set_target_properties(ngraph PROPERTIES
C_VISIBILITY_PRESET hidden
VISIBILITY_INLINES_HIDDEN ON)
target_link_libraries(ngraph PRIVATE openvino::itt ngraph::builder ngraph::reference)
target_link_libraries(ngraph PRIVATE openvino::conditional_compilation openvino::itt ngraph::builder ngraph::reference)
find_package(Graphviz QUIET)
if (GRAPHVIZ_FOUND)


@ -82,6 +82,10 @@ namespace ngraph
std::pair<bool, AxisSet> get_broadcast_axes() const override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const override;
private:
bool broadcast_evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
} // namespace v3
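
Header hunks like this one add a private per-op helper (here broadcast_evaluate) so the public evaluate override can shrink to one scoped call; the matching implementation appears in the broadcast.cpp hunk further down. The resulting shim, as a sketch:

bool op::v3::Broadcast::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v3_Broadcast_evaluate, return broadcast_evaluate(outputs, inputs));
    return false; // reached only when the region is compiled out
}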


@ -79,6 +79,10 @@ namespace ngraph
std::size_t m_blocksize;
DepthToSpaceMode m_mode;
DepthToSpaceMode mode_from_string(const std::string& mode) const;
private:
bool evaluate_depth_to_space(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
using v0::DepthToSpace;


@ -57,6 +57,9 @@ namespace ngraph
static const int PARAMS;
static const int INDICES;
static const int AXIS;
bool evaluate_gather(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
} // namespace v1
} // namespace op


@ -234,6 +234,8 @@ namespace ngraph
std::vector<int64_t> get_axes() const;
private:
bool evaluate_interpolate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
InterpolateAttrs m_attrs;
/// \brief Corrects pads_begin and pads_end attributes.


@ -98,6 +98,8 @@ namespace ngraph
bool update_auto_padding(const PartialShape& in_shape,
Shape& new_pads_end,
Shape& new_pads_begin) const;
bool evaluate_maxpool(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
} // namespace v1
} // namespace op


@ -89,6 +89,8 @@ namespace ngraph
private:
PadMode m_pad_mode;
bool evaluate_pad(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
}


@ -71,6 +71,8 @@ namespace ngraph
protected:
bool m_special_zero;
bool evaluate_reshape(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
}


@ -73,6 +73,10 @@ namespace ngraph
/// Alternatively it can contain a boolean mask that indicates which axes should be
/// reversed.
Mode m_mode;
private:
bool evaluate_reverse(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
}


@ -52,6 +52,10 @@ namespace ngraph
clone_with_new_inputs(const OutputVector& inputs) const override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const override;
private:
bool evaluate_scatter_element_update(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
using v3::ScatterElementsUpdate;


@ -53,6 +53,10 @@ namespace ngraph
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const override;
private:
bool evaluate_scatter_update(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
}


@ -69,6 +69,8 @@ namespace ngraph
/// \param data_shape - Shape of the original input data tensor
/// \return A 4D tensor to be used to reshape the input data before shuffling it
Shape get_pre_shuffle_shape(const Shape& data_shape) const;
bool evaluate_shuffle_channels(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
int64_t m_axis;
int64_t m_group;


@ -63,6 +63,10 @@ namespace ngraph
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const override;
private:
bool evaluate_space_to_batch(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
using v1::SpaceToBatch;


@ -76,6 +76,10 @@ namespace ngraph
protected:
std::size_t m_blocksize;
SpaceToDepthMode m_mode;
private:
bool evaluate_space_to_depth(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
using v0::SpaceToDepth;


@ -47,6 +47,10 @@ namespace ngraph
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const override;
private:
bool evaluate_tile(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
}


@ -115,6 +115,10 @@ namespace ngraph
const PartialShape input_partial_shape,
const int64_t k) const;
void set_axis(const Rank input_rank, const int64_t axis);
private:
bool evaluate_topk(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
} // namespace v1


@ -56,6 +56,10 @@ namespace ngraph
size_t get_default_output_index() const override { return no_default_index(); }
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const override;
private:
bool evaluate_variadic_split(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
} // namespace v1


@ -21,6 +21,8 @@
#pragma once
#include <openvino/cc/factory.h>
#include <openvino/cc/selective_build.h>
#include <openvino/itt.hpp>
namespace ngraph
@ -31,7 +33,33 @@ namespace ngraph
{
OV_ITT_DOMAIN(nGraph);
OV_ITT_DOMAIN(nGraphPass_LT);
OV_ITT_DOMAIN(nGraphOp, "nGraph::Op");
OV_ITT_DOMAIN(ngraph_op, "nGraph::Op");
}
}
OV_CC_DOMAINS(ngraph_op);
}
#if defined(SELECTIVE_BUILD) || defined(SELECTIVE_BUILD_ANALYZER)
#define NGRAPH_OP_SCOPE(region, ...) OV_SCOPE(ngraph_op, region, __VA_ARGS__)
#else
#define NGRAPH_OP_SCOPE(region, ...) \
OV_ITT_SCOPED_TASK(itt::domains::ngraph_op, #region); \
__VA_ARGS__
#endif
#define NGRAPH_TYPE_CASE(region, a, ...) \
case element::Type_t::a: \
{ \
OV_SCOPE( \
ngraph_op, OV_CC_CAT3(region, _, a), rc = evaluate<element::Type_t::a>(__VA_ARGS__)); \
} \
break;
#define NGRAPH_COPY_TENSOR(region, a, ...) \
case element::Type_t::a: \
{ \
OV_SCOPE(ngraph_op, \
OV_CC_CAT3(region, _, a), \
rc = copy_tensor<element::Type_t::a>(__VA_ARGS__)); \
} \
break;
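
To read the new macros: OV_CC_CAT3 pastes its three tokens together, so each per-type case is labelled with the region argument plus an underscore plus the element type. Assuming OV_SCOPE executes its trailing statements when the region is enabled and contributes no code when a selective build excludes it, NGRAPH_TYPE_CASE(evaluate_abs, f32, arg0, out, count) expands to roughly:

case element::Type_t::f32:
{
    OV_SCOPE(ngraph_op,
             evaluate_abs_f32,
             rc = evaluate<element::Type_t::f32>(arg0, out, count));
}
break;

NGRAPH_COPY_TENSOR is the same construction with copy_tensor<> in place of evaluate<>, while NGRAPH_OP_SCOPE wraps whole evaluate bodies rather than individual switch cases.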


@ -57,22 +57,14 @@ namespace absop
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
TYPE_CASE(bf16)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_abs, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_abs, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_abs, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_abs, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_abs, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_abs, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_abs, f32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_abs, bf16, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -81,6 +73,9 @@ namespace absop
bool op::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Abs::evaluate");
return absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0)));
bool rc = false;
NGRAPH_OP_SCOPE(
v0_Abs_evaluate,
rc = absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return rc;
}
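
Two equivalent wrapping styles recur from here on, and both rely on the macro contributing no code when the region is excluded: either the scoped statement assigns to a local rc that is returned afterwards (Abs above), or the return sits inside the scope with a trailing return false as fallback (Ceiling, Cos, and most ops below). Side by side, with n standing in for shape_size(get_output_shape(0)):

// style 1: assign inside the scope, then return the flag
bool rc = false;
NGRAPH_OP_SCOPE(v0_Abs_evaluate, rc = absop::evaluate_abs(inputs[0], outputs[0], n));
return rc;

// style 2: return inside the scope, fall through when excluded
NGRAPH_OP_SCOPE(v0_Ceiling_evaluate,
                return ceiling::evaluate_ceiling(inputs[0], outputs[0], n));
return false;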


@ -66,20 +66,13 @@ namespace acosop
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_acos, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -88,6 +81,9 @@ namespace acosop
bool op::Acos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Acos::evaluate");
return acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
bool rc = false;
NGRAPH_OP_SCOPE(
v0_Acos_evaluate,
rc = acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return rc;
}


@ -56,18 +56,12 @@ namespace acoshop
out->set_unary(arg0);
switch (arg0->get_element_type())
{
TYPE_CASE(i32)(arg0, out);
break;
TYPE_CASE(i64)(arg0, out);
break;
TYPE_CASE(u32)(arg0, out);
break;
TYPE_CASE(u64)(arg0, out);
break;
TYPE_CASE(f16)(arg0, out);
break;
TYPE_CASE(f32)(arg0, out);
break;
NGRAPH_TYPE_CASE(evaluate_acosh, i32, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, i64, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, u32, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, u64, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, f16, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, f32, arg0, out);
default: rc = false; break;
}
return rc;
@ -76,6 +70,7 @@ namespace acoshop
bool op::v3::Acosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v3::Acosh::evaluate");
return acoshop::evaluate_acosh(inputs[0], outputs[0]);
bool rc = false;
NGRAPH_OP_SCOPE(v3_Acosh_evaluate, rc = acoshop::evaluate_acosh(inputs[0], outputs[0]));
return rc;
}


@ -50,28 +50,17 @@ namespace add
out->set_broadcast(broadcast_spec, arg0, arg1);
switch (arg0->get_element_type())
{
TYPE_CASE(i8)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u8)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(bf16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_add, i8, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, i16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, u8, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, u16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, bf16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, f32, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@ -104,6 +93,8 @@ shared_ptr<Node> op::v1::Add::clone_with_new_inputs(const OutputVector& new_args
bool op::v1::Add::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Add::evaluate");
return add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
}
bool rc = false;
NGRAPH_OP_SCOPE(v1_Add_evaluate,
rc = add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob()));
return rc;
}


@ -70,20 +70,13 @@ namespace logand
out->set_broadcast(broadcast_spec, arg0, arg1);
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_logand, boolean, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logand, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logand, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logand, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logand, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logand, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logand, f32, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@ -93,6 +86,8 @@ namespace logand
bool op::v1::LogicalAnd::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::LogicalAnd::evaluate");
return logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob());
bool rc = false;
NGRAPH_OP_SCOPE(v1_LogicalAnd_evaluate,
rc = logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob()));
return rc;
}


@ -67,20 +67,13 @@ namespace asinop
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_asin, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asin, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asin, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asin, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asin, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asin, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asin, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -89,6 +82,9 @@ namespace asinop
bool op::Asin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Asin::evaluate");
return asinop::evaluate_asin(inputs[0], outputs[0], shape_size(get_output_shape(0)));
bool rc = false;
NGRAPH_OP_SCOPE(
v0_Asin_evaluate,
rc = asinop::evaluate_asin(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return rc;
}


@ -56,18 +56,12 @@ namespace asinhop
out->set_unary(arg0);
switch (arg0->get_element_type())
{
TYPE_CASE(i32)(arg0, out);
break;
TYPE_CASE(i64)(arg0, out);
break;
TYPE_CASE(u32)(arg0, out);
break;
TYPE_CASE(u64)(arg0, out);
break;
TYPE_CASE(f16)(arg0, out);
break;
TYPE_CASE(f32)(arg0, out);
break;
NGRAPH_TYPE_CASE(evaluate_asinh, i32, arg0, out);
NGRAPH_TYPE_CASE(evaluate_asinh, i64, arg0, out);
NGRAPH_TYPE_CASE(evaluate_asinh, u32, arg0, out);
NGRAPH_TYPE_CASE(evaluate_asinh, u64, arg0, out);
NGRAPH_TYPE_CASE(evaluate_asinh, f16, arg0, out);
NGRAPH_TYPE_CASE(evaluate_asinh, f32, arg0, out);
default: rc = false; break;
}
return rc;
@ -76,6 +70,7 @@ namespace asinhop
bool op::v3::Asinh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v3::Asinh::evaluate");
return asinhop::evaluate_asinh(inputs[0], outputs[0]);
bool rc = false;
NGRAPH_OP_SCOPE(v3_Asinh_evaluate, rc = asinhop::evaluate_asinh(inputs[0], outputs[0]));
return rc;
}


@ -66,20 +66,13 @@ namespace atanop
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_atan, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_atan, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_atan, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_atan, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_atan, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_atan, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_atan, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -88,6 +81,9 @@ namespace atanop
bool op::Atan::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Atan::evaluate");
return atanop::evaluate_atan(inputs[0], outputs[0], shape_size(get_output_shape(0)));
bool rc = false;
NGRAPH_OP_SCOPE(
v0_Atan_evaluate,
rc = atanop::evaluate_atan(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return rc;
}


@ -56,18 +56,12 @@ namespace atanhop
out->set_unary(arg0);
switch (arg0->get_element_type())
{
TYPE_CASE(i32)(arg0, out);
break;
TYPE_CASE(i64)(arg0, out);
break;
TYPE_CASE(u32)(arg0, out);
break;
TYPE_CASE(u64)(arg0, out);
break;
TYPE_CASE(f16)(arg0, out);
break;
TYPE_CASE(f32)(arg0, out);
break;
NGRAPH_TYPE_CASE(evaluate_atanh, i32, arg0, out);
NGRAPH_TYPE_CASE(evaluate_atanh, i64, arg0, out);
NGRAPH_TYPE_CASE(evaluate_atanh, u32, arg0, out);
NGRAPH_TYPE_CASE(evaluate_atanh, u64, arg0, out);
NGRAPH_TYPE_CASE(evaluate_atanh, f16, arg0, out);
NGRAPH_TYPE_CASE(evaluate_atanh, f32, arg0, out);
default: rc = false; break;
}
return rc;
@ -76,6 +70,7 @@ namespace atanhop
bool op::v3::Atanh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v3::Atanh::evaluate");
return atanhop::evaluate_atanh(inputs[0], outputs[0]);
bool rc = false;
NGRAPH_OP_SCOPE(v3_Atanh_evaluate, rc = atanhop::evaluate_atanh(inputs[0], outputs[0]));
return rc;
}


@ -18,6 +18,7 @@
#include <memory>
#include <numeric>
#include <ops.hpp>
#include "itt.hpp"
#include "ngraph/builder/make_constant.hpp"
#include "ngraph/node.hpp"
@ -141,114 +142,123 @@ bool ngraph::op::v1::BatchToSpace::visit_attributes(ngraph::AttributeVisitor& vi
return true;
}
namespace
{
bool batch_to_space_evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
{
auto data = inputs[0];
size_t elem_size = data->get_element_type().size();
if (data->get_partial_shape().is_dynamic())
{
return false;
}
auto data_shape = data->get_shape();
if (!(data->get_shape().size() == 4 || data->get_shape().size() == 5))
{
return false;
}
size_t block_values_size = shape_size(inputs[1]->get_shape());
const auto* block_values = inputs[1]->get_data_ptr<int64_t>();
const auto* crops_begin_values = inputs[2]->get_data_ptr<int64_t>();
const auto* crops_end_values = inputs[3]->get_data_ptr<int64_t>();
Shape dispersed_shape(1);
dispersed_shape.insert(dispersed_shape.end(), data_shape.begin(), data_shape.end());
std::vector<size_t> axes_order(block_values_size + 1);
std::vector<size_t> plain_axes_order(block_values_size + 1);
std::iota(plain_axes_order.begin(), plain_axes_order.end(), 0);
Shape squeezed_shape(data_shape.begin(), data_shape.end());
if (squeezed_shape.size() > block_values_size)
{
return false;
}
auto* flat_data = data->get_data_ptr<char>();
std::vector<char> dispersed_data(shape_size(data_shape) * elem_size);
Shape post_transpose_shape(axes_order.size());
std::vector<char> post_transpose_data(shape_size(data_shape) * elem_size);
for (size_t block_idx = 1; block_idx < block_values_size; ++block_idx)
{
dispersed_shape[0] = block_values[block_idx];
dispersed_shape[1] /= block_values[block_idx];
runtime::opt_kernel::reshape(flat_data,
dispersed_data.data(),
data_shape,
plain_axes_order,
dispersed_shape,
elem_size);
size_t val = 1;
for (size_t axis_idx = 0; axis_idx <= block_values_size; ++axis_idx)
{
if ((block_idx + 1) == axis_idx)
{
axes_order[axis_idx] = 0;
}
else
{
axes_order[axis_idx] = val;
val++;
}
}
for (size_t axis_idx = 0; axis_idx < axes_order.size(); ++axis_idx)
{
post_transpose_shape[axis_idx] = dispersed_shape[axes_order[axis_idx]];
}
runtime::opt_kernel::reshape(dispersed_data.data(),
post_transpose_data.data(),
dispersed_shape,
axes_order,
post_transpose_shape,
elem_size);
squeezed_shape[0] = dispersed_shape[1];
squeezed_shape[block_idx] *= block_values[block_idx];
dispersed_shape[block_idx + 1] = squeezed_shape[block_idx];
runtime::opt_kernel::reshape(post_transpose_data.data(),
flat_data,
post_transpose_shape,
plain_axes_order,
squeezed_shape,
elem_size);
data_shape = squeezed_shape;
}
std::vector<int64_t> upperbounds_values(data_shape.size());
for (size_t i = 0; i < data_shape.size(); ++i)
{
upperbounds_values[i] = data_shape[i] - crops_end_values[i];
}
std::vector<size_t> begin_mask(data_shape.size(), 0);
std::vector<size_t> end_mask(data_shape.size(), 0);
std::vector<int64_t> begins(shape_size(inputs[2]->get_shape()));
begins.assign(crops_begin_values, crops_begin_values + shape_size(inputs[2]->get_shape()));
std::vector<int64_t> default_strides(begins.size(), 1);
SlicePlan slice_plan = make_slice_plan(data_shape,
begins,
upperbounds_values,
default_strides,
begin_mask,
end_mask,
AxisSet(),
AxisSet(),
AxisSet());
runtime::reference::strided_slice(
flat_data, outputs[0]->get_data_ptr<char>(), data_shape, slice_plan, elem_size);
return true;
}
}
bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
auto data = inputs[0];
size_t elem_size = data->get_element_type().size();
if (data->get_partial_shape().is_dynamic())
{
return false;
}
auto data_shape = data->get_shape();
if (!(data->get_shape().size() == 4 || data->get_shape().size() == 5))
{
return false;
}
size_t block_values_size = shape_size(inputs[1]->get_shape());
const auto* block_values = inputs[1]->get_data_ptr<int64_t>();
const auto* crops_begin_values = inputs[2]->get_data_ptr<int64_t>();
const auto* crops_end_values = inputs[3]->get_data_ptr<int64_t>();
Shape dispersed_shape(1);
dispersed_shape.insert(dispersed_shape.end(), data_shape.begin(), data_shape.end());
std::vector<size_t> axes_order(block_values_size + 1);
std::vector<size_t> plain_axes_order(block_values_size + 1);
std::iota(plain_axes_order.begin(), plain_axes_order.end(), 0);
Shape squeezed_shape(data_shape.begin(), data_shape.end());
if (squeezed_shape.size() > block_values_size)
{
return false;
}
auto* flat_data = data->get_data_ptr<char>();
std::vector<char> dispersed_data(shape_size(data_shape) * elem_size);
Shape post_transpose_shape(axes_order.size());
std::vector<char> post_transpose_data(shape_size(data_shape) * elem_size);
for (size_t block_idx = 1; block_idx < block_values_size; ++block_idx)
{
dispersed_shape[0] = block_values[block_idx];
dispersed_shape[1] /= block_values[block_idx];
runtime::opt_kernel::reshape(flat_data,
dispersed_data.data(),
data_shape,
plain_axes_order,
dispersed_shape,
elem_size);
size_t val = 1;
for (size_t axis_idx = 0; axis_idx <= block_values_size; ++axis_idx)
{
if ((block_idx + 1) == axis_idx)
{
axes_order[axis_idx] = 0;
}
else
{
axes_order[axis_idx] = val;
val++;
}
}
for (size_t axis_idx = 0; axis_idx < axes_order.size(); ++axis_idx)
{
post_transpose_shape[axis_idx] = dispersed_shape[axes_order[axis_idx]];
}
runtime::opt_kernel::reshape(dispersed_data.data(),
post_transpose_data.data(),
dispersed_shape,
axes_order,
post_transpose_shape,
elem_size);
squeezed_shape[0] = dispersed_shape[1];
squeezed_shape[block_idx] *= block_values[block_idx];
dispersed_shape[block_idx + 1] = squeezed_shape[block_idx];
runtime::opt_kernel::reshape(post_transpose_data.data(),
flat_data,
post_transpose_shape,
plain_axes_order,
squeezed_shape,
elem_size);
data_shape = squeezed_shape;
}
std::vector<int64_t> upperbounds_values(data_shape.size());
for (size_t i = 0; i < data_shape.size(); ++i)
{
upperbounds_values[i] = data_shape[i] - crops_end_values[i];
}
std::vector<size_t> begin_mask(data_shape.size(), 0);
std::vector<size_t> end_mask(data_shape.size(), 0);
std::vector<int64_t> begins(shape_size(inputs[2]->get_shape()));
begins.assign(crops_begin_values, crops_begin_values + shape_size(inputs[2]->get_shape()));
std::vector<int64_t> default_strides(begins.size(), 1);
SlicePlan slice_plan = make_slice_plan(data_shape,
begins,
upperbounds_values,
default_strides,
begin_mask,
end_mask,
AxisSet(),
AxisSet(),
AxisSet());
runtime::reference::strided_slice(
flat_data, outputs[0]->get_data_ptr<char>(), data_shape, slice_plan, elem_size);
return true;
}
NGRAPH_OP_SCOPE(v1_BatchToSpace, return batch_to_space_evaluate(outputs, inputs));
return false;
}
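
The apparent duplication in the hunk above is the diff at work: the old inline evaluate body is removed and re-added verbatim as the anonymous-namespace helper batch_to_space_evaluate, leaving the member function as the thin scoped shim:

bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs,
                                            const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v1_BatchToSpace, return batch_to_space_evaluate(outputs, inputs));
    return false;
}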


@ -142,6 +142,23 @@ namespace
}
}
bool op::v3::Broadcast::broadcast_evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
if (get_broadcast_spec().m_type == op::BroadcastType::BIDIRECTIONAL)
{
auto arg_shape = inputs[0]->get_shape();
Shape target_shape = op::util::BroadcastBase::get_target_shape(inputs[1]);
PartialShape result_shape =
get_result_shape_bidirectional(this, PartialShape{arg_shape}, target_shape);
auto pair_broadcast_axes =
get_broadcast_axes_bidirectional(arg_shape, result_shape.to_shape());
return op::util::BroadcastBase::evaluate_broadcast(
inputs[0], outputs[0], pair_broadcast_axes, result_shape.to_shape());
}
return op::util::BroadcastBase::evaluate(outputs, inputs);
}
void op::v3::Broadcast::validate_and_infer_types()
{
if (m_mode.m_type == BroadcastType::NONE)
@ -211,19 +228,8 @@ bool op::v3::Broadcast::visit_attributes(AttributeVisitor& visitor)
bool op::v3::Broadcast::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v3::Broadcast::evaluate");
if (get_broadcast_spec().m_type == op::BroadcastType::BIDIRECTIONAL)
{
auto arg_shape = inputs[0]->get_shape();
Shape target_shape = op::util::BroadcastBase::get_target_shape(inputs[1]);
PartialShape result_shape =
get_result_shape_bidirectional(this, PartialShape{arg_shape}, target_shape);
auto pair_broadcast_axes =
get_broadcast_axes_bidirectional(arg_shape, result_shape.to_shape());
return op::util::BroadcastBase::evaluate_broadcast(
inputs[0], outputs[0], pair_broadcast_axes, result_shape.to_shape());
}
return op::util::BroadcastBase::evaluate(outputs, inputs);
NGRAPH_OP_SCOPE(v3_Broadcast_evaluate, return broadcast_evaluate(outputs, inputs));
return false;
}
namespace
@ -312,6 +318,7 @@ bool op::v1::Broadcast::visit_attributes(AttributeVisitor& visitor)
bool op::v1::Broadcast::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Broadcast::evaluate");
return op::util::BroadcastBase::evaluate(outputs, inputs);
NGRAPH_OP_SCOPE(v1_Broadcast_evaluate,
return op::util::BroadcastBase::evaluate(outputs, inputs));
return false;
}


@ -64,28 +64,17 @@ namespace ceiling
switch (arg0->get_element_type())
{
COPY_TENSOR(boolean)(arg0, out, count);
break;
COPY_TENSOR(i8)(arg0, out, count);
break;
COPY_TENSOR(i16)(arg0, out, count);
break;
COPY_TENSOR(i32)(arg0, out, count);
break;
COPY_TENSOR(i64)(arg0, out, count);
break;
COPY_TENSOR(u8)(arg0, out, count);
break;
COPY_TENSOR(u16)(arg0, out, count);
break;
COPY_TENSOR(u32)(arg0, out, count);
break;
COPY_TENSOR(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_COPY_TENSOR(evaluate_ceiling, boolean, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_ceiling, i8, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_ceiling, i16, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_ceiling, i32, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_ceiling, i64, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_ceiling, u8, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_ceiling, u16, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_ceiling, u32, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_ceiling, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_ceiling, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_ceiling, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -94,6 +83,8 @@ namespace ceiling
bool op::Ceiling::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Ceiling::evaluate");
return ceiling::evaluate_ceiling(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v0_Ceiling_evaluate,
return ceiling::evaluate_ceiling(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}


@ -86,9 +86,11 @@ namespace clamp
bool op::v0::Clamp::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Clamp::evaluate");
return clamp::evaluate_clamp(
inputs[0], outputs[0], get_min(), get_max(), shape_size(get_input_shape(0)));
NGRAPH_OP_SCOPE(
v0_Clamp_evaluate,
return clamp::evaluate_clamp(
inputs[0], outputs[0], get_min(), get_max(), shape_size(get_input_shape(0))));
return false;
}
NGRAPH_RTTI_DEFINITION(op::v0::Clamp, "Clamp", 0);


@ -144,7 +144,9 @@ namespace
bool op::Concat::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Concat::evaluate");
auto concat_axis = get_axis() < 0 ? get_axis() + inputs[0]->get_shape().size() : get_axis();
return evaluate_concat(inputs, outputs[0], concat_axis);
NGRAPH_OP_SCOPE(v0_Concat_evaluate,
auto concat_axis =
get_axis() < 0 ? get_axis() + inputs[0]->get_shape().size() : get_axis();
return evaluate_concat(inputs, outputs[0], concat_axis));
return false;
}


@ -638,10 +638,10 @@ bool op::v0::Constant::visit_attributes(AttributeVisitor& visitor)
bool op::v0::Constant::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Constant::evaluate");
auto output = outputs[0];
output->write(get_data_ptr(), output->get_size_in_bytes());
return true;
NGRAPH_OP_SCOPE(v0_Constant_evaluate, auto output = outputs[0];
output->write(get_data_ptr(), output->get_size_in_bytes());
return true);
return false;
}
//


@ -63,8 +63,13 @@ namespace convert
true);
}
#define TYPE_OUT_CASE(a) \
case element::Type_t::a: rc = evaluate<INPUT_ET, element::Type_t::a>
#define TYPE_OUT_CASE(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_convert_out, _, a), \
rc = evaluate<INPUT_ET, element::Type_t::a>(__VA_ARGS__)); \
} \
break
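
Convert dispatches on both input and output element types, so it keeps a dedicated TYPE_OUT_CASE macro, now rewritten in the same variadic style: the output-type switch nests inside the input-type template, and each case gets a region named from the output type. Under the same assumptions as above, TYPE_OUT_CASE(f32, arg, out) expands to roughly:

case element::Type_t::f32:
{
    NGRAPH_OP_SCOPE(evaluate_convert_out_f32,
                    rc = evaluate<INPUT_ET, element::Type_t::f32>(arg, out));
}
break;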
template <element::Type_t INPUT_ET>
bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out)
@ -73,30 +78,18 @@ namespace convert
switch (out->get_element_type())
{
TYPE_OUT_CASE(i8)(arg, out);
break;
TYPE_OUT_CASE(i16)(arg, out);
break;
TYPE_OUT_CASE(i32)(arg, out);
break;
TYPE_OUT_CASE(i64)(arg, out);
break;
TYPE_OUT_CASE(u8)(arg, out);
break;
TYPE_OUT_CASE(u16)(arg, out);
break;
TYPE_OUT_CASE(u32)(arg, out);
break;
TYPE_OUT_CASE(u64)(arg, out);
break;
TYPE_OUT_CASE(bf16)(arg, out);
break;
TYPE_OUT_CASE(f16)(arg, out);
break;
TYPE_OUT_CASE(f32)(arg, out);
break;
TYPE_OUT_CASE(f64)(arg, out);
break;
TYPE_OUT_CASE(i8, arg, out);
TYPE_OUT_CASE(i16, arg, out);
TYPE_OUT_CASE(i32, arg, out);
TYPE_OUT_CASE(i64, arg, out);
TYPE_OUT_CASE(u8, arg, out);
TYPE_OUT_CASE(u16, arg, out);
TYPE_OUT_CASE(u32, arg, out);
TYPE_OUT_CASE(u64, arg, out);
TYPE_OUT_CASE(bf16, arg, out);
TYPE_OUT_CASE(f16, arg, out);
TYPE_OUT_CASE(f32, arg, out);
TYPE_OUT_CASE(f64, arg, out);
default: rc = false; break;
}
return rc;
@ -107,24 +100,15 @@ namespace convert
bool rc = true;
switch (arg->get_element_type())
{
TYPE_CASE(u8)(arg, out);
break;
TYPE_CASE(i8)(arg, out);
break;
TYPE_CASE(i32)(arg, out);
break;
TYPE_CASE(i16)(arg, out);
break;
TYPE_CASE(i64)(arg, out);
break;
TYPE_CASE(u32)(arg, out);
break;
TYPE_CASE(u64)(arg, out);
break;
TYPE_CASE(f16)(arg, out);
break;
TYPE_CASE(f32)(arg, out);
break;
NGRAPH_TYPE_CASE(evaluate_convert, u8, arg, out);
NGRAPH_TYPE_CASE(evaluate_convert, i8, arg, out);
NGRAPH_TYPE_CASE(evaluate_convert, i32, arg, out);
NGRAPH_TYPE_CASE(evaluate_convert, i16, arg, out);
NGRAPH_TYPE_CASE(evaluate_convert, i64, arg, out);
NGRAPH_TYPE_CASE(evaluate_convert, u32, arg, out);
NGRAPH_TYPE_CASE(evaluate_convert, u64, arg, out);
NGRAPH_TYPE_CASE(evaluate_convert, f16, arg, out);
NGRAPH_TYPE_CASE(evaluate_convert, f32, arg, out);
default: rc = false; break;
}
return rc;
@ -133,6 +117,7 @@ namespace convert
bool op::v0::Convert::evaluate(const HostTensorVector& output_values,
const HostTensorVector& input_values) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Convert::evaluate");
return convert::evaluate_convert(input_values[0], output_values[0]);
NGRAPH_OP_SCOPE(v0_Convert_evaluate,
return convert::evaluate_convert(input_values[0], output_values[0]));
return false;
}


@ -63,20 +63,13 @@ namespace cosop
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_cos, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cos, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cos, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cos, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cos, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cos, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cos, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -85,6 +78,8 @@ namespace cosop
bool op::Cos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Cos::evaluate");
return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v0_Cos_evaluate,
return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}


@ -62,20 +62,13 @@ namespace coshop
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_cosh, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cosh, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cosh, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cosh, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cosh, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cosh, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cosh, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -84,6 +77,8 @@ namespace coshop
bool op::Cosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Cosh::evaluate");
return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v0_Cosh_evaluate,
return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}


@ -19,6 +19,7 @@
#include <ngraph/op/constant.hpp>
#include <ngraph/ops.hpp>
#include <numeric>
#include "itt.hpp"
#include "depth_to_space.hpp"
#include "ngraph/builder/reshape.hpp"
@ -112,8 +113,8 @@ void op::DepthToSpace::validate_and_infer_types()
}
}
bool op::DepthToSpace::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
bool op::DepthToSpace::evaluate_depth_to_space(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
const auto& data = inputs[0];
const auto& out = outputs[0];
@ -158,10 +159,12 @@ bool op::DepthToSpace::evaluate(const HostTensorVector& outputs,
vector<size_t> axes_order{0};
switch (m_mode)
{
// x' = reshape(data, [N, C / (block_size ^ K), block_size, block_size, ..., block_size, D1, D2,
// x' = reshape(data, [N, C / (block_size ^ K), block_size, block_size, ..., block_size,
// D1, D2,
// ..., DK])
// x'' = transpose(x', [0, 1, K + 2, 2, K + 3, 3, K + 4, 4, ..., K + (K + 1), K + 1])
// y = reshape(x'', [N, C / (block_size ^ K), D1 * block_size, D2 * block_size, D3 * block_size,
// y = reshape(x'', [N, C / (block_size ^ K), D1 * block_size, D2 * block_size, D3 *
// block_size,
// ..., DK * block_size])
case DepthToSpaceMode::DEPTH_FIRST:
{
@ -175,10 +178,12 @@ bool op::DepthToSpace::evaluate(const HostTensorVector& outputs,
break;
}
// x' = reshape(data, [N, block_size, block_size, ..., block_size, C / (block_size ^ K), D1, D2,
// x' = reshape(data, [N, block_size, block_size, ..., block_size, C / (block_size ^ K),
// D1, D2,
// ..., DK])
// x'' = transpose(x', [0, K + 1, K + 2, 1, K + 3, 2, K + 4, 3, ..., K + (K + 1), K])
// y = reshape(x'', [N, C / (block_size ^ K), D1 * block_size, D2 * block_size, D3 * block_size,
// y = reshape(x'', [N, C / (block_size ^ K), D1 * block_size, D2 * block_size, D3 *
// block_size,
// ..., DK * block_size])
case DepthToSpaceMode::BLOCKS_FIRST:
default:
@ -234,6 +239,13 @@ bool op::DepthToSpace::evaluate(const HostTensorVector& outputs,
elem_size);
return true;
}
bool op::DepthToSpace::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_DepthToSpace_evaluate, return evaluate_depth_to_space(outputs, inputs));
return false;
}
namespace ngraph
{
template <>


@ -55,20 +55,13 @@ namespace divide
out->set_broadcast(broadcast_spec, arg0, arg1);
switch (arg0->get_element_type())
{
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec, pythondiv);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec, pythondiv);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec, pythondiv);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec, pythondiv);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec, pythondiv);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec, pythondiv);
break;
TYPE_CASE(bf16)(arg0, arg1, out, broadcast_spec, pythondiv);
break;
NGRAPH_TYPE_CASE(evaluate_divide, i32, arg0, arg1, out, broadcast_spec, pythondiv);
NGRAPH_TYPE_CASE(evaluate_divide, i64, arg0, arg1, out, broadcast_spec, pythondiv);
NGRAPH_TYPE_CASE(evaluate_divide, u32, arg0, arg1, out, broadcast_spec, pythondiv);
NGRAPH_TYPE_CASE(evaluate_divide, u64, arg0, arg1, out, broadcast_spec, pythondiv);
NGRAPH_TYPE_CASE(evaluate_divide, f16, arg0, arg1, out, broadcast_spec, pythondiv);
NGRAPH_TYPE_CASE(evaluate_divide, f32, arg0, arg1, out, broadcast_spec, pythondiv);
NGRAPH_TYPE_CASE(evaluate_divide, bf16, arg0, arg1, out, broadcast_spec, pythondiv);
default: rc = false; break;
}
return rc;
@ -113,6 +106,8 @@ shared_ptr<Node> op::v1::Divide::clone_with_new_inputs(const OutputVector& new_a
bool op::v1::Divide::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Divide::evaluate");
return divide::evaluate_divide(inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv());
NGRAPH_OP_SCOPE(v1_Divide_evaluate,
return divide::evaluate_divide(
inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv()));
return false;
}


@ -50,20 +50,13 @@ namespace equal
out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_equal, boolean, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_equal, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_equal, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_equal, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_equal, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_equal, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_equal, f32, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@ -90,6 +83,7 @@ shared_ptr<Node> op::v1::Equal::clone_with_new_inputs(const OutputVector& new_ar
bool op::v1::Equal::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Equal::evaluate");
return equal::evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob());
NGRAPH_OP_SCOPE(v1_Equal_evaluate,
return equal::evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob()));
return false;
}


@ -61,20 +61,13 @@ namespace erfop
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_erf, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_erf, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_erf, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_erf, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_erf, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_erf, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_erf, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -83,6 +76,8 @@ namespace erfop
bool op::Erf::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Erf::evaluate");
return erfop::evaluate_erf(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v0_Erf_evaluate,
return erfop::evaluate_erf(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}


@ -61,20 +61,13 @@ namespace expop
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_exp, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_exp, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_exp, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_exp, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_exp, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_exp, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_exp, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -83,6 +76,8 @@ namespace expop
bool op::Exp::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Exp::evaluate");
return expop::evaluate_exp(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v0_Exp_evaluate,
return expop::evaluate_exp(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}


@ -69,28 +69,17 @@ namespace floorop
switch (arg0->get_element_type())
{
COPY_TENSOR(boolean)(arg0, out, count);
break;
COPY_TENSOR(i8)(arg0, out, count);
break;
COPY_TENSOR(i16)(arg0, out, count);
break;
COPY_TENSOR(i32)(arg0, out, count);
break;
COPY_TENSOR(i64)(arg0, out, count);
break;
COPY_TENSOR(u8)(arg0, out, count);
break;
COPY_TENSOR(u16)(arg0, out, count);
break;
COPY_TENSOR(u32)(arg0, out, count);
break;
COPY_TENSOR(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_COPY_TENSOR(evaluate_floor, boolean, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_floor, i8, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_floor, i16, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_floor, i32, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_floor, i64, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_floor, u8, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_floor, u16, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_floor, u32, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_floor, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_floor, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_floor, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -99,6 +88,8 @@ namespace floorop
bool op::Floor::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Floor::evaluate");
return floorop::evaluate_floor(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v0_Floor_evaluate,
return floorop::evaluate_floor(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}


@ -64,24 +64,15 @@ namespace floor_mod
out->set_broadcast(broadcast_spec, arg0, arg1);
switch (arg0->get_element_type())
{
TYPE_CASE(i8)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u8)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(bf16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_floor_mod, i8, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_floor_mod, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_floor_mod, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_floor_mod, u8, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_floor_mod, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_floor_mod, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_floor_mod, bf16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_floor_mod, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_floor_mod, f32, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@ -91,8 +82,10 @@ namespace floor_mod
bool op::v1::FloorMod::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::FloorMod::evaluate");
return floor_mod::evaluate_floor_mod(inputs[0], inputs[1], outputs[0], get_autob());
NGRAPH_OP_SCOPE(
v1_FloorMod_evaluate,
return floor_mod::evaluate_floor_mod(inputs[0], inputs[1], outputs[0], get_autob()));
return false;
}
bool op::v1::FloorMod::visit_attributes(AttributeVisitor& visitor)


@ -204,20 +204,13 @@ namespace gather
switch (out->get_element_type())
{
TYPE_CASE(i32)(arg0, arg1, out, axis);
break;
TYPE_CASE(i64)(arg0, arg1, out, axis);
break;
TYPE_CASE(u32)(arg0, arg1, out, axis);
break;
TYPE_CASE(u64)(arg0, arg1, out, axis);
break;
TYPE_CASE(f16)(arg0, arg1, out, axis);
break;
TYPE_CASE(f32)(arg0, arg1, out, axis);
break;
TYPE_CASE(boolean)(arg0, arg1, out, axis);
break;
NGRAPH_TYPE_CASE(evaluate_gather, i32, arg0, arg1, out, axis);
NGRAPH_TYPE_CASE(evaluate_gather, i64, arg0, arg1, out, axis);
NGRAPH_TYPE_CASE(evaluate_gather, u32, arg0, arg1, out, axis);
NGRAPH_TYPE_CASE(evaluate_gather, u64, arg0, arg1, out, axis);
NGRAPH_TYPE_CASE(evaluate_gather, f16, arg0, arg1, out, axis);
NGRAPH_TYPE_CASE(evaluate_gather, f32, arg0, arg1, out, axis);
NGRAPH_TYPE_CASE(evaluate_gather, boolean, arg0, arg1, out, axis);
default: rc = false; break;
}
return rc;
@ -290,9 +283,9 @@ namespace gather
}
} // namespace gather
bool op::v1::Gather::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
bool op::v1::Gather::evaluate_gather(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Gather::evaluate");
int64_t axis = 0;
switch (inputs[2]->get_element_type())
{
@ -318,6 +311,12 @@ bool op::v1::Gather::evaluate(const HostTensorVector& outputs, const HostTensorV
return gather::evaluate_gather(inputs[0], inputs[1], outputs[0], axis);
}
bool op::v1::Gather::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Gather_evaluate, return evaluate_gather(outputs, inputs));
return false;
}
bool op::v1::Gather::constant_fold(OutputVector& output_values, const OutputVector& input_values)
{
// try the regular constant folding just for the Gather node


@ -50,20 +50,13 @@ namespace greaterop
out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_greater, boolean, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_greater, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_greater, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_greater, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_greater, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_greater, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_greater, f32, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@ -91,6 +84,8 @@ shared_ptr<Node> op::v1::Greater::clone_with_new_inputs(const OutputVector& new_
bool op::v1::Greater::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Greater::evaluate");
return greaterop::evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob());
NGRAPH_OP_SCOPE(
v1_Greater_evaluate,
return greaterop::evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob()));
return false;
}


@ -50,20 +50,13 @@ namespace greater_equalop
out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_greater_equal, boolean, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_greater_equal, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_greater_equal, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_greater_equal, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_greater_equal, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_greater_equal, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_greater_equal, f32, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@ -91,6 +84,8 @@ shared_ptr<Node> op::v1::GreaterEqual::clone_with_new_inputs(const OutputVector&
bool op::v1::GreaterEqual::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::GreaterEqual::evaluate");
return greater_equalop::evaluate_greater_equal(inputs[0], inputs[1], outputs[0], get_autob());
NGRAPH_OP_SCOPE(v1_GreaterEqual_evaluate,
return greater_equalop::evaluate_greater_equal(
inputs[0], inputs[1], outputs[0], get_autob()));
return false;
}


@ -15,6 +15,7 @@
//*****************************************************************************
#include "ngraph/op/hsigmoid.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/constant.hpp"
@ -60,12 +61,9 @@ namespace
switch (arg->get_element_type())
{
TYPE_CASE(bf16)(arg, out, count);
break;
TYPE_CASE(f16)(arg, out, count);
break;
TYPE_CASE(f32)(arg, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_hsigmoid, bf16, arg, out, count);
NGRAPH_TYPE_CASE(evaluate_hsigmoid, f16, arg, out, count);
NGRAPH_TYPE_CASE(evaluate_hsigmoid, f32, arg, out, count);
default: rc = false; break;
}
return rc;
@ -75,5 +73,8 @@ namespace
bool op::v5::HSigmoid::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
return evaluate_hsigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v5_HSigmoid_evaluate,
return evaluate_hsigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}


@ -15,6 +15,7 @@
//*****************************************************************************
#include "ngraph/op/hswish.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/constant.hpp"
@ -60,12 +61,9 @@ namespace hswish
switch (arg->get_element_type())
{
TYPE_CASE(bf16)(arg, out, count);
break;
TYPE_CASE(f16)(arg, out, count);
break;
TYPE_CASE(f32)(arg, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_hswish, bf16, arg, out, count);
NGRAPH_TYPE_CASE(evaluate_hswish, f16, arg, out, count);
NGRAPH_TYPE_CASE(evaluate_hswish, f32, arg, out, count);
default: rc = false; break;
}
return rc;
@ -74,5 +72,8 @@ namespace hswish
bool op::v4::HSwish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
return hswish::evaluate_hswish(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v4_HSwish_evaluate,
return hswish::evaluate_hswish(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}


@ -19,6 +19,7 @@
#include <cmath>
#include <cstring>
#include <numeric>
#include "itt.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/runtime/reference/interpolate.hpp"
@ -417,8 +418,8 @@ static void pad_input_data(const uint8_t* data_ptr,
}
}
bool op::v4::Interpolate::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
bool op::v4::Interpolate::evaluate_interpolate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
element::Type input_et = get_input_element_type(0);
size_t type_size = input_et.size();
@ -493,6 +494,13 @@ bool op::v4::Interpolate::evaluate(const HostTensorVector& outputs,
return true;
}
bool op::v4::Interpolate::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v4_Interpolate_evaluate, return evaluate_interpolate(outputs, inputs));
return false;
}
namespace ngraph
{
template <>


@ -50,20 +50,13 @@ namespace lessop
out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_less, boolean, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_less, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_less, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_less, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_less, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_less, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_less, f32, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@ -90,6 +83,7 @@ shared_ptr<Node> op::v1::Less::clone_with_new_inputs(const OutputVector& new_arg
bool op::v1::Less::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Less::evaluate");
return lessop::evaluate_less(inputs[0], inputs[1], outputs[0], get_autob());
NGRAPH_OP_SCOPE(v1_Less_evaluate,
return lessop::evaluate_less(inputs[0], inputs[1], outputs[0], get_autob()));
return false;
}


@ -68,20 +68,13 @@ namespace less_equalop
out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_less_equal, boolean, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_less_equal, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_less_equal, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_less_equal, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_less_equal, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_less_equal, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_less_equal, f32, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@ -91,6 +84,8 @@ namespace less_equalop
bool op::v1::LessEqual::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::LessEqual::evaluate");
return less_equalop::evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob());
NGRAPH_OP_SCOPE(
v1_LessEqual_evaluate,
return less_equalop::evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob()));
return false;
}


@ -61,20 +61,13 @@ namespace logop
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_log, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_log, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_log, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_log, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_log, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_log, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_log, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -83,6 +76,8 @@ namespace logop
bool op::Log::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Log::evaluate");
return logop::evaluate_log(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v0_Log_evaluate,
return logop::evaluate_log(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}


@ -404,8 +404,13 @@ Output<Node> op::v5::Loop::get_concatenated_slices(const Output<Node>& value,
bool op::v5::Loop::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v5::Loop::evaluate");
runtime::reference::loop(
m_body, m_output_descriptions, m_input_descriptions, m_special_body_ports, outputs, inputs);
return true;
}
NGRAPH_OP_SCOPE(v5_Loop_evaluate,
runtime::reference::loop(m_body,
m_output_descriptions,
m_input_descriptions,
m_special_body_ports,
outputs,
inputs);
return true);
return false;
}


@ -245,18 +245,12 @@ namespace matmul
switch (arg0->get_element_type())
{
TYPE_CASE(i32)(arg0, arg1, output, transpose_a, transpose_b);
break;
TYPE_CASE(i64)(arg0, arg1, output, transpose_a, transpose_b);
break;
TYPE_CASE(u32)(arg0, arg1, output, transpose_a, transpose_b);
break;
TYPE_CASE(u64)(arg0, arg1, output, transpose_a, transpose_b);
break;
TYPE_CASE(f16)(arg0, arg1, output, transpose_a, transpose_b);
break;
TYPE_CASE(f32)(arg0, arg1, output, transpose_a, transpose_b);
break;
NGRAPH_TYPE_CASE(evaluate_matmul, i32, arg0, arg1, output, transpose_a, transpose_b);
NGRAPH_TYPE_CASE(evaluate_matmul, i64, arg0, arg1, output, transpose_a, transpose_b);
NGRAPH_TYPE_CASE(evaluate_matmul, u32, arg0, arg1, output, transpose_a, transpose_b);
NGRAPH_TYPE_CASE(evaluate_matmul, u64, arg0, arg1, output, transpose_a, transpose_b);
NGRAPH_TYPE_CASE(evaluate_matmul, f16, arg0, arg1, output, transpose_a, transpose_b);
NGRAPH_TYPE_CASE(evaluate_matmul, f32, arg0, arg1, output, transpose_a, transpose_b);
default: rc = false; break;
}
return rc;
@ -265,9 +259,10 @@ namespace matmul
bool op::MatMul::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::MatMul::evaluate");
return matmul::evaluate_matmul(
inputs[0], inputs[1], outputs[0], get_transpose_a(), get_transpose_b());
NGRAPH_OP_SCOPE(v0_MatMul_evaluate,
return matmul::evaluate_matmul(
inputs[0], inputs[1], outputs[0], get_transpose_a(), get_transpose_b()));
return false;
}
void ngraph::op::v0::MatMul::validate_and_infer_types()


@ -46,18 +46,12 @@ namespace maxop
bool rc = true;
switch (arg->get_element_type())
{
TYPE_CASE(i32)(arg, out, axes, keep_dims);
break;
TYPE_CASE(i64)(arg, out, axes, keep_dims);
break;
TYPE_CASE(u32)(arg, out, axes, keep_dims);
break;
TYPE_CASE(u64)(arg, out, axes, keep_dims);
break;
TYPE_CASE(f16)(arg, out, axes, keep_dims);
break;
TYPE_CASE(f32)(arg, out, axes, keep_dims);
break;
NGRAPH_TYPE_CASE(evaluate_max, i32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_max, i64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_max, u32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_max, u64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_max, f16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_max, f32, arg, out, axes, keep_dims);
default: rc = false; break;
}
return rc;
@ -83,6 +77,8 @@ shared_ptr<Node> op::v1::ReduceMax::clone_with_new_inputs(const OutputVector& ne
bool op::v1::ReduceMax::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::ReduceMax::evaluate");
return maxop::evaluate_max(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
NGRAPH_OP_SCOPE(
v1_ReduceMax_evaluate,
return maxop::evaluate_max(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
return false;
}


@ -182,29 +182,27 @@ namespace maxpool
switch (out->get_element_type())
{
TYPE_CASE(i32)(arg, out, out_shape, kernel, strides, pad_begin, pad_end);
break;
TYPE_CASE(i64)(arg, out, out_shape, kernel, strides, pad_begin, pad_end);
break;
TYPE_CASE(u32)(arg, out, out_shape, kernel, strides, pad_begin, pad_end);
break;
TYPE_CASE(u64)(arg, out, out_shape, kernel, strides, pad_begin, pad_end);
break;
TYPE_CASE(f16)(arg, out, out_shape, kernel, strides, pad_begin, pad_end);
break;
TYPE_CASE(f32)(arg, out, out_shape, kernel, strides, pad_begin, pad_end);
break;
NGRAPH_TYPE_CASE(
evaluate_maxpool, i32, arg, out, out_shape, kernel, strides, pad_begin, pad_end);
NGRAPH_TYPE_CASE(
evaluate_maxpool, i64, arg, out, out_shape, kernel, strides, pad_begin, pad_end);
NGRAPH_TYPE_CASE(
evaluate_maxpool, u32, arg, out, out_shape, kernel, strides, pad_begin, pad_end);
NGRAPH_TYPE_CASE(
evaluate_maxpool, u64, arg, out, out_shape, kernel, strides, pad_begin, pad_end);
NGRAPH_TYPE_CASE(
evaluate_maxpool, f16, arg, out, out_shape, kernel, strides, pad_begin, pad_end);
NGRAPH_TYPE_CASE(
evaluate_maxpool, f32, arg, out, out_shape, kernel, strides, pad_begin, pad_end);
default: rc = false; break;
}
return rc;
}
} // namespace
bool op::v1::MaxPool::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
bool op::v1::MaxPool::evaluate_maxpool(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::MaxPool::evaluate");
auto arg_shape = inputs[0]->get_partial_shape();
auto pads_begin_s = get_pads_begin();
auto pads_end_s = get_pads_end();
@ -228,3 +226,9 @@ bool op::v1::MaxPool::evaluate(const HostTensorVector& outputs,
get_pads_begin(),
get_pads_end());
}
bool op::v1::MaxPool::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_MaxPool_evaluate, return evaluate_maxpool(outputs, inputs));
return false;
}


@ -58,18 +58,12 @@ namespace maximumop
out->set_broadcast(broadcast_spec, arg0, arg1);
switch (arg0->get_element_type())
{
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_maximum, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_maximum, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_maximum, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_maximum, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_maximum, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_maximum, f32, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@ -97,6 +91,8 @@ shared_ptr<Node> op::v1::Maximum::clone_with_new_inputs(const OutputVector& new_
bool op::v1::Maximum::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Maximum::evaluate");
return maximumop::evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob());
NGRAPH_OP_SCOPE(
v1_Maximum_evaluate,
return maximumop::evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob()));
return false;
}


@ -48,18 +48,12 @@ namespace minop
bool rc = true;
switch (arg->get_element_type())
{
TYPE_CASE(i32)(arg, out, axes, keep_dims);
break;
TYPE_CASE(i64)(arg, out, axes, keep_dims);
break;
TYPE_CASE(u32)(arg, out, axes, keep_dims);
break;
TYPE_CASE(u64)(arg, out, axes, keep_dims);
break;
TYPE_CASE(f16)(arg, out, axes, keep_dims);
break;
TYPE_CASE(f32)(arg, out, axes, keep_dims);
break;
NGRAPH_TYPE_CASE(evaluate_min, i32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_min, i64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_min, u32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_min, u64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_min, f16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_min, f32, arg, out, axes, keep_dims);
default: rc = false; break;
}
return rc;
@ -85,6 +79,8 @@ shared_ptr<Node> op::v1::ReduceMin::clone_with_new_inputs(const OutputVector& ne
bool op::v1::ReduceMin::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::ReduceMin::evaluate");
return minop::evaluate_min(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
NGRAPH_OP_SCOPE(
v1_ReduceMin_evaluate,
return minop::evaluate_min(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
return false;
}


@ -56,18 +56,12 @@ namespace minimumop
out->set_broadcast(broadcast_spec, arg0, arg1);
switch (arg0->get_element_type())
{
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_minimum, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_minimum, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_minimum, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_minimum, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_minimum, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_minimum, f32, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@ -95,6 +89,8 @@ shared_ptr<Node> op::v1::Minimum::clone_with_new_inputs(const OutputVector& new_
bool op::v1::Minimum::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Minimum::evaluate");
return minimumop::evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob());
NGRAPH_OP_SCOPE(
v1_Minimum_evaluate,
return minimumop::evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob()));
return false;
}


@ -67,10 +67,8 @@ namespace mish
switch (arg0->get_element_type())
{
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_mish, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_mish, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -79,6 +77,8 @@ namespace mish
bool op::v4::Mish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v4::Mish::evaluate");
return mish::evaluate_mish(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v4_Mish_evaluate,
return mish::evaluate_mish(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}


@ -50,20 +50,13 @@ namespace multiplyop
out->set_broadcast(broadcast_spec, arg0, arg1);
switch (arg0->get_element_type())
{
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(bf16)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_multiply, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_multiply, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_multiply, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_multiply, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_multiply, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_multiply, f32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_multiply, bf16, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@ -91,8 +84,10 @@ shared_ptr<Node> op::v0::Multiply::clone_with_new_inputs(const OutputVector& new
bool op::v0::Multiply::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Multiply::evaluate");
return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());
NGRAPH_OP_SCOPE(
v0_Multiply_evaluate,
return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob()));
return false;
}
// ------------------------------------ v1 -------------------------------------
@ -116,6 +111,8 @@ shared_ptr<Node> op::v1::Multiply::clone_with_new_inputs(const OutputVector& new
bool op::v1::Multiply::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Multiply::evaluate");
return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());
NGRAPH_OP_SCOPE(
v1_Multiply_evaluate,
return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob()));
return false;
}


@ -58,20 +58,13 @@ namespace negativeop
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_negative, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_negative, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_negative, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_negative, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_negative, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_negative, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_negative, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -80,8 +73,10 @@ namespace negativeop
bool op::Negative::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Negative::evaluate");
return negativeop::evaluate_negative(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(v0_Negative_evaluate,
return negativeop::evaluate_negative(
inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}
shared_ptr<Node> ngraph::operator-(const Output<Node>& arg0)


@ -115,18 +115,21 @@ namespace nonzero
return true;
}
#define TYPE_OUT_CASE(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_nonzero_out, _, a), \
rc = evaluate_nonzero_execute<INPUT_ET, element::Type_t::a>(__VA_ARGS__)); \
} \
break;
template <element::Type_t INPUT_ET>
bool evaluate(const HostTensorPtr& input, const HostTensorPtr& output)
{
bool rc = true;
switch (output->get_element_type())
{
case element::Type_t::i64:
rc = evaluate_nonzero_execute<INPUT_ET, element::Type_t::i64>(input, output);
break;
case element::Type_t::i32:
rc = evaluate_nonzero_execute<INPUT_ET, element::Type_t::i32>(input, output);
break;
TYPE_OUT_CASE(i64, input, output);
TYPE_OUT_CASE(i32, input, output);
default: rc = false; break;
}
@ -139,20 +142,13 @@ namespace nonzero
switch (input->get_element_type())
{
TYPE_CASE(i32)(input, output);
break;
TYPE_CASE(i64)(input, output);
break;
TYPE_CASE(u8)(input, output);
break;
TYPE_CASE(u32)(input, output);
break;
TYPE_CASE(u64)(input, output);
break;
TYPE_CASE(f16)(input, output);
break;
TYPE_CASE(f32)(input, output);
break;
NGRAPH_TYPE_CASE(evaluate_nonzero, i32, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, i64, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, u8, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, u32, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, u64, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, f16, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, f32, input, output);
default: rc = false; break;
}
return rc;
@ -162,6 +158,6 @@ namespace nonzero
bool op::v3::NonZero::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v3::NonZero::evaluate");
return nonzero::evaluate_nonzero(inputs[0], outputs[0]);
NGRAPH_OP_SCOPE(v3_NonZero_evaluate, return nonzero::evaluate_nonzero(inputs[0], outputs[0]));
return false;
}
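OV_CC_CAT3 is also only ever called in these hunks, never defined. Assuming it follows the standard deferred token-pasting idiom, region names such as evaluate_nonzero_out_i32 would be produced like this (hypothetical equivalent; the real macro lives in the conditional_compilation headers):

// The extra level of indirection forces macro arguments to expand
// before the tokens are pasted together.
#define CC_CAT3_IMPL(x, y, z) x##y##z
#define CC_CAT3(x, y, z) CC_CAT3_IMPL(x, y, z)
// CC_CAT3(evaluate_nonzero_out, _, i32) -> evaluate_nonzero_out_i32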


@ -75,20 +75,13 @@ namespace notop
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_not, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_not, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_not, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_not, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_not, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_not, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_not, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -98,6 +91,8 @@ namespace notop
bool op::v1::LogicalNot::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::LogicalNot::evaluate");
return notop::evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v1_LogicalNot_evaluate,
return notop::evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}


@ -50,20 +50,13 @@ namespace not_equalop
out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_not_equal, boolean, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_not_equal, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_not_equal, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_not_equal, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_not_equal, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_not_equal, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_not_equal, f32, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@ -91,8 +84,10 @@ shared_ptr<Node> op::v1::NotEqual::clone_with_new_inputs(const OutputVector& new
bool op::v1::NotEqual::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::NotEqual::evaluate");
return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob());
NGRAPH_OP_SCOPE(
v1_NotEqual_evaluate,
return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob()));
return false;
}
bool op::v1::NotEqual::visit_attributes(AttributeVisitor& visitor)


@ -15,6 +15,7 @@
//*****************************************************************************
#include "ngraph/op/one_hot.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/runtime/reference/one_hot.hpp"
@ -134,7 +135,7 @@ shared_ptr<Node> op::v1::OneHot::clone_with_new_inputs(const OutputVector& new_a
namespace detail
{
template <typename ind_t, typename out_t>
void evaluate(const HostTensorVector& output_values,
bool evaluate(const HostTensorVector& output_values,
const HostTensorVector& input_values,
const int64_t axis)
{
@ -152,27 +153,35 @@ namespace detail
axis,
on_value->get_data_ptr<out_t>()[0],
off_value->get_data_ptr<out_t>()[0]);
return true;
}
template <typename out_t>
bool dispatch_by_output_type(const HostTensorVector& output_values,
const HostTensorVector& input_values,
const int64_t axis)
#define TYPE_OUT_CASE(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_one_hot_out, _, a), \
using IT = typename element_type_traits<element::Type_t::a>::value_type; \
using OT = typename element_type_traits<out_t>::value_type; \
rc = evaluate<IT, OT>(__VA_ARGS__)); \
} \
break;
template <element::Type_t out_t>
bool evaluate(const HostTensorVector& output_values,
const HostTensorVector& input_values,
const int64_t axis)
{
const auto& indices = input_values[0];
bool rc = true;
switch (indices->get_element_type())
{
case element::Type_t::i32:
evaluate<int32_t, out_t>(output_values, input_values, axis);
break;
case element::Type_t::i64:
evaluate<int64_t, out_t>(output_values, input_values, axis);
break;
default: return false; break;
TYPE_OUT_CASE(i32, output_values, input_values, axis);
TYPE_OUT_CASE(i64, output_values, input_values, axis);
default: rc = false; break;
}
return true;
return rc;
}
bool evaluate_onehot(const HostTensorVector& output_values,
@ -181,27 +190,23 @@ namespace detail
{
const auto& on_value = input_values[2];
bool rc = false;
switch (on_value->get_element_type())
{
case element::Type_t::boolean:
return dispatch_by_output_type<char>(output_values, input_values, axis);
break;
case element::Type_t::f32:
return dispatch_by_output_type<float>(output_values, input_values, axis);
break;
case element::Type_t::i32:
return dispatch_by_output_type<int32_t>(output_values, input_values, axis);
break;
case element::Type_t::i64:
return dispatch_by_output_type<int64_t>(output_values, input_values, axis);
break;
default: return false;
NGRAPH_TYPE_CASE(evaluate_onehot, boolean, output_values, input_values, axis);
NGRAPH_TYPE_CASE(evaluate_onehot, f32, output_values, input_values, axis);
NGRAPH_TYPE_CASE(evaluate_onehot, i32, output_values, input_values, axis);
NGRAPH_TYPE_CASE(evaluate_onehot, i64, output_values, input_values, axis);
default: rc = false;
}
return rc;
}
} // namespace detail
bool op::v1::OneHot::evaluate(const HostTensorVector& output_values,
const HostTensorVector& input_values) const
{
return detail::evaluate_onehot(output_values, input_values, get_axis());
NGRAPH_OP_SCOPE(v1_OneHot_evaluate,
return detail::evaluate_onehot(output_values, input_values, get_axis()));
return false;
}


@ -66,20 +66,13 @@ namespace logor
out->set_broadcast(broadcast_spec, arg0, arg1);
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_logor, boolean, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logor, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logor, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logor, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logor, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logor, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logor, f32, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@ -89,6 +82,7 @@ namespace logor
bool op::v1::LogicalOr::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::LogicalOr::evaluate");
return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob());
NGRAPH_OP_SCOPE(v1_LogicalOr_evaluate,
return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob()));
return false;
}


@ -15,6 +15,7 @@
//*****************************************************************************
#include "ngraph/op/pad.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/except.hpp"
#include "ngraph/op/broadcast.hpp"
@ -209,7 +210,8 @@ shared_ptr<Node> op::v1::Pad::clone_with_new_inputs(const OutputVector& new_args
}
}
bool op::v1::Pad::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
bool op::v1::Pad::evaluate_pad(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
const auto& data = inputs[0];
const auto elem_size = data->get_element_type().size();
@ -238,3 +240,9 @@ bool op::v1::Pad::evaluate(const HostTensorVector& outputs, const HostTensorVect
return true;
}
bool op::v1::Pad::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Pad_evaluate, return evaluate_pad(outputs, inputs));
return false;
}


@ -53,20 +53,13 @@ namespace power
out->set_broadcast(broadcast_spec, arg0, arg1);
switch (arg0->get_element_type())
{
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(bf16)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_power, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_power, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_power, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_power, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_power, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_power, f32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_power, bf16, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@ -93,6 +86,7 @@ shared_ptr<Node> op::v1::Power::clone_with_new_inputs(const OutputVector& new_ar
bool op::v1::Power::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Power::evaluate");
return power::evaluate_power(inputs[0], inputs[1], outputs[0], get_autob());
NGRAPH_OP_SCOPE(v1_Power_evaluate,
return power::evaluate_power(inputs[0], inputs[1], outputs[0], get_autob()));
return false;
}


@ -115,14 +115,10 @@ namespace prelu
bool rc = true;
switch (arg->get_element_type())
{
TYPE_CASE(i8)(arg, slope, out);
break;
TYPE_CASE(bf16)(arg, slope, out);
break;
TYPE_CASE(f16)(arg, slope, out);
break;
TYPE_CASE(f32)(arg, slope, out);
break;
NGRAPH_TYPE_CASE(evaluate_prelu, i8, arg, slope, out);
NGRAPH_TYPE_CASE(evaluate_prelu, bf16, arg, slope, out);
NGRAPH_TYPE_CASE(evaluate_prelu, f16, arg, slope, out);
NGRAPH_TYPE_CASE(evaluate_prelu, f32, arg, slope, out);
default: rc = false; break;
}
return rc;
@ -131,6 +127,7 @@ namespace prelu
bool op::PRelu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::PRelu::evaluate");
return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]);
NGRAPH_OP_SCOPE(v0_PRelu_evaluate,
return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]));
return false;
}


@ -175,22 +175,14 @@ namespace prior_box
bool rc = true;
switch (arg0->get_element_type())
{
TYPE_CASE(i8)(arg0, arg1, out, attrs);
break;
TYPE_CASE(i16)(arg0, arg1, out, attrs);
break;
TYPE_CASE(i32)(arg0, arg1, out, attrs);
break;
TYPE_CASE(i64)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u8)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u16)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u32)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u64)(arg0, arg1, out, attrs);
break;
NGRAPH_TYPE_CASE(evaluate_prior_box, i8, arg0, arg1, out, attrs);
NGRAPH_TYPE_CASE(evaluate_prior_box, i16, arg0, arg1, out, attrs);
NGRAPH_TYPE_CASE(evaluate_prior_box, i32, arg0, arg1, out, attrs);
NGRAPH_TYPE_CASE(evaluate_prior_box, i64, arg0, arg1, out, attrs);
NGRAPH_TYPE_CASE(evaluate_prior_box, u8, arg0, arg1, out, attrs);
NGRAPH_TYPE_CASE(evaluate_prior_box, u16, arg0, arg1, out, attrs);
NGRAPH_TYPE_CASE(evaluate_prior_box, u32, arg0, arg1, out, attrs);
NGRAPH_TYPE_CASE(evaluate_prior_box, u64, arg0, arg1, out, attrs);
default: rc = false; break;
}
return rc;
@ -200,9 +192,11 @@ namespace prior_box
bool op::v0::PriorBox::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::PriorBox::evaluate");
NGRAPH_OP_SCOPE(v0_PriorBox_evaluate,
// Todo (itikhono): enable the use of the reference implementation after
// supporting constants as outputs in plugins
// return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
return false);
return false;
// Todo (itikhono): enable the use of the reference implementation after supporting constants as
// outputs in plugins
// return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
}


@ -148,22 +148,14 @@ namespace prior_box_clustered
bool rc = true;
switch (arg0->get_element_type())
{
TYPE_CASE(i8)(arg0, arg1, out, attrs);
break;
TYPE_CASE(i16)(arg0, arg1, out, attrs);
break;
TYPE_CASE(i32)(arg0, arg1, out, attrs);
break;
TYPE_CASE(i64)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u8)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u16)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u32)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u64)(arg0, arg1, out, attrs);
break;
NGRAPH_TYPE_CASE(evaluate_prior_box, i8, arg0, arg1, out, attrs);
NGRAPH_TYPE_CASE(evaluate_prior_box, i16, arg0, arg1, out, attrs);
NGRAPH_TYPE_CASE(evaluate_prior_box, i32, arg0, arg1, out, attrs);
NGRAPH_TYPE_CASE(evaluate_prior_box, i64, arg0, arg1, out, attrs);
NGRAPH_TYPE_CASE(evaluate_prior_box, u8, arg0, arg1, out, attrs);
NGRAPH_TYPE_CASE(evaluate_prior_box, u16, arg0, arg1, out, attrs);
NGRAPH_TYPE_CASE(evaluate_prior_box, u32, arg0, arg1, out, attrs);
NGRAPH_TYPE_CASE(evaluate_prior_box, u64, arg0, arg1, out, attrs);
default: rc = false; break;
}
return rc;
@ -173,9 +165,11 @@ namespace prior_box_clustered
bool op::v0::PriorBoxClustered::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::PriorBoxClustered::evaluate");
NGRAPH_OP_SCOPE(v0_PriorBoxClustered_evaluate,
// Todo (itikhono): enable the use of the reference implementation after
// supporting constants as outputs in plugins
// return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
return false);
return false;
// Todo (itikhono): enable the use of the reference implementation after supporting constants as
// outputs in plugins
// return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
}


@ -26,6 +26,36 @@
using namespace std;
using namespace ngraph;
//
// The code in the following three functions is a bit awkward, to work around some compiler
// warnings and the need to support our custom float16/bfloat16 type:
//
// (1) We can't use STL things like isnan, because our custom float16/bfloat16 types don't always
// support them.
// (2) We check whether (x - x) == (x - x) to check for "is_finite".
// (3) We have to break (x - x) out into a temporary because otherwise the compiler throws a
// warning about == on floats.
// (4) We check <0 || >0 to check for != 0, because otherwise the compiler throws a warning about
// == on floats.
//
template <typename T>
static typename std::enable_if<std::is_integral<T>::value, bool>::type check_value(T value)
{
// Nothing to check for integral types.
return true;
}
template <typename T>
static
typename std::enable_if<std::is_floating_point<T>::value || std::is_same<T, float16>::value ||
std::is_same<T, bfloat16>::value,
bool>::type
check_value(T value)
{
T value_minus_value = value - value;
return value == value && value_minus_value == value_minus_value;
}
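A quick illustration of the trick described in the comment above (hypothetical test code, assuming IEEE-style NaN/inf behaviour for the element type):

#include <cassert>
#include <limits>

void check_value_demo()
{
    assert(check_value(1.5f)); // finite: both tests pass
    // NaN fails value == value.
    assert(!check_value(std::numeric_limits<float>::quiet_NaN()));
    // inf equals itself, but inf - inf is NaN, so the (x - x) test fails.
    assert(!check_value(std::numeric_limits<float>::infinity()));
}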
NGRAPH_RTTI_DEFINITION(op::v4::Range, "Range", 4);
op::v4::Range::Range(const Output<Node>& start,
@ -193,62 +223,89 @@ bool get_casted_value(const HostTensorPtr& tensor, T* val)
return true;
}
template <element::Type_t ET>
bool evaluate_v4_range(const HostTensorPtr& out,
const HostTensorPtr& start,
const HostTensorPtr& stop,
const HostTensorPtr& step)
{
using T = typename element_type_traits<ET>::value_type;
T start_val;
T stop_val;
T step_val;
if (!(get_casted_value<T>(start, &start_val) && get_casted_value<T>(stop, &stop_val) &&
get_casted_value<T>(step, &step_val)))
{
return false;
}
int64_t out_size = 0;
int64_t steps = static_cast<int64_t>(std::ceil(double(stop_val - start_val) / step_val));
if (steps > 0)
{
out_size = steps;
}
Shape out_shape = Shape({static_cast<size_t>(out_size)});
out->set_shape(out_shape);
runtime::reference::range(
&start_val, &step_val, shape_size(out_shape), out->get_data_ptr<ET>());
return true;
}
namespace rangeop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& out,
const HostTensorPtr& start,
const HostTensorPtr& stop,
const HostTensorPtr& step,
int version)
{
using T = typename element_type_traits<ET>::value_type;
T start_val;
T stop_val;
T step_val;
if (version < 4)
{
start_val = *start->get_data_ptr<ET>();
stop_val = *stop->get_data_ptr<ET>();
step_val = *step->get_data_ptr<ET>();
if (!(check_value(start_val) && check_value(stop_val) && check_value(step_val) &&
(step_val != static_cast<T>(0))))
{
return false;
}
}
else
{
if (!(get_casted_value<T>(start, &start_val) && get_casted_value<T>(stop, &stop_val) &&
get_casted_value<T>(step, &step_val)))
{
return false;
}
}
int64_t out_size = 0;
int64_t steps = static_cast<int64_t>(std::ceil(double(stop_val - start_val) / step_val));
if (steps > 0)
{
out_size = steps;
}
Shape out_shape = Shape({static_cast<size_t>(out_size)});
out->set_shape(out_shape);
runtime::reference::range(
&start_val, &step_val, shape_size(out_shape), out->get_data_ptr<ET>());
return true;
}
bool evaluate_range(const HostTensorPtr& out,
const HostTensorPtr& start,
const HostTensorPtr& stop,
const HostTensorPtr& step,
const element::Type& output_type,
int version)
{
bool rc = true;
switch (output_type)
{
NGRAPH_TYPE_CASE(evaluate_range, bf16, out, start, stop, step, version);
NGRAPH_TYPE_CASE(evaluate_range, f16, out, start, stop, step, version);
NGRAPH_TYPE_CASE(evaluate_range, f32, out, start, stop, step, version);
NGRAPH_TYPE_CASE(evaluate_range, f64, out, start, stop, step, version);
NGRAPH_TYPE_CASE(evaluate_range, i8, out, start, stop, step, version);
NGRAPH_TYPE_CASE(evaluate_range, i16, out, start, stop, step, version);
NGRAPH_TYPE_CASE(evaluate_range, i32, out, start, stop, step, version);
NGRAPH_TYPE_CASE(evaluate_range, i64, out, start, stop, step, version);
NGRAPH_TYPE_CASE(evaluate_range, u8, out, start, stop, step, version);
NGRAPH_TYPE_CASE(evaluate_range, u16, out, start, stop, step, version);
NGRAPH_TYPE_CASE(evaluate_range, u32, out, start, stop, step, version);
NGRAPH_TYPE_CASE(evaluate_range, u64, out, start, stop, step, version);
default: rc = false; break;
}
return rc;
}
}
bool op::v4::Range::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
HostTensorPtr out = outputs[0];
HostTensorPtr start = inputs[0];
HostTensorPtr stop = inputs[1];
HostTensorPtr step = inputs[2];
switch (m_output_type)
{
case element::Type_t::bf16:
return evaluate_v4_range<element::Type_t::bf16>(out, start, stop, step);
case element::Type_t::f16:
return evaluate_v4_range<element::Type_t::f16>(out, start, stop, step);
case element::Type_t::f32:
return evaluate_v4_range<element::Type_t::f32>(out, start, stop, step);
case element::Type_t::i8: return evaluate_v4_range<element::Type_t::i8>(out, start, stop, step);
case element::Type_t::i32:
return evaluate_v4_range<element::Type_t::i32>(out, start, stop, step);
case element::Type_t::i64:
return evaluate_v4_range<element::Type_t::i64>(out, start, stop, step);
case element::Type_t::u8: return evaluate_v4_range<element::Type_t::u8>(out, start, stop, step);
case element::Type_t::u32:
return evaluate_v4_range<element::Type_t::u32>(out, start, stop, step);
case element::Type_t::u64:
return evaluate_v4_range<element::Type_t::u64>(out, start, stop, step);
default: return false;
}
NGRAPH_OP_SCOPE(v4_Range_evaluate, HostTensorPtr out = outputs[0];
HostTensorPtr start = inputs[0];
HostTensorPtr stop = inputs[1];
HostTensorPtr step = inputs[2];
return rangeop::evaluate_range(out, start, stop, step, m_output_type, 4));
return false;
}
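Both Range versions size their output as steps = ceil((stop - start) / step), clamped at zero. A standalone numeric check of that rule (illustrative only):

#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
    double start = 1, stop = 10, step = 3;
    // ceil(9 / 3) = 3, so the emitted sequence is {1, 4, 7}.
    int64_t steps = static_cast<int64_t>(std::ceil((stop - start) / step));
    int64_t out_size = steps > 0 ? steps : 0;
    std::printf("%lld\n", static_cast<long long>(out_size)); // prints 3
}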
constexpr NodeTypeInfo op::v0::Range::type_info;
@ -259,36 +316,6 @@ op::v0::Range::Range(const Output<Node>& start, const Output<Node>& stop, const
constructor_validate_and_infer_types();
}
//
// The code in the following three functions is a bit awkward, to work around some compiler
// warnings and the need to support our custom float16/bfloat16 type:
//
// (1) We can't use STL things like isnan, because our custom float16/bfloat16 types don't always
// support them.
// (2) We check whether (x - x) == (x - x) to check for "is_finite".
// (3) We have to break (x - x) out into a temporary because otherwise the compiler throws a
// warning about == on floats.
// (4) We check <0 || >0 to check for != 0, because otherwise the compiler throws a warning about
// == on floats.
//
template <typename T>
static typename std::enable_if<std::is_integral<T>::value, bool>::type check_value(T value)
{
// Nothing to check for integral types.
return true;
}
template <typename T>
static
typename std::enable_if<std::is_floating_point<T>::value || std::is_same<T, float16>::value ||
std::is_same<T, bfloat16>::value,
bool>::type
check_value(T value)
{
T value_minus_value = value - value;
return value == value && value_minus_value == value_minus_value;
}
template <typename T>
static void check_start(const op::v0::Range* node, T start)
{
@ -467,61 +494,12 @@ void positive_range(T start_val, T stop_val, T step_val)
{
}
template <element::Type_t ET>
bool try_evaluate_range(const HostTensorPtr& out,
const HostTensorPtr& start,
const HostTensorPtr& stop,
const HostTensorPtr& step)
{
using T = typename element_type_traits<ET>::value_type;
if (ET == start->get_element_type())
{
T start_val = *start->get_data_ptr<ET>();
T stop_val = *stop->get_data_ptr<ET>();
T step_val = *step->get_data_ptr<ET>();
if (!(check_value(start_val) && check_value(stop_val) && check_value(step_val) &&
(step_val != static_cast<T>(0))))
{
return false;
}
int64_t out_size = 0;
int64_t steps = static_cast<int64_t>(std::ceil(double(stop_val - start_val) / step_val));
if (steps > 0)
{
out_size = steps;
}
Shape out_shape = Shape({static_cast<size_t>(out_size)});
out->set_shape(out_shape);
runtime::reference::range(
&start_val, &step_val, shape_size(out_shape), out->get_data_ptr<ET>());
return true;
}
else
{
return false;
}
}
bool op::v0::Range::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Range::evaluate");
HostTensorPtr out = outputs[0];
HostTensorPtr start = inputs[0];
HostTensorPtr stop = inputs[1];
HostTensorPtr step = inputs[2];
return try_evaluate_range<element::Type_t::i8>(out, start, stop, step) ||
try_evaluate_range<element::Type_t::i16>(out, start, stop, step) ||
try_evaluate_range<element::Type_t::i32>(out, start, stop, step) ||
try_evaluate_range<element::Type_t::i64>(out, start, stop, step) ||
try_evaluate_range<element::Type_t::u8>(out, start, stop, step) ||
try_evaluate_range<element::Type_t::u16>(out, start, stop, step) ||
try_evaluate_range<element::Type_t::u32>(out, start, stop, step) ||
try_evaluate_range<element::Type_t::u64>(out, start, stop, step) ||
try_evaluate_range<element::Type_t::f32>(out, start, stop, step) ||
try_evaluate_range<element::Type_t::f16>(out, start, stop, step) ||
try_evaluate_range<element::Type_t::bf16>(out, start, stop, step) ||
try_evaluate_range<element::Type_t::f64>(out, start, stop, step);
NGRAPH_OP_SCOPE(
v0_Range_evaluate, HostTensorPtr out = outputs[0]; HostTensorPtr start = inputs[0];
HostTensorPtr stop = inputs[1];
HostTensorPtr step = inputs[2];
return rangeop::evaluate_range(out, start, stop, step, start->get_element_type(), 0));
return false;
}


@ -67,16 +67,11 @@ namespace reduce_l1
bool rc = true;
switch (arg->get_element_type())
{
TYPE_CASE(i32)(arg, out, axes, keep_dims);
break;
TYPE_CASE(i64)(arg, out, axes, keep_dims);
break;
TYPE_CASE(bf16)(arg, out, axes, keep_dims);
break;
TYPE_CASE(f16)(arg, out, axes, keep_dims);
break;
TYPE_CASE(f32)(arg, out, axes, keep_dims);
break;
NGRAPH_TYPE_CASE(evaluate_reducel1_sum, i32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reducel1_sum, i64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reducel1_sum, bf16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reducel1_sum, f16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reducel1_sum, f32, arg, out, axes, keep_dims);
default: rc = false; break;
}
return rc;
@ -86,6 +81,8 @@ namespace reduce_l1
bool op::v4::ReduceL1::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v4::ReduceL1::evaluate");
return reduce_l1::evaluate_sum(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
NGRAPH_OP_SCOPE(v4_ReduceL1_evaluate,
return reduce_l1::evaluate_sum(
inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
return false;
}


@ -67,12 +67,9 @@ namespace reduce_l2
bool rc = true;
switch (arg->get_element_type())
{
TYPE_CASE(bf16)(arg, out, axes, keep_dims);
break;
TYPE_CASE(f16)(arg, out, axes, keep_dims);
break;
TYPE_CASE(f32)(arg, out, axes, keep_dims);
break;
NGRAPH_TYPE_CASE(evaluate_reduce_l2, bf16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reduce_l2, f16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reduce_l2, f32, arg, out, axes, keep_dims);
default: rc = false; break;
}
return rc;
@ -82,7 +79,8 @@ namespace reduce_l2
bool op::v4::ReduceL2::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v4::ReduceL2::evaluate");
return reduce_l2::evaluate_reduce_l2(
inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
NGRAPH_OP_SCOPE(v4_ReduceL2_evaluate,
return reduce_l2::evaluate_reduce_l2(
inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
return false;
}


@ -47,6 +47,11 @@ namespace
const HostTensorPtr& out,
bool keep_dims)
{
if (data->get_element_type() != element::boolean ||
!axes->get_element_type().is_integral_number())
{
return false;
}
try
{
const AxisSet reduction_axes = eval::extract_reduction_axes(axes, "ReduceLogicalAnd");
@ -70,19 +75,9 @@ namespace
bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::ReduceLogicalAnd::evaluate");
const auto& data = inputs[0];
const auto& axes = inputs[1];
const auto& out = outputs[0];
if (data->get_element_type() != element::boolean ||
!axes->get_element_type().is_integral_number())
{
return false;
}
else
{
return evaluate_reduce_logical_and(data, axes, out, get_keep_dims());
}
NGRAPH_OP_SCOPE(v1_ReduceLogicalAnd_evaluate, const auto& data = inputs[0];
const auto& axes = inputs[1];
const auto& out = outputs[0];
return evaluate_reduce_logical_and(data, axes, out, get_keep_dims()));
return false;
}


@ -47,6 +47,11 @@ namespace
const HostTensorPtr& out,
bool keep_dims)
{
if (data->get_element_type() != element::boolean ||
!axes->get_element_type().is_integral_number())
{
return false;
}
try
{
const AxisSet reduction_axes = eval::extract_reduction_axes(axes, "ReduceLogicalOr");
@ -70,19 +75,9 @@ namespace
bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::ReduceLogicalOr::evaluate");
const auto& data = inputs[0];
const auto& axes = inputs[1];
const auto& out = outputs[0];
if (data->get_element_type() != element::boolean ||
!axes->get_element_type().is_integral_number())
{
return false;
}
else
{
return evaluate_reduce_logical_or(data, axes, out, get_keep_dims());
}
NGRAPH_OP_SCOPE(v1_ReduceLogicalOr_evaluate, const auto& data = inputs[0];
const auto& axes = inputs[1];
const auto& out = outputs[0];
return evaluate_reduce_logical_or(data, axes, out, get_keep_dims()));
return false;
}


@ -63,18 +63,12 @@ namespace mean
bool rc = true;
switch (arg->get_element_type())
{
TYPE_CASE(i32)(arg, out, axes, keep_dims);
break;
TYPE_CASE(i64)(arg, out, axes, keep_dims);
break;
TYPE_CASE(u32)(arg, out, axes, keep_dims);
break;
TYPE_CASE(u64)(arg, out, axes, keep_dims);
break;
TYPE_CASE(f16)(arg, out, axes, keep_dims);
break;
TYPE_CASE(f32)(arg, out, axes, keep_dims);
break;
NGRAPH_TYPE_CASE(evaluate_mean, i32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_mean, i64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_mean, u32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_mean, u64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_mean, f16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_mean, f32, arg, out, axes, keep_dims);
default: rc = false; break;
}
return rc;
@ -84,6 +78,8 @@ namespace mean
bool op::v1::ReduceMean::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::ReduceMean::evaluate");
return mean::evaluate_mean(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
NGRAPH_OP_SCOPE(
v1_ReduceMean_evaluate,
return mean::evaluate_mean(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
return false;
}


@ -67,18 +67,12 @@ namespace reduce_prod
bool rc = true;
switch (arg->get_element_type())
{
TYPE_CASE(i32)(arg, out, axes, keep_dims);
break;
TYPE_CASE(i64)(arg, out, axes, keep_dims);
break;
TYPE_CASE(u32)(arg, out, axes, keep_dims);
break;
TYPE_CASE(u64)(arg, out, axes, keep_dims);
break;
TYPE_CASE(f16)(arg, out, axes, keep_dims);
break;
TYPE_CASE(f32)(arg, out, axes, keep_dims);
break;
NGRAPH_TYPE_CASE(evaluate_product, i32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_product, i64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_product, u32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_product, u64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_product, f16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_product, f32, arg, out, axes, keep_dims);
default: rc = false; break;
}
return rc;
@ -88,7 +82,8 @@ namespace reduce_prod
bool op::v1::ReduceProd::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::ReduceProd::evaluate");
return reduce_prod::evaluate_product(
inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
NGRAPH_OP_SCOPE(v1_ReduceProd_evaluate,
return reduce_prod::evaluate_product(
inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
return false;
}


@ -68,18 +68,12 @@ namespace reduce_sum
bool rc = true;
switch (arg->get_element_type())
{
TYPE_CASE(i32)(arg, out, axes, keep_dims);
break;
TYPE_CASE(i64)(arg, out, axes, keep_dims);
break;
TYPE_CASE(u32)(arg, out, axes, keep_dims);
break;
TYPE_CASE(u64)(arg, out, axes, keep_dims);
break;
TYPE_CASE(f16)(arg, out, axes, keep_dims);
break;
TYPE_CASE(f32)(arg, out, axes, keep_dims);
break;
NGRAPH_TYPE_CASE(evaluate_reduce_sum, i32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reduce_sum, i64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reduce_sum, u32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reduce_sum, u64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reduce_sum, f16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reduce_sum, f32, arg, out, axes, keep_dims);
default: rc = false; break;
}
return rc;
@ -89,6 +83,8 @@ namespace reduce_sum
bool op::v1::ReduceSum::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::ReduceSum::evaluate");
return reduce_sum::evaluate_sum(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
NGRAPH_OP_SCOPE(v1_ReduceSum_evaluate,
return reduce_sum::evaluate_sum(
inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
return false;
}


@ -56,20 +56,13 @@ namespace relu
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_relu, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_relu, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_relu, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_relu, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_relu, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_relu, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_relu, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -78,8 +71,10 @@ namespace relu
bool op::Relu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Relu::evaluate");
return relu::evaluate_relu(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v0_Relu_evaluate,
return relu::evaluate_relu(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}
bool op::Relu::visit_attributes(AttributeVisitor& visitor)


@ -27,7 +27,7 @@
using namespace std;
using namespace ngraph;
namespace
namespace reshapeop
{
bool evaluate_reshape(const HostTensorPtr& arg0,
const HostTensorPtr& out,
@ -227,11 +227,17 @@ shared_ptr<Node> op::v1::Reshape::clone_with_new_inputs(const OutputVector& new_
return make_shared<v1::Reshape>(new_args.at(0), new_args.at(1), m_special_zero);
}
bool op::v1::Reshape::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Reshape::evaluate");
#define COMPUTE_OUT_SHAPE_CASE(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(compute_reshape_out_shape, _, a), \
reshapeop::compute_output_shape<element::Type_t::a>(__VA_ARGS__)); \
} \
break;
bool op::v1::Reshape::evaluate_reshape(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
// infer and set the output shape if the shape pattern contains -1
// or zero-value dimensions
size_t output_rank = inputs[1]->get_shape()[0];
@ -239,30 +245,14 @@ bool op::v1::Reshape::evaluate(const HostTensorVector& outputs,
switch (inputs[1]->get_element_type())
{
case element::Type_t::i8:
compute_output_shape<element::Type_t::i8>(inputs[1], out_shape_val);
break;
case element::Type_t::i16:
compute_output_shape<element::Type_t::i16>(inputs[1], out_shape_val);
break;
case element::Type_t::i32:
compute_output_shape<element::Type_t::i32>(inputs[1], out_shape_val);
break;
case element::Type_t::i64:
compute_output_shape<element::Type_t::i64>(inputs[1], out_shape_val);
break;
case element::Type_t::u8:
compute_output_shape<element::Type_t::u8>(inputs[1], out_shape_val);
break;
case element::Type_t::u16:
compute_output_shape<element::Type_t::u16>(inputs[1], out_shape_val);
break;
case element::Type_t::u32:
compute_output_shape<element::Type_t::u32>(inputs[1], out_shape_val);
break;
case element::Type_t::u64:
compute_output_shape<element::Type_t::u64>(inputs[1], out_shape_val);
break;
COMPUTE_OUT_SHAPE_CASE(i8, inputs[1], out_shape_val);
COMPUTE_OUT_SHAPE_CASE(i16, inputs[1], out_shape_val);
COMPUTE_OUT_SHAPE_CASE(i32, inputs[1], out_shape_val);
COMPUTE_OUT_SHAPE_CASE(i64, inputs[1], out_shape_val);
COMPUTE_OUT_SHAPE_CASE(u8, inputs[1], out_shape_val);
COMPUTE_OUT_SHAPE_CASE(u16, inputs[1], out_shape_val);
COMPUTE_OUT_SHAPE_CASE(u32, inputs[1], out_shape_val);
COMPUTE_OUT_SHAPE_CASE(u64, inputs[1], out_shape_val);
default: throw ngraph_error("shape_pattern element type is not integral data type");
}
@ -347,7 +337,14 @@ bool op::v1::Reshape::evaluate(const HostTensorVector& outputs,
outputs[0]->set_shape(output_shape);
}
const AxisVector order = get_default_order(inputs[0]->get_shape());
return evaluate_reshape(inputs[0], outputs[0], order);
return reshapeop::evaluate_reshape(inputs[0], outputs[0], order);
}
bool op::v1::Reshape::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Reshape_evaluate, return evaluate_reshape(outputs, inputs));
return false;
}
bool op::v1::Reshape::constant_fold(OutputVector& output_values, const OutputVector& inputs_values)


@ -58,12 +58,12 @@ shared_ptr<Node> op::Result::clone_with_new_inputs(const OutputVector& new_args)
bool op::Result::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Result::evaluate");
outputs[0]->set_unary(inputs[0]);
void* output = outputs[0]->get_data_ptr();
void* input = inputs[0]->get_data_ptr();
memcpy(output, input, outputs[0]->get_size_in_bytes());
return true;
NGRAPH_OP_SCOPE(Result_evaluate, outputs[0]->set_unary(inputs[0]);
void* output = outputs[0]->get_data_ptr();
void* input = inputs[0]->get_data_ptr();
memcpy(output, input, outputs[0]->get_size_in_bytes());
return true);
return false;
}
bool op::Result::constant_fold(OutputVector& output_values, const OutputVector& inputs_values)


@ -17,6 +17,7 @@
#include <algorithm>
#include <iterator>
#include <sstream>
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/function.hpp"
@ -148,8 +149,27 @@ op::v1::Reverse::Mode op::v1::Reverse::mode_from_string(const std::string& mode)
return allowed_values.at(mode);
}
bool op::v1::Reverse::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
namespace reverseop
{
template <element::Type_t ET>
void get_axes(AxisSet& axes, const HostTensorPtr& in)
{
auto axes_indices = in->get_data_ptr<ET>();
size_t axes_rank = in->get_element_count();
std::copy(axes_indices, axes_indices + axes_rank, std::inserter(axes, axes.end()));
}
}
#define GET_AXES(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(get_reverse_axes, _, a), \
reverseop::get_axes<element::Type_t::a>(__VA_ARGS__)); \
} \
break;
bool op::v1::Reverse::evaluate_reverse(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
AxisSet axes{};
size_t axes_rank = inputs[1]->get_element_count();
@ -157,65 +177,15 @@ bool op::v1::Reverse::evaluate(const HostTensorVector& outputs,
{
switch (inputs[1]->get_element_type())
{
case element::Type_t::i8:
{
auto axes_indices = inputs[1]->get_data_ptr<int8_t>();
std::copy(axes_indices, axes_indices + axes_rank, std::inserter(axes, axes.end()));
break;
}
case element::Type_t::u8:
{
auto axes_indices = inputs[1]->get_data_ptr<uint8_t>();
std::copy(axes_indices, axes_indices + axes_rank, std::inserter(axes, axes.end()));
break;
}
case element::Type_t::i16:
{
auto axes_indices = inputs[1]->get_data_ptr<int16_t>();
std::copy(axes_indices, axes_indices + axes_rank, std::inserter(axes, axes.end()));
break;
}
case element::Type_t::u16:
{
auto axes_indices = inputs[1]->get_data_ptr<uint16_t>();
std::copy(axes_indices, axes_indices + axes_rank, std::inserter(axes, axes.end()));
break;
}
case element::Type_t::i32:
{
auto axes_indices = inputs[1]->get_data_ptr<int32_t>();
std::copy(axes_indices, axes_indices + axes_rank, std::inserter(axes, axes.end()));
break;
}
case element::Type_t::u32:
{
auto axes_indices = inputs[1]->get_data_ptr<uint32_t>();
std::copy(axes_indices, axes_indices + axes_rank, std::inserter(axes, axes.end()));
break;
}
case element::Type_t::i64:
{
auto axes_indices = inputs[1]->get_data_ptr<int64_t>();
std::copy(axes_indices, axes_indices + axes_rank, std::inserter(axes, axes.end()));
break;
}
case element::Type_t::u64:
{
auto axes_indices = inputs[1]->get_data_ptr<uint64_t>();
std::copy(axes_indices, axes_indices + axes_rank, std::inserter(axes, axes.end()));
break;
}
case element::Type_t::undefined:
case element::Type_t::dynamic:
case element::Type_t::boolean:
case element::Type_t::bf16:
case element::Type_t::f16:
case element::Type_t::f32:
case element::Type_t::f64:
case element::Type_t::u1:
default:
NGRAPH_CHECK(false, "Not supported axes type", inputs[1]->get_element_type());
break;
GET_AXES(i8, axes, inputs[1]);
GET_AXES(i16, axes, inputs[1]);
GET_AXES(i32, axes, inputs[1]);
GET_AXES(i64, axes, inputs[1]);
GET_AXES(u8, axes, inputs[1]);
GET_AXES(u16, axes, inputs[1]);
GET_AXES(u32, axes, inputs[1]);
GET_AXES(u64, axes, inputs[1]);
default: NGRAPH_CHECK(false, "Not supported axes type", inputs[1]->get_element_type());
}
}
else // Mode::MASK
@ -238,6 +208,13 @@ bool op::v1::Reverse::evaluate(const HostTensorVector& outputs,
return true;
}
bool op::v1::Reverse::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Reverse_evaluate, return evaluate_reverse(outputs, inputs));
return false;
}
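The get_axes helper and GET_AXES macro above are the template for the NGRAPH_TYPE_CASE and NGRAPH_COPY_TENSOR macros that replace the older TYPE_CASE/COPY_TENSOR pairs throughout the rest of this diff. Their definitions are not part of the hunks shown; by analogy with GET_AXES they presumably look like the following sketch, not a verbatim copy of the header:
// Presumed shape of NGRAPH_TYPE_CASE, inferred from GET_AXES above: fold
// the case/break boilerplate into the macro and give every element-type
// branch its own conditional-compilation region named via OV_CC_CAT3.
#define NGRAPH_TYPE_CASE(region, a, ...)                                 \
    case element::Type_t::a:                                             \
    {                                                                    \
        NGRAPH_OP_SCOPE(OV_CC_CAT3(region, _, a),                        \
                        rc = evaluate<element::Type_t::a>(__VA_ARGS__)); \
    }                                                                    \
    break;
Folding break; into the macro is what lets the long switch statements below shrink to one line per type while still letting a selective build drop the kernels for disabled types.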
namespace ngraph
{
template <>

View File

@ -15,6 +15,7 @@
//*****************************************************************************
#include "roi_align.hpp"
#include "itt.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/roi_align.hpp"
@ -203,8 +204,38 @@ namespace ngraph
return s << as_string(type);
}
} // namespace ngraph
namespace
namespace roi_alignop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& feature_maps,
const HostTensorPtr& rois,
const std::vector<int64_t>& batch_indices_vec_scaled_up,
const HostTensorPtr& out,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const float spatial_scale,
const op::v3::ROIAlign::PoolingMode& pooling_mode,
const Shape& batch_indices_shape)
{
using T = typename element_type_traits<ET>::value_type;
runtime::reference::roi_align<T>(feature_maps->get_data_ptr<ET>(),
rois->get_data_ptr<ET>(),
batch_indices_vec_scaled_up.data(),
out->get_data_ptr<ET>(),
feature_maps->get_shape(),
rois->get_shape(),
batch_indices_shape,
out->get_shape(),
pooled_height,
pooled_width,
sampling_ratio,
spatial_scale,
pooling_mode);
return true;
}
bool evaluate_roi_align(const HostTensorVector& args,
const HostTensorPtr& out,
const int pooled_height,
@ -216,73 +247,61 @@ namespace
auto feature_maps = args[0];
auto rois = args[1];
auto batch_indices = args[2];
std::vector<int64_t> batch_indices_vec_scaled_up =
host_tensor_2_vector<int64_t>(batch_indices);
bool rc = true;
switch (feature_maps->get_element_type())
{
case element::Type_t::bf16:
{
runtime::reference::roi_align<bfloat16>(feature_maps->get_data_ptr<bfloat16>(),
rois->get_data_ptr<bfloat16>(),
batch_indices_vec_scaled_up.data(),
out->get_data_ptr<bfloat16>(),
feature_maps->get_shape(),
rois->get_shape(),
batch_indices->get_shape(),
out->get_shape(),
pooled_height,
pooled_width,
sampling_ratio,
spatial_scale,
pooling_mode);
break;
}
case element::Type_t::f16:
{
runtime::reference::roi_align<float16>(feature_maps->get_data_ptr<float16>(),
rois->get_data_ptr<float16>(),
batch_indices_vec_scaled_up.data(),
out->get_data_ptr<float16>(),
feature_maps->get_shape(),
rois->get_shape(),
batch_indices->get_shape(),
out->get_shape(),
pooled_height,
pooled_width,
sampling_ratio,
spatial_scale,
pooling_mode);
break;
}
case element::Type_t::f32:
{
runtime::reference::roi_align<float>(feature_maps->get_data_ptr<float>(),
rois->get_data_ptr<float>(),
batch_indices_vec_scaled_up.data(),
out->get_data_ptr<float>(),
feature_maps->get_shape(),
rois->get_shape(),
batch_indices->get_shape(),
out->get_shape(),
pooled_height,
pooled_width,
sampling_ratio,
spatial_scale,
pooling_mode);
break;
}
default: NGRAPH_UNREACHABLE("unsupported input type for roi_align");
NGRAPH_TYPE_CASE(evaluate_roi_align,
bf16,
feature_maps,
rois,
batch_indices_vec_scaled_up,
out,
pooled_height,
pooled_width,
sampling_ratio,
spatial_scale,
pooling_mode,
batch_indices->get_shape());
NGRAPH_TYPE_CASE(evaluate_roi_align,
f16,
feature_maps,
rois,
batch_indices_vec_scaled_up,
out,
pooled_height,
pooled_width,
sampling_ratio,
spatial_scale,
pooling_mode,
batch_indices->get_shape());
NGRAPH_TYPE_CASE(evaluate_roi_align,
f32,
feature_maps,
rois,
batch_indices_vec_scaled_up,
out,
pooled_height,
pooled_width,
sampling_ratio,
spatial_scale,
pooling_mode,
batch_indices->get_shape());
default: rc = false; break;
}
return true;
return rc;
}
} // namespace
bool op::v3::ROIAlign::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
return evaluate_roi_align(
inputs, outputs[0], m_pooled_h, m_pooled_w, m_sampling_ratio, m_spatial_scale, m_mode);
NGRAPH_OP_SCOPE(
v3_ROIAlign_evaluate,
return roi_alignop::evaluate_roi_align(
inputs, outputs[0], m_pooled_h, m_pooled_w, m_sampling_ratio, m_spatial_scale, m_mode));
return false;
}

View File

@ -58,30 +58,18 @@ namespace roundop
switch (arg0->get_element_type())
{
COPY_TENSOR(boolean)(arg0, out, count);
break;
COPY_TENSOR(i8)(arg0, out, count);
break;
COPY_TENSOR(i16)(arg0, out, count);
break;
COPY_TENSOR(i32)(arg0, out, count);
break;
COPY_TENSOR(i64)(arg0, out, count);
break;
COPY_TENSOR(u8)(arg0, out, count);
break;
COPY_TENSOR(u16)(arg0, out, count);
break;
COPY_TENSOR(u32)(arg0, out, count);
break;
COPY_TENSOR(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count, mode);
break;
TYPE_CASE(f32)(arg0, out, count, mode);
break;
TYPE_CASE(bf16)(arg0, out, count, mode);
break;
NGRAPH_COPY_TENSOR(evaluate_round, boolean, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_round, i8, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_round, i16, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_round, i32, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_round, i64, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_round, u8, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_round, u16, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_round, u32, arg0, out, count);
NGRAPH_COPY_TENSOR(evaluate_round, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_round, f16, arg0, out, count, mode);
NGRAPH_TYPE_CASE(evaluate_round, f32, arg0, out, count, mode);
NGRAPH_TYPE_CASE(evaluate_round, bf16, arg0, out, count, mode);
default: rc = false; break;
}
return rc;
@ -117,9 +105,10 @@ shared_ptr<Node> op::v5::Round::clone_with_new_inputs(const OutputVector& new_ar
bool op::v5::Round::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v5::Round::evaluate");
return roundop::evaluate_round(
inputs[0], outputs[0], shape_size(get_output_shape(0)), get_mode());
NGRAPH_OP_SCOPE(v5_Round_evaluate,
return roundop::evaluate_round(
inputs[0], outputs[0], shape_size(get_output_shape(0)), get_mode()));
return false;
}
namespace ngraph

View File

@ -162,8 +162,13 @@ namespace scatter_element_update
return true;
}
#define TYPE_AXS_CASE(a) \
case element::Type_t::a: rc = evaluate<DT, IT, element::Type_t::a>
#define TYPE_AXS_CASE(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(scatter_element_update_axs, _, a), \
rc = evaluate<DT, IT, element::Type_t::a>(__VA_ARGS__)); \
} \
break;
template <element::Type_t DT, element::Type_t IT>
bool evaluate(const HostTensorPtr& arg0,
@ -180,29 +185,26 @@ namespace scatter_element_update
switch (axis_type)
{
TYPE_AXS_CASE(i8)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_AXS_CASE(i16)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_AXS_CASE(i32)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_AXS_CASE(i64)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_AXS_CASE(u8)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_AXS_CASE(u16)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_AXS_CASE(u32)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_AXS_CASE(u64)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_AXS_CASE(i8, arg0, arg1, arg2, arg3, out, normalized_axis);
TYPE_AXS_CASE(i16, arg0, arg1, arg2, arg3, out, normalized_axis);
TYPE_AXS_CASE(i32, arg0, arg1, arg2, arg3, out, normalized_axis);
TYPE_AXS_CASE(i64, arg0, arg1, arg2, arg3, out, normalized_axis);
TYPE_AXS_CASE(u8, arg0, arg1, arg2, arg3, out, normalized_axis);
TYPE_AXS_CASE(u16, arg0, arg1, arg2, arg3, out, normalized_axis);
TYPE_AXS_CASE(u32, arg0, arg1, arg2, arg3, out, normalized_axis);
TYPE_AXS_CASE(u64, arg0, arg1, arg2, arg3, out, normalized_axis);
default: rc = false; break;
}
return rc;
}
#define TYPE_IND_CASE(a) \
case element::Type_t::a: rc = evaluate<DT, element::Type_t::a>
#define TYPE_IND_CASE(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(scatter_element_update_ind, _, a), \
rc = evaluate<DT, element::Type_t::a>(__VA_ARGS__)); \
} \
break;
template <element::Type_t DT>
bool evaluate(const HostTensorPtr& arg0,
@ -219,22 +221,14 @@ namespace scatter_element_update
switch (indices_type)
{
TYPE_IND_CASE(i8)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_IND_CASE(i16)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_IND_CASE(i32)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_IND_CASE(i64)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_IND_CASE(u8)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_IND_CASE(u16)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_IND_CASE(u32)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_IND_CASE(u64)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_IND_CASE(i8, arg0, arg1, arg2, arg3, out, normalized_axis);
TYPE_IND_CASE(i16, arg0, arg1, arg2, arg3, out, normalized_axis);
TYPE_IND_CASE(i32, arg0, arg1, arg2, arg3, out, normalized_axis);
TYPE_IND_CASE(i64, arg0, arg1, arg2, arg3, out, normalized_axis);
TYPE_IND_CASE(u8, arg0, arg1, arg2, arg3, out, normalized_axis);
TYPE_IND_CASE(u16, arg0, arg1, arg2, arg3, out, normalized_axis);
TYPE_IND_CASE(u32, arg0, arg1, arg2, arg3, out, normalized_axis);
TYPE_IND_CASE(u64, arg0, arg1, arg2, arg3, out, normalized_axis);
default: rc = false; break;
}
return rc;
@ -251,31 +245,29 @@ namespace scatter_element_update
switch (out->get_element_type())
{
TYPE_CASE(i16)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_CASE(i32)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_CASE(i64)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_CASE(u32)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_CASE(u64)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_CASE(f16)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
TYPE_CASE(f32)(arg0, arg1, arg2, arg3, out, normalized_axis);
break;
NGRAPH_TYPE_CASE(
evaluate_scatter_element_update, i16, arg0, arg1, arg2, arg3, out, normalized_axis);
NGRAPH_TYPE_CASE(
evaluate_scatter_element_update, i32, arg0, arg1, arg2, arg3, out, normalized_axis);
NGRAPH_TYPE_CASE(
evaluate_scatter_element_update, i64, arg0, arg1, arg2, arg3, out, normalized_axis);
NGRAPH_TYPE_CASE(
evaluate_scatter_element_update, u32, arg0, arg1, arg2, arg3, out, normalized_axis);
NGRAPH_TYPE_CASE(
evaluate_scatter_element_update, u64, arg0, arg1, arg2, arg3, out, normalized_axis);
NGRAPH_TYPE_CASE(
evaluate_scatter_element_update, f16, arg0, arg1, arg2, arg3, out, normalized_axis);
NGRAPH_TYPE_CASE(
evaluate_scatter_element_update, f32, arg0, arg1, arg2, arg3, out, normalized_axis);
default: rc = false; break;
}
return rc;
}
}
bool op::v3::ScatterElementsUpdate::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
bool op::v3::ScatterElementsUpdate::evaluate_scatter_element_update(
const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v3::ScatterElementsUpdate::evaluate");
NGRAPH_CHECK(inputs[3]->get_element_type().is_integral_number(),
"axis element type is not integral data type");
@ -299,3 +291,11 @@ bool op::v3::ScatterElementsUpdate::evaluate(const HostTensorVector& outputs,
return scatter_element_update::evaluate_scatter_element_update(
inputs[0], inputs[1], inputs[2], inputs[3], outputs[0], normalized_axis);
}
bool op::v3::ScatterElementsUpdate::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v3_ScatterElementsUpdate_evaluate,
return evaluate_scatter_element_update(outputs, inputs));
return false;
}
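ScatterElementsUpdate carries the deepest dispatch in this diff: the outer switch on the output element type (NGRAPH_TYPE_CASE) selects an evaluate<DT> that switches on the indices type (TYPE_IND_CASE), which in turn selects an evaluate<DT, IT> that switches on the axis type (TYPE_AXS_CASE). A compilable toy of the same nested shape, with all names hypothetical:
#include <cstdint>
#include <cstdio>
enum class Tag { i32, i64 };
// Innermost worker: both types are fixed template parameters.
template <typename DT, typename IT>
bool run() { std::printf("%zu/%zu\n", sizeof(DT), sizeof(IT)); return true; }
// Middle level: data type fixed, switch on the index type.
template <typename DT>
bool dispatch_index(Tag it)
{
    switch (it)
    {
    case Tag::i32: return run<DT, std::int32_t>();
    case Tag::i64: return run<DT, std::int64_t>();
    default: return false;
    }
}
// Outer level: switch on the data type.
bool dispatch(Tag dt, Tag it)
{
    switch (dt)
    {
    case Tag::i32: return dispatch_index<std::int32_t>(it);
    case Tag::i64: return dispatch_index<std::int64_t>(it);
    default: return false;
    }
}
int main() { return dispatch(Tag::i32, Tag::i64) ? 0 : 1; }
In the real code each case body is additionally wrapped in NGRAPH_OP_SCOPE, so every (data, indices, axis) type combination becomes an independently prunable region.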

View File

@ -15,6 +15,7 @@
//*****************************************************************************
#include "ngraph/op/scatter_update.hpp"
#include "itt.hpp"
#include "ngraph/runtime/reference/scatter_update.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"
@ -41,8 +42,27 @@ shared_ptr<Node> op::v3::ScatterUpdate::clone_with_new_inputs(const OutputVector
new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3));
}
bool op::v3::ScatterUpdate::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
namespace scatter_update
{
template <element::Type_t ET>
std::vector<int64_t> get_indices(const HostTensorPtr& in)
{
auto data_ptr = in->get_data_ptr<ET>();
return std::vector<int64_t>(data_ptr, data_ptr + in->get_element_count());
}
}
#define GET_INDICES(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(get_scatter_update_indices, _, a), \
indices_casted_vector = \
scatter_update::get_indices<element::Type_t::a>(__VA_ARGS__)); \
} \
break;
bool op::v3::ScatterUpdate::evaluate_scatter_update(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
const auto& data = inputs[0];
const auto& indices = inputs[1];
@ -66,63 +86,15 @@ bool op::v3::ScatterUpdate::evaluate(const HostTensorVector& outputs,
std::vector<int64_t> indices_casted_vector;
switch (indices->get_element_type())
{
case element::Type_t::i8:
{
auto indices_ptr = indices->get_data_ptr<element::Type_t::i8>();
indices_casted_vector =
std::vector<int64_t>(indices_ptr, indices_ptr + indices->get_element_count());
break;
}
case element::Type_t::i16:
{
auto indices_ptr = indices->get_data_ptr<element::Type_t::i16>();
indices_casted_vector =
std::vector<int64_t>(indices_ptr, indices_ptr + indices->get_element_count());
break;
}
case element::Type_t::i32:
{
auto indices_ptr = indices->get_data_ptr<element::Type_t::i32>();
indices_casted_vector =
std::vector<int64_t>(indices_ptr, indices_ptr + indices->get_element_count());
break;
}
case element::Type_t::i64:
{
auto indices_ptr = indices->get_data_ptr<element::Type_t::i64>();
indices_casted_vector =
std::vector<int64_t>(indices_ptr, indices_ptr + indices->get_element_count());
break;
}
case element::Type_t::u8:
{
auto indices_ptr = indices->get_data_ptr<element::Type_t::u8>();
indices_casted_vector =
std::vector<int64_t>(indices_ptr, indices_ptr + indices->get_element_count());
break;
}
case element::Type_t::u16:
{
auto indices_ptr = indices->get_data_ptr<element::Type_t::u16>();
indices_casted_vector =
std::vector<int64_t>(indices_ptr, indices_ptr + indices->get_element_count());
break;
}
case element::Type_t::u32:
{
auto indices_ptr = indices->get_data_ptr<element::Type_t::u32>();
indices_casted_vector =
std::vector<int64_t>(indices_ptr, indices_ptr + indices->get_element_count());
break;
}
case element::Type_t::u64:
{
auto indices_ptr = indices->get_data_ptr<element::Type_t::u64>();
indices_casted_vector =
std::vector<int64_t>(indices_ptr, indices_ptr + indices->get_element_count());
break;
}
default: throw ngraph_error("indices element type is not integral data type");
GET_INDICES(i8, indices);
GET_INDICES(i16, indices);
GET_INDICES(i32, indices);
GET_INDICES(i64, indices);
GET_INDICES(u8, indices);
GET_INDICES(u16, indices);
GET_INDICES(u32, indices);
GET_INDICES(u64, indices);
default: return false;
}
runtime::reference::scatter_update(data->get_data_ptr<char>(),
@ -137,3 +109,10 @@ bool op::v3::ScatterUpdate::evaluate(const HostTensorVector& outputs,
return true;
}
bool op::v3::ScatterUpdate::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v3_ScatterUpdate_evaluate, return evaluate_scatter_update(outputs, inputs));
return false;
}
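One subtlety in the ScatterUpdate hunk: GET_INDICES expands an assignment to indices_casted_vector, a local that evaluate_scatter_update declares just before the switch, so the macro only makes sense at that expansion site. A self-contained toy of the same assign-to-caller-local pattern, with all names hypothetical:
#include <cstddef>
#include <cstdint>
#include <vector>
enum class Kind { i32, i64 };
// The macro body assigns to widened, which the caller must declare.
#define WIDEN_CASE(T, TYPE, ptr, n)                                            \
    case Kind::T:                                                              \
        widened = std::vector<std::int64_t>(static_cast<const TYPE*>(ptr),     \
                                            static_cast<const TYPE*>(ptr) + (n)); \
        break;
std::vector<std::int64_t> widen_to_i64(Kind k, const void* data, std::size_t n)
{
    std::vector<std::int64_t> widened; // expected in scope by WIDEN_CASE
    switch (k)
    {
        WIDEN_CASE(i32, std::int32_t, data, n)
        WIDEN_CASE(i64, std::int64_t, data, n)
    }
    return widened;
}
Like the original, this widens any supported integral index type to int64_t once, so the downstream kernel only has to handle a single index representation.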

View File

@ -16,6 +16,7 @@
#include <memory>
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/log.hpp"
#include "ngraph/op/convert.hpp"
@ -133,30 +134,18 @@ namespace detail
switch (et)
{
TYPE_CASE(i8)(output_values, input_values, autob);
break;
TYPE_CASE(i16)(output_values, input_values, autob);
break;
TYPE_CASE(i32)(output_values, input_values, autob);
break;
TYPE_CASE(i64)(output_values, input_values, autob);
break;
TYPE_CASE(u8)(output_values, input_values, autob);
break;
TYPE_CASE(u16)(output_values, input_values, autob);
break;
TYPE_CASE(u32)(output_values, input_values, autob);
break;
TYPE_CASE(u64)(output_values, input_values, autob);
break;
TYPE_CASE(bf16)(output_values, input_values, autob);
break;
TYPE_CASE(f32)(output_values, input_values, autob);
break;
TYPE_CASE(f64)(output_values, input_values, autob);
break;
TYPE_CASE(boolean)(output_values, input_values, autob);
break;
NGRAPH_TYPE_CASE(evaluate_select, i8, output_values, input_values, autob);
NGRAPH_TYPE_CASE(evaluate_select, i16, output_values, input_values, autob);
NGRAPH_TYPE_CASE(evaluate_select, i32, output_values, input_values, autob);
NGRAPH_TYPE_CASE(evaluate_select, i64, output_values, input_values, autob);
NGRAPH_TYPE_CASE(evaluate_select, u8, output_values, input_values, autob);
NGRAPH_TYPE_CASE(evaluate_select, u16, output_values, input_values, autob);
NGRAPH_TYPE_CASE(evaluate_select, u32, output_values, input_values, autob);
NGRAPH_TYPE_CASE(evaluate_select, u64, output_values, input_values, autob);
NGRAPH_TYPE_CASE(evaluate_select, bf16, output_values, input_values, autob);
NGRAPH_TYPE_CASE(evaluate_select, f32, output_values, input_values, autob);
NGRAPH_TYPE_CASE(evaluate_select, f64, output_values, input_values, autob);
NGRAPH_TYPE_CASE(evaluate_select, boolean, output_values, input_values, autob);
default: rc = false; break;
}
@ -167,7 +156,9 @@ namespace detail
bool op::v1::Select::evaluate(const HostTensorVector& output_values,
const HostTensorVector& input_values) const
{
const auto autob = get_auto_broadcast();
NGRAPH_OP_SCOPE(v1_Select_evaluate, const auto autob = get_auto_broadcast();
return detail::evaluate_select(output_values, input_values, autob, get_output_element_type(0));
return detail::evaluate_select(
output_values, input_values, autob, get_output_element_type(0)));
return false;
}

View File

@ -78,14 +78,10 @@ namespace shape_of
output_value->set_shape(Shape{shape.size()});
switch (output_value->get_element_type())
{
TYPE_CASE(i32)(shape, output_value);
break;
TYPE_CASE(i64)(shape, output_value);
break;
TYPE_CASE(u32)(shape, output_value);
break;
TYPE_CASE(u64)(shape, output_value);
break;
NGRAPH_TYPE_CASE(evaluate_shape_of, i32, shape, output_value);
NGRAPH_TYPE_CASE(evaluate_shape_of, i64, shape, output_value);
NGRAPH_TYPE_CASE(evaluate_shape_of, u32, shape, output_value);
NGRAPH_TYPE_CASE(evaluate_shape_of, u64, shape, output_value);
default: rc = false; break;
}
return rc;
@ -158,8 +154,9 @@ namespace shape_of
bool op::v3::ShapeOf::evaluate(const HostTensorVector& output_values,
const HostTensorVector& input_values) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v3::ShapeOf::evaluate");
return shape_of::evaluate_shape_of(output_values[0], input_values[0]);
NGRAPH_OP_SCOPE(v3_ShapeOf_evaluate,
return shape_of::evaluate_shape_of(output_values[0], input_values[0]));
return false;
}
bool op::v3::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values)
@ -207,8 +204,9 @@ shared_ptr<Node> op::v0::ShapeOf::clone_with_new_inputs(const OutputVector& new_
bool op::v0::ShapeOf::evaluate(const HostTensorVector& output_values,
const HostTensorVector& input_values) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::ShapeOf::evaluate");
return shape_of::evaluate_shape_of(output_values[0], input_values[0]);
NGRAPH_OP_SCOPE(v0_ShapeOf_evaluate,
return shape_of::evaluate_shape_of(output_values[0], input_values[0]));
return false;
}
bool op::v0::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values)

View File

@ -15,6 +15,7 @@
//*****************************************************************************
#include <numeric>
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/op/shuffle_channels.hpp"
@ -139,8 +140,8 @@ Shape op::ShuffleChannels::get_pre_shuffle_shape(const Shape& data_shape) const
return res;
}
bool op::ShuffleChannels::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
bool op::ShuffleChannels::evaluate_shuffle_channels(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
const auto arg = inputs[0]->get_data_ptr<const char>();
auto out = outputs[0]->get_data_ptr<char>();
@ -164,7 +165,8 @@ bool op::ShuffleChannels::evaluate(const HostTensorVector& outputs,
}
size_t data_size = shape_size(data_shape) * elem_size;
// first reshape from data_shape to reshaped_out_shape is skipped since it doesn't affect out data
Shape transpose_axes_order = {0, 2, 1, 3};
@ -178,6 +180,13 @@ bool op::ShuffleChannels::evaluate(const HostTensorVector& outputs,
runtime::opt_kernel::reshape(
arg, out, reshaped_out_shape, axis_vector, transposed_shape, elem_size);
// last reshape from transposed_shape to data_shape is skipped since it doesn't affect out data
return true;
}
bool op::ShuffleChannels::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(ShuffleChannels_evaluate, return evaluate_shuffle_channels(outputs, inputs));
return false;
}

View File

@ -57,20 +57,13 @@ namespace sigmoid
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_sigmoid, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sigmoid, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sigmoid, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sigmoid, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sigmoid, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sigmoid, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sigmoid, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -79,6 +72,8 @@ namespace sigmoid
bool op::Sigmoid::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Sigmoid::evaluate");
return sigmoid::evaluate_sigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v0_Sigmoid_evaluate,
return sigmoid::evaluate_sigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}
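The Sigmoid hunk above, and the Sign/Sin/Sinh hunks below, all rewrite the same kind of per-op switch; the evaluate<ET> template that each NGRAPH_TYPE_CASE dispatches to sits earlier in each file, outside the hunks shown. Based on the sigmoid reference kernel it presumably looks roughly like this sketch:
namespace sigmoid
{
    template <element::Type_t ET>
    bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, size_t count)
    {
        using T = typename element_type_traits<ET>::value_type;
        // runtime::reference::sigmoid computes 1 / (1 + exp(-x)) per element.
        runtime::reference::sigmoid<T>(
            arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
        return true;
    }
}
The unary ops differ only in which reference kernel the body calls, which is why the diffs for Sigmoid, Sign, Sin, and Sinh are line-for-line identical apart from the region names.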

View File

@ -60,20 +60,13 @@ namespace signop
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_sign, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sign, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sign, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sign, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sign, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sign, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sign, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -82,6 +75,8 @@ namespace signop
bool op::Sign::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Sign::evaluate");
return signop::evaluate_sign(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v0_Sign_evaluate,
return signop::evaluate_sign(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}

View File

@ -62,20 +62,13 @@ namespace sinop
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_sin, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sin, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sin, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sin, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sin, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sin, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sin, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -84,6 +77,8 @@ namespace sinop
bool op::Sin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Sin::evaluate");
return sinop::evaluate_sin(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v0_Sin_evaluate,
return sinop::evaluate_sin(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}

View File

@ -62,20 +62,13 @@ namespace sinhop
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_sinh, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sinh, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sinh, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sinh, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sinh, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sinh, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sinh, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@ -84,6 +77,8 @@ namespace sinhop
bool op::Sinh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Sinh::evaluate");
return sinhop::evaluate_sinh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v0_Sinh_evaluate,
return sinhop::evaluate_sinh(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}

View File

@ -35,23 +35,29 @@ using namespace ngraph;
namespace
{
template <element::Type_t ET>
inline bool try_evaluate_softmax(const HostTensorPtr& arg,
const HostTensorPtr& out,
const Shape& shape,
const AxisSet& axes)
inline bool evaluate(const HostTensorPtr& arg,
const HostTensorPtr& out,
const Shape& shape,
const AxisSet& axes)
{
return (ET == arg->get_element_type()) &&
(runtime::reference::softmax(
arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), shape, axes),
true);
runtime::reference::softmax(arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), shape, axes);
return true;
}
bool evaluate_softmax(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes)
{
auto shape = out->get_shape();
return try_evaluate_softmax<element::Type_t::f16>(arg, out, shape, axes) ||
try_evaluate_softmax<element::Type_t::f32>(arg, out, shape, axes) ||
try_evaluate_softmax<element::Type_t::f64>(arg, out, shape, axes);
bool rc = true;
switch (arg->get_element_type())
{
NGRAPH_TYPE_CASE(evaluate_softmax, bf16, arg, out, shape, axes);
NGRAPH_TYPE_CASE(evaluate_softmax, f16, arg, out, shape, axes);
NGRAPH_TYPE_CASE(evaluate_softmax, f32, arg, out, shape, axes);
NGRAPH_TYPE_CASE(evaluate_softmax, f64, arg, out, shape, axes);
default: rc = false; break;
}
return rc;
}
}
@ -95,7 +101,7 @@ shared_ptr<Node> op::v1::Softmax::clone_with_new_inputs(const OutputVector& new_
bool op::v1::Softmax::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Softmax::evaluate");
outputs[0]->set_unary(inputs[0]);
return evaluate_softmax(inputs[0], outputs[0], AxisSet{m_axis});
NGRAPH_OP_SCOPE(v1_Softmax_evaluate, outputs[0]->set_unary(inputs[0]);
return evaluate_softmax(inputs[0], outputs[0], AxisSet{m_axis}));
return false;
}

View File

@ -65,12 +65,9 @@ namespace softplus
switch (arg->get_element_type())
{
TYPE_CASE(bf16)(arg, out, count);
break;
TYPE_CASE(f16)(arg, out, count);
break;
TYPE_CASE(f32)(arg, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_softplus, bf16, arg, out, count);
NGRAPH_TYPE_CASE(evaluate_softplus, f16, arg, out, count);
NGRAPH_TYPE_CASE(evaluate_softplus, f32, arg, out, count);
default: rc = false; break;
}
return rc;
@ -80,6 +77,8 @@ namespace softplus
bool op::v4::SoftPlus::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::SoftPlus::evaluate");
return softplus::evaluate_softplus(inputs[0], outputs[0], shape_size(get_output_shape(0)));
NGRAPH_OP_SCOPE(
v4_SoftPlus_evaluate,
return softplus::evaluate_softplus(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return false;
}

View File

@ -17,6 +17,7 @@
#include <cstddef>
#include <memory>
#include <numeric>
#include "itt.hpp"
#include "ngraph/builder/make_constant.hpp"
#include "ngraph/node.hpp"
@ -140,8 +141,8 @@ bool ngraph::op::v1::SpaceToBatch::visit_attributes(ngraph::AttributeVisitor& vi
return true;
}
bool ngraph::op::v1::SpaceToBatch::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
const auto& data = inputs[0];
const auto& out = outputs[0];
@ -267,4 +268,11 @@ bool ngraph::op::v1::SpaceToBatch::evaluate(const HostTensorVector& outputs,
out->write(flat_data.data(), elem_size * shape_size(out->get_shape()));
return true;
}
}
bool ngraph::op::v1::SpaceToBatch::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_SpaceToBatch_evaluate, return evaluate_space_to_batch(outputs, inputs));
return false;
}

View File

@ -18,6 +18,7 @@
#include <memory>
#include <numeric>
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/op/space_to_depth.hpp"
@ -109,8 +110,8 @@ void ngraph::op::v0::SpaceToDepth::validate_and_infer_types()
}
}
bool ngraph::op::v0::SpaceToDepth::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
bool ngraph::op::v0::SpaceToDepth::evaluate_space_to_depth(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
const auto& data = inputs[0];
const auto& out = outputs[0];
@ -174,7 +175,8 @@ bool ngraph::op::v0::SpaceToDepth::evaluate(const HostTensorVector& outputs,
// x' = reshape(data, [N, C, D1/block_size, block_size, D2/block_size, block_size, ...,
// DK/block_size, block_size])
// x'' = transpose(x', [0, 1, 3, 5, ..., K + (K + 1), 2, 4, ..., K + K])
// y = reshape(x'', [N, C * (block_size ^ K), D1 / block_size, D2 / block_size, ..., DK / block_size])
case SpaceToDepthMode::DEPTH_FIRST:
{
@ -184,7 +186,8 @@ bool ngraph::op::v0::SpaceToDepth::evaluate(const HostTensorVector& outputs,
// x' = reshape(data, [N, C, D1/block_size, block_size, D2/block_size, block_size, ... ,
// DK/block_size, block_size])
// x'' = transpose(x', [0, 3, 5, ..., K + (K + 1), 1, 2, 4, ..., K + K])
// y = reshape(x'', [N, C * (block_size ^ K), D1 / block_size, D2 / block_size, ..., DK / block_size])
case SpaceToDepthMode::BLOCKS_FIRST:
default: { axes_order.insert(axes_order.begin() + spatial_dims + 1, 1);
@ -222,6 +225,12 @@ bool ngraph::op::v0::SpaceToDepth::evaluate(const HostTensorVector& outputs,
elem_size);
return true;
}
bool ngraph::op::v0::SpaceToDepth::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_SpaceToDepth_evaluate, return evaluate_space_to_depth(outputs, inputs));
return false;
}
namespace ngraph
{

View File

@ -15,6 +15,7 @@
//*****************************************************************************
#include "ngraph/runtime/reference/split.hpp"
#include <numeric>
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/builder/split.hpp"
#include "ngraph/op/constant.hpp"
@ -148,8 +149,7 @@ namespace split
bool op::v1::Split::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
const auto& data = inputs[0];
const auto& axis = inputs[1];
return split::evaluate_split(data, axis, outputs, m_num_splits, this);
NGRAPH_OP_SCOPE(v1_Split_evaluate, const auto& data = inputs[0]; const auto& axis = inputs[1];
return split::evaluate_split(data, axis, outputs, m_num_splits, this));
return false;
}

Some files were not shown because too many files have changed in this diff.