Added CC macros to validate, clone and visit (#3730)

* Added CC macros to validate, clone and visit
* Fixed names
* Fixed code style
* Add exceptions
* Revert "Add exceptions"

  This reverts commit 0489ba376f.

* Update ngraph macros, throw an exception if code is disabled for an nGraph op
* Fixed code style
* Simplified NGRAPH_OP_SCOPE
* Changed TYPE_CASE macros
* Fixed compilation
* Fixed code style
* Fixed build
parent 55f1f9606f
commit d6721c395b
@@ -36,13 +36,18 @@ namespace ngraph
            OV_ITT_DOMAIN(ngraph_op, "nGraph::Op");
        }
    }
    OV_CC_DOMAINS(ngraph_op);
}
OV_CC_DOMAINS(ngraph_op);

#if defined(SELECTIVE_BUILD) || defined(SELECTIVE_BUILD_ANALYZER)
#if defined(SELECTIVE_BUILD_ANALYZER)
#define NGRAPH_OP_SCOPE(region) OV_SCOPE(ngraph_op, region)
#elif defined(SELECTIVE_BUILD)
#define NGRAPH_OP_SCOPE(region)                                                                    \
    if (OV_CC_SCOPE_IS_ENABLED(OV_CC_CAT3(ngraph_op, _, region)) == 0)                             \
        throw ngraph::ngraph_error(std::string(OV_CC_TOSTRING(OV_CC_CAT3(ngraph_op, _, region))) + \
                                   " is disabled!")
#else
#define NGRAPH_OP_SCOPE(region) OV_ITT_SCOPED_TASK(itt::domains::ngraph_op, #region);
#define NGRAPH_OP_SCOPE(region) OV_ITT_SCOPED_TASK(ngraph::itt::domains::ngraph_op, #region)
#endif

#define NGRAPH_TYPE_CASE(region, a, ...) \
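After this hunk, NGRAPH_OP_SCOPE is a plain statement in every build flavour: under SELECTIVE_BUILD_ANALYZER it expands to an OV_SCOPE profiling region, under SELECTIVE_BUILD it throws ngraph_error when a conditionally-compiled region is entered while disabled, and otherwise it opens an ITT scoped task. Below is a minimal, self-contained sketch of the SELECTIVE_BUILD behaviour only; OP_SCOPE and is_enabled() are local stand-ins (the real OV_CC_SCOPE_IS_ENABLED check is resolved at compile time), not nGraph API.

#include <iostream>
#include <set>
#include <stdexcept>
#include <string>

// Stand-in for OV_CC_SCOPE_IS_ENABLED; a runtime lookup keeps the sketch
// self-contained, whereas the real check is a compile-time constant.
static const std::set<std::string> enabled_regions = {"v0_Abs_evaluate"};

static bool is_enabled(const std::string& region)
{
    return enabled_regions.count(region) != 0;
}

// SELECTIVE_BUILD flavour: entering a disabled region throws.
#define OP_SCOPE(region)                                                       \
    if (!is_enabled(#region))                                                  \
    throw std::runtime_error(std::string(#region) + " is disabled!")

bool evaluate_abs()
{
    OP_SCOPE(v0_Abs_evaluate); // enabled: execution falls through to the body
    return true;
}

bool evaluate_acos()
{
    OP_SCOPE(v0_Acos_evaluate); // not enabled: throws here
    return true;
}

int main()
{
    std::cout << "abs evaluated: " << evaluate_abs() << "\n";
    try
    {
        evaluate_acos();
    }
    catch (const std::runtime_error& e)
    {
        std::cout << "caught: " << e.what() << "\n";
    }
}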
@@ -36,6 +36,7 @@ op::Abs::Abs(const Output<Node>& arg)

shared_ptr<Node> op::Abs::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Abs_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Abs>(new_args.at(0));
}
@@ -73,10 +74,6 @@ namespace absop

bool op::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    bool rc = false;
    NGRAPH_OP_SCOPE(v0_Abs_evaluate)
    {
        rc = absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    }
    return rc;
    NGRAPH_OP_SCOPE(v0_Abs_evaluate);
    return absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
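The abs.cpp hunk shows the rewrite that repeats mechanically for every op below: the old evaluate() wrapped its body in the macro's block, so a disabled region silently skipped the block and returned false (or a default rc); with the simplified macro the scope is a standalone statement, and a disabled region now surfaces as an exception from the macro itself. A condensed before/after sketch follows; OP_SCOPE_BLOCK, OP_SCOPE_STMT and evaluate_op are local stand-ins, not the nGraph macros.

#include <stdexcept>

static bool region_enabled = true; // stand-in for the selective-build check

// Old style: the macro guarded a block; a disabled region skipped it.
#define OP_SCOPE_BLOCK(region) if (region_enabled)

// New style: the macro is a plain statement; a disabled region throws.
#define OP_SCOPE_STMT(region)                                                  \
    if (!region_enabled)                                                       \
    throw std::runtime_error(#region " is disabled!")

static bool evaluate_op() { return true; }

bool evaluate_before()
{
    bool rc = false;
    OP_SCOPE_BLOCK(v0_Abs_evaluate) { rc = evaluate_op(); }
    return rc; // silently false when the region is disabled
}

bool evaluate_after()
{
    OP_SCOPE_STMT(v0_Abs_evaluate); // throws when the region is disabled
    return evaluate_op();
}

int main()
{
    return (evaluate_before() && evaluate_after()) ? 0 : 1;
}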
@@ -45,6 +45,7 @@ op::Acos::Acos(const Output<Node>& arg)

shared_ptr<Node> op::Acos::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Acos_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Acos>(new_args.at(0));
}
@@ -81,10 +82,6 @@ namespace acosop

bool op::Acos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    bool rc = false;
    NGRAPH_OP_SCOPE(v0_Acos_evaluate)
    {
        rc = acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    }
    return rc;
    NGRAPH_OP_SCOPE(v0_Acos_evaluate);
    return acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

@@ -36,6 +36,7 @@ op::v3::Acosh::Acosh(const Output<Node>& arg)

shared_ptr<Node> op::v3::Acosh::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v3_Acosh_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Acosh>(new_args.at(0));
}
@@ -70,7 +71,6 @@ namespace acoshop

bool op::v3::Acosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    bool rc = false;
    NGRAPH_OP_SCOPE(v3_Acosh_evaluate) { rc = acoshop::evaluate_acosh(inputs[0], outputs[0]); }
    return rc;
    NGRAPH_OP_SCOPE(v3_Acosh_evaluate);
    return acoshop::evaluate_acosh(inputs[0], outputs[0]);
}

@@ -81,22 +81,20 @@ op::v1::Add::Add(const Output<Node>& arg0,

bool op::v1::Add::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_Add_visit_attributes);
    BinaryElementwiseArithmetic::visit_attributes(visitor);
    return true;
}

shared_ptr<Node> op::v1::Add::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_Add_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<op::v1::Add>(new_args.at(0), new_args.at(1), this->get_autob());
}

bool op::v1::Add::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    bool rc = false;
    NGRAPH_OP_SCOPE(v1_Add_evaluate)
    {
        rc = add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
    }
    return rc;
    NGRAPH_OP_SCOPE(v1_Add_evaluate);
    return add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
}

@@ -34,12 +34,14 @@ op::v1::LogicalAnd::LogicalAnd(const Output<Node>& arg0,

bool op::v1::LogicalAnd::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_LogicalAnd_visit_attributes);
    BinaryElementwiseLogical::visit_attributes(visitor);
    return true;
}

shared_ptr<Node> op::v1::LogicalAnd::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_LogicalAnd_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<v1::LogicalAnd>(new_args.at(0), new_args.at(1), this->get_autob());
}
@@ -86,10 +88,6 @@ namespace logand
bool op::v1::LogicalAnd::evaluate(const HostTensorVector& outputs,
                                  const HostTensorVector& inputs) const
{
    bool rc = false;
    NGRAPH_OP_SCOPE(v1_LogicalAnd_evaluate)
    {
        rc = logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob());
    }
    return rc;
    NGRAPH_OP_SCOPE(v1_LogicalAnd_evaluate);
    return logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob());
}
@@ -46,6 +46,7 @@ op::Asin::Asin(const Output<Node>& arg)

shared_ptr<Node> op::Asin::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Asin_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Asin>(new_args.at(0));
}
@@ -82,10 +83,6 @@ namespace asinop

bool op::Asin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    bool rc = false;
    NGRAPH_OP_SCOPE(v0_Asin_evaluate)
    {
        rc = asinop::evaluate_asin(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    }
    return rc;
    NGRAPH_OP_SCOPE(v0_Asin_evaluate);
    return asinop::evaluate_asin(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

@@ -36,6 +36,7 @@ op::v3::Asinh::Asinh(const Output<Node>& arg)

shared_ptr<Node> op::v3::Asinh::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v3_Asinh_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Asinh>(new_args.at(0));
}
@@ -70,7 +71,6 @@ namespace asinhop

bool op::v3::Asinh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    bool rc = false;
    NGRAPH_OP_SCOPE(v3_Asinh_evaluate) { rc = asinhop::evaluate_asinh(inputs[0], outputs[0]); }
    return rc;
    NGRAPH_OP_SCOPE(v3_Asinh_evaluate);
    return asinhop::evaluate_asinh(inputs[0], outputs[0]);
}

@@ -16,6 +16,7 @@

#include "ngraph/op/assign.hpp"
#include <ops.hpp>
#include "itt.hpp"
#include "ngraph/op/read_value.hpp"

using namespace std;
@@ -32,6 +33,7 @@ op::v3::Assign::Assign(const Output<Node>& new_value, const std::string& variabl

void op::v3::Assign::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v3_Assign_validate_and_infer_types);
    auto value = input_value(0);
    auto arg_t = get_input_element_type(0);
    auto output_shape = get_input_partial_shape(0);
@@ -78,12 +80,14 @@ void op::v3::Assign::validate_and_infer_types()

shared_ptr<Node> op::v3::Assign::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v3_Assign_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Assign>(new_args.at(0), m_variable_id);
}

bool op::v3::Assign::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v3_Assign_visit_attributes);
    visitor.on_attribute("variable_id", m_variable_id);
    return true;
}
@@ -45,6 +45,7 @@ op::Atan::Atan(const Output<Node>& arg)

shared_ptr<Node> op::Atan::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Atan_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Atan>(new_args.at(0));
}
@@ -81,10 +82,6 @@ namespace atanop

bool op::Atan::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    bool rc = false;
    NGRAPH_OP_SCOPE(v0_Atan_evaluate)
    {
        rc = atanop::evaluate_atan(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    }
    return rc;
    NGRAPH_OP_SCOPE(v0_Atan_evaluate);
    return atanop::evaluate_atan(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

@@ -36,6 +36,7 @@ op::v3::Atanh::Atanh(const Output<Node>& arg)

shared_ptr<Node> op::v3::Atanh::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v3_Atanh_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Atanh>(new_args.at(0));
}
@@ -70,7 +71,6 @@ namespace atanhop

bool op::v3::Atanh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    bool rc = false;
    NGRAPH_OP_SCOPE(v3_Atanh_evaluate) { rc = atanhop::evaluate_atanh(inputs[0], outputs[0]); }
    return rc;
    NGRAPH_OP_SCOPE(v3_Atanh_evaluate);
    return atanhop::evaluate_atanh(inputs[0], outputs[0]);
}

@@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/avg_pool.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/validation_util.hpp"
@@ -65,6 +66,7 @@ op::v1::AvgPool::AvgPool(const Output<Node>& arg,

bool op::v1::AvgPool::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_AvgPool_visit_attributes);
    visitor.on_attribute("kernel", m_kernel);
    visitor.on_attribute("strides", m_strides);
    visitor.on_attribute("pads_begin", m_pads_begin);
@@ -77,6 +79,7 @@ bool op::v1::AvgPool::visit_attributes(AttributeVisitor& visitor)

void op::v1::AvgPool::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v1_AvgPool_validate_and_infer_types);
    if (0 == m_strides.size())
    {
        m_strides = Strides(m_kernel.size(), 1);
@@ -214,6 +217,7 @@ void op::v1::AvgPool::set_rounding_type(op::RoundingType rounding_type)

shared_ptr<Node> op::v1::AvgPool::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_AvgPool_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<v1::AvgPool>(new_args.at(0),
                                    m_strides,
@@ -15,6 +15,7 @@
//*****************************************************************************

#include <sstream>
#include "itt.hpp"

#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/batch_norm.hpp"
@@ -39,12 +40,14 @@ op::v0::BatchNormInference::BatchNormInference(const Output<Node>& input,

bool op::v0::BatchNormInference::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_BatchNormInference_visit_attributes);
    visitor.on_attribute("epsilon", m_epsilon);
    return true;
}

void op::v0::BatchNormInference::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_BatchNormInference_validate_and_infer_types);
    element::Type result_et;
    PartialShape result_batch_shape;
    PartialShape result_channel_shape; // unused here
@@ -69,6 +72,7 @@ void op::v0::BatchNormInference::validate_and_infer_types()
std::shared_ptr<Node>
    op::v0::BatchNormInference::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_BatchNormInference_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return std::make_shared<BatchNormInference>(
        new_args.at(2), new_args.at(0), new_args.at(1), new_args.at(3), new_args.at(4), m_epsilon);
@@ -90,12 +94,14 @@ op::v5::BatchNormInference::BatchNormInference(const Output<Node>& input,

bool op::v5::BatchNormInference::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v5_BatchNormInference_visit_attributes);
    visitor.on_attribute("epsilon", m_epsilon);
    return true;
}

void op::v5::BatchNormInference::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v5_BatchNormInference_validate_and_infer_types);
    element::Type result_et;
    PartialShape result_batch_shape;
    PartialShape result_channel_shape; // unused here
@@ -120,6 +126,7 @@ void op::v5::BatchNormInference::validate_and_infer_types()
std::shared_ptr<Node>
    op::v5::BatchNormInference::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v5_BatchNormInference_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return std::make_shared<BatchNormInference>(
        new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), new_args.at(4), m_epsilon);

@@ -46,6 +46,7 @@ ngraph::op::v1::BatchToSpace::BatchToSpace(const ngraph::Output<ngraph::Node>& d

void op::v1::BatchToSpace::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v1_BatchToSpace_validate_and_infer_types);
    PartialShape data_pshape = get_input_partial_shape(0);

    const auto& data_type = get_input_element_type(0);
@@ -132,6 +133,7 @@ void op::v1::BatchToSpace::validate_and_infer_types()
std::shared_ptr<ngraph::Node>
    ngraph::op::v1::BatchToSpace::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_BatchToSpace_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<BatchToSpace>(
        new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3));
@@ -139,6 +141,7 @@ std::shared_ptr<ngraph::Node>

bool ngraph::op::v1::BatchToSpace::visit_attributes(ngraph::AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_BatchToSpace_visit_attributes);
    return true;
}

@@ -259,6 +262,6 @@ namespace
bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs,
                                            const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v1_BatchToSpace) { return batch_to_space_evaluate(outputs, inputs); }
    return false;
    NGRAPH_OP_SCOPE(v1_BatchToSpace);
    return batch_to_space_evaluate(outputs, inputs);
}
@@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/binary_convolution.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/axis_vector.hpp"
#include "ngraph/coordinate_diff.hpp"
@@ -71,6 +72,7 @@ op::v1::BinaryConvolution::BinaryConvolution(const Output<Node>& data,

void op::v1::BinaryConvolution::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v1_BinaryConvolution_validate_and_infer_types);
    const PartialShape& data_batch_shape = get_input_partial_shape(0);
    element::Type data_batch_et = get_input_element_type(0);
    const PartialShape& filters_shape = get_input_partial_shape(1);
@@ -152,6 +154,7 @@ void op::v1::BinaryConvolution::validate_and_infer_types()
shared_ptr<Node>
    op::v1::BinaryConvolution::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_BinaryConvolution_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<v1::BinaryConvolution>(new_args.at(0),
                                              new_args.at(1),
@@ -166,6 +169,7 @@ shared_ptr<Node>

bool op::v1::BinaryConvolution::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_BinaryConvolution_visit_attributes);
    visitor.on_attribute("strides", m_strides);
    visitor.on_attribute("pads_begin", m_pads_begin);
    visitor.on_attribute("pads_end", m_pads_end);

@@ -161,6 +161,7 @@ bool op::v3::Broadcast::broadcast_evaluate(const HostTensorVector& outputs,

void op::v3::Broadcast::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v3_Broadcast_validate_and_infer_types);
    if (m_mode.m_type == BroadcastType::NONE)
    {
        NODE_VALIDATION_CHECK(this,
@@ -204,6 +205,7 @@ void op::v3::Broadcast::validate_and_infer_types()

shared_ptr<Node> op::v3::Broadcast::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v3_Broadcast_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    if (new_args.size() == 2)
    {
@@ -221,6 +223,7 @@ shared_ptr<Node> op::v3::Broadcast::clone_with_new_inputs(const OutputVector& ne

bool op::v3::Broadcast::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v3_Broadcast_visit_attributes);
    visitor.on_attribute("mode", m_mode);
    return true;
}
@@ -228,8 +231,8 @@ bool op::v3::Broadcast::visit_attributes(AttributeVisitor& visitor)
bool op::v3::Broadcast::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v3_Broadcast_evaluate) { return broadcast_evaluate(outputs, inputs); }
    return false;
    NGRAPH_OP_SCOPE(v3_Broadcast_evaluate);
    return broadcast_evaluate(outputs, inputs);
}

namespace
@@ -275,6 +278,7 @@ op::v1::Broadcast::Broadcast(const Output<Node>& arg,

void op::v1::Broadcast::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v1_Broadcast_validate_and_infer_types);
    // m_type is deduced and not always explicitly stated, for cases where broadcast
    // has 2 inputs its always NUMPY mode
    if (m_broadcast_spec.m_type == AutoBroadcastType::NONE && get_input_size() < 3)
@@ -304,6 +308,7 @@ void op::v1::Broadcast::validate_and_infer_types()

shared_ptr<Node> op::v1::Broadcast::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_Broadcast_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<v1::Broadcast>(
        new_args.at(0), new_args.at(1), new_args.at(2), m_broadcast_spec);
@@ -311,6 +316,7 @@ shared_ptr<Node> op::v1::Broadcast::clone_with_new_inputs(const OutputVector& ne

bool op::v1::Broadcast::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_Broadcast_visit_attributes);
    visitor.on_attribute("mode", m_broadcast_spec);
    return true;
}
@@ -318,9 +324,6 @@ bool op::v1::Broadcast::visit_attributes(AttributeVisitor& visitor)
bool op::v1::Broadcast::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v1_Broadcast_evaluate)
    {
        return op::util::BroadcastBase::evaluate(outputs, inputs);
    }
    return false;
    NGRAPH_OP_SCOPE(v1_Broadcast_evaluate);
    return op::util::BroadcastBase::evaluate(outputs, inputs);
}
@@ -15,6 +15,7 @@
//*****************************************************************************

#include "bucketize.hpp"
#include "itt.hpp"

using namespace ngraph;
using namespace std;
@@ -34,6 +35,7 @@ op::v3::Bucketize::Bucketize(const Output<Node>& data,

bool op::v3::Bucketize::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v3_Bucketize_visit_attributes);
    visitor.on_attribute("output_type", m_output_type);
    visitor.on_attribute("with_right_bound", m_with_right_bound);
    return true;
@@ -41,6 +43,7 @@ bool op::v3::Bucketize::visit_attributes(AttributeVisitor& visitor)

void op::v3::Bucketize::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v3_Bucketize_validate_and_infer_types);
    const PartialShape& data_pshape = get_input_partial_shape(0);
    const PartialShape& buckets_pshape = get_input_partial_shape(1);

@@ -65,6 +68,7 @@ void op::v3::Bucketize::validate_and_infer_types()

shared_ptr<Node> op::v3::Bucketize::clone_with_new_inputs(const OutputVector& inputs) const
{
    NGRAPH_OP_SCOPE(v3_Bucketize_clone_with_new_inputs);
    check_new_args_count(this, inputs);

    return make_shared<v3::Bucketize>(

@@ -34,6 +34,7 @@ op::Ceiling::Ceiling(const Output<Node>& arg)

shared_ptr<Node> op::Ceiling::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Ceiling_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Ceiling>(new_args.at(0));
}
@@ -83,9 +84,6 @@ namespace ceiling

bool op::Ceiling::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v0_Ceiling_evaluate)
    {
        return ceiling::evaluate_ceiling(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    }
    return false;
    NGRAPH_OP_SCOPE(v0_Ceiling_evaluate);
    return ceiling::evaluate_ceiling(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

@@ -86,12 +86,9 @@ namespace clamp

bool op::v0::Clamp::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v0_Clamp_evaluate)
    {
        return clamp::evaluate_clamp(
            inputs[0], outputs[0], get_min(), get_max(), shape_size(get_input_shape(0)));
    }
    return false;
    NGRAPH_OP_SCOPE(v0_Clamp_evaluate);
    return clamp::evaluate_clamp(
        inputs[0], outputs[0], get_min(), get_max(), shape_size(get_input_shape(0)));
}

NGRAPH_RTTI_DEFINITION(op::v0::Clamp, "Clamp", 0);
@@ -230,6 +227,7 @@ OutputVector op::Clamp::decompose_op() const

shared_ptr<Node> op::Clamp::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Clamp_clone_with_new_inputs);
    NODE_VALIDATION_CHECK(this,
                          new_args.size() == 1,
                          "Expected 1 element in new_args for the Clamp op but got ",
@@ -240,6 +238,7 @@ shared_ptr<Node> op::Clamp::clone_with_new_inputs(const OutputVector& new_args)

bool op::Clamp::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_Clamp_visit_attributes);
    visitor.on_attribute("min", m_min);
    visitor.on_attribute("max", m_max);
    return true;
@@ -41,12 +41,14 @@ op::Concat::Concat(const NodeVector& args, int64_t axis)

bool op::Concat::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_Concat_visit_attributes);
    visitor.on_attribute("axis", m_axis);
    return true;
}

void op::Concat::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_Concat_validate_and_infer_types);
    NODE_VALIDATION_CHECK(this, get_input_size() >= 1, "At least one argument required.");

    PartialShape inputs_shape_scheme{PartialShape::dynamic()};
@@ -85,7 +87,8 @@ void op::Concat::validate_and_infer_types()
        NODE_VALIDATION_CHECK(
            this,
            PartialShape::merge_into(inputs_shape_scheme, this_input_shape),
            "Argument shapes are inconsistent; they must have the same rank, and must have ",
            "Argument shapes are inconsistent; they must have the same rank, and must "
            "have ",
            "equal dimension everywhere except on the concatenation axis (axis ",
            concat_axis,
            ").");
@@ -110,6 +113,7 @@ void op::Concat::validate_and_infer_types()

shared_ptr<Node> op::Concat::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Concat_clone_with_new_inputs);
    // TODO(amprocte): Should we check the new_args count here?
    return make_shared<Concat>(new_args, m_axis);
}
@@ -144,10 +148,7 @@ namespace

bool op::Concat::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v0_Concat_evaluate)
    {
        auto concat_axis = get_axis() < 0 ? get_axis() + inputs[0]->get_shape().size() : get_axis();
        return evaluate_concat(inputs, outputs[0], concat_axis);
    }
    return false;
    NGRAPH_OP_SCOPE(v0_Concat_evaluate);
    auto concat_axis = get_axis() < 0 ? get_axis() + inputs[0]->get_shape().size() : get_axis();
    return evaluate_concat(inputs, outputs[0], concat_axis);
}
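One detail worth noting in Concat::evaluate above: before dispatching, it normalizes a negative axis against the rank of the first input, so axis -1 addresses the last dimension. A minimal, self-contained sketch of that normalization (normalize_axis is an illustrative helper, not nGraph API):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Map a possibly negative axis into [0, rank), as Concat::evaluate does.
static std::int64_t normalize_axis(std::int64_t axis, std::size_t rank)
{
    return axis < 0 ? axis + static_cast<std::int64_t>(rank) : axis;
}

int main()
{
    assert(normalize_axis(-1, 4) == 3); // last axis of a 4-D shape
    assert(normalize_axis(2, 4) == 2);  // non-negative axes pass through
    return 0;
}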
@@ -549,6 +549,7 @@ void op::Constant::set_data_shape(const Shape& shape)

shared_ptr<Node> op::Constant::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Constant_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Constant>(*this);
}
@@ -625,6 +626,7 @@ bool op::Constant::are_all_data_elements_bitwise_identical() const

bool op::v0::Constant::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_Constant_visit_attributes);
    visitor.on_attribute("element_type", m_element_type);
    visitor.on_attribute("shape", m_shape);
    if (m_data == nullptr)
@@ -639,13 +641,10 @@ bool op::v0::Constant::visit_attributes(AttributeVisitor& visitor)
bool op::v0::Constant::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v0_Constant_evaluate)
    {
        auto output = outputs[0];
        output->write(get_data_ptr(), output->get_size_in_bytes());
        return true;
    }
    return false;
    NGRAPH_OP_SCOPE(v0_Constant_evaluate);
    auto output = outputs[0];
    output->write(get_data_ptr(), output->get_size_in_bytes());
    return true;
}

//
@@ -34,17 +34,20 @@ op::Convert::Convert(const Output<Node>& arg, const element::Type& destination_t

void op::Convert::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_Convert_validate_and_infer_types);
    set_output_type(0, m_destination_type, get_input_partial_shape(0));
}

bool op::Convert::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_Convert_visit_attributes);
    visitor.on_attribute("destination_type", m_destination_type);
    return true;
}

shared_ptr<Node> op::Convert::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Convert_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Convert>(new_args.at(0), m_destination_type);
}
@@ -66,10 +69,8 @@ namespace convert
#define TYPE_OUT_CASE(a, ...)                                                  \
    case element::Type_t::a:                                                   \
    {                                                                          \
        NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_covert_out, _, a))                 \
        {                                                                      \
            rc = evaluate<INPUT_ET, element::Type_t::a>(__VA_ARGS__);          \
        }                                                                      \
        NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_covert_out, _, a));                \
        rc = evaluate<INPUT_ET, element::Type_t::a>(__VA_ARGS__);              \
    }                                                                          \
    break

@@ -119,9 +120,6 @@ namespace convert
bool op::v0::Convert::evaluate(const HostTensorVector& output_values,
                               const HostTensorVector& input_values) const
{
    NGRAPH_OP_SCOPE(v0_Convert_evaluate)
    {
        return convert::evaluate_convert(input_values[0], output_values[0]);
    }
    return false;
    NGRAPH_OP_SCOPE(v0_Convert_evaluate);
    return convert::evaluate_convert(input_values[0], output_values[0]);
}
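TYPE_OUT_CASE above is one arm of a switch over element::Type_t that dispatches to a templated evaluate<...>; after the change each arm is a scope statement followed by the call rather than a scope-guarded block. A condensed, stand-alone sketch of that switch-over-type dispatch pattern follows; ElemType, evaluate_as and TYPE_CASE are illustrative stand-ins, not the nGraph API.

#include <iostream>

enum class ElemType { f32, i32, boolean };

// Templated worker selected at compile time per element type.
template <ElemType ET>
bool evaluate_as()
{
    std::cout << "dispatched to case " << static_cast<int>(ET) << "\n";
    return true;
}

// One switch arm per element type, generated by a case macro.
#define TYPE_CASE(a)                                                           \
    case ElemType::a: rc = evaluate_as<ElemType::a>(); break

bool dispatch(ElemType et)
{
    bool rc = false;
    switch (et)
    {
        TYPE_CASE(f32);
        TYPE_CASE(i32);
        TYPE_CASE(boolean);
    default: break;
    }
    return rc;
}

int main()
{
    return dispatch(ElemType::f32) ? 0 : 1;
}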
@@ -15,6 +15,7 @@
//*****************************************************************************

#include <memory>
#include "itt.hpp"

#include "ngraph/op/convert_like.hpp"

@@ -31,16 +32,19 @@ op::v1::ConvertLike::ConvertLike(const Output<Node>& data, const Output<Node>& l

void op::v1::ConvertLike::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v1_ConvertLike_validate_and_infer_types);
    set_output_type(0, get_input_element_type(1), get_input_partial_shape(0));
}

bool op::v1::ConvertLike::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_ConvertLike_visit_attributes);
    return true;
}

shared_ptr<Node> op::v1::ConvertLike::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_ConvertLike_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<ConvertLike>(new_args.at(0), new_args.at(1));
}
@@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/convolution.hpp"
#include "itt.hpp"
#include "ngraph/axis_vector.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/reshape.hpp"
@@ -46,6 +47,7 @@ op::v1::Convolution::Convolution(const Output<Node>& data_batch,

bool op::v1::Convolution::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_Convolution_visit_attributes);
    visitor.on_attribute("strides", m_strides);
    visitor.on_attribute("dilations", m_dilations);
    visitor.on_attribute("pads_begin", m_pads_begin);
@@ -56,6 +58,7 @@ bool op::v1::Convolution::visit_attributes(AttributeVisitor& visitor)

void op::v1::Convolution::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v1_Convolution_validate_and_infer_types);
    const PartialShape& data_batch_shape = get_input_partial_shape(0);
    element::Type data_batch_et = get_input_element_type(0);
    const PartialShape& filters_shape = get_input_partial_shape(1);
@@ -145,6 +148,7 @@ void op::v1::Convolution::validate_and_infer_types()

shared_ptr<Node> op::v1::Convolution::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_Convolution_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<v1::Convolution>(new_args.at(0),
                                        new_args.at(1),
@@ -183,6 +187,7 @@ op::v1::ConvolutionBackpropData::ConvolutionBackpropData(const Output<Node>& dat

bool op::v1::ConvolutionBackpropData::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_ConvolutionBackpropData_visit_attributes);
    visitor.on_attribute("strides", m_strides);
    visitor.on_attribute("dilations", m_dilations);
    visitor.on_attribute("pads_begin", m_pads_begin);
@@ -291,6 +296,7 @@ void op::v1::ConvolutionBackpropData::infer_conv_backprop_output_spatial_shape(

void op::v1::ConvolutionBackpropData::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v1_ConvolutionBackpropData_validate_and_infer_types);
    auto data_pshape = get_input_partial_shape(0);
    element::Type delta_et = get_input_element_type(0);
    const PartialShape& filters_pshape = get_input_partial_shape(1);
@@ -432,6 +438,7 @@ void op::v1::ConvolutionBackpropData::validate_and_infer_types()
shared_ptr<Node>
    op::v1::ConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_ConvolutionBackpropData_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    if (new_args.size() == 3)
    {
@@ -37,11 +37,13 @@ op::Cos::Cos(const Output<Node>& arg)

bool op::Cos::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_Cos_visit_attributes);
    return true;
}

shared_ptr<Node> op::Cos::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Cos_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Cos>(new_args.at(0));
}
@@ -78,9 +80,6 @@ namespace cosop

bool op::Cos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v0_Cos_evaluate)
    {
        return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    }
    return false;
    NGRAPH_OP_SCOPE(v0_Cos_evaluate);
    return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

@@ -36,11 +36,13 @@ op::Cosh::Cosh(const Output<Node>& arg)

bool op::Cosh::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_Cosh_visit_attributes);
    return true;
}

shared_ptr<Node> op::Cosh::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Cosh_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Cosh>(new_args.at(0));
}
@@ -77,9 +79,6 @@ namespace coshop

bool op::Cosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v0_Cosh_evaluate)
    {
        return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    }
    return false;
    NGRAPH_OP_SCOPE(v0_Cosh_evaluate);
    return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
@@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/ctc_greedy_decoder.hpp"
#include "itt.hpp"

using namespace std;
using namespace ngraph;
@@ -32,6 +33,7 @@ op::CTCGreedyDecoder::CTCGreedyDecoder(const Output<Node>& input,

void op::CTCGreedyDecoder::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_CTCGreedyDecoder_validate_and_infer_types);
    const auto& logits_pshape = get_input_partial_shape(0);
    const auto& seq_mask_pshape = get_input_partial_shape(1);
    auto input_et = get_input_element_type(0);
@@ -99,12 +101,14 @@ void op::CTCGreedyDecoder::validate_and_infer_types()

bool op::CTCGreedyDecoder::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_CTCGreedyDecoder_visit_attributes);
    visitor.on_attribute("ctc_merge_repeated", m_ctc_merge_repeated);
    return true;
}

shared_ptr<Node> op::CTCGreedyDecoder::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_CTCGreedyDecoder_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<CTCGreedyDecoder>(new_args.at(0), new_args.at(1), m_ctc_merge_repeated);
}

@@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/ctc_loss.hpp"
#include "itt.hpp"

using namespace std;
using namespace ngraph;
@@ -54,6 +55,7 @@ op::v4::CTCLoss::CTCLoss(const Output<Node>& logits,

void op::v4::CTCLoss::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v4_CTCLoss_validate_and_infer_types);
    // check types of input tensors
    const auto& logits_type = get_input_element_type(0);
    const auto& logit_length_type = get_input_element_type(1);
@@ -229,6 +231,7 @@ void op::v4::CTCLoss::validate_and_infer_types()

bool op::v4::CTCLoss::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v4_CTCLoss_visit_attributes);
    visitor.on_attribute("preprocess_collapse_repeated", preprocess_collapse_repeated_);
    visitor.on_attribute("ctc_merge_repeated", ctc_merge_repeated_);
    visitor.on_attribute("unique", unique_);
@@ -237,6 +240,7 @@ bool op::v4::CTCLoss::visit_attributes(AttributeVisitor& visitor)

shared_ptr<Node> op::v4::CTCLoss::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v4_CTCLoss_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    if (new_args.size() == 4)
    {
@@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/cum_sum.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/broadcast.hpp"
@@ -46,6 +47,7 @@ op::v0::CumSum::CumSum(const Output<Node>& arg, const bool exclusive, const bool

bool op::v0::CumSum::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_CumSum_visit_attributes);
    visitor.on_attribute("exclusive", m_exclusive);
    visitor.on_attribute("reverse", m_reverse);
    return true;
@@ -53,6 +55,7 @@ bool op::v0::CumSum::visit_attributes(AttributeVisitor& visitor)

void op::v0::CumSum::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_CumSum_validate_and_infer_types);
    element::Type arg_type = get_input_element_type(0);
    PartialShape arg_shape = get_input_partial_shape(0);
    set_output_type(0, arg_type, arg_shape);
@@ -73,6 +76,7 @@ void op::v0::CumSum::validate_and_infer_types()

shared_ptr<Node> op::v0::CumSum::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_CumSum_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<op::CumSum>(new_args.at(0), new_args.at(1), m_exclusive, m_reverse);
}

@@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/deformable_convolution.hpp"
#include "itt.hpp"
#include "ngraph/axis_vector.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/reshape.hpp"
@@ -50,6 +51,7 @@ op::v1::DeformableConvolution::DeformableConvolution(const Output<Node>& arg,

bool op::v1::DeformableConvolution::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_DeformableConvolution_visit_attributes);
    visitor.on_attribute("strides", m_strides);
    visitor.on_attribute("dilations", m_dilations);
    visitor.on_attribute("pads_begin", m_pads_begin);
@@ -62,6 +64,7 @@ bool op::v1::DeformableConvolution::visit_attributes(AttributeVisitor& visitor)

void op::v1::DeformableConvolution::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v1_DeformableConvolution_validate_and_infer_types);
    const PartialShape& data_batch_shape = get_input_partial_shape(0);
    const PartialShape& deformable_values_shape = get_input_partial_shape(1);
    const PartialShape& filters_shape = get_input_partial_shape(2);
@@ -101,14 +104,14 @@ void op::v1::DeformableConvolution::validate_and_infer_types()

    if (m_deformable_group > 1 && deformable_values_shape[1].is_static())
    {
        NODE_VALIDATION_CHECK(
            this,
            deformable_values_shape[1].get_length() % m_deformable_group == 0,
            "The deformable values input must be evenly divisible by the 'deformable group' value "
            "along the channels axis. Current input shape: ",
            deformable_values_shape,
            ", 'deformable group' attribute value: ",
            m_deformable_group);
        NODE_VALIDATION_CHECK(this,
                              deformable_values_shape[1].get_length() % m_deformable_group == 0,
                              "The deformable values input must be evenly divisible by the "
                              "'deformable group' value "
                              "along the channels axis. Current input shape: ",
                              deformable_values_shape,
                              ", 'deformable group' attribute value: ",
                              m_deformable_group);
    }

    element::Type result_et;
@@ -197,6 +200,7 @@ void op::v1::DeformableConvolution::validate_and_infer_types()
shared_ptr<Node>
    op::v1::DeformableConvolution::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_DeformableConvolution_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<v1::DeformableConvolution>(new_args.at(0),
                                                  new_args.at(1),
@@ -15,6 +15,7 @@
//*****************************************************************************

#include "deformable_psroi_pooling.hpp"
#include "itt.hpp"

using namespace std;
using namespace ngraph;
@@ -70,6 +71,7 @@ op::v1::DeformablePSROIPooling::DeformablePSROIPooling(const Output<Node>& input

bool op::v1::DeformablePSROIPooling::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_DeformablePSROIPooling_visit_attributes);
    visitor.on_attribute("output_dim", m_output_dim);
    visitor.on_attribute("spatial_scale", m_spatial_scale);
    visitor.on_attribute("group_size", m_group_size);
@@ -83,6 +85,7 @@ bool op::v1::DeformablePSROIPooling::visit_attributes(AttributeVisitor& visitor)

void op::v1::DeformablePSROIPooling::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v1_DeformablePSROIPooling_validate_and_infer_types);
    const auto& input_et = get_input_element_type(0);

    const auto& input_pshape = get_input_partial_shape(0);
@@ -128,6 +131,7 @@ void op::v1::DeformablePSROIPooling::validate_and_infer_types()
shared_ptr<Node>
    op::v1::DeformablePSROIPooling::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_DeformablePSROIPooling_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    if (new_args.size() == 3)
    {

@@ -54,6 +54,7 @@ op::DepthToSpace::DepthToSpace(const Output<Node>& data,

bool op::DepthToSpace::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_DepthToSpace_visit_attributes);
    visitor.on_attribute("block_size", m_blocksize);
    visitor.on_attribute("mode", m_mode);
    return true;
@@ -61,6 +62,7 @@ bool op::DepthToSpace::visit_attributes(AttributeVisitor& visitor)

shared_ptr<Node> op::DepthToSpace::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_DepthToSpace_clone_with_new_inputs);
    if (new_args.size() != 1)
    {
        throw ngraph_error("Incorrect number of new arguments");
@@ -70,6 +72,7 @@ shared_ptr<Node> op::DepthToSpace::clone_with_new_inputs(const OutputVector& new

void op::DepthToSpace::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_DepthToSpace_validate_and_infer_types);
    PartialShape data_pshape = get_input_partial_shape(0);

    const auto& data_type = get_input_element_type(0);
@@ -243,8 +246,8 @@ bool op::DepthToSpace::evaluate_depth_to_space(const HostTensorVector& outputs,
bool op::DepthToSpace::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v0_DepthToSpace_evaluate) { return evaluate_depth_to_space(outputs, inputs); }
    return false;
    NGRAPH_OP_SCOPE(v0_DepthToSpace_evaluate);
    return evaluate_depth_to_space(outputs, inputs);
}
namespace ngraph
{
@@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/detection_output.hpp"
#include "itt.hpp"

using namespace std;
using namespace ngraph;
@@ -45,6 +46,7 @@ op::DetectionOutput::DetectionOutput(const Output<Node>& box_logits,

void op::DetectionOutput::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_DetectionOutput_validate_and_infer_types);
    NODE_VALIDATION_CHECK(
        this, m_attrs.num_classes > 0, "Number of classes must be greater than zero");

@@ -266,6 +268,7 @@ void op::DetectionOutput::validate_and_infer_types()

shared_ptr<Node> op::DetectionOutput::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_DetectionOutput_clone_with_new_inputs);
    check_new_args_count(this, new_args);

    auto num_args = new_args.size();
@@ -291,6 +294,7 @@ shared_ptr<Node> op::DetectionOutput::clone_with_new_inputs(const OutputVector&

bool op::DetectionOutput::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_DetectionOutput_visit_attributes);
    visitor.on_attribute("num_classes", m_attrs.num_classes);
    visitor.on_attribute("background_label_id", m_attrs.background_label_id);
    visitor.on_attribute("top_k", m_attrs.top_k);

@@ -92,6 +92,7 @@ op::v1::Divide::Divide(const Output<Node>& arg0,

bool op::v1::Divide::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_Divide_visit_attributes);
    BinaryElementwiseArithmetic::visit_attributes(visitor);
    visitor.on_attribute("m_pythondiv", m_pythondiv);
    return true;
@@ -99,6 +100,7 @@ bool op::v1::Divide::visit_attributes(AttributeVisitor& visitor)

shared_ptr<Node> op::v1::Divide::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_Divide_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<op::v1::Divide>(
        new_args.at(0), new_args.at(1), this->is_pythondiv(), this->get_autob());
@@ -106,10 +108,6 @@ shared_ptr<Node> op::v1::Divide::clone_with_new_inputs(const OutputVector& new_a

bool op::v1::Divide::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v1_Divide_evaluate)
    {
        return divide::evaluate_divide(
            inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv());
    }
    return false;
    NGRAPH_OP_SCOPE(v1_Divide_evaluate);
    return divide::evaluate_divide(inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv());
}
@@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/elu.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/builder/autobroadcast.hpp"
#include "ngraph/op/constant.hpp"
@@ -33,18 +34,21 @@ op::Elu::Elu(const Output<Node>& data, const double alpha)

bool ngraph::op::v0::Elu::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_Elu_visit_attributes);
    visitor.on_attribute("alpha", m_alpha);
    return true;
}

void op::v0::Elu::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_Elu_validate_and_infer_types);
    set_output_size(1);
    set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
}

shared_ptr<Node> op::Elu::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Elu_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Elu>(new_args.at(0), m_alpha);
}

@@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/embedding_segments_sum.hpp"
#include "itt.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/opsets/opset3.hpp"

@@ -55,6 +56,7 @@ op::v3::EmbeddingSegmentsSum::EmbeddingSegmentsSum(const Output<Node>& emb_table

void op::v3::EmbeddingSegmentsSum::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v3_EmbeddingSegmentsSum_validate_and_infer_types);
    NODE_VALIDATION_CHECK(this,
                          get_input_element_type(SEGMENT_IDS) == element::i64 ||
                              get_input_element_type(SEGMENT_IDS) == element::i32,
@@ -182,6 +184,7 @@ void op::v3::EmbeddingSegmentsSum::validate_and_infer_types()
shared_ptr<Node>
    op::v3::EmbeddingSegmentsSum::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v3_EmbeddingSegmentsSum_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    if (new_args.size() == 4)
    {

@@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/embeddingbag_offsets_sum.hpp"
#include "itt.hpp"
#include "ngraph/op/constant.hpp"

using namespace std;
@@ -49,6 +50,7 @@ op::v3::EmbeddingBagOffsetsSum::EmbeddingBagOffsetsSum(const Output<Node>& emb_t
shared_ptr<Node>
    op::v3::EmbeddingBagOffsetsSum::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v3_EmbeddingBagOffsetsSum_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    if (new_args.size() == 3)
    {
@@ -69,4 +71,4 @@ shared_ptr<Node>
    {
        throw ngraph_error("Incorrect number of arguments");
    }
}
}

@@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/embeddingbag_packedsum.hpp"
#include "itt.hpp"
#include "ngraph/op/constant.hpp"

using namespace std;
@@ -38,6 +39,7 @@ op::v3::EmbeddingBagPackedSum::EmbeddingBagPackedSum(const Output<Node>& emb_tab
shared_ptr<Node>
    op::v3::EmbeddingBagPackedSum::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v3_EmbeddingBagPackedSum_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    if (new_args.size() == 2)
    {
@@ -77,15 +77,13 @@ op::v1::Equal::Equal(const Output<Node>& arg0,

shared_ptr<Node> op::v1::Equal::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_Equal_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<op::v1::Equal>(new_args.at(0), new_args.at(1), this->get_autob());
}

bool op::v1::Equal::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v1_Equal_evaluate)
    {
        return equal::evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob());
    }
    return false;
    NGRAPH_OP_SCOPE(v1_Equal_evaluate);
    return equal::evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob());
}

@@ -29,11 +29,13 @@ constexpr NodeTypeInfo op::Erf::type_info;

bool ngraph::op::v0::Erf::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_Erf_visit_attributes);
    return true;
}

shared_ptr<Node> op::Erf::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Erf_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Erf>(new_args.at(0));
}
@@ -76,9 +78,6 @@ namespace erfop

bool op::Erf::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v0_Erf_evaluate)
    {
        return erfop::evaluate_erf(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    }
    return false;
    NGRAPH_OP_SCOPE(v0_Erf_evaluate);
    return erfop::evaluate_erf(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

@@ -35,11 +35,13 @@ op::Exp::Exp(const Output<Node>& arg)

bool ngraph::op::v0::Exp::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_Exp_visit_attributes);
    return true;
}

shared_ptr<Node> op::Exp::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Exp_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Exp>(new_args.at(0));
}
@@ -76,9 +78,6 @@ namespace expop

bool op::Exp::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v0_Exp_evaluate)
    {
        return expop::evaluate_exp(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    }
    return false;
    NGRAPH_OP_SCOPE(v0_Exp_evaluate);
    return expop::evaluate_exp(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
@ -15,6 +15,7 @@
|
||||
//*****************************************************************************
|
||||
|
||||
#include "ngraph/op/extractimagepatches.hpp"
|
||||
#include "itt.hpp"
|
||||
#include "ngraph/attribute_visitor.hpp"
|
||||
|
||||
using namespace std;
|
||||
@ -40,6 +41,7 @@ op::v3::ExtractImagePatches::ExtractImagePatches(const Output<Node>& image,
|
||||
|
||||
void op::v3::ExtractImagePatches::validate_and_infer_types()
|
||||
{
|
||||
NGRAPH_OP_SCOPE(v3_ExtractImagePatches_validate_and_infer_types);
|
||||
const PartialShape input_Pshape = get_input_partial_shape(0);
|
||||
|
||||
NODE_VALIDATION_CHECK(this, input_Pshape.rank() == 4, "input tensor must be 4D tensor.");
|
||||
@ -148,6 +150,7 @@ void op::v3::ExtractImagePatches::validate_and_infer_types()
|
||||
|
||||
bool op::v3::ExtractImagePatches::visit_attributes(AttributeVisitor& visitor)
|
||||
{
|
||||
NGRAPH_OP_SCOPE(v3_ExtractImagePatches_visit_attributes);
|
||||
visitor.on_attribute("sizes", m_patch_sizes);
|
||||
visitor.on_attribute("strides", m_patch_movement_strides);
|
||||
visitor.on_attribute("rates", m_patch_selection_rates);
|
||||
@ -158,6 +161,7 @@ bool op::v3::ExtractImagePatches::visit_attributes(AttributeVisitor& visitor)
|
||||
shared_ptr<Node>
|
||||
op::v3::ExtractImagePatches::clone_with_new_inputs(const OutputVector& new_args) const
|
||||
{
|
||||
NGRAPH_OP_SCOPE(v3_ExtractImagePatches_clone_with_new_inputs);
|
||||
check_new_args_count(this, new_args);
|
||||
return make_shared<op::v3::ExtractImagePatches>(new_args.at(0),
|
||||
m_patch_sizes,
|
||||
|
@ -15,6 +15,7 @@
//*****************************************************************************

#include <memory>
#include "itt.hpp"

#include "fake_quantize.hpp"
#include "ngraph/attribute_visitor.hpp"
@ -56,6 +57,7 @@ op::FakeQuantize::FakeQuantize(const Output<Node>& data,

void op::FakeQuantize::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v0_FakeQuantize_validate_and_infer_types);
PartialShape data_pshape = get_input_partial_shape(0);

for (auto i = 1; i <= 4; i++)
@ -84,6 +86,7 @@ void op::FakeQuantize::validate_and_infer_types()

bool ngraph::op::v0::FakeQuantize::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v0_FakeQuantize_visit_attributes);
visitor.on_attribute("levels", m_levels);
visitor.on_attribute("auto_broadcast", m_auto_broadcast);
return true;
@ -165,6 +168,7 @@ OutputVector op::FakeQuantize::decompose_op() const

shared_ptr<Node> op::FakeQuantize::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v0_FakeQuantize_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<FakeQuantize>(new_args.at(0), // X
new_args.at(1), // input_low

@ -34,11 +34,13 @@ op::Floor::Floor(const Output<Node>& arg)

bool ngraph::op::v0::Floor::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v0_Floor_visit_attributes);
return true;
}

shared_ptr<Node> op::Floor::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v0_Floor_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Floor>(new_args.at(0));
}
@ -88,9 +90,6 @@ namespace floorop

bool op::Floor::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_Floor_evaluate)
{
return floorop::evaluate_floor(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
NGRAPH_OP_SCOPE(v0_Floor_evaluate);
return floorop::evaluate_floor(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

@ -34,6 +34,7 @@ op::v1::FloorMod::FloorMod(const Output<Node>& arg0,

shared_ptr<Node> op::v1::FloorMod::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_FloorMod_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<FloorMod>(new_args.at(0), new_args.at(1), this->get_autob());
}
@ -82,14 +83,12 @@ namespace floor_mod
bool op::v1::FloorMod::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_FloorMod_evaluate)
{
return floor_mod::evaluate_floor_mod(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
NGRAPH_OP_SCOPE(v1_FloorMod_evaluate);
return floor_mod::evaluate_floor_mod(inputs[0], inputs[1], outputs[0], get_autob());
}

bool op::v1::FloorMod::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v1_FloorMod_visit_attributes);
return true;
}

@ -47,11 +47,15 @@ op::v1::Gather::Gather(const Output<Node>& params,

bool ngraph::op::v1::Gather::visit_attributes(AttributeVisitor& visitor)
{
return true;
NGRAPH_OP_SCOPE(v1_Gather_visit_attributes);
{
return true;
}
}

void op::v1::Gather::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v1_Gather_validate_and_infer_types);
const auto& input_rank = get_input_partial_shape(PARAMS).rank();
const auto& axis_shape = get_input_partial_shape(AXIS);
const auto& axis_rank = axis_shape.rank();
@ -135,6 +139,7 @@ int64_t op::v1::Gather::get_axis() const

shared_ptr<Node> op::v1::Gather::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_Gather_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<v1::Gather>(new_args.at(PARAMS), new_args.at(INDICES), new_args.at(AXIS));
}
@ -313,8 +318,8 @@ bool op::v1::Gather::evaluate_gather(const HostTensorVector& outputs,

bool op::v1::Gather::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Gather_evaluate) { return evaluate_gather(outputs, inputs); }
return false;
NGRAPH_OP_SCOPE(v1_Gather_evaluate);
return evaluate_gather(outputs, inputs);
}

bool op::v1::Gather::constant_fold(OutputVector& output_values, const OutputVector& input_values)

@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/gather_elements.hpp"
#include "itt.hpp"
#include "ngraph/shape.hpp"

using namespace std;
@ -35,6 +36,7 @@ op::v6::GatherElements::GatherElements(const Output<Node>& data,

void op::v6::GatherElements::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v6_GatherElements_validate_and_infer_types);
const auto& data_type = get_input_element_type(0);
const auto& indices_type = get_input_element_type(1);

@ -120,12 +122,14 @@ void op::v6::GatherElements::validate_and_infer_types()

bool op::v6::GatherElements::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v6_GatherElements_visit_attributes);
visitor.on_attribute("axis", m_axis);
return true;
}

shared_ptr<Node> op::v6::GatherElements::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v6_GatherElements_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v6::GatherElements>(new_args.at(0), new_args.at(1), m_axis);
}

@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/gather_nd.hpp"
#include "itt.hpp"
#include "ngraph/shape.hpp"

using namespace std;
@ -35,6 +36,7 @@ op::v5::GatherND::GatherND(const Output<Node>& data,

void op::v5::GatherND::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v5_GatherND_validate_and_infer_types);
// check types of input tensors
const auto& data_type = get_input_element_type(0);
const auto& indices_type = get_input_element_type(1);
@ -88,7 +90,8 @@ void op::v5::GatherND::validate_and_infer_types()
this,
(indices_pshape[indices_pshape.rank().get_length() - 1].get_length() +
m_batch_dims) <= data_pshape.rank().get_length(),
"Length of a tuple with indices must not exceed a rank of data tensor excluding "
"Length of a tuple with indices must not exceed a rank of data tensor "
"excluding "
"batch dimensions.");
}
}
@ -148,12 +151,14 @@ void op::v5::GatherND::validate_and_infer_types()

bool op::v5::GatherND::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v5_GatherND_visit_attributes);
visitor.on_attribute("batch_dims", m_batch_dims);
return true;
}

shared_ptr<Node> op::v5::GatherND::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v5_GatherND_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v5::GatherND>(new_args.at(0), new_args.at(1), m_batch_dims);
}

@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/gather_tree.hpp"
#include "itt.hpp"
#include "ngraph/shape.hpp"

using namespace std;
@ -33,6 +34,7 @@ op::v1::GatherTree::GatherTree(const Output<Node>& step_ids,

shared_ptr<Node> op::v1::GatherTree::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_GatherTree_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<v1::GatherTree>(
new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3));
@ -40,11 +42,13 @@ shared_ptr<Node> op::v1::GatherTree::clone_with_new_inputs(const OutputVector& n

bool ngraph::op::v1::GatherTree::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v1_GatherTree_visit_attributes);
return true;
}

void op::v1::GatherTree::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v1_GatherTree_validate_and_infer_types);
const auto& step_ids_rank = get_input_partial_shape(0);
const auto& parent_idx_rank = get_input_partial_shape(1);
const auto& max_seq_len_rank = get_input_partial_shape(2);

@ -15,6 +15,7 @@
//*****************************************************************************

#include <cmath>
#include "itt.hpp"

#include "ngraph/builder/make_constant.hpp"
#include "ngraph/op/add.hpp"
@ -41,6 +42,7 @@ op::Gelu::Gelu(const Output<Node>& data)

bool ngraph::op::v0::Gelu::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v0_Gelu_visit_attributes);
return true;
}

@ -67,6 +69,7 @@ OutputVector op::Gelu::decompose_op() const

shared_ptr<Node> op::Gelu::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v0_Gelu_clone_with_new_inputs);
if (new_args.size() != 1)
{
throw ngraph_error("Incorrect number of new arguments");

@ -77,6 +77,7 @@ op::v1::Greater::Greater(const Output<Node>& arg0,

shared_ptr<Node> op::v1::Greater::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_Greater_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::Greater>(new_args.at(0), new_args.at(1), this->get_autob());
}
@ -84,9 +85,6 @@ shared_ptr<Node> op::v1::Greater::clone_with_new_inputs(const OutputVector& new_
bool op::v1::Greater::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Greater_evaluate)
{
return greaterop::evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
NGRAPH_OP_SCOPE(v1_Greater_evaluate);
return greaterop::evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob());
}

@ -77,6 +77,7 @@ op::v1::GreaterEqual::GreaterEqual(const Output<Node>& arg0,

shared_ptr<Node> op::v1::GreaterEqual::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_GreaterEqual_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::GreaterEqual>(new_args.at(0), new_args.at(1), this->get_autob());
}
@ -84,10 +85,6 @@ shared_ptr<Node> op::v1::GreaterEqual::clone_with_new_inputs(const OutputVector&
bool op::v1::GreaterEqual::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_GreaterEqual_evaluate)
{
return greater_equalop::evaluate_greater_equal(
inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
NGRAPH_OP_SCOPE(v1_GreaterEqual_evaluate);
return greater_equalop::evaluate_greater_equal(inputs[0], inputs[1], outputs[0], get_autob());
}

@ -15,6 +15,7 @@
//*****************************************************************************
#include <algorithm>
#include <iterator>
#include "itt.hpp"

#include "grn.hpp"
#include "ngraph/attribute_visitor.hpp"
@ -43,6 +44,7 @@ op::GRN::GRN(const Output<Node>& data, float bias)

bool ngraph::op::v0::GRN::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v0_GRN_visit_attributes);
visitor.on_attribute("bias", m_bias);
return true;
}
@ -96,6 +98,7 @@ OutputVector op::GRN::decompose_op() const

shared_ptr<Node> op::GRN::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v0_GRN_clone_with_new_inputs);
if (new_args.size() != 1)
{
throw ngraph_error("Incorrect number of new arguments");

@ -15,6 +15,7 @@
//*****************************************************************************

#include <numeric>
#include "itt.hpp"

#include "ngraph/attribute_visitor.hpp"
#include "ngraph/builder/reshape.hpp"
@ -60,6 +61,7 @@ op::v1::GroupConvolution::GroupConvolution(const Output<Node>& data_batch,

bool ngraph::op::v1::GroupConvolution::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v1_GroupConvolution_visit_attributes);
visitor.on_attribute("strides", m_strides);
visitor.on_attribute("pads_begin", m_pads_begin);
visitor.on_attribute("pads_end", m_pads_end);
@ -70,6 +72,7 @@ bool ngraph::op::v1::GroupConvolution::visit_attributes(AttributeVisitor& visito

void op::v1::GroupConvolution::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v1_GroupConvolution_validate_and_infer_types);
PartialShape data_batch_shape = get_input_partial_shape(0);
PartialShape filters_shape = get_input_partial_shape(1);
element::Type data_batch_et = get_input_element_type(0);
@ -175,6 +178,7 @@ void op::v1::GroupConvolution::validate_and_infer_types()

shared_ptr<Node> op::v1::GroupConvolution::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_GroupConvolution_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<v1::GroupConvolution>(new_args.at(0),
new_args.at(1),
@ -254,6 +258,7 @@ op::v1::GroupConvolutionBackpropData::GroupConvolutionBackpropData(

bool ngraph::op::v1::GroupConvolutionBackpropData::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v1_GroupConvolutionBackpropData_visit_attributes);
visitor.on_attribute("strides", m_strides);
visitor.on_attribute("pads_begin", m_pads_begin);
visitor.on_attribute("pads_end", m_pads_end);
@ -552,6 +557,7 @@ OutputVector op::v1::GroupConvolutionBackpropData::decompose_op() const
shared_ptr<Node>
op::v1::GroupConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_GroupConvolutionBackpropData_clone_with_new_inputs);
check_new_args_count(this, new_args);
if (new_args.size() == 3)
{

@ -103,12 +103,14 @@ op::v3::GRUCell::GRUCell(const Output<Node>& X,

bool op::v3::GRUCell::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v3_GRUCell_visit_attributes);
visitor.on_attribute("linear_before_reset", m_linear_before_reset);
return op::util::RNNCellBase::visit_attributes(visitor);
}

void op::v3::GRUCell::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v3_GRUCell_validate_and_infer_types);
for (const auto& input : inputs())
{
if (input.get_partial_shape().rank().is_dynamic())
@ -216,6 +218,7 @@ void op::v3::GRUCell::add_default_bias_input()

shared_ptr<Node> op::v3::GRUCell::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v3_GRUCell_clone_with_new_inputs);
check_new_args_count(this, new_args);
if (new_args.size() == 4)
{

@ -17,6 +17,7 @@
#include <memory>
#include <string>
#include <vector>
#include "itt.hpp"

#include "ngraph/op/gru_sequence.hpp"
#include "ngraph/op/util/recurrent_sequence.hpp"
@ -60,6 +61,7 @@ op::v5::GRUSequence::GRUSequence(const Output<Node>& X,

void op::v5::GRUSequence::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v5_GRUSequence_validate_and_infer_types);
for (const auto& input : inputs())
{
if (input.get_partial_shape().rank().is_dynamic())
@ -175,6 +177,7 @@ void op::v5::GRUSequence::validate_and_infer_types()

bool op::v5::GRUSequence::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v5_GRUSequence_visit_attributes);
visitor.on_attribute("direction", m_direction);
visitor.on_attribute("linear_before_reset", m_linear_before_reset);
return op::util::RNNCellBase::visit_attributes(visitor);
@ -182,6 +185,7 @@ bool op::v5::GRUSequence::visit_attributes(AttributeVisitor& visitor)

shared_ptr<Node> op::v5::GRUSequence::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v5_GRUSequence_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v5::GRUSequence>(new_args.at(0),
new_args.at(1),

@ -15,6 +15,7 @@
//*****************************************************************************

#include <memory>
#include "itt.hpp"

#include "ngraph/op/add.hpp"
#include "ngraph/op/constant.hpp"
@ -41,6 +42,7 @@ op::HardSigmoid::HardSigmoid(const Output<Node>& data,

bool ngraph::op::v0::HardSigmoid::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v0_HardSigmoid_visit_attributes);
return true;
}

@ -102,6 +104,7 @@ OutputVector op::HardSigmoid::decompose_op() const

shared_ptr<Node> op::HardSigmoid::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v0_HardSigmoid_clone_with_new_inputs);
check_new_args_count(this, new_args);

return make_shared<HardSigmoid>(new_args.at(0), new_args.at(1), new_args.at(2));

@ -35,11 +35,13 @@ op::v5::HSigmoid::HSigmoid(const Output<Node>& arg)

bool op::v5::HSigmoid::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v5_HSigmoid_visit_attributes);
return true;
}

shared_ptr<Node> op::v5::HSigmoid::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v5_HSigmoid_clone_with_new_inputs);
return make_shared<op::v5::HSigmoid>(new_args.at(0));
}

@ -73,9 +75,6 @@ namespace
bool op::v5::HSigmoid::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v5_HSigmoid_evaluate)
{
return evaluate_hsigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
NGRAPH_OP_SCOPE(v5_HSigmoid_evaluate);
return evaluate_hsigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

@ -35,11 +35,13 @@ op::v4::HSwish::HSwish(const Output<Node>& arg)

bool op::v4::HSwish::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v4_HSwish_visit_attributes);
return true;
}

shared_ptr<Node> op::v4::HSwish::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v4_HSwish_clone_with_new_inputs);
return make_shared<op::v4::HSwish>(new_args.at(0));
}

@ -72,9 +74,6 @@ namespace hswish

bool op::v4::HSwish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v4_HSwish_evaluate)
{
return hswish::evaluate_hswish(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
NGRAPH_OP_SCOPE(v4_HSwish_evaluate);
return hswish::evaluate_hswish(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

@ -39,6 +39,7 @@ op::v0::Interpolate::Interpolate(const Output<Node>& image,

bool op::v0::Interpolate::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v0_Interpolate_visit_attributes);
visitor.on_attribute("align_corners", m_attrs.align_corners);
visitor.on_attribute("antialias", m_attrs.antialias);
visitor.on_attribute("axes", m_attrs.axes);
@ -50,6 +51,7 @@ bool op::v0::Interpolate::visit_attributes(AttributeVisitor& visitor)

void op::v0::Interpolate::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v0_Interpolate_validate_and_infer_types);
NODE_VALIDATION_CHECK(this,
get_input_element_type(1).is_integral_number(),
"output shape must be an integral number.");
@ -79,6 +81,7 @@ void op::v0::Interpolate::validate_and_infer_types()

shared_ptr<Node> op::v0::Interpolate::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v0_Interpolate_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v0::Interpolate>(new_args.at(0), new_args.at(1), m_attrs);
}
@ -133,6 +136,7 @@ op::v4::Interpolate::Interpolate(const Output<Node>& image,

bool op::v4::Interpolate::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v4_Interpolate_visit_attributes);
visitor.on_attribute("mode", m_attrs.mode);
visitor.on_attribute("shape_calculation_mode", m_attrs.shape_calculation_mode);
visitor.on_attribute("coordinate_transformation_mode", m_attrs.coordinate_transformation_mode);
@ -220,6 +224,7 @@ PartialShape op::v4::Interpolate::get_padded_input_shape(const PartialShape& inp

void op::v4::Interpolate::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v4_Interpolate_validate_and_infer_types);
element::Type input_et = get_input_element_type(0);
NODE_VALIDATION_CHECK(this,
input_et == element::f32 || input_et == element::f16 ||
@ -274,6 +279,7 @@ void op::v4::Interpolate::validate_and_infer_types()

shared_ptr<Node> op::v4::Interpolate::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v4_Interpolate_clone_with_new_inputs);
check_new_args_count(this, new_args);
if (new_args.size() <= 3)
{
@ -497,8 +503,8 @@ bool op::v4::Interpolate::evaluate_interpolate(const HostTensorVector& outputs,
bool op::v4::Interpolate::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v4_Interpolate_evaluate) { return evaluate_interpolate(outputs, inputs); }
return false;
NGRAPH_OP_SCOPE(v4_Interpolate_evaluate);
return evaluate_interpolate(outputs, inputs);
}

namespace ngraph

@ -77,15 +77,13 @@ op::v1::Less::Less(const Output<Node>& arg0,

shared_ptr<Node> op::v1::Less::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_Less_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::Less>(new_args.at(0), new_args.at(1), this->get_autob());
}

bool op::v1::Less::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Less_evaluate)
{
return lessop::evaluate_less(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
NGRAPH_OP_SCOPE(v1_Less_evaluate);
return lessop::evaluate_less(inputs[0], inputs[1], outputs[0], get_autob());
}

@ -38,6 +38,7 @@ op::v1::LessEqual::LessEqual(const Output<Node>& arg0,

shared_ptr<Node> op::v1::LessEqual::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_LessEqual_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<v1::LessEqual>(new_args.at(0), new_args.at(1), this->get_autob());
}
@ -84,9 +85,6 @@ namespace less_equalop
bool op::v1::LessEqual::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_LessEqual_evaluate)
{
return less_equalop::evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
NGRAPH_OP_SCOPE(v1_LessEqual_evaluate);
return less_equalop::evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob());
}

@ -35,11 +35,13 @@ op::Log::Log(const Output<Node>& arg)

bool ngraph::op::v0::Log::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v0_Log_visit_attributes);
return true;
}

shared_ptr<Node> op::Log::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v0_Log_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Log>(new_args.at(0));
}
@ -76,9 +78,6 @@ namespace logop

bool op::Log::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_Log_evaluate)
{
return logop::evaluate_log(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
NGRAPH_OP_SCOPE(v0_Log_evaluate);
return logop::evaluate_log(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

@ -33,12 +33,14 @@ op::v5::LogSoftmax::LogSoftmax(const Output<Node>& arg, const int64_t axis)

bool op::v5::LogSoftmax::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v5_LogSoftmax_visit_attributes);
visitor.on_attribute("axis", m_axis);
return true;
}

void op::v5::LogSoftmax::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v5_LogSoftmax_validate_and_infer_types);
const PartialShape& input_shape = get_input_partial_shape(0);
if (input_shape.rank().is_static())
NODE_VALIDATION_CHECK(this,
@ -55,6 +57,7 @@ void op::v5::LogSoftmax::validate_and_infer_types()

shared_ptr<Node> op::v5::LogSoftmax::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v5_LogSoftmax_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v5::LogSoftmax>(new_args.at(0), m_axis);
}

@ -37,6 +37,7 @@ op::v5::Loop::Loop(const Output<Node>& trip_count, const Output<Node>& execution

bool op::v5::Loop::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v5_Loop_visit_attributes);
visitor.on_attribute("body", m_body);
visitor.on_attribute("input_descriptions", m_input_descriptions);
visitor.on_attribute("output_descriptions", m_output_descriptions);
@ -46,6 +47,7 @@ bool op::v5::Loop::visit_attributes(AttributeVisitor& visitor)

void op::v5::Loop::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v5_Loop_validate_and_infer_types);
if (m_special_body_ports.current_iteration_input_idx >= 0)
{
const auto& cur_iter_rank = m_body->get_parameters()
@ -299,7 +301,9 @@ void op::v5::Loop::validate_and_infer_types()

std::shared_ptr<Node> op::v5::Loop::clone_with_new_inputs(const OutputVector& new_args) const
{
// 0 - trip_count, 1 - execution condition, these inputs are not connected to the body params
NGRAPH_OP_SCOPE(v5_Loop_clone_with_new_inputs);
// 0 - trip_count, 1 - execution condition, these inputs are not connected to the body
// params
OutputVector body_params_args(new_args.begin() + 2, new_args.end());
auto op = make_shared<op::v5::Loop>(new_args[0], new_args[1]);
for (int idx = 2; idx < new_args.size(); ++idx)
@ -390,15 +394,8 @@ Output<Node> op::v5::Loop::get_concatenated_slices(const Output<Node>& value,

bool op::v5::Loop::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v5_Loop_evaluate)
{
runtime::reference::loop(m_body,
m_output_descriptions,
m_input_descriptions,
m_special_body_ports,
outputs,
inputs);
return true;
}
return false;
NGRAPH_OP_SCOPE(v5_Loop_evaluate);
runtime::reference::loop(
m_body, m_output_descriptions, m_input_descriptions, m_special_body_ports, outputs, inputs);
return true;
}

@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/lrn.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/multiply.hpp"
@ -58,6 +59,7 @@ AxisSet op::LRN::get_reduction_axes() const

void op::LRN::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v0_LRN_validate_and_infer_types);
element::Type arg_type = get_input_element_type(0);
PartialShape arg_shape = get_input_partial_shape(0);
set_output_type(0, arg_type, arg_shape);
@ -114,6 +116,7 @@ void op::LRN::validate_and_infer_types()

bool ngraph::op::v0::LRN::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v0_LRN_visit_attributes);
visitor.on_attribute("alpha", m_alpha);
visitor.on_attribute("beta", m_beta);
visitor.on_attribute("bias", m_bias);
@ -123,6 +126,7 @@ bool ngraph::op::v0::LRN::visit_attributes(AttributeVisitor& visitor)

shared_ptr<Node> op::LRN::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v0_LRN_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::LRN>(new_args.at(0), new_args.at(1), m_alpha, m_beta, m_bias, m_size);
}

@ -16,6 +16,7 @@

#include <cmath>
#include <functional>
#include "itt.hpp"

#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/concat.hpp"
@ -129,6 +130,7 @@ op::v0::LSTMCell::LSTMCell(const Output<Node>& X,

bool ngraph::op::v0::LSTMCell::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v0_LSTMCell_visit_attributes);
visitor.on_attribute("hidden_size", m_hidden_size);
visitor.on_attribute("activations", m_activations);
visitor.on_attribute("activations_alpha", m_activations_alpha);
@ -142,6 +144,7 @@ bool ngraph::op::v0::LSTMCell::visit_attributes(AttributeVisitor& visitor)

void op::v0::LSTMCell::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v0_LSTMCell_validate_and_infer_types);
for (const auto& input : inputs())
{
if (input.get_partial_shape().rank().is_dynamic())
@ -158,7 +161,8 @@ void op::v0::LSTMCell::validate_and_infer_types()
auto merged_hidden_size = Dimension::dynamic();
auto result_et = element::dynamic;

// Copy all inputs without peephole (7th input) and initial_cell_state (2nd input) information
// Copy all inputs without peephole (7th input) and initial_cell_state (2nd input)
// information
// for further validation
for (size_t i = 0; i < get_input_size() - 1; i++)
{
@ -206,7 +210,8 @@ void op::v0::LSTMCell::validate_and_infer_types()
element::Type::merge(result_et, result_et, get_input_element_type(3)) &&
element::Type::merge(result_et, result_et, get_input_element_type(4)) &&
element::Type::merge(result_et, result_et, get_input_element_type(5)),
"Element types for X, initial_hidden_state, initial_cell_state, W, R and B do not match.");
"Element types for X, initial_hidden_state, initial_cell_state, W, R and B do not "
"match.");

// Merge batch_size dimension across all inputs to evaluate output[0] dimension
NODE_VALIDATION_CHECK(
@ -305,6 +310,7 @@ Output<Node> op::v0::LSTMCell::get_default_peepholes_input() const

shared_ptr<Node> op::v0::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v0_LSTMCell_clone_with_new_inputs);
check_new_args_count(this, new_args);
if (new_args.size() == 5)
{
@ -441,11 +447,13 @@ op::v4::LSTMCell::LSTMCell(const Output<Node>& X,

bool ngraph::op::v4::LSTMCell::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v4_LSTMCell_visit_attributes);
return op::util::RNNCellBase::visit_attributes(visitor);
}

void op::v4::LSTMCell::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v4_LSTMCell_validate_and_infer_types);
for (const auto& input : inputs())
{
if (input.get_partial_shape().rank().is_dynamic())
@ -482,7 +490,8 @@ void op::v4::LSTMCell::validate_and_infer_types()
element::Type::merge(result_et, result_et, get_input_element_type(3)) &&
element::Type::merge(result_et, result_et, get_input_element_type(4)) &&
element::Type::merge(result_et, result_et, get_input_element_type(5)),
"Element types for X, initial_hidden_state, initial_cell_state, W, R and B do not match.");
"Element types for X, initial_hidden_state, initial_cell_state, W, R and B do not "
"match.");

// Merge batch_size dimension across all inputs to evaluate output[0] dimension
NODE_VALIDATION_CHECK(
@ -562,6 +571,7 @@ Output<Node> op::v4::LSTMCell::get_default_bias_input() const

shared_ptr<Node> op::v4::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v4_LSTMCell_clone_with_new_inputs);
check_new_args_count(this, new_args);
if (new_args.size() == 5)
{

@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/lstm_sequence.hpp"
#include "itt.hpp"

#include "ngraph/attribute_visitor.hpp"
#include "ngraph/builder/autobroadcast.hpp"
@ -34,6 +35,7 @@ NGRAPH_RTTI_DEFINITION(op::v5::LSTMSequence, "LSTMSequence", 5);

bool ngraph::op::v0::LSTMSequence::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v0_LSTMSequence_visit_attributes);
visitor.on_attribute("hidden_size", m_hidden_size);
visitor.on_attribute("activations", m_activations);
visitor.on_attribute("activations_alpha", m_activations_alpha);
@ -71,6 +73,7 @@ OutputVector op::v0::LSTMSequence::decompose_op() const

shared_ptr<Node> op::v0::LSTMSequence::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v0_LSTMSequence_clone_with_new_inputs);
check_new_args_count(this, new_args);
if (new_args.size() == 8)
{
@ -263,6 +266,7 @@ shared_ptr<Node> op::v0::LSTMSequence::prepare_input(Output<Node> node,

void op::v0::LSTMSequence::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v0_LSTMSequence_validate_and_infer_types);
std::vector<ngraph::PartialShape> input_param{};

auto lstm_seq_gates_count = 4;
@ -272,7 +276,8 @@ void op::v0::LSTMSequence::validate_and_infer_types()
auto merged_num_directions = Dimension::dynamic();
auto result_et = element::dynamic;

// Copy all inputs without peephole and initial_cell_state information for further validation
// Copy all inputs without peephole and initial_cell_state information for further
// validation
for (size_t i = 0; i < get_input_size() - 1; i++)
{
// exclude initial_cell_state from the loop
@ -320,7 +325,8 @@ void op::v0::LSTMSequence::validate_and_infer_types()
element::Type::merge(result_et, result_et, get_input_element_type(4)) &&
element::Type::merge(result_et, result_et, get_input_element_type(5)) &&
element::Type::merge(result_et, result_et, get_input_element_type(6)),
"Element types for X, initial_hidden_state, initial_cell_state, W, R and B inputs do not "
"Element types for X, initial_hidden_state, initial_cell_state, W, R and B inputs do "
"not "
"match.");

// Merge batch_size dimension across all inputs to evaluate output[0] dimension
@ -421,12 +427,14 @@ void op::v0::LSTMSequence::validate_and_infer_types()

bool ngraph::op::v5::LSTMSequence::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v5_LSTMSequence_visit_attributes);
visitor.on_attribute("direction", m_direction);
return op::util::RNNCellBase::visit_attributes(visitor);
}

shared_ptr<Node> op::v5::LSTMSequence::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v5_LSTMSequence_clone_with_new_inputs);
check_new_args_count(this, new_args);
if (new_args.size() == 7)
{
@ -452,6 +460,7 @@ shared_ptr<Node> op::v5::LSTMSequence::clone_with_new_inputs(const OutputVector&

void op::v5::LSTMSequence::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v5_LSTMSequence_validate_and_infer_types);
for (const auto& input : inputs())
{
if (input.get_partial_shape().rank().is_dynamic())
@ -505,7 +514,8 @@ void op::v5::LSTMSequence::validate_and_infer_types()
element::Type::merge(result_et, result_et, get_input_element_type(4)) &&
element::Type::merge(result_et, result_et, get_input_element_type(5)) &&
element::Type::merge(result_et, result_et, get_input_element_type(6)),
"Element types for X, initial_hidden_state, initial_cell_state, W, R and B inputs do not "
"Element types for X, initial_hidden_state, initial_cell_state, W, R and B inputs do "
"not "
"match.");

// Merge batch_size dimension across all inputs to evaluate output[0] dimension

@ -40,6 +40,7 @@ op::MatMul::MatMul(const Output<Node>& A,

bool ngraph::op::v0::MatMul::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v0_MatMul_visit_attributes);
visitor.on_attribute("transpose_a", m_transpose_a);
visitor.on_attribute("transpose_b", m_transpose_b);
return true;
@ -47,6 +48,7 @@ bool ngraph::op::v0::MatMul::visit_attributes(AttributeVisitor& visitor)

shared_ptr<Node> op::MatMul::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v0_MatMul_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<MatMul>(new_args.at(0), new_args.at(1), m_transpose_a, m_transpose_b);
}
@ -259,16 +261,14 @@ namespace matmul

bool op::MatMul::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_MatMul_evaluate)
{
return matmul::evaluate_matmul(
inputs[0], inputs[1], outputs[0], get_transpose_a(), get_transpose_b());
}
return false;
NGRAPH_OP_SCOPE(v0_MatMul_evaluate);
return matmul::evaluate_matmul(
inputs[0], inputs[1], outputs[0], get_transpose_a(), get_transpose_b());
}

void ngraph::op::v0::MatMul::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v0_MatMul_validate_and_infer_types);
element::Type result_et;

NODE_VALIDATION_CHECK(

@ -70,6 +70,7 @@ op::v1::ReduceMax::ReduceMax(const Output<Node>& arg,

shared_ptr<Node> op::v1::ReduceMax::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_ReduceMax_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::ReduceMax>(new_args.at(0), new_args.at(1), get_keep_dims());
}
@ -77,9 +78,6 @@ shared_ptr<Node> op::v1::ReduceMax::clone_with_new_inputs(const OutputVector& ne
bool op::v1::ReduceMax::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_ReduceMax_evaluate)
{
return maxop::evaluate_max(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return false;
NGRAPH_OP_SCOPE(v1_ReduceMax_evaluate);
return maxop::evaluate_max(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}

@ -70,6 +70,7 @@ op::v1::MaxPool::MaxPool(const Output<Node>& arg,

bool ngraph::op::v1::MaxPool::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v1_MaxPool_visit_attributes);
visitor.on_attribute("strides", m_strides);
visitor.on_attribute("pads_begin", m_pads_begin);
visitor.on_attribute("pads_end", m_pads_end);
@ -81,6 +82,7 @@ bool ngraph::op::v1::MaxPool::visit_attributes(AttributeVisitor& visitor)

void op::v1::MaxPool::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v1_MaxPool_validate_and_infer_types);
if (0 == m_strides.size())
{
m_strides = Strides(m_kernel.size(), 1);
@ -135,6 +137,7 @@ void op::v1::MaxPool::validate_and_infer_types()

shared_ptr<Node> op::v1::MaxPool::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_MaxPool_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<v1::MaxPool>(
new_args.at(0), m_strides, m_pads_begin, m_pads_end, m_kernel, m_rounding_type, m_auto_pad);
@ -229,6 +232,6 @@ bool op::v1::MaxPool::evaluate_maxpool(const HostTensorVector& outputs,
bool op::v1::MaxPool::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_MaxPool_evaluate) { return evaluate_maxpool(outputs, inputs); }
return false;
NGRAPH_OP_SCOPE(v1_MaxPool_evaluate);
return evaluate_maxpool(outputs, inputs);
}

@ -84,6 +84,7 @@ op::v1::Maximum::Maximum(const Output<Node>& arg0,

shared_ptr<Node> op::v1::Maximum::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_Maximum_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::Maximum>(new_args.at(0), new_args.at(1), this->get_autob());
}
@ -91,9 +92,6 @@ shared_ptr<Node> op::v1::Maximum::clone_with_new_inputs(const OutputVector& new_
bool op::v1::Maximum::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Maximum_evaluate)
{
return maximumop::evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
NGRAPH_OP_SCOPE(v1_Maximum_evaluate);
return maximumop::evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob());
}

@ -72,6 +72,7 @@ op::v1::ReduceMin::ReduceMin(const Output<Node>& arg,

shared_ptr<Node> op::v1::ReduceMin::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_ReduceMin_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::ReduceMin>(new_args.at(0), new_args.at(1), get_keep_dims());
}
@ -79,9 +80,6 @@ shared_ptr<Node> op::v1::ReduceMin::clone_with_new_inputs(const OutputVector& ne
bool op::v1::ReduceMin::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_ReduceMin_evaluate)
{
return minop::evaluate_min(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return false;
NGRAPH_OP_SCOPE(v1_ReduceMin_evaluate);
return minop::evaluate_min(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}

@ -82,6 +82,7 @@ op::v1::Minimum::Minimum(const Output<Node>& arg0,

shared_ptr<Node> op::v1::Minimum::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_Minimum_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::Minimum>(new_args.at(0), new_args.at(1), this->get_autob());
}
@ -89,9 +90,6 @@ shared_ptr<Node> op::v1::Minimum::clone_with_new_inputs(const OutputVector& new_
bool op::v1::Minimum::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Minimum_evaluate)
{
return minimumop::evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
NGRAPH_OP_SCOPE(v1_Minimum_evaluate);
return minimumop::evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob());
}

@ -35,17 +35,20 @@ op::v4::Mish::Mish(const Output<Node>& arg)

bool op::v4::Mish::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v4_Mish_visit_attributes);
return true;
}

void op::v4::Mish::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v4_Mish_validate_and_infer_types);
set_output_size(1);
set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
}

shared_ptr<Node> op::v4::Mish::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v4_Mish_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Mish>(new_args.at(0));
}
@ -77,9 +80,6 @@ namespace mish

bool op::v4::Mish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v4_Mish_evaluate)
{
return mish::evaluate_mish(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
NGRAPH_OP_SCOPE(v4_Mish_evaluate);
return mish::evaluate_mish(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

@ -14,6 +14,7 @@
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/mod.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/builder/make_constant.hpp"
#include "ngraph/op/abs.hpp"
@ -40,6 +41,7 @@ op::v1::Mod::Mod(const Output<Node>& A,

bool ngraph::op::v1::Mod::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v1_Mod_visit_attributes);
visitor.on_attribute("auto_broadcast", m_auto_broadcast);
return true;
}
@ -66,5 +68,6 @@ OutputVector op::v1::Mod::decompose_op() const

shared_ptr<Node> op::v1::Mod::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_Mod_clone_with_new_inputs);
return make_shared<Mod>(new_args.at(0), new_args.at(1), m_auto_broadcast);
}

@ -77,6 +77,7 @@ op::v0::Multiply::Multiply(const Output<Node>& arg0,

shared_ptr<Node> op::v0::Multiply::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v0_Multiply_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v0::Multiply>(new_args.at(0), new_args.at(1), this->get_autob());
}
@ -84,11 +85,8 @@ shared_ptr<Node> op::v0::Multiply::clone_with_new_inputs(const OutputVector& new
bool op::v0::Multiply::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_Multiply_evaluate)
{
return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
NGRAPH_OP_SCOPE(v0_Multiply_evaluate);
return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());
}

// ------------------------------------ v1 -------------------------------------
@ -105,6 +103,7 @@ op::v1::Multiply::Multiply(const Output<Node>& arg0,

shared_ptr<Node> op::v1::Multiply::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v1_Multiply_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::Multiply>(new_args.at(0), new_args.at(1), this->get_autob());
}
@ -112,9 +111,6 @@ shared_ptr<Node> op::v1::Multiply::clone_with_new_inputs(const OutputVector& new
bool op::v1::Multiply::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Multiply_evaluate)
{
return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
NGRAPH_OP_SCOPE(v1_Multiply_evaluate);
return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());
}

@ -14,6 +14,7 @@
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include "itt.hpp"

#include "mvn.hpp"
#include "ngraph/builder/autobroadcast.hpp"
@ -60,6 +61,7 @@ op::MVN::MVN(const Output<Node>& data, AxisSet reduction_axes, bool normalize_va
// instead of relying on op decomposition.
void op::MVN::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v0_MVN_validate_and_infer_types);
// if m_across_channels is true we should calculate mean and variance per batch
// else we calculate these per channel
if (m_reduction_axes.empty() && input_value(0).get_partial_shape().rank().is_static())
@ -106,6 +108,7 @@ OutputVector op::MVN::decompose_op() const

shared_ptr<Node> op::MVN::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v0_MVN_clone_with_new_inputs);
NODE_VALIDATION_CHECK(this,
new_args.size() == 1,
"Expected 1 element in new_args for the MVN op but got ",
@ -115,6 +118,7 @@ shared_ptr<Node> op::MVN::clone_with_new_inputs(const OutputVector& new_args) co

bool op::MVN::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v0_MVN_visit_attributes);
visitor.on_attribute("eps", m_eps);
visitor.on_attribute("across_channels", m_across_channels);
visitor.on_attribute("normalize_variance", m_normalize_variance);
@ -161,6 +165,7 @@ op::v6::MVN::MVN(const Output<Node>& data,

void op::v6::MVN::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v6_MVN_validate_and_infer_types);
const auto data = get_input_partial_shape(0);
const auto axes = get_input_partial_shape(1);

@ -183,6 +188,7 @@ void op::v6::MVN::validate_and_infer_types()

shared_ptr<Node> op::v6::MVN::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v6_MVN_clone_with_new_inputs);
NODE_VALIDATION_CHECK(this,
new_args.size() == 2,
"Expected 2 element in new_args for the MVN op but got ",
@ -193,6 +199,7 @@ shared_ptr<Node> op::v6::MVN::clone_with_new_inputs(const OutputVector& new_args

bool op::v6::MVN::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v6_MVN_visit_attributes);
visitor.on_attribute("eps", m_eps);
visitor.on_attribute("normalize_variance", m_normalize_variance);
visitor.on_attribute("eps_mode", m_eps_mode);

@ -32,11 +32,13 @@ op::Negative::Negative(const Output<Node>& arg)

bool ngraph::op::v0::Negative::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v0_Negative_visit_attributes);
return true;
}

shared_ptr<Node> op::Negative::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v0_Negative_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Negative>(new_args.at(0));
}
@ -73,12 +75,8 @@ namespace negativeop

bool op::Negative::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_Negative_evaluate)
{
return negativeop::evaluate_negative(
inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
NGRAPH_OP_SCOPE(v0_Negative_evaluate);
return negativeop::evaluate_negative(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

shared_ptr<Node> ngraph::operator-(const Output<Node>& arg0)

@ -16,6 +16,7 @@
|
||||
|
||||
#include "ngraph/op/non_max_suppression.hpp"
|
||||
#include <cstring>
|
||||
#include "itt.hpp"
|
||||
#include "ngraph/attribute_visitor.hpp"
|
||||
#include "ngraph/op/constant.hpp"
|
||||
#include "ngraph/op/util/op_types.hpp"
|
||||
@ -64,6 +65,7 @@ op::v1::NonMaxSuppression::NonMaxSuppression(
|
||||
std::shared_ptr<Node>
|
||||
op::v1::NonMaxSuppression::clone_with_new_inputs(const OutputVector& new_args) const
|
||||
{
|
||||
NGRAPH_OP_SCOPE(v1_NonMaxSuppression_clone_with_new_inputs);
|
||||
check_new_args_count(this, new_args);
|
||||
NODE_VALIDATION_CHECK(this,
|
||||
new_args.size() >= 2 && new_args.size() <= 5,
|
||||
@ -85,6 +87,7 @@ std::shared_ptr<Node>
|
||||
|
||||
bool ngraph::op::v1::NonMaxSuppression::visit_attributes(AttributeVisitor& visitor)
|
||||
{
|
||||
NGRAPH_OP_SCOPE(v1_NonMaxSuppression_visit_attributes);
|
||||
visitor.on_attribute("box_encoding", m_box_encoding);
|
||||
visitor.on_attribute("sort_result_descending", m_sort_result_descending);
|
||||
return true;
|
||||
@ -92,11 +95,13 @@ bool ngraph::op::v1::NonMaxSuppression::visit_attributes(AttributeVisitor& visit
|
||||
|
||||
void op::v1::NonMaxSuppression::validate_and_infer_types()
|
||||
{
|
||||
NGRAPH_OP_SCOPE(v1_NonMaxSuppression_validate_and_infer_types);
|
||||
const auto boxes_ps = get_input_partial_shape(0);
|
||||
const auto scores_ps = get_input_partial_shape(1);
|
||||
|
||||
// the spec doesn't say what exact type should be used for the output of this op
|
||||
// that's why we're setting it to 64-bit integer to provide the maximum range of values support
|
||||
// that's why we're setting it to 64-bit integer to provide the maximum range of values
|
||||
// support
|
||||
// this will be changed (configurable) in the next version of this op
|
||||
const auto& output_element_type = element::i64;
|
||||
|
||||
@ -262,6 +267,7 @@ op::v3::NonMaxSuppression::NonMaxSuppression(
std::shared_ptr<Node>
    op::v3::NonMaxSuppression::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v3_NonMaxSuppression_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    NODE_VALIDATION_CHECK(this,
                          new_args.size() >= 2 && new_args.size() <= 5,
@ -289,6 +295,7 @@ std::shared_ptr<Node>

bool ngraph::op::v3::NonMaxSuppression::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v3_NonMaxSuppression_visit_attributes);
    visitor.on_attribute("box_encoding", m_box_encoding);
    visitor.on_attribute("sort_result_descending", m_sort_result_descending);
    visitor.on_attribute("output_type", m_output_type);
@ -375,6 +382,7 @@ void op::v3::NonMaxSuppression::validate()

void op::v3::NonMaxSuppression::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v3_NonMaxSuppression_validate_and_infer_types);
    const auto boxes_ps = get_input_partial_shape(0);
    const auto scores_ps = get_input_partial_shape(1);

@ -481,6 +489,7 @@ op::v4::NonMaxSuppression::NonMaxSuppression(
std::shared_ptr<Node>
    op::v4::NonMaxSuppression::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v4_NonMaxSuppression_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    NODE_VALIDATION_CHECK(this,
                          new_args.size() >= 2 && new_args.size() <= 5,
@ -508,6 +517,7 @@ std::shared_ptr<Node>

void op::v4::NonMaxSuppression::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v4_NonMaxSuppression_validate_and_infer_types);
    const auto boxes_ps = get_input_partial_shape(0);
    const auto scores_ps = get_input_partial_shape(1);

@ -627,6 +637,7 @@ op::v5::NonMaxSuppression::NonMaxSuppression(
std::shared_ptr<Node>
    op::v5::NonMaxSuppression::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v5_NonMaxSuppression_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    NODE_VALIDATION_CHECK(this,
                          new_args.size() >= 2 && new_args.size() <= 6,
@ -885,6 +896,7 @@ float op::v5::NonMaxSuppression::soft_nms_sigma_from_input() const

bool ngraph::op::v5::NonMaxSuppression::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v5_NonMaxSuppression_visit_attributes);
    visitor.on_attribute("box_encoding", m_box_encoding);
    visitor.on_attribute("sort_result_descending", m_sort_result_descending);
    visitor.on_attribute("output_type", m_output_type);
@ -893,6 +905,7 @@ bool ngraph::op::v5::NonMaxSuppression::visit_attributes(AttributeVisitor& visit

void op::v5::NonMaxSuppression::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v5_NonMaxSuppression_validate_and_infer_types);
    const auto boxes_ps = get_input_partial_shape(0);
    const auto scores_ps = get_input_partial_shape(1);

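Every method touched by this commit now opens with NGRAPH_OP_SCOPE as a plain statement. A toy analogue of such a region macro, to show the conditional-compilation idea; OP_REGION, REGION_ENABLED, and the SELECTIVE_DEMO flag are invented for this sketch, not the real macros:

#include <stdexcept>
#include <string>

#ifdef SELECTIVE_DEMO
// Pretend a build-time table says this region was compiled out.
#define REGION_ENABLED(region) 0
#define OP_REGION(region)                                                      \
    if (REGION_ENABLED(region) == 0)                                           \
    throw std::runtime_error(std::string(#region) + " is disabled!")
#else
// Regular build: the macro costs nothing (a profiler task in real code).
#define OP_REGION(region)
#endif

bool demo_evaluate()
{
    OP_REGION(v1_Demo_evaluate); // first statement of the method, as above
    return true;
}

int main() { return demo_evaluate() ? 0 : 1; }
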
@ -48,12 +48,14 @@ op::v3::NonZero::NonZero(const Output<Node>& arg, const element::Type& output_ty

bool ngraph::op::v3::NonZero::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v3_NonZero_visit_attributes);
    visitor.on_attribute("output_type", m_output_type);
    return true;
}

void op::v3::NonZero::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v3_NonZero_validate_and_infer_types);
    const PartialShape& input_shape = get_input_partial_shape(0);
    const auto input_et = get_input_element_type(0);

@ -80,6 +82,7 @@ void op::v3::NonZero::validate_and_infer_types()

shared_ptr<Node> op::v3::NonZero::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v3_NonZero_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<v3::NonZero>(new_args.at(0), m_output_type);
}
@ -118,10 +121,8 @@ namespace nonzero
#define TYPE_OUT_CASE(a, ...)                                                  \
    case element::Type_t::a:                                                   \
    {                                                                          \
        NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_nonzero_out, _, a))                \
        {                                                                      \
            rc = evaluate_nonzero_execute<INPUT_ET, element::Type_t::a>(__VA_ARGS__); \
        }                                                                      \
        NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_nonzero_out, _, a));               \
        rc = evaluate_nonzero_execute<INPUT_ET, element::Type_t::a>(__VA_ARGS__); \
    }                                                                          \
    break

@ -161,9 +162,6 @@ namespace nonzero
bool op::v3::NonZero::evaluate(const HostTensorVector& outputs,
                               const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v3_NonZero_evaluate)
    {
        return nonzero::evaluate_nonzero(inputs[0], outputs[0]);
    }
    return false;
    NGRAPH_OP_SCOPE(v3_NonZero_evaluate);
    return nonzero::evaluate_nonzero(inputs[0], outputs[0]);
}

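The evaluate() bodies throughout this commit are flattened from a guarded block with a fallback return to a single call. A self-contained before/after sketch; the two macros here are stand-ins, not the real ones:

#include <iostream>

#define OP_SCOPE_OLD(region) if (true) // old style: macro prefixes the braced block
#define OP_SCOPE_NEW(region)           // new style: a plain statement (no-op here)

bool kernel() { return true; } // placeholder for e.g. nonzero::evaluate_nonzero

bool evaluate_old()
{
    bool rc = false;
    OP_SCOPE_OLD(demo_evaluate) { rc = kernel(); }
    return rc; // fallback needed in case the guarded block was skipped
}

bool evaluate_new()
{
    OP_SCOPE_NEW(demo_evaluate); // would throw in a selective build if disabled
    return kernel();             // result returned directly, no dead fallback
}

int main() { std::cout << evaluate_old() << evaluate_new() << "\n"; } // prints 11
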
@ -15,6 +15,7 @@
//*****************************************************************************
#include <algorithm>
#include <iterator>
#include "itt.hpp"

#include "ngraph/attribute_visitor.hpp"
#include "ngraph/builder/norm.hpp"
@ -45,6 +46,7 @@ op::NormalizeL2::NormalizeL2(const Output<Node>& data,

bool ngraph::op::v0::NormalizeL2::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_NormalizeL2_visit_attributes);
    visitor.on_attribute("eps", m_eps);
    visitor.on_attribute("eps_mode", m_eps_mode);
    return true;
@ -116,6 +118,7 @@ OutputVector op::NormalizeL2::decompose_op() const

shared_ptr<Node> op::NormalizeL2::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_NormalizeL2_clone_with_new_inputs);
    if (new_args.size() != 2)
    {
        throw ngraph_error("Incorrect number of new arguments");

@ -38,12 +38,14 @@ op::v1::LogicalNot::LogicalNot(const Output<Node>& arg)

bool ngraph::op::v1::LogicalNot::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_LogicalNot_visit_attributes);
    return true;
}

// TODO(amprocte): Update this to allow only boolean, for consistency with logical binops.
void op::v1::LogicalNot::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v1_LogicalNot_validate_and_infer_types);
    auto args_et_pshape = op::util::validate_and_infer_elementwise_args(this);
    element::Type& args_et = std::get<0>(args_et_pshape);
    PartialShape& args_pshape = std::get<1>(args_et_pshape);
@ -53,6 +55,7 @@ void op::v1::LogicalNot::validate_and_infer_types()

shared_ptr<Node> op::v1::LogicalNot::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_LogicalNot_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<v1::LogicalNot>(new_args.at(0));
}
@ -91,9 +94,6 @@ namespace notop
bool op::v1::LogicalNot::evaluate(const HostTensorVector& outputs,
                                  const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v1_LogicalNot_evaluate)
    {
        return notop::evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    }
    return false;
    NGRAPH_OP_SCOPE(v1_LogicalNot_evaluate);
    return notop::evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

@ -77,6 +77,7 @@ op::v1::NotEqual::NotEqual(const Output<Node>& arg0,

shared_ptr<Node> op::v1::NotEqual::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_NotEqual_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<op::v1::NotEqual>(new_args.at(0), new_args.at(1), this->get_autob());
}
@ -84,14 +85,12 @@ shared_ptr<Node> op::v1::NotEqual::clone_with_new_inputs(const OutputVector& new
bool op::v1::NotEqual::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v1_NotEqual_evaluate)
    {
        return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob());
    }
    return false;
    NGRAPH_OP_SCOPE(v1_NotEqual_evaluate);
    return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob());
}

bool op::v1::NotEqual::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_NotEqual_visit_attributes);
    return true;
}

@ -39,6 +39,7 @@ op::v1::OneHot::OneHot(const Output<Node>& indices,

void op::v1::OneHot::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v1_OneHot_validate_and_infer_types);
    const auto& indices_et = get_input_element_type(0);
    const auto& depth_et = get_input_element_type(1);
    const auto& on_value_et = get_input_element_type(2);
@ -121,12 +122,14 @@ void op::v1::OneHot::validate_and_infer_types()

bool ngraph::op::v1::OneHot::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_OneHot_visit_attributes);
    visitor.on_attribute("axis", m_axis);
    return true;
}

shared_ptr<Node> op::v1::OneHot::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_OneHot_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<v1::OneHot>(
        new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), m_axis);
@ -159,12 +162,10 @@ namespace detail
#define TYPE_OUT_CASE(a, ...)                                                  \
    case element::Type_t::a:                                                   \
    {                                                                          \
        NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_one_hot_out, _, a))                \
        {                                                                      \
            using IT = typename element_type_traits<element::Type_t::a>::value_type; \
            using OT = typename element_type_traits<out_t>::value_type;        \
            rc = evaluate<IT, OT>(__VA_ARGS__);                                \
        }                                                                      \
        NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_one_hot_out, _, a));               \
        using IT = typename element_type_traits<element::Type_t::a>::value_type; \
        using OT = typename element_type_traits<out_t>::value_type;            \
        rc = evaluate<IT, OT>(__VA_ARGS__);                                    \
    }                                                                          \
    break

@ -208,9 +209,6 @@ namespace detail
bool op::v1::OneHot::evaluate(const HostTensorVector& output_values,
                              const HostTensorVector& input_values) const
{
    NGRAPH_OP_SCOPE(v1_OneHot_evaluate)
    {
        return detail::evaluate_onehot(output_values, input_values, get_axis());
    }
    return false;
    NGRAPH_OP_SCOPE(v1_OneHot_evaluate);
    return detail::evaluate_onehot(output_values, input_values, get_axis());
}

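Both TYPE_OUT_CASE hunks (NonZero above and OneHot here) use the same dispatch idiom: a switch over a runtime element-type tag picks a concrete C++ type through a traits template and forwards to a templated kernel. A compilable miniature of the idiom, with all names invented for this sketch:

#include <cstdint>
#include <iostream>

enum class Et { i32, i64 }; // stand-in for element::Type_t

template <Et e> struct et_traits;
template <> struct et_traits<Et::i32> { using value_type = int32_t; };
template <> struct et_traits<Et::i64> { using value_type = int64_t; };

// Templated kernel: reports whether v survives a round-trip through OT.
template <typename OT> bool kernel(int64_t v) { return static_cast<OT>(v) == v; }

#define TYPE_OUT_CASE_DEMO(a, ...)                                             \
    case Et::a:                                                                \
    {                                                                          \
        using OT = et_traits<Et::a>::value_type;                               \
        rc = kernel<OT>(__VA_ARGS__);                                          \
    }                                                                          \
    break

bool dispatch(Et out_et, int64_t v)
{
    bool rc = false;
    switch (out_et)
    {
        TYPE_OUT_CASE_DEMO(i32, v);
        TYPE_OUT_CASE_DEMO(i64, v);
    }
    return rc;
}

int main()
{
    std::cout << dispatch(Et::i32, int64_t{1} << 33) << "\n"; // 0: overflows i32
    std::cout << dispatch(Et::i64, int64_t{1} << 33) << "\n"; // 1: fits in i64
}
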
@ -36,6 +36,7 @@ op::v1::LogicalOr::LogicalOr(const Output<Node>& arg0,

shared_ptr<Node> op::v1::LogicalOr::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_LogicalOr_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<v1::LogicalOr>(new_args.at(0), new_args.at(1), this->get_autob());
}
@ -82,9 +83,6 @@ namespace logor
bool op::v1::LogicalOr::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v1_LogicalOr_evaluate)
    {
        return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob());
    }
    return false;
    NGRAPH_OP_SCOPE(v1_LogicalOr_evaluate);
    return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob());
}

@ -75,12 +75,14 @@ CoordinateDiff op::v1::Pad::get_pads_end() const

bool ngraph::op::v1::Pad::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v1_Pad_visit_attributes);
    visitor.on_attribute("pad_mode", m_pad_mode);
    return true;
}

void op::v1::Pad::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v1_Pad_validate_and_infer_types);
    element::Type result_et;

    const auto& arg_element_type = get_input_element_type(0);
@ -140,12 +142,12 @@ void op::v1::Pad::validate_and_infer_types()
    const auto& arg_shape_rank = arg_shape.rank();
    if (arg_shape_rank.is_static() && pads_begin_shape.is_static())
    {
        NODE_VALIDATION_CHECK(
            this,
            pads_begin_shape[0].get_length() <= arg_shape_rank.get_length(),
            "Number of elements of pads_begin must be >= 0 and <= arg rank (pads_begin_shape[0]: ",
            pads_begin_shape[0],
            ").");
        NODE_VALIDATION_CHECK(this,
                              pads_begin_shape[0].get_length() <= arg_shape_rank.get_length(),
                              "Number of elements of pads_begin must be >= 0 and <= arg rank "
                              "(pads_begin_shape[0]: ",
                              pads_begin_shape[0],
                              ").");
    }
    if (arg_shape_rank.is_static() && pads_end_shape.is_static())
    {
@ -175,16 +177,18 @@ void op::v1::Pad::validate_and_infer_types()
            result_dims[i] = static_cast<size_t>(result_dim);
            if (i > 1)
            {
                NODE_VALIDATION_CHECK(
                    this,
                    m_pad_mode != op::PadMode::EDGE || arg_shape[i].get_length() >= 1,
                    "EDGE padding mode requires an input of dimension of at least 1 at each "
                    "spatial axis.");
                NODE_VALIDATION_CHECK(
                    this,
                    m_pad_mode != op::PadMode::REFLECT || arg_shape[i].get_length() >= 2,
                    "REFLECT padding mode requires an input of dimension of at least 2 at each "
                    "spatial axis.");
                NODE_VALIDATION_CHECK(this,
                                      m_pad_mode != op::PadMode::EDGE ||
                                          arg_shape[i].get_length() >= 1,
                                      "EDGE padding mode requires an input of dimension of "
                                      "at least 1 at each "
                                      "spatial axis.");
                NODE_VALIDATION_CHECK(this,
                                      m_pad_mode != op::PadMode::REFLECT ||
                                          arg_shape[i].get_length() >= 2,
                                      "REFLECT padding mode requires an input of dimension "
                                      "of at least 2 at each "
                                      "spatial axis.");
            }
        }
    }
@ -198,6 +202,7 @@ void op::v1::Pad::validate_and_infer_types()

shared_ptr<Node> op::v1::Pad::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_Pad_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    if (get_input_size() == 4)
    {
@ -243,6 +248,6 @@ bool op::v1::Pad::evaluate_pad(const HostTensorVector& outputs,

bool op::v1::Pad::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v1_Pad_evaluate) { return evaluate_pad(outputs, inputs); }
    return false;
    NGRAPH_OP_SCOPE(v1_Pad_evaluate);
    return evaluate_pad(outputs, inputs);
}

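The Pad hunks above are pure clang-format reflows of NODE_VALIDATION_CHECK calls: a condition followed by any number of message fragments that are only assembled when the check fails. A minimal standalone analogue of that helper (validation_check is invented for this sketch):

#include <cstddef>
#include <initializer_list>
#include <iostream>
#include <sstream>
#include <stdexcept>

// Streams all message parts into one string, but only on failure.
template <typename... Parts>
void validation_check(bool cond, Parts&&... parts)
{
    if (cond)
        return;
    std::ostringstream msg;
    (void)std::initializer_list<int>{(msg << parts, 0)...};
    throw std::invalid_argument(msg.str());
}

int main()
{
    const std::size_t pads_begin_len = 5, arg_rank = 4; // illustrative values
    try
    {
        validation_check(pads_begin_len <= arg_rank,
                         "Number of elements of pads_begin must be >= 0 and <= arg rank "
                         "(pads_begin_shape[0]: ",
                         pads_begin_len,
                         ").");
    }
    catch (const std::invalid_argument& e)
    {
        std::cout << e.what() << "\n"; // the assembled message
    }
}
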
@ -15,6 +15,7 @@
//*****************************************************************************

#include <sstream>
#include "itt.hpp"

#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/parameter.hpp"
@ -34,6 +35,7 @@ op::Parameter::Parameter(const element::Type& element_type, const PartialShape&

bool op::Parameter::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_Parameter_visit_attributes);
    visitor.on_attribute("shape", m_partial_shape);
    visitor.on_attribute("element_type", m_element_type);
    return true;
@ -41,12 +43,15 @@ bool op::Parameter::visit_attributes(AttributeVisitor& visitor)

void op::Parameter::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_Parameter_validate_and_infer_types);
    Op::validate_and_infer_types();
    set_output_type(0, m_element_type, m_partial_shape);
}

shared_ptr<Node> op::Parameter::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Parameter_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Parameter>(m_element_type, m_partial_shape);
}

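visit_attributes follows the classic visitor pattern: each op enumerates its attributes by name for a generic AttributeVisitor (serializer, hasher, comparator, and so on). A stripped-down sketch of the shape of that contract; the types here are illustrative, not the real ngraph interface:

#include <cstdint>
#include <iostream>
#include <string>

// A visitor that just prints; a serializer or hasher would do real work here.
struct PrintingVisitor
{
    template <typename T>
    void on_attribute(const std::string& name, const T& value)
    {
        std::cout << name << " = " << value << "\n";
    }
};

struct OneHotLike
{
    int64_t m_axis = -1;

    template <typename Visitor>
    bool visit_attributes(Visitor& visitor)
    {
        visitor.on_attribute("axis", m_axis); // expose each attribute by name
        return true;
    }
};

int main()
{
    OneHotLike op;
    PrintingVisitor v;
    op.visit_attributes(v); // prints: axis = -1
}
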
@ -80,15 +80,13 @@ op::v1::Power::Power(const Output<Node>& arg0,

shared_ptr<Node> op::v1::Power::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_Power_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<op::v1::Power>(new_args.at(0), new_args.at(1), this->get_autob());
}

bool op::v1::Power::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v1_Power_evaluate)
    {
        return power::evaluate_power(inputs[0], inputs[1], outputs[0], get_autob());
    }
    return false;
    NGRAPH_OP_SCOPE(v1_Power_evaluate);
    return power::evaluate_power(inputs[0], inputs[1], outputs[0], get_autob());
}

@ -42,6 +42,7 @@ op::PRelu::PRelu(const Output<Node>& data, const Output<Node>& slope)

bool ngraph::op::v0::PRelu::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_PRelu_visit_attributes);
    return true;
}

@ -88,6 +89,7 @@ OutputVector op::PRelu::decompose_op() const

shared_ptr<Node> op::PRelu::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_PRelu_clone_with_new_inputs);
    if (new_args.size() != 2)
    {
        throw ngraph_error("Incorrect number of new arguments");
@ -127,9 +129,6 @@ namespace prelu

bool op::PRelu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v0_PRelu_evaluate)
    {
        return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]);
    }
    return false;
    NGRAPH_OP_SCOPE(v0_PRelu_evaluate);
    return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]);
}

@ -38,6 +38,7 @@ op::PriorBox::PriorBox(const Output<Node>& layer_shape,

void op::PriorBox::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_PriorBox_validate_and_infer_types);
    // shape node should have integer data type. For now we only allow i64
    auto layer_shape_et = get_input_element_type(0);
    NODE_VALIDATION_CHECK(this,
@ -85,6 +86,7 @@ void op::PriorBox::validate_and_infer_types()

shared_ptr<Node> op::PriorBox::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_PriorBox_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<PriorBox>(new_args.at(0), new_args.at(1), m_attrs);
}
@ -136,6 +138,7 @@ std::vector<float> op::PriorBox::normalized_aspect_ratio(const std::vector<float

bool op::PriorBox::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_PriorBox_visit_attributes);
    visitor.on_attribute("min_size", m_attrs.min_size);
    visitor.on_attribute("max_size", m_attrs.max_size);
    visitor.on_attribute("aspect_ratio", m_attrs.aspect_ratio);
@ -192,9 +195,6 @@ namespace prior_box
bool op::v0::PriorBox::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v0_PriorBox_evaluate)
    {
        return prior_box::evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
    }
    return false;
    NGRAPH_OP_SCOPE(v0_PriorBox_evaluate);
    return prior_box::evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
}

@ -38,6 +38,7 @@ op::PriorBoxClustered::PriorBoxClustered(const Output<Node>& layer_shape,

void op::PriorBoxClustered::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_PriorBoxClustered_validate_and_infer_types);
    // shape node should have integer data type. For now we only allow i64
    auto layer_shape_et = get_input_element_type(0);
    NODE_VALIDATION_CHECK(this,
@ -90,12 +91,14 @@ void op::PriorBoxClustered::validate_and_infer_types()

shared_ptr<Node> op::PriorBoxClustered::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_PriorBoxClustered_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<PriorBoxClustered>(new_args.at(0), new_args.at(1), m_attrs);
}

bool op::PriorBoxClustered::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_PriorBoxClustered_visit_attributes);
    float step = 0;
    float step_w_tmp = m_attrs.step_widths;
    float step_h_tmp = m_attrs.step_heights;
@ -165,10 +168,6 @@ namespace prior_box_clustered
bool op::v0::PriorBoxClustered::evaluate(const HostTensorVector& outputs,
                                         const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v0_PriorBoxClustered_evaluate)
    {
        return prior_box_clustered::evaluate_prior_box(
            inputs[0], inputs[1], outputs[0], get_attrs());
    }
    return false;
    NGRAPH_OP_SCOPE(v0_PriorBoxClustered_evaluate);
    return prior_box_clustered::evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
}

@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/proposal.hpp"
#include "itt.hpp"

#include "ngraph/op/constant.hpp"

@ -35,6 +36,7 @@ op::v0::Proposal::Proposal(const Output<Node>& class_probs,

void op::v0::Proposal::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_Proposal_validate_and_infer_types);
    const auto& class_probs_pshape = get_input_partial_shape(0);
    const auto& class_bbox_deltas_pshape = get_input_partial_shape(1);
    const auto& image_shape_pshape = get_input_partial_shape(2);
@ -84,12 +86,14 @@ void op::v0::Proposal::validate_and_infer_types()

shared_ptr<Node> op::v0::Proposal::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Proposal_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<op::v0::Proposal>(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs);
}

bool op::v0::Proposal::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_Proposal_visit_attributes);
    visitor.on_attribute("base_size", m_attrs.base_size);
    visitor.on_attribute("pre_nms_topn", m_attrs.pre_nms_topn);
    visitor.on_attribute("post_nms_topn", m_attrs.post_nms_topn);
@ -120,6 +124,7 @@ op::v4::Proposal::Proposal(const Output<Node>& class_probs,

void op::v4::Proposal::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v4_Proposal_validate_and_infer_types);
    v0::Proposal::validate_and_infer_types();

    const auto& class_probs_pshape = get_input_partial_shape(0);
@ -136,6 +141,7 @@ void op::v4::Proposal::validate_and_infer_types()

std::shared_ptr<Node> op::v4::Proposal::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v4_Proposal_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<op::v4::Proposal>(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs);
}

@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/psroi_pooling.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"

using namespace std;
@ -43,6 +44,7 @@ op::PSROIPooling::PSROIPooling(const Output<Node>& input,

bool ngraph::op::v0::PSROIPooling::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_PSROIPooling_visit_attributes);
    visitor.on_attribute("output_dim", m_output_dim);
    visitor.on_attribute("group_size", m_group_size);
    visitor.on_attribute("spatial_scale", m_spatial_scale);
@ -54,6 +56,7 @@ bool ngraph::op::v0::PSROIPooling::visit_attributes(AttributeVisitor& visitor)

void op::PSROIPooling::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_PSROIPooling_validate_and_infer_types);
    auto feat_maps_et = get_input_element_type(0);
    auto coords_et = get_input_element_type(1);
    NODE_VALIDATION_CHECK(this,
@ -134,6 +137,7 @@ void op::PSROIPooling::validate_and_infer_types()

shared_ptr<Node> op::PSROIPooling::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_PSROIPooling_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<PSROIPooling>(new_args.at(0),
                                     new_args.at(1),

@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/quantize.hpp"
#include "itt.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/quantize.hpp"
#include "ngraph/shape_util.hpp"
@ -43,6 +44,7 @@ op::Quantize::Quantize(const Output<Node>& input,

void op::Quantize::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_Quantize_validate_and_infer_types);
    enum
    {
        INPUT,
@ -159,6 +161,7 @@ void op::Quantize::validate_and_infer_types()

shared_ptr<Node> op::Quantize::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Quantize_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Quantize>(
        new_args.at(0), new_args.at(1), new_args.at(2), m_type, m_axes, m_round_mode);

@ -70,12 +70,14 @@ op::v4::Range::Range(const Output<Node>& start,

bool ngraph::op::v4::Range::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v4_Range_visit_attributes);
    visitor.on_attribute("output_type", m_output_type);
    return true;
}

void op::v4::Range::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v4_Range_validate_and_infer_types);
    NODE_VALIDATION_CHECK(this,
                          m_output_type.is_integral_number() || m_output_type.is_real(),
                          "output tensor type should be a numeric type. Got: ",
@ -182,6 +184,7 @@ void op::v4::Range::validate_and_infer_types()

shared_ptr<Node> op::v4::Range::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v4_Range_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<v4::Range>(new_args.at(0), new_args.at(1), new_args.at(2), m_output_type);
}
@ -300,15 +303,12 @@ namespace rangeop

bool op::v4::Range::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v4_Range_evaluate)
    {
        HostTensorPtr out = outputs[0];
        HostTensorPtr start = inputs[0];
        HostTensorPtr stop = inputs[1];
        HostTensorPtr step = inputs[2];
        return rangeop::evaluate_power(out, start, stop, step, m_output_type, 4);
    }
    return false;
    NGRAPH_OP_SCOPE(v4_Range_evaluate);
    HostTensorPtr out = outputs[0];
    HostTensorPtr start = inputs[0];
    HostTensorPtr stop = inputs[1];
    HostTensorPtr step = inputs[2];
    return rangeop::evaluate_power(out, start, stop, step, m_output_type, 4);
}

constexpr NodeTypeInfo op::v0::Range::type_info;
@ -421,11 +421,13 @@ static PartialShape infer_output_shape(const op::v0::Range* node, const element:

bool ngraph::op::v0::Range::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v0_Range_visit_attributes);
    return true;
}

void op::v0::Range::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v0_Range_validate_and_infer_types);
    set_input_is_relevant_to_shape(0);
    set_input_is_relevant_to_shape(1);
    set_input_is_relevant_to_shape(2);
@ -488,6 +490,7 @@ void op::v0::Range::validate_and_infer_types()

shared_ptr<Node> op::v0::Range::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v0_Range_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<Range>(new_args.at(0), new_args.at(1), new_args.at(2));
}
@ -499,13 +502,10 @@ void positive_range(T start_val, T stop_val, T step_val)

bool op::v0::Range::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(op_v0_Range_evaluate)
    {
        HostTensorPtr out = outputs[0];
        HostTensorPtr start = inputs[0];
        HostTensorPtr stop = inputs[1];
        HostTensorPtr step = inputs[2];
        return rangeop::evaluate_power(out, start, stop, step, start->get_element_type(), 0);
    }
    return false;
    NGRAPH_OP_SCOPE(v0_Range_evaluate);
    HostTensorPtr out = outputs[0];
    HostTensorPtr start = inputs[0];
    HostTensorPtr stop = inputs[1];
    HostTensorPtr step = inputs[2];
    return rangeop::evaluate_power(out, start, stop, step, start->get_element_type(), 0);
}

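Both Range::evaluate bodies unpack start/stop/step and hand them to a shared kernel (v4 with the configurable m_output_type, v0 with the start tensor's own element type). What such a kernel computes, sketched standalone; this mirrors the usual Range semantics, not the exact ngraph reference code:

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Element count is max(ceil((stop - start) / step), 0); element i is
// start + i * step.
std::vector<double> make_range(double start, double stop, double step)
{
    const double span = (stop - start) / step;
    const std::size_t n = span > 0 ? static_cast<std::size_t>(std::ceil(span)) : 0;
    std::vector<double> out(n);
    for (std::size_t i = 0; i < n; ++i)
        out[i] = start + static_cast<double>(i) * step;
    return out;
}

int main()
{
    for (double v : make_range(1.0, 2.5, 0.5))
        std::cout << v << " "; // prints: 1 1.5 2
    std::cout << "\n";
}
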
@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/read_value.hpp"
#include "itt.hpp"

using namespace std;
using namespace ngraph;
@ -30,6 +31,7 @@ op::ReadValue::ReadValue(const Output<Node>& init_value, const std::string& vari

void op::ReadValue::validate_and_infer_types()
{
    NGRAPH_OP_SCOPE(v3_ReadValue_validate_and_infer_types);
    auto arg_t = get_input_element_type(0);
    auto output_shape = get_input_partial_shape(0);

@ -43,12 +45,14 @@ void op::ReadValue::validate_and_infer_types()

shared_ptr<Node> op::ReadValue::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v3_ReadValue_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<ReadValue>(new_args.at(0), m_variable_id);
}

bool op::v3::ReadValue::visit_attributes(AttributeVisitor& visitor)
{
    NGRAPH_OP_SCOPE(v3_ReadValue_visit_attributes);
    visitor.on_attribute("variable_id", m_variable_id);
    return true;
}

@ -41,6 +41,7 @@ shared_ptr<Node> op::v4::ReduceL1::get_default_value() const

shared_ptr<Node> op::v4::ReduceL1::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v4_ReduceL1_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<op::v4::ReduceL1>(new_args.at(0), new_args.at(1), get_keep_dims());
}
@ -81,10 +82,6 @@ namespace reduce_l1
bool op::v4::ReduceL1::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v4_ReduceL1_evaluate)
    {
        return reduce_l1::evaluate_sum(
            inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
    }
    return false;
    NGRAPH_OP_SCOPE(v4_ReduceL1_evaluate);
    return reduce_l1::evaluate_sum(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}

@ -41,6 +41,7 @@ shared_ptr<Node> op::v4::ReduceL2::get_default_value() const

shared_ptr<Node> op::v4::ReduceL2::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v4_ReduceL2_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<op::v4::ReduceL2>(new_args.at(0), new_args.at(1), get_keep_dims());
}
@ -79,10 +80,7 @@ namespace reduce_l2
bool op::v4::ReduceL2::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v4_ReduceL2_evaluate)
    {
        return reduce_l2::evaluate_reduce_l2(
            inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
    }
    return false;
    NGRAPH_OP_SCOPE(v4_ReduceL2_evaluate);
    return reduce_l2::evaluate_reduce_l2(
        inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}

@ -36,6 +36,7 @@ op::v1::ReduceLogicalAnd::ReduceLogicalAnd(const Output<Node>& data,

shared_ptr<Node> op::v1::ReduceLogicalAnd::clone_with_new_inputs(const OutputVector& new_args) const
{
    NGRAPH_OP_SCOPE(v1_ReduceLogicalAnd_clone_with_new_inputs);
    check_new_args_count(this, new_args);
    return make_shared<op::v1::ReduceLogicalAnd>(new_args.at(0), new_args.at(1), get_keep_dims());
}
@ -75,12 +76,9 @@ namespace
bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs,
                                        const HostTensorVector& inputs) const
{
    NGRAPH_OP_SCOPE(v1_ReduceLogicalAnd_evaluate)
    {
        const auto& data = inputs[0];
        const auto& axes = inputs[1];
        const auto& out = outputs[0];
        return evaluate_reduce_logical_and(data, axes, out, get_keep_dims());
    }
    return false;
    NGRAPH_OP_SCOPE(v1_ReduceLogicalAnd_evaluate);
    const auto& data = inputs[0];
    const auto& axes = inputs[1];
    const auto& out = outputs[0];
    return evaluate_reduce_logical_and(data, axes, out, get_keep_dims());
}