Moved operations G-L to ov namespace (#7344)

* Moved ngraph::Node to ov namespace

* Fixed code style

* Fixed VPU

* Fixed GNA

* Fixed tests

* Added aliases for backward compatibility

* Fix clDNN

* Try to fix build

* Fixed comment

* Renamed RTTI macros

* Moved op utils to ov namespace

* Fixed ngraph library build

* Fixed unit-tests

* Changed src folder

* Fixed recurrent_sequence

* Changed low latency

* Fixed serialize

* Fixed ieFuncTests

* Try to fix windows

* Remove custom operator<< from tests

* Fixed build

* Moved operations from A to ov namespace

* Moved operations from B and C to ov namespace

* Moved operations D-F to ov namespace

* Update ngraph/core/src/op/embeddingbag_offsets_sum.cpp

Co-authored-by: Katarzyna Mitrus <katarzyna.mitrus@intel.com>

* Update ngraph/core/src/op/embeddingbag_packedsum.cpp

Co-authored-by: Katarzyna Mitrus <katarzyna.mitrus@intel.com>

* Fixed RTTI

* Moved operations G-L to ov namespace

* Fixed RTTI

Co-authored-by: Ilya Lavrenov <ilya.lavrenov@intel.com>
Co-authored-by: Katarzyna Mitrus <katarzyna.mitrus@intel.com>
Ilya Churaev 2021-09-06 11:07:20 +03:00 committed by GitHub
parent 35fef3deea
commit e3aed9854b
87 changed files with 2728 additions and 2216 deletions
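Every hunk below applies the same pattern: the class definition moves to the ov namespace, and the old ngraph namespace keeps a using-declaration so existing user code compiles unchanged. A minimal self-contained sketch of the pattern (Foo is an illustrative placeholder, not an operation from this commit):

// Sketch of the move-and-alias pattern used throughout this commit.
namespace ov {
namespace op {
namespace v0 {
class Foo {};  // the real definition now lives in ov::op::v0
} // namespace v0
} // namespace op
} // namespace ov

namespace ngraph {
namespace op {
namespace v0 {
using ov::op::v0::Foo;  // backward-compatibility alias for legacy code
} // namespace v0
} // namespace op
} // namespace ngraph

int main() {
    ngraph::op::v0::Foo legacy;  // old spelling still compiles
    ov::op::v0::Foo current;     // new spelling names the same type
    (void)legacy;
    (void)current;
    return 0;
}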


@@ -18,11 +18,18 @@
 #include "ngraph/function.hpp"
 #include "ngraph/node.hpp"
+namespace ov {
+namespace op {
+namespace v0 {
+class Parameter;
+}
+} // namespace op
+} // namespace ov
 namespace ngraph {
 namespace op {
 namespace v0 {
-class Parameter;
+using ov::op::v0::Parameter;
 }
 } // namespace op


@@ -38,6 +38,13 @@
 #include "ngraph/variant.hpp"
 #include "openvino/core/node.hpp"
+namespace ov {
+namespace op {
+namespace v0 {
+class Result;
+}
+} // namespace op
+} // namespace ov
 namespace ngraph {
 using ov::Node;
@@ -52,7 +59,7 @@ using HostTensorVector = std::vector<HostTensorPtr>;
 namespace op {
 namespace v0 {
-class Result;
+using ov::op::v0::Result;
 }
 } // namespace op
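The first two hunks forward-declare the class in ov before aliasing it: a using-declaration requires the target name to be declared, whereas the old headers only needed an incomplete ngraph type. A minimal sketch with a hypothetical Bar:

// A name must exist in its real namespace before it can be re-exported.
namespace ov {
namespace op {
namespace v0 {
class Bar;  // a forward declaration is enough for the using-declaration
} // namespace v0
} // namespace op
} // namespace ov

namespace ngraph {
namespace op {
namespace v0 {
using ov::op::v0::Bar;  // would not compile without the declaration above
} // namespace v0
} // namespace op
} // namespace ngraph

// Pointers and references to the still-incomplete type keep working:
void visit(ngraph::op::v0::Bar* node);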


@@ -5,76 +5,18 @@
 #pragma once
 #include "ngraph/op/util/gather_base.hpp"
+#include "openvino/op/gather.hpp"
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Gather slices from axis of data according to indices
-class NGRAPH_API Gather : public op::util::GatherBase {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    static const int64_t AXIS_NOT_SET_VALUE = std::numeric_limits<int64_t>::max();
-    Gather() = default;
-    /// \param data The tensor from which slices are gathered
-    /// \param indices Tensor with indexes to gather
-    /// \param axis The tensor is a dimension index to gather data from
-    Gather(const Output<Node>& params, const Output<Node>& indices, const Output<Node>& axis);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    int64_t get_axis() const override;
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v1::Gather;
 } // namespace v1
 namespace v7 {
-/// \brief Gather slices from axis of data according to indices
-class NGRAPH_API Gather : public op::util::GatherBase {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    Gather() = default;
-    /// \param data The tensor from which slices are gathered
-    /// \param indices Tensor with indexes to gather
-    /// \param axis The tensor is a dimension index to gather data from
-    /// \param batch_dims The number of batch dimension in data and indices tensors.
-    ///                   If batch_dims = 0 Gather v7 is identical to Gather v1.
-    Gather(const Output<Node>& data,
-           const Output<Node>& indices,
-           const Output<Node>& axis,
-           const int64_t batch_dims = 0);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-    int64_t get_batch_dims() const;
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v7::Gather;
 } // namespace v7
 namespace v8 {
-/// \brief Gather slices from axis of data according to indices. Negative indices
-/// are supported and indicate reverse indexing from the end
-class NGRAPH_API Gather : public op::util::GatherBase {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    Gather() = default;
-    /// \param data The tensor from which slices are gathered
-    /// \param indices Tensor with indexes to gather
-    /// \param axis The tensor is a dimension index to gather data from
-    /// \param batch_dims The number of batch dimension in data and indices tensors.
-    Gather(const Output<Node>& data,
-           const Output<Node>& indices,
-           const Output<Node>& axis,
-           const int64_t batch_dims = 0);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-    int64_t get_batch_dims() const;
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v8::Gather;
 } // namespace v8
 } // namespace op
 } // namespace ngraph
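Because the legacy name is introduced by a using-declaration rather than a new class, both spellings denote one type. After this change a check along these lines should hold (header path taken from the diff above):

#include <type_traits>

#include "ngraph/op/gather.hpp"  // now also pulls in openvino/op/gather.hpp

static_assert(std::is_same<ngraph::op::v1::Gather, ov::op::v1::Gather>::value,
              "the legacy name is an alias, not a distinct type");
static_assert(std::is_same<ngraph::op::v8::Gather, ov::op::v8::Gather>::value,
              "likewise for the newest opset version");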


@@ -5,35 +5,12 @@
 #pragma once
 #include "ngraph/op/op.hpp"
+#include "openvino/op/gather_elements.hpp"
 namespace ngraph {
 namespace op {
 namespace v6 {
-/// \brief GatherElements operation
-///
-class NGRAPH_API GatherElements : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    GatherElements() = default;
-    /// \brief Constructs a GatherElements operation.
-    ///
-    /// \param data Node producing data that are gathered
-    /// \param indices Node producing indices by which the operation gathers elements
-    /// \param axis specifies axis along which indices are specified
-    GatherElements(const Output<Node>& data, const Output<Node>& indices, const int64_t axis);
-    void validate_and_infer_types() override;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    int64_t get_axis() const {
-        return m_axis;
-    }
-private:
-    int64_t m_axis;
-};
+using ov::op::v6::GatherElements;
 } // namespace v6
 } // namespace op
 } // namespace ngraph


@@ -5,36 +5,12 @@
 #pragma once
 #include "ngraph/op/op.hpp"
+#include "openvino/op/gather_nd.hpp"
 namespace ngraph {
 namespace op {
 namespace v5 {
-/// \brief GatherND operation
-///
-class NGRAPH_API GatherND : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    GatherND() = default;
-    /// \brief Constructs a GatherND operation.
-    ///
-    /// \param data Node producing data that are gathered
-    /// \param indices Node producing indices by which the operation gathers elements
-    ///                or slices from data
-    /// \param batch_dims Specifies a number of batch dimensions
-    GatherND(const Output<Node>& data, const Output<Node>& indices, const size_t batch_dims = 0);
-    void validate_and_infer_types() override;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    size_t get_batch_dims() const {
-        return m_batch_dims;
-    }
-private:
-    size_t m_batch_dims;
-};
+using ov::op::v5::GatherND;
 } // namespace v5
 } // namespace op
 } // namespace ngraph


@@ -5,34 +5,12 @@
 #pragma once
 #include "ngraph/op/op.hpp"
+#include "openvino/op/gather_tree.hpp"
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Generates the complete beams from the ids per each step and the parent beam
-/// ids.
-class NGRAPH_API GatherTree : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    GatherTree() = default;
-    /// \param step_ids Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with
-    ///                 indices from per each step
-    /// \param parent_idx Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with
-    ///                   parent beam indices
-    /// \param max_seq_len Tensor of shape [BATCH_SIZE] with maximum lengths for each
-    ///                    sequence in the batch
-    /// \param end_token Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH]
-    GatherTree(const Output<Node>& step_ids,
-               const Output<Node>& parent_idx,
-               const Output<Node>& max_seq_len,
-               const Output<Node>& end_token);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v1::GatherTree;
 } // namespace v1
 } // namespace op
 } // namespace ngraph


@@ -7,81 +7,19 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
+#include "openvino/op/gelu.hpp"
 namespace ngraph {
 namespace op {
 namespace v0 {
-/// \brief Gaussian Error Linear Unit
-/// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) )
-class NGRAPH_API Gelu : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    Gelu();
-    /// \brief Constructs a Gelu operation.
-    ///
-    /// \param data Input tensor
-    Gelu(const Output<Node>& data);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v0::Gelu;
 } // namespace v0
 using v0::Gelu;
-/// \brief Specifies the approximation to calculate Gelu
-enum class GeluApproximationMode { TANH, ERF };
-NGRAPH_API std::ostream& operator<<(std::ostream& s, const GeluApproximationMode& type);
+using ov::op::GeluApproximationMode;
 namespace v7 {
-/// \brief Gaussian Error Linear Unit
-/// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) ) for "approximation" = "erf"
-/// f(x) = 0.5 * x * (1 + tanh([sqrt(2 / pi)] * [x + 0.044715^3]) for "approximation" =
-/// "tanh"
-class NGRAPH_API Gelu : public util::UnaryElementwiseArithmetic {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    Gelu() = default;
-    /// \brief Constructs a Gelu operation.
-    ///
-    /// \param data Input tensor
-    /// \param mode Approximation mode
-    Gelu(const Output<Node>& data, GeluApproximationMode mode = GeluApproximationMode::ERF);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    GeluApproximationMode get_approximation_mode() const;
-private:
-    GeluApproximationMode m_approximation_mode = GeluApproximationMode::ERF;
-};
+using ov::op::v7::Gelu;
 } // namespace v7
 } // namespace op
 } // namespace ngraph
-namespace ov {
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::GeluApproximationMode>
-    : public EnumAttributeAdapterBase<ngraph::op::GeluApproximationMode> {
-public:
-    AttributeAdapter(ngraph::op::GeluApproximationMode& value)
-        : EnumAttributeAdapterBase<ngraph::op::GeluApproximationMode>(value) {}
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::GeluApproximationMode>", 0};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-} // namespace ov
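This hunk also moves the GeluApproximationMode enum and its AttributeAdapter specialization to ov, re-exporting the enum with a using-declaration. A hedged usage sketch showing that a legacy call site should keep compiling (make_tanh_gelu is an illustrative helper, not part of the commit):

#include <memory>

#include "ngraph/op/gelu.hpp"

// The enum now lives in ov::op but is re-exported into ngraph::op,
// so the old qualified names still resolve.
std::shared_ptr<ngraph::op::v7::Gelu> make_tanh_gelu(const ngraph::Output<ngraph::Node>& data) {
    return std::make_shared<ngraph::op::v7::Gelu>(data, ngraph::op::GeluApproximationMode::TANH);
}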


@@ -5,29 +5,12 @@
 #pragma once
 #include "ngraph/op/util/binary_elementwise_comparison.hpp"
+#include "openvino/op/greater.hpp"
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Elementwise greater-than operation.
-class NGRAPH_API Greater : public util::BinaryElementwiseComparison {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs a greater-than operation.
-    Greater() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
-    /// \brief Constructs a greater-than operation.
-    ///
-    /// \param arg0 Node that produces the first input tensor.
-    /// \param arg1 Node that produces the second input tensor.
-    /// \param auto_broadcast Auto broadcast specification
-    Greater(const Output<Node>& arg0,
-            const Output<Node>& arg1,
-            const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v1::Greater;
 } // namespace v1
 } // namespace op
 } // namespace ngraph


@@ -5,29 +5,12 @@
 #pragma once
 #include "ngraph/op/util/binary_elementwise_comparison.hpp"
+#include "openvino/op/greater_eq.hpp"
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Elementwise greater-than-or-equal operation.
-class NGRAPH_API GreaterEqual : public util::BinaryElementwiseComparison {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs a greater-than-or-equal operation.
-    GreaterEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
-    /// \brief Constructs a greater-than-or-equal operation.
-    ///
-    /// \param arg0 Node that produces the first input tensor.
-    /// \param arg1 Node that produces the second input tensor.
-    /// \param auto_broadcast Auto broadcast specification
-    GreaterEqual(const Output<Node>& arg0,
-                 const Output<Node>& arg1,
-                 const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v1::GreaterEqual;
 } // namespace v1
 } // namespace op
 } // namespace ngraph


@@ -7,35 +7,12 @@
 #include <memory>
 #include "ngraph/op/op.hpp"
+#include "openvino/op/grn.hpp"
 namespace ngraph {
 namespace op {
 namespace v0 {
-/// \brief Global Response Normalization with L2 norm (across channels only).
-///
-class NGRAPH_API GRN : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    GRN() = default;
-    /// \brief Constructs a GRN operation.
-    ///
-    /// \param data - Node producing the input tensor
-    /// \param bias - The bias added to the variance.
-    ///
-    GRN(const Output<Node>& data, float bias);
-    void validate_and_infer_types() override;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    float get_bias() const {
-        return m_bias;
-    }
-protected:
-    float m_bias = 1.0f;
-};
+using ov::op::v0::GRN;
 } // namespace v0
 using v0::GRN;
 } // namespace op


@@ -7,268 +7,13 @@
 #include "ngraph/op/convolution.hpp"
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/attr_types.hpp"
+#include "openvino/op/group_conv.hpp"
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Batched convolution operation, with optional window dilation and stride.
-class NGRAPH_API GroupConvolution : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs a batched convolution operation.
-    GroupConvolution() = default;
-    /// \brief Constructs a batched convolution operation.
-    ///
-    /// \param data_batch The node producing the input data batch tensor.<br>
-    /// `[N, C_IN, D1, ... Df]`
-    /// \param filters The node producing the filters tensor.<br>
-    /// `[GROUPS, FC_OUT, FC_IN, F1, ... Ff]`
-    /// \param strides The strides.<br>
-    /// `[f]`
-    /// \param dilations The dilations.<br>
-    /// `[f]`
-    /// \param pads_begin The beginning of padding shape.<br>
-    /// `[f]`
-    /// \param pads_end The end of padding shape.<br>
-    /// `[f]`
-    /// \param auto_pad The pad type for automatically computing padding sizes.<br>
-    /// `[f]`
-    ///
-    /// Output `[N, FC_OUT * GROUPS, R1, ... Rf]`
-    ///
-    GroupConvolution(const Output<Node>& data_batch,
-                     const Output<Node>& filters,
-                     const Strides& strides,
-                     const CoordinateDiff& pads_begin,
-                     const CoordinateDiff& pads_end,
-                     const Strides& dilations,
-                     const PadType& auto_pad = PadType::EXPLICIT);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    /// \return The strides.
-    const Strides& get_strides() const {
-        return m_strides;
-    }
-    void set_strides(const Strides& strides) {
-        m_strides = strides;
-    }
-    /// \return The dilations.
-    const Strides& get_dilations() const {
-        return m_dilations;
-    }
-    void set_dilations(const Strides& dilations) {
-        m_dilations = dilations;
-    }
-    /// \return The padding-below sizes (possibly negative).
-    const CoordinateDiff& get_pads_begin() const {
-        return m_pads_begin;
-    }
-    void set_pads_begin(const CoordinateDiff& pads_begin) {
-        m_pads_begin = pads_begin;
-    }
-    /// \return The padding-above sizes (possibly negative).
-    const CoordinateDiff& get_pads_end() const {
-        return m_pads_end;
-    }
-    void set_adding_above(const CoordinateDiff& pads_end) {
-        m_pads_end = pads_end;
-    }
-    /// \return The pad type for convolution.
-    const PadType& get_auto_pad() const {
-        return m_auto_pad;
-    }
-    void set_auto_pad(const PadType& auto_pad) {
-        m_auto_pad = auto_pad;
-    }
-    /// \return The default value for Convolution.
-    NGRAPH_SUPPRESS_DEPRECATED_START
-    virtual std::shared_ptr<Node> get_default_value() const override;
-    NGRAPH_SUPPRESS_DEPRECATED_END
-protected:
-    Strides m_strides;
-    Strides m_dilations;
-    CoordinateDiff m_pads_begin;
-    CoordinateDiff m_pads_end;
-    PadType m_auto_pad;
-};
-/// \brief Data batch backprop for batched convolution operation.
-class NGRAPH_API GroupConvolutionBackpropData : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs a batched-convolution data batch-backprop operation.
-    GroupConvolutionBackpropData();
-    // clang-format off
-    //
-    // \brief Constructs a batched-convolution data batch-backprop operation.
-    //
-    // \param data The node producing data from forward-prop. Shape: [N,
-    // C_INPUT * GROUPS, X1, ..., XD].
-    // \param filter The node producing the filter from forward-prop. Shape:
-    // [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1]
-    // \param output_shape The shape of the data batch from forward-prop. It's size
-    // should be equal to number of data spatial dimensions.
-    // \param strides The strides from forward-prop.
-    // \param pads_begin The padding-below sizes from forward-prop.
-    // \param pads_end The padding-above sizes from forward-prop.
-    // \param dilations The dilations from forward-prop.
-    // \param auto_pad The pad type for automatically computing padding sizes.
-    // \param output_padding The output padding adds additional amount of paddings per
-    // each spatial axis in the output tensor.
-    //
-    // clang-format on
-    //
-    GroupConvolutionBackpropData(const Output<Node>& data,
-                                 const Output<Node>& filter,
-                                 const Output<Node>& output_shape,
-                                 const Strides& strides,
-                                 const CoordinateDiff& pads_begin,
-                                 const CoordinateDiff& pads_end,
-                                 const Strides& dilations,
-                                 const PadType& auto_pad = PadType::EXPLICIT,
-                                 const CoordinateDiff& output_padding = {});
-    // clang-format off
-    //
-    // \brief Constructs a batched-convolution data batch-backprop operation.
-    //
-    // \param data The node producing data from forward-prop. Shape: [N,
-    // C_INPUT * GROUPS, X1, ..., XD].
-    // \param filter The node producing the filter from forward-prop. Shape:
-    // [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1]
-    // \param output_shape The shape of the data batch from forward-prop. It's size
-    // should be equal to number of data spatial dimensions.
-    // \param strides The strides from forward-prop.
-    // \param dilations The dilations from forward-prop.
-    // \param auto_pad The pad type for automatically computing padding sizes.
-    // \param output_padding The output padding adds additional amount of paddings per
-    // each spatial axis in the output tensor.
-    //
-    // clang-format on
-    //
-    GroupConvolutionBackpropData(const Output<Node>& data,
-                                 const Output<Node>& filter,
-                                 const Output<Node>& output_shape,
-                                 const Strides& strides,
-                                 const Strides& dilations,
-                                 const PadType& auto_pad,
-                                 const CoordinateDiff& output_padding = {});
-    // clang-format off
-    //
-    // \brief Constructs a batched-convolution data batch-backprop operation.
-    //
-    // \param data The node producing data from forward-prop. Shape:
-    // [N, C_INPUT * GROUPS, X1, ..., XD].
-    // \param filter The node producing the filter from forward-prop. Shape:
-    // [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1]
-    // \param strides The strides from forward-prop.
-    // \param pads_begin The padding-below sizes from forward-prop.
-    // \param pads_end The padding-above sizes from forward-prop.
-    // \param dilations The dilations from forward-prop.
-    // \param auto_pad The pad type for automatically computing padding sizes.
-    // \param output_padding The output padding adds additional amount of paddings per
-    // each spatial axis in the output tensor.
-    //
-    // clang-format on
-    GroupConvolutionBackpropData(const Output<Node>& data,
-                                 const Output<Node>& filter,
-                                 const Strides& strides,
-                                 const CoordinateDiff& pads_begin,
-                                 const CoordinateDiff& pads_end,
-                                 const Strides& dilations,
-                                 const PadType& auto_pad = PadType::EXPLICIT,
-                                 const CoordinateDiff& output_padding = {});
-    ///
-    /// \brief Calculates output spatial features size.
-    ///
-    /// \param[in] input_data_shape The input data partial shape
-    /// \param[in] filters_shape The filters partial shape
-    /// \param[in] strides The strides values.
-    /// \param[in] dilations The dilations values.
-    /// \param[in] pads_begin The paddings at the beginning of axis.
-    /// \param[in] pads_end The paddings at the end of axis.
-    /// \param[in] output_padding The output padding values.
-    /// \param output_spatial_shape The placeholder for computed output spatial
-    /// partial
-    /// shape.
-    ///
-    void infer_conv_backprop_output_spatial_shape(const std::vector<Dimension>& input_data_shape,
-                                                  const std::vector<Dimension>& filters_shape,
-                                                  const Strides& strides,
-                                                  const Strides& dilations,
-                                                  const CoordinateDiff& pads_begin,
-                                                  const CoordinateDiff& pads_end,
-                                                  const CoordinateDiff& output_padding,
-                                                  std::vector<Dimension>& output_spatial_shape);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual bool is_dynamic() const override;
-    void validate_and_infer_types() override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    /// \return The spatial shape of the output.
-    const PartialShape get_convolution_output_shape() const;
-    void set_output_shape(const Shape& output_shape);
-    /// \return The strides from the forward prop.
-    const Strides& get_strides() const {
-        return m_strides;
-    }
-    void set_strides(const Strides& strides) {
-        m_strides = strides;
-    }
-    /// \return The dilations from the forward prop.
-    const Strides& get_dilations() const {
-        return m_dilations;
-    }
-    void set_dilations(const Strides& dilations) {
-        m_dilations = dilations;
-    }
-    /// \return The number of pixels to add to the beginning along each axis.
-    const CoordinateDiff& get_pads_begin() const {
-        return m_pads_begin;
-    }
-    void set_pads_begin(const CoordinateDiff& pads_begin) {
-        m_pads_begin = pads_begin;
-    }
-    /// \return The number of pixels to add to the ending along each axis.
-    const CoordinateDiff& get_pads_end() const {
-        return m_pads_end;
-    }
-    void set_pads_end(const CoordinateDiff& pads_end) {
-        m_pads_end = pads_end;
-    }
-    /// \return The auto pad.
-    const PadType& get_auto_pad() const {
-        return m_auto_pad;
-    }
-    void set_auto_pad(const PadType& auto_pad) {
-        m_auto_pad = auto_pad;
-    }
-    /// \return The output padding.
-    const CoordinateDiff& get_output_padding() const {
-        return m_output_padding;
-    }
-    void set_output_padding(const CoordinateDiff& output_padding) {
-        m_output_padding = output_padding;
-    }
-protected:
-    Strides m_strides;
-    Strides m_dilations;
-    CoordinateDiff m_pads_begin;
-    CoordinateDiff m_pads_end;
-    PadType m_auto_pad;
-    CoordinateDiff m_output_padding;
-};
+using ov::op::v1::GroupConvolution;
+using ov::op::v1::GroupConvolutionBackpropData;
 } // namespace v1
 } // namespace op
 } // namespace ngraph


@@ -13,155 +13,12 @@
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/activation_functions.hpp"
 #include "ngraph/op/util/rnn_cell_base.hpp"
+#include "openvino/op/gru_cell.hpp"
 namespace ngraph {
 namespace op {
 namespace v3 {
-///
-/// \brief Class for GRU cell node.
-///
-/// \note It follows notation and equations defined as in ONNX standard:
-/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#GRU
-///
-/// Note this class represents only single *cell* and not whole GRU *layer*.
-///
-class NGRAPH_API GRUCell : public util::RNNCellBase {
-public:
-    static constexpr NodeTypeInfo type_info{"GRUCell", 3};
-    const NodeTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-    GRUCell();
-    ///
-    /// \brief Constructs GRUCell node.
-    ///
-    /// \param[in] X The input tensor with shape: [batch_size,
-    /// input_size].
-    /// \param[in] initial_hidden_state The hidden state tensor at current time step
-    /// with shape: [batch_size, hidden_size].
-    /// \param[in] W The weight tensor with shape:
-    /// [gates_count * hidden_size, input_size].
-    /// \param[in] R The recurrence weight tensor with shape:
-    /// [gates_count * hidden_size, hidden_size].
-    /// \param[in] hidden_size The number of hidden units for recurrent cell.
-    ///
-    GRUCell(const Output<Node>& X,
-            const Output<Node>& initial_hidden_state,
-            const Output<Node>& W,
-            const Output<Node>& R,
-            std::size_t hidden_size);
-    ///
-    /// \brief Constructs GRUCell node.
-    ///
-    /// \param[in] X The input tensor with shape: [batch_size,
-    /// input_size].
-    /// \param[in] initial_hidden_state The hidden state tensor at current time step
-    /// with shape: [batch_size, hidden_size].
-    /// \param[in] W The weight tensor with shape:
-    /// [gates_count * hidden_size, input_size].
-    /// \param[in] R The recurrence weight tensor with shape:
-    /// [gates_count * hidden_size, hidden_size].
-    /// \param[in] hidden_size The number of hidden units for recurrent cell.
-    /// \param[in] activations The vector of activation functions used inside
-    /// recurrent cell.
-    /// \param[in] activations_alpha The vector of alpha parameters for activation
-    /// functions in order respective to activation
-    /// list.
-    /// \param[in] activations_beta The vector of beta parameters for activation
-    /// functions in order respective to activation
-    /// list.
-    /// \param[in] clip The value defining clipping range [-clip,
-    /// clip] on input of activation functions.
-    ///
-    GRUCell(const Output<Node>& X,
-            const Output<Node>& initial_hidden_state,
-            const Output<Node>& W,
-            const Output<Node>& R,
-            std::size_t hidden_size,
-            const std::vector<std::string>& activations,
-            const std::vector<float>& activations_alpha,
-            const std::vector<float>& activations_beta,
-            float clip,
-            bool linear_before_reset);
-    ///
-    /// \brief Constructs GRUCell node.
-    ///
-    /// \param[in] X The input tensor with shape: [batch_size,
-    /// input_size].
-    /// \param[in] initial_hidden_state The hidden state tensor at current time step
-    /// with shape: [batch_size, hidden_size].
-    /// \param[in] W The weight tensor with shape: [gates_count *
-    /// hidden_size, input_size].
-    /// \param[in] R The recurrence weight tensor with shape:
-    /// [gates_count * hidden_size, hidden_size].
-    /// \param[in] hidden_size The number of hidden units for recurrent cell.
-    /// \param[in] B The sum of biases (weight and recurrence) for
-    /// update, reset and hidden gates.
-    /// If linear_before_reset := true then biases for
-    /// hidden gates are
-    /// placed separately (weight and recurrence).
-    /// Shape: [gates_count * hidden_size] if
-    /// linear_before_reset := false
-    /// Shape: [(gates_count + 1) * hidden_size] if
-    /// linear_before_reset := true
-    /// \param[in] activations The vector of activation functions used inside
-    /// recurrent cell.
-    /// \param[in] activations_alpha The vector of alpha parameters for activation
-    /// functions in order respective to activation
-    /// list.
-    /// \param[in] activations_beta The vector of beta parameters for activation
-    /// functions in order respective to activation
-    /// list.
-    /// \param[in] clip The value defining clipping range [-clip,
-    /// clip] on input of activation functions.
-    /// \param[in] linear_before_reset Whether or not to apply the linear
-    /// transformation before multiplying by the
-    /// output of the reset gate.
-    ///
-    GRUCell(const Output<Node>& X,
-            const Output<Node>& initial_hidden_state,
-            const Output<Node>& W,
-            const Output<Node>& R,
-            const Output<Node>& B,
-            std::size_t hidden_size,
-            const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh"},
-            const std::vector<float>& activations_alpha = {},
-            const std::vector<float>& activations_beta = {},
-            float clip = 0.f,
-            bool linear_before_reset = false);
-    virtual void validate_and_infer_types() override;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool get_linear_before_reset() const {
-        return m_linear_before_reset;
-    }
-private:
-    /// brief Add and initialize bias input to all zeros.
-    void add_default_bias_input();
-    ///
-    /// \brief The Activation function f.
-    ///
-    util::ActivationFunction m_activation_f;
-    ///
-    /// \brief The Activation function g.
-    ///
-    util::ActivationFunction m_activation_g;
-    static constexpr std::size_t s_gates_count{3};
-    ///
-    /// \brief Control whether or not apply the linear transformation.
-    ///
-    /// \note The linear transformation may be applied when computing the output of
-    /// hidden gate. It's done before multiplying by the output of the reset gate.
-    ///
-    bool m_linear_before_reset;
-};
+using ov::op::v3::GRUCell;
 } // namespace v3
 } // namespace op
 } // namespace ngraph


@@ -10,45 +10,12 @@
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/rnn_cell_base.hpp"
+#include "openvino/op/gru_sequence.hpp"
 namespace ngraph {
 namespace op {
 namespace v5 {
-class NGRAPH_API GRUSequence : public util::RNNCellBase {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    GRUSequence();
-    GRUSequence(const Output<Node>& X,
-                const Output<Node>& H_t,
-                const Output<Node>& sequence_lengths,
-                const Output<Node>& W,
-                const Output<Node>& R,
-                const Output<Node>& B,
-                size_t hidden_size,
-                op::RecurrentSequenceDirection direction,
-                const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh"},
-                const std::vector<float>& activations_alpha = {},
-                const std::vector<float>& activations_beta = {},
-                float clip = 0.f,
-                bool linear_before_reset = false);
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    void validate_and_infer_types() override;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    bool get_linear_before_reset() const {
-        return m_linear_before_reset;
-    }
-    op::RecurrentSequenceDirection get_direction() const {
-        return m_direction;
-    }
-protected:
-    op::RecurrentSequenceDirection m_direction;
-    bool m_linear_before_reset;
-};
+using ov::op::v5::GRUSequence;
 } // namespace v5
 } // namespace op
 } // namespace ngraph


@@ -6,31 +6,12 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
+#include "openvino/op/hard_sigmoid.hpp"
 namespace ngraph {
 namespace op {
 namespace v0 {
-/// \brief Parameterized, bounded sigmoid-like, piecewise linear
-/// function. min(max(alpha*x + beta, 0), 1)
-///
-class NGRAPH_API HardSigmoid : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    HardSigmoid();
-    /// \brief Constructs a HardSigmoid operation.
-    ///
-    /// \param data Input tensor.
-    /// \param[in] alpha A scalar value representing the alpha parameter.
-    /// \param[in] beta A scalar value representing the beta parameter.
-    ///
-    HardSigmoid(const Output<Node>& data, const Output<Node>& alpha, const Output<Node>& beta);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual void validate_and_infer_types() override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v0::HardSigmoid;
 } // namespace v0
 using v0::HardSigmoid;
 } // namespace op


@@ -7,30 +7,12 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
+#include "openvino/op/hsigmoid.hpp"
 namespace ngraph {
 namespace op {
 namespace v5 {
-/// \brief A HSigmoid Activation Function
-/// f(x) = min(max(x + 3, 0), 6) / 6 or
-/// f(x) = min(ReLU(x + 3), 6) / 6
-///
-class NGRAPH_API HSigmoid : public ngraph::op::util::UnaryElementwiseArithmetic {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    HSigmoid() = default;
-    /// \brief Constructs a HSigmoid operation.
-    ///
-    /// \param data Input tensor
-    HSigmoid(const Output<Node>& arg);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v5::HSigmoid;
 } // namespace v5
 } // namespace op
 } // namespace ngraph


@@ -7,30 +7,12 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
+#include "openvino/op/hswish.hpp"
 namespace ngraph {
 namespace op {
 namespace v4 {
-/// \brief A HSwish Activation Function
-/// f(x) = x * min(max(x + 3, 0), 6) / 6 or
-/// f(x) = x * min(ReLU(x + 3), 6) / 6
-///
-class NGRAPH_API HSwish : public ngraph::op::util::UnaryElementwiseArithmetic {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    HSwish() = default;
-    /// \brief Constructs a HSwish (hard version of Swish) operation.
-    ///
-    /// \param data Input tensor
-    HSwish(const Output<Node>& arg);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v4::HSwish;
 } // namespace v4
 } // namespace op
 } // namespace ngraph


@@ -11,33 +11,12 @@
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/attr_types.hpp"
 #include "ngraph/op/util/fft_base.hpp"
+#include "openvino/op/idft.hpp"
 namespace ngraph {
 namespace op {
 namespace v7 {
-/// \brief An operation IDFT that computes the inverse discrete Fourier transformation.
-class NGRAPH_API IDFT : public util::FFTBase {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    IDFT() = default;
-    /// \brief Constructs a IDFT operation. IDFT is performed for full size axes.
-    ///
-    /// \param data Input data
-    /// \param axes Axes to perform IDFT
-    IDFT(const Output<Node>& data, const Output<Node>& axes);
-    /// \brief Constructs a IDFT operation.
-    ///
-    /// \param data Input data
-    /// \param axes Axes to perform IDFT
-    /// \param signal_size Signal sizes for 'axes'
-    IDFT(const Output<Node>& data, const Output<Node>& axes, const Output<Node>& signal_size);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v7::IDFT;
 } // namespace v7
 } // namespace op
 } // namespace ngraph


@@ -9,86 +9,12 @@
 #include "ngraph/function.hpp"
 #include "ngraph/op/parameter.hpp"
 #include "ngraph/op/util/multi_subgraph_base.hpp"
+#include "openvino/op/if.hpp"
 namespace ngraph {
 namespace op {
 namespace v8 {
-/// \brief If operation.
-class NGRAPH_API If : public util::MultiSubGraphOp {
-public:
-    enum BodyIndexes { THEN_BODY_INDEX = 0, ELSE_BODY_INDEX = 1 };
-    NGRAPH_RTTI_DECLARATION;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    /// \brief Constructs If with condition
-    ///
-    /// \param execution_condition condition node.
-    If(const Output<Node>& execution_condition);
-    If();
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    /// \brief gets then_body as ngraph::Function.
-    ///
-    /// \return then_body as ngraph::Function.
-    const std::shared_ptr<Function>& get_then_body() const {
-        return m_bodies[THEN_BODY_INDEX];
-    }
-    /// \brief gets else_body as ngraph::Function.
-    ///
-    /// \return else_body as ngraph::Function.
-    const std::shared_ptr<Function>& get_else_body() const {
-        return m_bodies[ELSE_BODY_INDEX];
-    }
-    /// \brief sets new ngraph::Function as new then_body.
-    ///
-    /// \param body new body for 'then' branch.
-    void set_then_body(const std::shared_ptr<Function>& body) {
-        m_bodies[THEN_BODY_INDEX] = body;
-    }
-    /// \brief sets new ngraph::Function as new else_body.
-    ///
-    /// \param body new body for 'else' branch.
-    void set_else_body(const std::shared_ptr<Function>& body) {
-        m_bodies[ELSE_BODY_INDEX] = body;
-    }
-    /// \brief sets new input to the operation associated with parameters
-    /// of each sub-graphs
-    ///
-    /// \param value input to operation
-    /// \param then_parameter parameter for then_body or nullptr
-    /// \param else_parameter parameter for else_body or nullptr
-    void set_input(const Output<Node>& value,
-                   const std::shared_ptr<Parameter>& then_parameter,
-                   const std::shared_ptr<Parameter>& else_parameter);
-    /// \brief sets new output from the operation associated with results
-    /// of each sub-graphs
-    ///
-    /// \param then_result result from then_body
-    /// \param else_result result from else_body
-    /// \return output from operation
-    Output<Node> set_output(const std::shared_ptr<Result>& then_result, const std::shared_ptr<Result>& else_result);
-    void validate_and_infer_types() override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-private:
-    using OutputMap = std::map<int64_t, std::shared_ptr<MultiSubGraphOp::OutputDescription>>;
-    void validate_and_infer_type_body(const std::shared_ptr<Function>& body,
-                                      const ngraph::op::util::MultiSubgraphInputDescriptionVector& input_descriptors);
-    OutputMap get_mapping_outputs_on_body_description(
-        const ngraph::op::util::MultiSubgraphOutputDescriptionVector& output_descriptors);
-};
+using ov::op::v8::If;
 } // namespace v8
 } // namespace op
 } // namespace ngraph


@@ -10,358 +10,18 @@
 #include "ngraph/attribute_adapter.hpp"
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/attr_types.hpp"
+#include "openvino/op/interpolate.hpp"
 namespace ngraph {
 namespace op {
 namespace v0 {
-/// \brief Structure that specifies attributes for interpolation
-struct InterpolateAttrs {
-    // specify dimension indices where interpolation is applied, and `axes` is any
-    // unordered list of indeces of different dimensions of input tensor. Required.
-    AxisSet axes;
-    // specifies type of interpolation
-    // one of `nearest`, `linear`, `cubic`, `area`. Required.
-    std::string mode;
-    // a flag that specifies whether to align corners or not.
-    // `true` (default) means the alignment is applied,
-    // `false` means the alignment isn't applied.
-    bool align_corners = true;
-    // a flag that specifies whether to perform anti-aliasing. default is `false`
-    bool antialias = false;
-    // specify the number of pixels to add to the beginning of the image being
-    // interpolated. This addition of pixels is done before interpolation calculation.
-    std::vector<size_t> pads_begin;
-    // specify the number of pixels to add to the end of the image being interpolated.
-    // This addition of pixels is done before interpolation calculation.
-    std::vector<size_t> pads_end;
-};
-/// \brief Layer which performs bilinear interpolation
-class NGRAPH_API Interpolate : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    enum class InterpolateMode {
-        NEAREST,
-        LINEAR,
-        CUBIC,
-        AREA,
-        nearest NGRAPH_ENUM_DEPRECATED("Please use NEAREST instead") = NEAREST,
-        linear NGRAPH_ENUM_DEPRECATED("Please use LINEAR instead") = LINEAR,
-        cubic NGRAPH_ENUM_DEPRECATED("Please use CUBIC instead") = CUBIC,
-        area NGRAPH_ENUM_DEPRECATED("Please use AREA instead") = AREA
-    };
-    Interpolate() = default;
-    /// \brief Constructs a Interpolate operation
-    ///
-    /// \param image Input image
-    /// \param output_shape Output shape of spatial axes
-    /// \param attrs Interpolation attributes
-    Interpolate(const Output<Node>& image, const Output<Node>& output_shape, const InterpolateAttrs& attrs);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    const InterpolateAttrs& get_attrs() const {
-        return m_attrs;
-    }
-private:
-    InterpolateAttrs m_attrs;
-};
+using InterpolateAttrs = ov::op::v0::Interpolate::Attributes;
+using ov::op::v0::Interpolate;
 } // namespace v0
 namespace v4 {
-class NGRAPH_API Interpolate : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Shape calculation mode
-    ///
-    /// sizes - output shape for interpolated axes is calculated using input `sizes`
-    /// scales - output shape for interpolated axes is calculated using input `scales`
-    enum class ShapeCalcMode {
-        SIZES,
-        SCALES,
-        sizes NGRAPH_ENUM_DEPRECATED("Please use SIZES instead") = SIZES,
-        scales NGRAPH_ENUM_DEPRECATED("Please use SCALES instead") = SCALES
-    };
-    /// \brief Interpolation mode
-    ///
-    /// nearest - nearest interpolation
-    /// linear - linear interpolation as in TensorFlow
-    /// linear_onnx - linear interpolation as in ONNX
-    /// cubic - cubic interpolation
-    enum class InterpolateMode {
-        NEAREST,
-        LINEAR,
-        LINEAR_ONNX,
-        CUBIC,
-        nearest NGRAPH_ENUM_DEPRECATED("Please use NEAREST instead") = NEAREST,
-        linear NGRAPH_ENUM_DEPRECATED("Please use LINEAR instead") = LINEAR,
-        linear_onnx NGRAPH_ENUM_DEPRECATED("Please use LINEAR_ONNX instead") = LINEAR_ONNX,
-        cubic NGRAPH_ENUM_DEPRECATED("Please use CUBIC instead") = CUBIC
-    };
-    /// \brief Mode of the calculation of the source coordinate from resized one
-    ///
-    /// These modes are modes from ONNX runtime.
-    enum class CoordinateTransformMode {
-        HALF_PIXEL,
-        PYTORCH_HALF_PIXEL,
-        ASYMMETRIC,
-        TF_HALF_PIXEL_FOR_NN,
-        ALIGN_CORNERS,
-        half_pixel NGRAPH_ENUM_DEPRECATED("Please use HALF_PIXEL instead") = HALF_PIXEL,
-        pytorch_half_pixel NGRAPH_ENUM_DEPRECATED("Please use PYTORCH_HALF_PIXEL instead") = PYTORCH_HALF_PIXEL,
-        asymmetric NGRAPH_ENUM_DEPRECATED("Please use ASYMMETRIC instead") = ASYMMETRIC,
-        tf_half_pixel_for_nn NGRAPH_ENUM_DEPRECATED("Please use TF_HALF_PIXEL_FOR_NN instead") = TF_HALF_PIXEL_FOR_NN,
-        align_corners NGRAPH_ENUM_DEPRECATED("Please use ALIGN_CORNERS instead") = ALIGN_CORNERS
-    };
-    /// \brief Round modes for the nearest interpolation.
-    enum class NearestMode {
-        ROUND_PREFER_FLOOR,
-        ROUND_PREFER_CEIL,
-        FLOOR,
-        CEIL,
-        SIMPLE,
-        round_prefer_floor NGRAPH_ENUM_DEPRECATED("Please use ROUND_PREFER_FLOOR instead") = ROUND_PREFER_FLOOR,
-        round_prefer_ceil NGRAPH_ENUM_DEPRECATED("Please use ROUND_PREFER_CEIL instead") = ROUND_PREFER_CEIL,
-        floor NGRAPH_ENUM_DEPRECATED("Please use FLOOR instead") = FLOOR,
-        ceil NGRAPH_ENUM_DEPRECATED("Please use CEIL instead") = CEIL,
-        simple NGRAPH_ENUM_DEPRECATED("Please use SIMPLE instead") = SIMPLE
-    };
-    struct InterpolateAttrs {
-        // specifies type of interpolation
-        // one of `nearest`, `linear`, `linear_onnx`, `cubic` Required.
-        InterpolateMode mode = InterpolateMode::NEAREST;
-        // specifies shape calculation mode
-        // one of `sizes`, `scales` Required
-        ShapeCalcMode shape_calculation_mode = ShapeCalcMode::SIZES;
-        // specify the number of pixels to add to the beginning of the image being
-        // interpolated. This addition of pixels is done before interpolation
-        // calculation.
-        std::vector<size_t> pads_begin;
-        // specify the number of pixels to add to the end of the image being
-        // interpolated. This addition of pixels is done before interpolation
-        // calculation.
-        std::vector<size_t> pads_end;
-        // specifies how to transform the coordinate in the resized tensor to the
-        // coordinate in the original tensor. one of `half_pixel`, `pytorch_half_pixel`,
-        // `asymmetric`, `tf_half_pixel_for_nn`, `align_corners`
-        CoordinateTransformMode coordinate_transformation_mode = CoordinateTransformMode::HALF_PIXEL;
-        // specifies round mode when `mode == nearest` and is used only when `mode ==
-        // nearest`. one of `round_prefer_floor`, `round_prefer_ceil`, `floor`, `ceil`,
-        // `simple`
-        NearestMode nearest_mode = NearestMode::ROUND_PREFER_FLOOR;
-        // a flag that specifies whether to perform anti-aliasing. default is `false`
-        bool antialias = false;
-        // specifies the parameter *a* for cubic interpolation (see, e.g.
-        // [article](https://ieeexplore.ieee.org/document/1163711/)). *cube_coeff* is
-        // used only when `mode == cubic`
-        double cube_coeff = -0.75f;
-        InterpolateAttrs() = default;
-        InterpolateAttrs(InterpolateMode mode,
-                         ShapeCalcMode shape_calculation_mode,
-                         const std::vector<size_t>& pads_begin,
-                         const std::vector<size_t>& pads_end,
-                         CoordinateTransformMode coordinate_transformation_mode = CoordinateTransformMode::HALF_PIXEL,
-                         NearestMode nearest_mode = NearestMode::ROUND_PREFER_FLOOR,
-                         bool antialias = false,
-                         double cube_coeff = -0.75)
-            : mode(mode),
-              shape_calculation_mode(shape_calculation_mode),
-              pads_begin(pads_begin),
-              pads_end(pads_end),
-              coordinate_transformation_mode(coordinate_transformation_mode),
-              nearest_mode(nearest_mode),
-              antialias(antialias),
-              cube_coeff(cube_coeff) {}
-    };
-    Interpolate() = default;
-    /// \brief Constructs a Interpolate operation without 'axes' input.
-    ///
-    /// \param image Input image
-    /// \param output_shape Output shape of spatial axes
-    /// \param scales Scales of spatial axes, i.e. output_shape / input_shape
-    /// \param attrs Interpolation attributes
-    Interpolate(const Output<Node>& image,
-                const Output<Node>& output_shape,
-                const Output<Node>& scales,
-                const InterpolateAttrs& attrs);
-    /// \brief Constructs a Interpolate operation with 'axes' input.
-    ///
-    /// \param image Input image
-    /// \param output_shape Output shape of spatial axes
-    /// \param scales Scales of spatial axes, i.e. output_shape / input_shape
-    /// \param axes Interpolation axes
-    /// \param attrs Interpolation attributes
-    Interpolate(const Output<Node>& image,
-                const Output<Node>& output_shape,
-                const Output<Node>& scales,
-                const Output<Node>& axes,
-                const InterpolateAttrs& attrs);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-    const InterpolateAttrs& get_attrs() const {
-        return m_attrs;
-    }
-protected:
-    /// \return The interpolation axes.
-    std::vector<int64_t> get_axes() const;
-private:
-    bool evaluate_interpolate(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
-    InterpolateAttrs m_attrs;
-    /// \brief Corrects pads_begin and pads_end attributes.
-    ///
-    /// \details When Interpolate-4 is a result of some transformation, it is possible
-    ///          that pads_begin.size() != pads_end.size() or
-    ///          pads_begin.size() != input_rank. In such case, we should correct
-    ///          pads_begin and pads_end, using padding of pads_begin and pads_end by
-    ///          zeros or using pads_begin[0 : input_rank], pads_end[0 : input_rank].
-    ///
-    ///          Padding of pads_begin is performed when pads_begin.size() < input_rank,
-    ///          and pads_begin[0 : input_rank] is used when
-    ///          pads_begin.size() < input_rank.
-    ///
-    ///          Similarly for pads_end.
-    void correct_pads();
-    /// \brief Calculates input shape after padding.
-    ///
-    /// \param input_shape Shape of input data.
-    ///
-    /// \return Padded input shape, i.e. input_shape + pads_begin + pads_end
-    PartialShape get_padded_input_shape(const PartialShape& input_shape) const;
-    /// \brief Infers output shape using scales.
-    ///
-    /// \param output_shape[in,out] output shape
-    /// \param axes Interpolation axes
-    /// \param scales Scales for interpolated axes
-    /// \param padded_input_shape input shape after padding
-    void infer_using_scales(PartialShape& output_shape,
-                            const std::vector<int64_t>& axes,
-                            const std::vector<float>& scales,
-                            const PartialShape& padded_input_shape) const;
-    /// \brief Infers output shape using sizes.
-    ///
-    /// \param output_shape[in,out] output shape
-    /// \param axes Interpolation axes
-    /// \param sizes sizes for interpolated axes
-    void infer_using_shapes(PartialShape& output_shape,
-                            const std::vector<int64_t>& axes,
-                            const std::vector<int64_t>& sizes) const;
-};
+using ov::op::v4::Interpolate;
 } // namespace v4
 using v0::Interpolate;
 using v0::InterpolateAttrs;
 } // namespace op
-//---------------------------------------- v0 --------------------------------------------------
-NGRAPH_API
-std::ostream& operator<<(std::ostream& s, const op::v0::Interpolate::InterpolateMode& type);
-//---------------------------------------- v4 --------------------------------------------------
-NGRAPH_API
-std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::InterpolateMode& type);
-NGRAPH_API
-std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::CoordinateTransformMode& type);
-NGRAPH_API
-std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::NearestMode& type);
-NGRAPH_API
-std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::ShapeCalcMode& type);
 } // namespace ngraph
-namespace ov {
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::v0::Interpolate::InterpolateMode>
-    : public EnumAttributeAdapterBase<ngraph::op::v0::Interpolate::InterpolateMode> {
-public:
-    AttributeAdapter(ngraph::op::v0::Interpolate::InterpolateMode& value)
-        : EnumAttributeAdapterBase<ngraph::op::v0::Interpolate::InterpolateMode>(value) {}
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v0::Interpolate::InterpolateMode>", 0};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::v4::Interpolate::InterpolateMode>
-    : public EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::InterpolateMode> {
-public:
-    AttributeAdapter(ngraph::op::v4::Interpolate::InterpolateMode& value)
-        : EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::InterpolateMode>(value) {}
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v4::Interpolate::InterpolateMode>", 4};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::v4::Interpolate::CoordinateTransformMode>
-    : public EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::CoordinateTransformMode> {
-public:
-    AttributeAdapter(ngraph::op::v4::Interpolate::CoordinateTransformMode& value)
-        : EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::CoordinateTransformMode>(value) {}
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v4::Interpolate::CoordinateTransformMode>", 4};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::v4::Interpolate::NearestMode>
-    : public EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::NearestMode> {
-public:
-    AttributeAdapter(ngraph::op::v4::Interpolate::NearestMode& value)
-        : EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::NearestMode>(value) {}
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v4::Interpolate::NearestMode>", 4};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::v4::Interpolate::ShapeCalcMode>
-    : public EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::ShapeCalcMode> {
-public:
-    AttributeAdapter(ngraph::op::v4::Interpolate::ShapeCalcMode& value)
-        : EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::ShapeCalcMode>(value) {}
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v4::Interpolate::ShapeCalcMode>", 4};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-} // namespace ov
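One subtlety here: the free-standing v0 InterpolateAttrs struct is not aliased class-for-class but mapped onto the nested ov::op::v0::Interpolate::Attributes type. Assuming that nested struct kept the fields shown in the removed code, old field-by-field initialization should still compile (a sketch, not verified against the new header):

#include "ngraph/op/interpolate.hpp"

ngraph::op::v0::InterpolateAttrs make_attrs() {
    ngraph::op::v0::InterpolateAttrs attrs;  // alias of ov::op::v0::Interpolate::Attributes
    attrs.axes = ngraph::AxisSet{2, 3};      // interpolate over the spatial dims
    attrs.mode = "linear";
    attrs.align_corners = false;
    return attrs;
}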


@@ -5,29 +5,12 @@
 #pragma once
 #include "ngraph/op/util/binary_elementwise_comparison.hpp"
+#include "openvino/op/less.hpp"
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Elementwise less-than operation.
-class NGRAPH_API Less : public util::BinaryElementwiseComparison {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs a less-than operation.
-    Less() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
-    /// \brief Constructs a less-than operation.
-    ///
-    /// \param arg0 Node that produces the first input tensor.
-    /// \param arg1 Node that produces the second input tensor.
-    /// \param auto_broadcast Auto broadcast specification
-    Less(const Output<Node>& arg0,
-         const Output<Node>& arg1,
-         const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v1::Less;
 } // namespace v1
 } // namespace op
 } // namespace ngraph


@@ -5,30 +5,12 @@
 #pragma once
 #include "ngraph/op/util/binary_elementwise_comparison.hpp"
+#include "openvino/op/less_eq.hpp"
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Elementwise less-than-or-equal operation.
-class NGRAPH_API LessEqual : public util::BinaryElementwiseComparison {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs a less-than-or-equal operation.
-    LessEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
-    /// \brief Constructs a less-than-or-equal operation.
-    ///
-    /// \param arg0 Node that produces the first input tensor.
-    /// \param arg1 Node that produces the second input tensor.
-    /// \param auto_broadcast Auto broadcast specification
-    LessEqual(const Output<Node>& arg0,
-              const Output<Node>& arg1,
-              const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v1::LessEqual;
 } // namespace v1
 } // namespace op
 } // namespace ngraph


@@ -5,26 +5,12 @@
 #pragma once
 #include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
+#include "openvino/op/log.hpp"
 namespace ngraph {
 namespace op {
 namespace v0 {
-/// \brief Elementwise natural log operation.
-class NGRAPH_API Log : public util::UnaryElementwiseArithmetic {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs a natural log operation.
-    Log() = default;
-    /// \brief Constructs a natural log operation.
-    ///
-    /// \param arg Node that produces the input tensor.
-    Log(const Output<Node>& arg);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v0::Log;
 } // namespace v0
 using v0::Log;
 } // namespace op

View File

@ -5,39 +5,12 @@
#pragma once #pragma once
#include "ngraph/op/op.hpp" #include "ngraph/op/op.hpp"
#include "openvino/op/log_softmax.hpp"
namespace ngraph { namespace ngraph {
namespace op { namespace op {
namespace v5 { namespace v5 {
class NGRAPH_API LogSoftmax : public Op { using ov::op::v5::LogSoftmax;
public:
NGRAPH_RTTI_DECLARATION;
LogSoftmax() = default;
/// \brief Constructs a LogSoftmax operation.
///
/// \param arg Node that produces the first input tensor.<br>
/// `[d0, ...]`
/// \param axis The axis position (0-based) on which to calculate the LogSoftmax.
///
/// Output `[d0, ...]`
///
LogSoftmax(const Output<Node>& arg, const int64_t axis);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
int64_t get_axis() const {
return m_axis;
}
void set_axis(const int64_t axis) {
m_axis = axis;
}
private:
int64_t m_axis = 1;
};
} // namespace v5 } // namespace v5
} // namespace op } // namespace op
} // namespace ngraph } // namespace ngraph
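
A quick usage sketch for the aliased op; the shapes are illustrative assumptions:

// LogSoftmax over axis 1 (the class dimension) of [N, C] logits; typically
// numerically stabler than composing separate Log and Softmax nodes.
auto logits = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{8, 1000});
auto log_probs = std::make_shared<ngraph::op::v5::LogSoftmax>(logits, 1);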

View File

@ -12,84 +12,12 @@
#include "ngraph/op/parameter.hpp" #include "ngraph/op/parameter.hpp"
#include "ngraph/op/tensor_iterator.hpp" #include "ngraph/op/tensor_iterator.hpp"
#include "ngraph/op/util/sub_graph_base.hpp" #include "ngraph/op/util/sub_graph_base.hpp"
#include "openvino/op/loop.hpp"
namespace ngraph { namespace ngraph {
namespace op { namespace op {
namespace v5 { namespace v5 {
/// \brief Iterate a body over tensors, accumulating into tensors. using ov::op::v5::Loop;
class NGRAPH_API Loop : public op::util::SubGraphOp {
public:
/// \brief Allows to define the purpose of inputs/outputs in the body
struct SpecialBodyPorts {
SpecialBodyPorts() = default;
SpecialBodyPorts(int64_t in_current_iteration_input_idx, int64_t in_body_condition_output_idx)
: current_iteration_input_idx(in_current_iteration_input_idx),
body_condition_output_idx(in_body_condition_output_idx) {}
// -1 means the input is not provided, this input is optional
int64_t current_iteration_input_idx = -1;
// -1 means the output is not provided,
// this output is required, throw an exception if not provided
int64_t body_condition_output_idx = -1;
};
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs a Loop operation.
Loop() = default;
/// \brief Constructs a Loop operation.
///
/// \param trip_count Node specifies the maximum number of iterations.
/// \param execution_condition Node determines whether to execute the first
/// iteration or not.
Loop(const Output<Node>& trip_count, const Output<Node>& execution_condition);
Output<Node> get_concatenated_slices(const Output<Node>& value,
int64_t start,
int64_t stride,
int64_t part_size,
int64_t end,
int64_t axis) override;
void set_special_body_ports(const SpecialBodyPorts& special_body_ports) {
m_special_body_ports = special_body_ports;
}
SpecialBodyPorts get_special_body_ports() const {
return m_special_body_ports;
}
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
protected:
Loop(const Loop&);
private:
void clone_to(Loop& dst, const OutputVector& new_args) const;
SpecialBodyPorts m_special_body_ports;
};
} // namespace v5 } // namespace v5
} // namespace op } // namespace op
} // namespace ngraph } // namespace ngraph
namespace ov {
template <>
class NGRAPH_API AttributeAdapter<ngraph::op::v5::Loop::SpecialBodyPorts>
: public DirectValueAccessor<ngraph::op::v5::Loop::SpecialBodyPorts> {
public:
AttributeAdapter(ngraph::op::v5::Loop::SpecialBodyPorts& value)
: DirectValueAccessor<ngraph::op::v5::Loop::SpecialBodyPorts>(value) {}
static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v5::Loop::SpecialBodyPorts>", 0};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
};
} // namespace ov
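
Because the SpecialBodyPorts wiring is easy to get wrong, here is a minimal sketch of a counted Loop; the body contents, names, and shapes are illustrative assumptions:

using namespace ngraph;
// Body function: condition output first (index 0), then the carried value.
auto body_param = std::make_shared<op::v0::Parameter>(element::f32, Shape{1});
auto one = op::v0::Constant::create(element::f32, Shape{1}, {1.0f});
auto next = std::make_shared<op::v1::Add>(body_param, one);
auto body_cond = op::v0::Constant::create(element::boolean, Shape{1}, {true});
auto body = std::make_shared<Function>(OutputVector{body_cond, next}, ParameterVector{body_param});

auto trip_count = op::v0::Constant::create(element::i64, Shape{1}, {3});
auto exec_cond = op::v0::Constant::create(element::boolean, Shape{1}, {true});
auto start = std::make_shared<op::v0::Parameter>(element::f32, Shape{1});

auto loop = std::make_shared<op::v5::Loop>(trip_count, exec_cond);
loop->set_function(body);
loop->set_special_body_ports({-1, 0});            // no current-iteration input; condition is body output 0
loop->set_merged_input(body_param, start, next);  // feed the result back into the next iteration
auto final_value = loop->get_iter_value(next, -1);  // -1 requests the value from the last iteration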

View File

@ -5,74 +5,12 @@
#pragma once #pragma once
#include "ngraph/op/op.hpp" #include "ngraph/op/op.hpp"
#include "openvino/op/lrn.hpp"
namespace ngraph { namespace ngraph {
namespace op { namespace op {
namespace v0 { namespace v0 {
// clang-format off using ov::op::v0::LRN;
/// \brief Elementwise Local Response Normalization (LRN) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[n, c, d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$N[n, c, d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[n, c, d_1,\dots,d_n] = \frac{N[n,i,d_1,\dots,d_n]}{ (bias + alpha * (\sum_{i=max(0,(nsize-1)/2)}^{min(C, (nsize-1)/2)+1} N[n,i,d_1,\dots,d_n]^{2}) ^ {2})}\f$ |
// clang-format on
class NGRAPH_API LRN : public Op {
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs a LRN operation.
LRN() = default;
/// \brief Constructs a LRN operation.
///
/// \param arg Node that produces the input tensor.
LRN(const Output<Node>& arg, double alpha, double beta, double bias, size_t size);
LRN(const Output<Node>& arg, const Output<Node>& axes, double alpha, double beta, double bias, size_t size);
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
double get_alpha() const {
return m_alpha;
}
void set_alpha(double alpha) {
m_alpha = alpha;
}
double get_beta() const {
return m_beta;
}
void set_beta(double beta) {
m_beta = beta;
}
double get_bias() const {
return m_bias;
}
void set_bias(double bias) {
m_bias = bias;
}
size_t get_nsize() const {
return m_size;
}
void set_nsize(size_t size) {
m_size = size;
}
AxisSet get_reduction_axes() const;
protected:
double m_alpha;
double m_beta;
double m_bias;
size_t m_size;
};
} // namespace v0 } // namespace v0
using v0::LRN; using v0::LRN;
} // namespace op } // namespace op
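
A construction sketch for the aliased op; the AlexNet-style hyper-parameters are illustrative, not prescribed by this change:

// Across-channel LRN on an NCHW tensor: each element is divided by a power
// (beta) of a biased, alpha-scaled sum of squares over `size` nearby channels.
auto data = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 64, 56, 56});
auto lrn = std::make_shared<ngraph::op::v0::LRN>(data, /*alpha=*/1e-4, /*beta=*/0.75, /*bias=*/1.0, /*size=*/5);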

View File

@ -13,391 +13,18 @@
#include "ngraph/op/op.hpp" #include "ngraph/op/op.hpp"
#include "ngraph/op/util/activation_functions.hpp" #include "ngraph/op/util/activation_functions.hpp"
#include "ngraph/op/util/rnn_cell_base.hpp" #include "ngraph/op/util/rnn_cell_base.hpp"
#include "openvino/op/lstm_cell.hpp"
namespace ngraph { namespace ngraph {
namespace op { namespace op {
enum class LSTMWeightsFormat { using ov::op::LSTMWeightsFormat;
FICO, // IE
ICOF, // PyTorch
IFCO, // DNNL, TF, MxNet
IFOC, // Caffe
IOFC, // ONNX
};
namespace v0 { namespace v0 {
/// using ov::op::v0::LSTMCell;
/// \brief Class for single lstm cell node.
///
/// \note Following implementation supports:
/// \li \c peepholes Gers & Schmidhuber (2000)
/// https://ieeexplore.ieee.org/document/861302
/// \li Coupling input and forget gates.
///
/// \note It calculates following equations:
///
/// it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)
/// ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)
/// ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)
/// Ct = ft (.) Ct-1 + it (.) ct
/// ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)
/// Ht = ot (.) h(Ct)
///
/// * - Is a dot product,
/// (.) - is a Hadamard product (element-wise),
/// f, g, h - are activation functions.
///
/// \note This class represents only a single *cell* (for the current time step) and not
/// the whole LSTM Sequence layer
///
/// \sa LSTMSequence, RNNCell, GRUCell
///
class NGRAPH_API LSTMCell : public util::RNNCellBase {
public:
NGRAPH_RTTI_DECLARATION;
LSTMCell();
///
/// \brief Constructs LSTMCell node.
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] initial_cell_state The cell state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] W The gate weights tensor with shape:
/// [4*hidden_size, input_size].
/// \param[in] R The recurrence weights tensor with shape:
/// [4*hidden_size, hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] weights_format The order of gates in weights tensors. The
/// default format is IFCO since it is used by
/// DNNL.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on input of activation functions.
/// \param[in] input_forget Controls coupling input and forget gates.
///
LSTMCell(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& W,
const Output<Node>& R,
std::size_t hidden_size,
LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh", "tanh"},
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
float clip = 0.f,
bool input_forget = false);
///
/// \brief Constructs LSTMCell node.
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] initial_cell_state The cell state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] W The weight tensor with shape: [4*hidden_size,
/// input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [4*hidden_size, hidden_size].
/// \param[in] B The bias tensor for gates with shape:
/// [4*hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] weights_format The order of gates in weights tensors. The
/// default format is IFCO since it is used by
/// DNNL.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on input of activation functions.
/// \param[in] input_forget Controls coupling input and forget gates.
///
LSTMCell(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& W,
const Output<Node>& R,
const Output<Node>& B,
std::size_t hidden_size,
LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh", "tanh"},
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
float clip = 0.f,
bool input_forget = false);
///
/// \brief Constructs LSTMCell node.
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] initial_cell_state The cell state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] W The weight tensor with shape: [4*hidden_size,
/// input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [4*hidden_size, hidden_size].
/// \param[in] B The bias tensor for gates with shape:
/// [4*hidden_size].
/// \param[in] P The weight tensor for peepholes with shape:
/// [3*hidden_size] - 3 equals to only iof gates.
/// The order is: input, output, forget gates.
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] weights_format The order of gates in weights tensors. The
/// default format is IFCO since it is used by
/// DNNL.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on input of activation functions.
/// \param[in] input_forget Controls coupling input and forget gates.
///
LSTMCell(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& W,
const Output<Node>& R,
const Output<Node>& B,
const Output<Node>& P,
std::size_t hidden_size,
LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh", "tanh"},
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
float clip = 0.f,
bool input_forget = false);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool get_input_forget() const {
return m_input_forget;
}
LSTMWeightsFormat get_weights_format() const {
return m_weights_format;
}
private:
///
/// \brief Creates the default bias input initialized with zeros.
///
/// \return The object of Output class.
///
Output<Node> get_default_bias_input() const;
///
/// \brief Creates the default peepholes input initialized with zeros.
///
/// \return The object of Output class.
///
Output<Node> get_default_peepholes_input() const;
///
/// \brief The Activation function f.
///
util::ActivationFunction m_activation_f;
///
/// \brief The Activation function g.
///
util::ActivationFunction m_activation_g;
///
/// \brief The Activation function h.
///
util::ActivationFunction m_activation_h;
///
/// \brief Controls whether to couple input and forget gates.
///
bool m_input_forget = false;
///
/// \brief The order of gates in weights tensors.
///
LSTMWeightsFormat m_weights_format;
static constexpr std::size_t s_gates_count{4};
static constexpr std::size_t s_peepholes_count{3};
};
} // namespace v0 } // namespace v0
namespace v4 { namespace v4 {
/// using ov::op::v4::LSTMCell;
/// \brief Class for single lstm cell node.
///
/// \note Following implementation supports:
/// \li \c peepholes Gers & Schmidhuber (2000)
/// https://ieeexplore.ieee.org/document/861302
/// \li Coupling input and forget gates.
///
/// \note It calculates following equations:
///
/// it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)
/// ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Wbf + Rbf)
/// ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)
/// Ct = ft (.) Ct-1 + it (.) ct
/// ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Wbo + Rbo)
/// Ht = ot (.) h(Ct)
///
/// * - Is a dot product,
/// (.) - is a Hadamard product (element-wise),
/// f, g, h - are activation functions.
///
/// \note This class represents only a single *cell* (for the current time step) and not
/// the whole LSTM Sequence layer
///
/// \sa LSTMSequence, RNNCell, GRUCell
///
class NGRAPH_API LSTMCell : public util::RNNCellBase {
public:
NGRAPH_RTTI_DECLARATION;
LSTMCell();
///
/// \brief Constructs LSTMCell node.
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] initial_cell_state The cell state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] W The gate weights tensor with shape:
/// [4*hidden_size, input_size].
/// \param[in] R The recurrence weights tensor with shape:
/// [4*hidden_size, hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on input of activation functions.
LSTMCell(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& W,
const Output<Node>& R,
std::size_t hidden_size,
const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh", "tanh"},
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
float clip = 0.f);
///
/// \brief Constructs LSTMCell node.
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] initial_cell_state The cell state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] W The weight tensor with shape: [4*hidden_size,
/// input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [4*hidden_size, hidden_size].
/// \param[in] B The bias tensor for gates with shape:
/// [4*hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on input of activation functions.
///
LSTMCell(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& W,
const Output<Node>& R,
const Output<Node>& B,
std::size_t hidden_size,
const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh", "tanh"},
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
float clip = 0.f);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
private:
///
/// \brief Creates the default bias input initialized with zeros.
///
/// \return The object of Output class.
///
Output<Node> get_default_bias_input() const;
///
/// \brief The Activation function f.
///
util::ActivationFunction m_activation_f;
///
/// \brief The Activation function g.
///
util::ActivationFunction m_activation_g;
///
/// \brief The Activation function h.
///
util::ActivationFunction m_activation_h;
static constexpr std::size_t s_gates_count{4};
};
} // namespace v4 } // namespace v4
} // namespace op } // namespace op
NGRAPH_API
std::ostream& operator<<(std::ostream& s, const op::LSTMWeightsFormat& type);
} // namespace ngraph } // namespace ngraph
namespace ov {
template <>
class NGRAPH_API AttributeAdapter<ngraph::op::LSTMWeightsFormat>
: public EnumAttributeAdapterBase<ngraph::op::LSTMWeightsFormat> {
public:
AttributeAdapter(ngraph::op::LSTMWeightsFormat& value)
: EnumAttributeAdapterBase<ngraph::op::LSTMWeightsFormat>(value) {}
static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::LSTMWeightsFormat>", 1};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
};
} // namespace ov
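
To make the v0/v4 difference concrete, a sketch of the v4 cell, which drops the peepholes, weights_format, and input_forget knobs; all sizes are illustrative assumptions:

const size_t batch = 2, input_size = 16, hidden_size = 32;
auto X = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{batch, input_size});
auto H = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{batch, hidden_size});
auto C = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{batch, hidden_size});
auto W = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{4 * hidden_size, input_size});
auto R = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{4 * hidden_size, hidden_size});
// Outputs: Ht (output 0) and Ct (output 1), both [batch_size, hidden_size].
auto cell = std::make_shared<ngraph::op::v4::LSTMCell>(X, H, C, W, R, hidden_size);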

View File

@ -15,184 +15,16 @@
#include "ngraph/op/lstm_cell.hpp" #include "ngraph/op/lstm_cell.hpp"
#include "ngraph/op/util/attr_types.hpp" #include "ngraph/op/util/attr_types.hpp"
#include "ngraph/op/util/rnn_cell_base.hpp" #include "ngraph/op/util/rnn_cell_base.hpp"
#include "openvino/op/lstm_sequence.hpp"
namespace ngraph { namespace ngraph {
namespace op { namespace op {
namespace v0 { namespace v0 {
using ov::op::v0::LSTMSequence;
///
/// \brief Class for lstm sequence node.
///
/// \note It follows notation and equations defined as in ONNX standard:
/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM
///
/// \sa LSTMCell, RNNCell, GRUCell
///
///
class NGRAPH_API LSTMSequence : public Op {
public:
NGRAPH_RTTI_DECLARATION;
LSTMSequence();
using direction = RecurrentSequenceDirection;
size_t get_default_output_index() const override {
return no_default_index();
}
explicit LSTMSequence(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& sequence_lengths,
const Output<Node>& W,
const Output<Node>& R,
const Output<Node>& B,
const Output<Node>& P,
const std::int64_t hidden_size,
const direction lstm_direction,
LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
const std::vector<float> activations_alpha = {},
const std::vector<float> activations_beta = {},
const std::vector<std::string> activations = {"sigmoid", "tanh", "tanh"},
const float clip_threshold = 0,
const bool input_forget = false);
explicit LSTMSequence(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& sequence_lengths,
const Output<Node>& W,
const Output<Node>& R,
const Output<Node>& B,
const std::int64_t hidden_size,
const direction lstm_direction,
LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
const std::vector<std::string>& activations = {"sigmoid", "tanh", "tanh"},
const float clip_threshold = 0,
const bool input_forget = false);
virtual void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
std::vector<float> get_activations_alpha() const {
return m_activations_alpha;
}
std::vector<float> get_activations_beta() const {
return m_activations_beta;
}
std::vector<std::string> get_activations() const {
return m_activations;
}
float get_clip_threshold() const {
return m_clip_threshold;
}
direction get_direction() const {
return m_direction;
}
std::int64_t get_hidden_size() const {
return m_hidden_size;
}
bool get_input_forget() const {
return m_input_forget;
}
LSTMWeightsFormat get_weights_format() const {
return m_weights_format;
}
private:
///
/// \brief Gets the masked value according to sequence length in a batch.
///
/// \note Zeros out values or sets them to default value for inputs with
/// sequence length shorter than the currently processed time step.
///
/// \param[in] data The input value.
/// \param[in] time_step The current time step denoting sequence length.
/// \param[in] batch_axis The batch axis index of data tensor.
/// \param[in] default_value The default value for masked elements.
///
/// \return The masked value.
///
std::shared_ptr<Node> get_masked_node(const Output<Node>& data,
std::int32_t time_step,
std::size_t batch_axis = 0,
const Output<Node>& default_value = Output<Node>()) const;
OutputVector lstm_pass(bool is_reverse = false) const;
// Split(bi-directional) and squeeze input data to remove 'num_direction' dimension.
std::shared_ptr<Node> prepare_input(Output<Node> node, bool is_reverse, size_t num_direction_axis = 0) const;
std::vector<float> m_activations_alpha;
std::vector<float> m_activations_beta;
std::vector<std::string> m_activations;
float m_clip_threshold;
direction m_direction;
std::int64_t m_hidden_size;
bool m_input_forget;
LSTMWeightsFormat m_weights_format;
};
} // namespace v0 } // namespace v0
namespace v5 { namespace v5 {
/// using ov::op::v5::LSTMSequence;
/// \brief Class for lstm sequence node.
///
/// \note It follows notation and equations defined as in ONNX standard:
/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM
///
/// \sa LSTMCell, RNNCell, GRUCell
///
///
class NGRAPH_API LSTMSequence : public util::RNNCellBase {
public:
NGRAPH_RTTI_DECLARATION;
LSTMSequence() = default;
using direction = RecurrentSequenceDirection;
size_t get_default_output_index() const override {
return no_default_index();
}
explicit LSTMSequence(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& sequence_lengths,
const Output<Node>& W,
const Output<Node>& R,
const Output<Node>& B,
const std::int64_t hidden_size,
const direction lstm_direction,
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
const std::vector<std::string>& activations = {"sigmoid", "tanh", "tanh"},
const float clip = 0.f)
: RNNCellBase({X, initial_hidden_state, initial_cell_state, sequence_lengths, W, R, B},
hidden_size,
clip,
activations,
activations_alpha,
activations_beta),
m_direction(lstm_direction) {
constructor_validate_and_infer_types();
}
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
direction get_direction() const {
return m_direction;
}
private:
direction m_direction;
};
} // namespace v5 } // namespace v5
} // namespace op } // namespace op
} // namespace ngraph } // namespace ngraph
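
A shape sketch for the v5 sequence op (num_directions = 1 for FORWARD); all sizes are illustrative assumptions:

const size_t batch = 2, seq_len = 10, input_size = 16, hidden_size = 32, num_dir = 1;
auto make_input = [](const ngraph::Shape& s) {
    return std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, s);
};
auto X  = make_input({batch, seq_len, input_size});
auto H  = make_input({batch, num_dir, hidden_size});
auto C  = make_input({batch, num_dir, hidden_size});
auto SL = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::i32, ngraph::Shape{batch});
auto W  = make_input({num_dir, 4 * hidden_size, input_size});
auto R  = make_input({num_dir, 4 * hidden_size, hidden_size});
auto B  = make_input({num_dir, 4 * hidden_size});
auto seq = std::make_shared<ngraph::op::v5::LSTMSequence>(
    X, H, C, SL, W, R, B, hidden_size, ngraph::op::RecurrentSequenceDirection::FORWARD);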

View File

@ -5,78 +5,14 @@
#pragma once #pragma once
#include "ngraph/op/op.hpp" #include "ngraph/op/op.hpp"
#include "openvino/op/parameter.hpp"
namespace ngraph { namespace ngraph {
namespace op { namespace op {
namespace v0 { namespace v0 {
/// \brief A function parameter. using ov::op::v0::Parameter;
///
/// Parameters are nodes that represent the arguments that will be passed to
/// user-defined functions. Function creation requires a sequence of parameters.
/// Basic graph operations do not need parameters attached to a function.
class NGRAPH_API Parameter : public op::Op {
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs a tensor-typed parameter node.
Parameter() = default;
/// \brief Constructs a tensor-typed parameter node.
///
/// \param element_type The element type of the parameter.
/// \param pshape The partial shape of the parameter.
Parameter(const ngraph::element::Type& element_type, const PartialShape& pshape);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool is_relevant_to_shapes() const;
void set_is_relevant_to_shapes(bool is_relevant);
const PartialShape& get_partial_shape() const {
return m_partial_shape;
}
PartialShape& get_partial_shape() {
return m_partial_shape;
}
void set_partial_shape(const PartialShape& partial_shape) {
m_partial_shape = partial_shape;
}
const element::Type& get_element_type() const {
return m_element_type;
}
void set_element_type(const element::Type& element_type) {
m_element_type = element_type;
}
protected:
PartialShape m_partial_shape;
element::Type m_element_type;
bool m_is_relevant_to_shapes{false};
};
} // namespace v0 } // namespace v0
using v0::Parameter; using v0::Parameter;
} // namespace op } // namespace op
using ParameterVector = std::vector<std::shared_ptr<op::Parameter>>; using ParameterVector = std::vector<std::shared_ptr<op::Parameter>>;
} // namespace ngraph } // namespace ngraph
namespace ov {
template <>
class NGRAPH_API AttributeAdapter<ngraph::ParameterVector> : public VisitorAdapter {
public:
AttributeAdapter(ngraph::ParameterVector& ref);
bool visit_attributes(AttributeVisitor& visitor) override;
static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<ParameterVector>", 0};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
protected:
ngraph::ParameterVector& m_ref;
};
} // namespace ov

View File

@ -7,62 +7,14 @@
#include <memory> #include <memory>
#include "ngraph/op/op.hpp" #include "ngraph/op/op.hpp"
#include "openvino/op/result.hpp"
namespace ngraph { namespace ngraph {
namespace op { namespace op {
namespace v0 { namespace v0 {
class NGRAPH_API Result : public Op { using ov::op::v0::Result;
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Allows a value to be used as a function result.
Result() = default;
/// \brief Allows a value to be used as a function result.
///
/// \param arg Node that produces the input tensor.
Result(const Output<Node>& arg, bool needs_default_layout = false);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void set_needs_default_layout(bool val) {
m_needs_default_layout = val;
}
bool needs_default_layout() const {
return m_needs_default_layout;
}
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override;
private:
bool m_needs_default_layout{false};
};
} // namespace v0 } // namespace v0
using v0::Result; using v0::Result;
} // namespace op } // namespace op
using ResultVector = std::vector<std::shared_ptr<op::Result>>; using ResultVector = std::vector<std::shared_ptr<op::Result>>;
} // namespace ngraph } // namespace ngraph
namespace ov {
template <>
class NGRAPH_API AttributeAdapter<ngraph::ResultVector> : public VisitorAdapter {
public:
AttributeAdapter(ngraph::ResultVector& ref);
bool visit_attributes(AttributeVisitor& visitor) override;
static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<ResultVector>", 0};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
protected:
ngraph::ResultVector& m_ref;
};
} // namespace ov
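
Parameter and Result are the entry and exit nodes of a Function; a minimal sketch using the backward-compatible ngraph names (the Relu in between is just an illustrative stand-in):

auto arg = std::make_shared<ngraph::op::v0::Parameter>(
    ngraph::element::f32, ngraph::PartialShape{ngraph::Dimension::dynamic(), 3});
auto relu = std::make_shared<ngraph::op::v0::Relu>(arg);
auto res = std::make_shared<ngraph::op::v0::Result>(relu);
auto fn = std::make_shared<ngraph::Function>(ngraph::ResultVector{res},
                                             ngraph::ParameterVector{arg});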

View File

@ -9,35 +9,12 @@
#include "ngraph/function.hpp" #include "ngraph/function.hpp"
#include "ngraph/op/parameter.hpp" #include "ngraph/op/parameter.hpp"
#include "ngraph/op/util/sub_graph_base.hpp" #include "ngraph/op/util/sub_graph_base.hpp"
#include "openvino/op/tensor_iterator.hpp"
namespace ngraph { namespace ngraph {
namespace op { namespace op {
namespace v0 { namespace v0 {
/// \brief Iterate a body over tensors, accumulating into tensors. using ov::op::v0::TensorIterator;
class NGRAPH_API TensorIterator : public op::util::SubGraphOp {
public:
NGRAPH_RTTI_DECLARATION;
bool visit_attributes(AttributeVisitor& visitor) override;
TensorIterator() = default;
explicit TensorIterator(const OutputVector& values);
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return the body of the iteration
std::shared_ptr<Function> get_body() const {
return m_bodies[0];
}
/// \param body set the body of the iteration
void set_body(const std::shared_ptr<Function>& body) {
set_function(body);
}
void validate_and_infer_types() override;
void revalidate_and_infer_types_for_body_ops();
private:
void try_to_set_num_iterations_if_no_slice_inputs();
};
} // namespace v0 } // namespace v0
using v0::TensorIterator; using v0::TensorIterator;
} // namespace op } // namespace op

View File

@ -42,19 +42,15 @@ namespace runtime {
class HostTensor; class HostTensor;
} // namespace runtime } // namespace runtime
namespace op {
namespace v0 {
class Result;
} // namespace v0
} // namespace op
} // namespace ngraph } // namespace ngraph
namespace ov { namespace ov {
namespace op { namespace op {
namespace v0 {
class Result;
} // namespace v0
struct AutoBroadcastSpec; struct AutoBroadcastSpec;
} } // namespace op
namespace pass { namespace pass {
namespace pattern { namespace pattern {
class Matcher; class Matcher;
@ -76,7 +72,7 @@ class Node;
/// environment) for evaluating ngraph::function. /// environment) for evaluating ngraph::function.
using EvaluationContext = std::map<std::string, std::shared_ptr<Variant>>; using EvaluationContext = std::map<std::string, std::shared_ptr<Variant>>;
using ResultVector = std::vector<std::shared_ptr<ngraph::op::v0::Result>>; using ResultVector = std::vector<std::shared_ptr<ov::op::v0::Result>>;
OPENVINO_API OPENVINO_API
std::string node_validation_failure_loc_string(const Node* node); std::string node_validation_failure_loc_string(const Node* node);

View File

@ -0,0 +1,80 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/util/gather_base.hpp"
namespace ov {
namespace op {
namespace v1 {
/// \brief Gather slices from axis of data according to indices
class OPENVINO_API Gather : public op::util::GatherBase {
public:
OPENVINO_RTTI_DECLARATION;
static const int64_t AXIS_NOT_SET_VALUE = std::numeric_limits<int64_t>::max();
Gather() = default;
/// \param data The tensor from which slices are gathered
/// \param indices Tensor with indexes to gather
/// \param axis The tensor is a dimension index to gather data from
Gather(const Output<Node>& params, const Output<Node>& indices, const Output<Node>& axis);
bool visit_attributes(AttributeVisitor& visitor) override;
int64_t get_axis() const override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
};
} // namespace v1
namespace v7 {
/// \brief Gather slices from axis of data according to indices
class OPENVINO_API Gather : public op::util::GatherBase {
public:
OPENVINO_RTTI_DECLARATION;
Gather() = default;
/// \param data The tensor from which slices are gathered
/// \param indices Tensor with indexes to gather
/// \param axis The tensor is a dimension index to gather data from
/// \param batch_dims The number of batch dimensions in data and indices tensors.
/// If batch_dims = 0, Gather v7 is identical to Gather v1.
Gather(const Output<Node>& data,
const Output<Node>& indices,
const Output<Node>& axis,
const int64_t batch_dims = 0);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
int64_t get_batch_dims() const;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
};
} // namespace v7
namespace v8 {
/// \brief Gather slices from axis of data according to indices. Negative indices
/// are supported and indicate reverse indexing from the end
class OPENVINO_API Gather : public op::util::GatherBase {
public:
OPENVINO_RTTI_DECLARATION;
Gather() = default;
/// \param data The tensor from which slices are gathered
/// \param indices Tensor with indexes to gather
/// \param axis The tensor is a dimension index to gather data from
/// \param batch_dims The number of batch dimensions in data and indices tensors.
Gather(const Output<Node>& data,
const Output<Node>& indices,
const Output<Node>& axis,
const int64_t batch_dims = 0);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
int64_t get_batch_dims() const;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
};
} // namespace v8
} // namespace op
} // namespace ov
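
A sketch of the batch_dims distinction introduced in v7 and kept in v8; shapes and values are illustrative assumptions:

auto data    = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 5, 4});
auto indices = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{2, 3});
auto axis    = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {1});
// batch_dims = 1: the leading dimension (2) is a batch shared by data and
// indices, so the output shape is [2, 3, 4].
auto gather = std::make_shared<ov::op::v8::Gather>(data, indices, axis, 1);
// v8 additionally allows negative indices, counted back from the end of the axis.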

View File

@ -0,0 +1,39 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/op.hpp"
namespace ov {
namespace op {
namespace v6 {
/// \brief GatherElements operation
///
class OPENVINO_API GatherElements : public Op {
public:
OPENVINO_RTTI_DECLARATION;
GatherElements() = default;
/// \brief Constructs a GatherElements operation.
///
/// \param data Node producing data that are gathered
/// \param indices Node producing indices by which the operation gathers elements
/// \param axis specifies axis along which indices are specified
GatherElements(const Output<Node>& data, const Output<Node>& indices, const int64_t axis);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
int64_t get_axis() const {
return m_axis;
}
private:
int64_t m_axis;
};
} // namespace v6
} // namespace op
} // namespace ov

View File

@ -0,0 +1,40 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/op.hpp"
namespace ov {
namespace op {
namespace v5 {
/// \brief GatherND operation
///
class OPENVINO_API GatherND : public Op {
public:
OPENVINO_RTTI_DECLARATION;
GatherND() = default;
/// \brief Constructs a GatherND operation.
///
/// \param data Node producing data that are gathered
/// \param indices Node producing indices by which the operation gathers elements
/// or slices from data
/// \param batch_dims Specifies a number of batch dimensions
GatherND(const Output<Node>& data, const Output<Node>& indices, const size_t batch_dims = 0);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
size_t get_batch_dims() const {
return m_batch_dims;
}
private:
size_t m_batch_dims;
};
} // namespace v5
} // namespace op
} // namespace ov
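
A sketch of the v5 op with the default batch_dims = 0, where the last dimension of indices holds coordinate tuples into data; sizes are illustrative assumptions:

auto data    = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{3, 4, 5});
auto indices = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{2, 2});
// Each row of indices is a (dim0, dim1) coordinate into data, so the gathered
// output keeps the remaining data dimension: output shape is [2, 5].
auto gather_nd = std::make_shared<ov::op::v5::GatherND>(data, indices);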

View File

@ -0,0 +1,38 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ngraph/op/op.hpp"
namespace ov {
namespace op {
namespace v1 {
/// \brief Generates the complete beams from the per-step ids and the parent beam
/// ids.
class OPENVINO_API GatherTree : public Op {
public:
OPENVINO_RTTI_DECLARATION;
GatherTree() = default;
/// \param step_ids Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with
/// indices from per each step
/// \param parent_idx Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with
/// parent beam indices
/// \param max_seq_len Tensor of shape [BATCH_SIZE] with maximum lengths for each
/// sequence in the batch
/// \param end_token Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH]
GatherTree(const Output<Node>& step_ids,
const Output<Node>& parent_idx,
const Output<Node>& max_seq_len,
const Output<Node>& end_token);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
};
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -0,0 +1,81 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/op.hpp"
#include "openvino/op/util/unary_elementwise_arithmetic.hpp"
namespace ov {
namespace op {
namespace v0 {
/// \brief Gaussian Error Linear Unit
/// f(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
class OPENVINO_API Gelu : public Op {
public:
OPENVINO_RTTI_DECLARATION;
Gelu();
/// \brief Constructs a Gelu operation.
///
/// \param data Input tensor
Gelu(const Output<Node>& data);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
};
} // namespace v0
/// \brief Specifies the approximation to calculate Gelu
enum class GeluApproximationMode { TANH, ERF };
OPENVINO_API std::ostream& operator<<(std::ostream& s, const GeluApproximationMode& type);
namespace v7 {
/// \brief Gaussian Error Linear Unit
/// f(x) = 0.5 * x * (1 + erf(x / sqrt(2))) for "approximation" = "erf"
/// f(x) = 0.5 * x * (1 + tanh([sqrt(2 / pi)] * [x + 0.044715 * x^3])) for
/// "approximation" = "tanh"
class OPENVINO_API Gelu : public util::UnaryElementwiseArithmetic {
public:
OPENVINO_RTTI_DECLARATION;
Gelu() = default;
/// \brief Constructs a Gelu operation.
///
/// \param data Input tensor
/// \param mode Approximation mode
Gelu(const Output<Node>& data, GeluApproximationMode mode = GeluApproximationMode::ERF);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
GeluApproximationMode get_approximation_mode() const;
private:
GeluApproximationMode m_approximation_mode = GeluApproximationMode::ERF;
};
} // namespace v7
} // namespace op
template <>
class OPENVINO_API AttributeAdapter<op::GeluApproximationMode>
: public EnumAttributeAdapterBase<op::GeluApproximationMode> {
public:
AttributeAdapter(op::GeluApproximationMode& value) : EnumAttributeAdapterBase<op::GeluApproximationMode>(value) {}
static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::GeluApproximationMode>", 0};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
};
} // namespace ov
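
A standalone arithmetic sketch of the two approximation modes declared above (plain C++, no OpenVINO dependency):

#include <cmath>

const double kPi = 3.14159265358979323846;

double gelu_erf(double x) {   // GeluApproximationMode::ERF
    return 0.5 * x * (1.0 + std::erf(x / std::sqrt(2.0)));
}

double gelu_tanh(double x) {  // GeluApproximationMode::TANH
    return 0.5 * x * (1.0 + std::tanh(std::sqrt(2.0 / kPi) * (x + 0.044715 * x * x * x)));
}

// gelu_erf(1.0) is about 0.84134 and gelu_tanh(1.0) about 0.84120, so the two
// modes agree to roughly three decimal places.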

View File

@ -0,0 +1,33 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/util/binary_elementwise_comparison.hpp"
namespace ov {
namespace op {
namespace v1 {
/// \brief Elementwise greater-than operation.
class OPENVINO_API Greater : public util::BinaryElementwiseComparison {
public:
OPENVINO_RTTI_DECLARATION;
/// \brief Constructs a greater-than operation.
Greater() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
/// \brief Constructs a greater-than operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
/// \param auto_broadcast Auto broadcast specification
Greater(const Output<Node>& arg0,
const Output<Node>& arg1,
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -0,0 +1,33 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/util/binary_elementwise_comparison.hpp"
namespace ov {
namespace op {
namespace v1 {
/// \brief Elementwise greater-than-or-equal operation.
class OPENVINO_API GreaterEqual : public util::BinaryElementwiseComparison {
public:
OPENVINO_RTTI_DECLARATION;
/// \brief Constructs a greater-than-or-equal operation.
GreaterEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
/// \brief Constructs a greater-than-or-equal operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
/// \param auto_broadcast Auto broadcast specification
GreaterEqual(const Output<Node>& arg0,
const Output<Node>& arg1,
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -0,0 +1,41 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include "openvino/op/op.hpp"
namespace ov {
namespace op {
namespace v0 {
/// \brief Global Response Normalization with L2 norm (across channels only).
///
class OPENVINO_API GRN : public Op {
public:
OPENVINO_RTTI_DECLARATION;
GRN() = default;
/// \brief Constructs a GRN operation.
///
/// \param data - Node producing the input tensor
/// \param bias - The bias added to the variance.
///
GRN(const Output<Node>& data, float bias);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
float get_bias() const {
return m_bias;
}
protected:
float m_bias = 1.0f;
};
} // namespace v0
} // namespace op
} // namespace ov
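
A construction sketch; the bias value and shapes are illustrative assumptions:

// GRN divides each element by the L2 norm taken across the channel axis:
// out[n, c, h, w] = x[n, c, h, w] / sqrt(sum_over_c(x[n, c, h, w]^2) + bias)
auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8, 224, 224});
auto grn = std::make_shared<ov::op::v0::GRN>(data, /*bias=*/1e-6f);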

View File

@ -0,0 +1,273 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/convolution.hpp"
#include "openvino/op/op.hpp"
#include "openvino/op/util/attr_types.hpp"
namespace ov {
namespace op {
namespace v1 {
/// \brief Batched convolution operation, with optional window dilation and stride.
class OPENVINO_API GroupConvolution : public Op {
public:
OPENVINO_RTTI_DECLARATION;
/// \brief Constructs a batched convolution operation.
GroupConvolution() = default;
/// \brief Constructs a batched convolution operation.
///
/// \param data_batch The node producing the input data batch tensor.<br>
/// `[N, C_IN, D1, ... Df]`
/// \param filters The node producing the filters tensor.<br>
/// `[GROUPS, FC_OUT, FC_IN, F1, ... Ff]`
/// \param strides The strides.<br>
/// `[f]`
/// \param dilations The dilations.<br>
/// `[f]`
/// \param pads_begin The beginning of padding shape.<br>
/// `[f]`
/// \param pads_end The end of padding shape.<br>
/// `[f]`
/// \param auto_pad The pad type for automatically computing padding sizes.<br>
/// `[f]`
///
/// Output `[N, FC_OUT * GROUPS, R1, ... Rf]`
///
GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& strides,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const Strides& dilations,
const PadType& auto_pad = PadType::EXPLICIT);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return The strides.
const Strides& get_strides() const {
return m_strides;
}
void set_strides(const Strides& strides) {
m_strides = strides;
}
/// \return The dilations.
const Strides& get_dilations() const {
return m_dilations;
}
void set_dilations(const Strides& dilations) {
m_dilations = dilations;
}
/// \return The padding-below sizes (possibly negative).
const CoordinateDiff& get_pads_begin() const {
return m_pads_begin;
}
void set_pads_begin(const CoordinateDiff& pads_begin) {
m_pads_begin = pads_begin;
}
/// \return The padding-above sizes (possibly negative).
const CoordinateDiff& get_pads_end() const {
return m_pads_end;
}
void set_adding_above(const CoordinateDiff& pads_end) {
m_pads_end = pads_end;
}
/// \return The pad type for convolution.
const PadType& get_auto_pad() const {
return m_auto_pad;
}
void set_auto_pad(const PadType& auto_pad) {
m_auto_pad = auto_pad;
}
/// \return The default value for Convolution.
OPENVINO_SUPPRESS_DEPRECATED_START
std::shared_ptr<Node> get_default_value() const override;
OPENVINO_SUPPRESS_DEPRECATED_END
protected:
Strides m_strides;
Strides m_dilations;
CoordinateDiff m_pads_begin;
CoordinateDiff m_pads_end;
PadType m_auto_pad;
};
/// \brief Data batch backprop for batched convolution operation.
class OPENVINO_API GroupConvolutionBackpropData : public Op {
public:
OPENVINO_RTTI_DECLARATION;
/// \brief Constructs a batched-convolution data batch-backprop operation.
GroupConvolutionBackpropData();
// clang-format off
//
// \brief Constructs a batched-convolution data batch-backprop operation.
//
// \param data The node producing data from forward-prop. Shape: [N,
// C_INPUT * GROUPS, X1, ..., XD].
// \param filter The node producing the filter from forward-prop. Shape:
// [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1]
// \param output_shape The shape of the data batch from forward-prop. Its size
// should be equal to the number of data spatial dimensions.
// \param strides The strides from forward-prop.
// \param pads_begin The padding-below sizes from forward-prop.
// \param pads_end The padding-above sizes from forward-prop.
// \param dilations The dilations from forward-prop.
// \param auto_pad The pad type for automatically computing padding sizes.
// \param output_padding The output padding adds an additional amount of padding
// along each spatial axis in the output tensor.
//
// clang-format on
//
GroupConvolutionBackpropData(const Output<Node>& data,
const Output<Node>& filter,
const Output<Node>& output_shape,
const Strides& strides,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const Strides& dilations,
const PadType& auto_pad = PadType::EXPLICIT,
const CoordinateDiff& output_padding = {});
// clang-format off
//
// \brief Constructs a batched-convolution data batch-backprop operation.
//
// \param data The node producing data from forward-prop. Shape: [N,
// C_INPUT * GROUPS, X1, ..., XD].
// \param filter The node producing the filter from forward-prop. Shape:
// [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1]
// \param output_shape The shape of the data batch from forward-prop. Its size
// should be equal to the number of data spatial dimensions.
// \param strides The strides from forward-prop.
// \param dilations The dilations from forward-prop.
// \param auto_pad The pad type for automatically computing padding sizes.
// \param output_padding The output padding adds an additional amount of padding
// along each spatial axis in the output tensor.
//
// clang-format on
//
GroupConvolutionBackpropData(const Output<Node>& data,
const Output<Node>& filter,
const Output<Node>& output_shape,
const Strides& strides,
const Strides& dilations,
const PadType& auto_pad,
const CoordinateDiff& output_padding = {});
// clang-format off
//
// \brief Constructs a batched-convolution data batch-backprop operation.
//
// \param data The node producing data from forward-prop. Shape:
// [N, C_INPUT * GROUPS, X1, ..., XD].
// \param filter The node producing the filter from forward-prop. Shape:
// [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1]
// \param strides The strides from forward-prop.
// \param pads_begin The padding-below sizes from forward-prop.
// \param pads_end The padding-above sizes from forward-prop.
// \param dilations The dilations from forward-prop.
// \param auto_pad The pad type for automatically computing padding sizes.
// \param output_padding The output padding adds an additional amount of padding
// along each spatial axis in the output tensor.
//
// clang-format on
GroupConvolutionBackpropData(const Output<Node>& data,
const Output<Node>& filter,
const Strides& strides,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const Strides& dilations,
const PadType& auto_pad = PadType::EXPLICIT,
const CoordinateDiff& output_padding = {});
///
/// \brief Calculates output spatial features size.
///
/// \param[in] input_data_shape The input data partial shape
/// \param[in] filters_shape The filters partial shape
/// \param[in] strides The strides values.
/// \param[in] dilations The dilations values.
/// \param[in] pads_begin The paddings at the beginning of axis.
/// \param[in] pads_end The paddings at the end of axis.
/// \param[in] output_padding The output padding values.
/// \param output_spatial_shape The placeholder for computed output spatial
/// partial
/// shape.
///
void infer_conv_backprop_output_spatial_shape(const std::vector<Dimension>& input_data_shape,
const std::vector<Dimension>& filters_shape,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const CoordinateDiff& output_padding,
std::vector<Dimension>& output_spatial_shape);
bool visit_attributes(AttributeVisitor& visitor) override;
bool is_dynamic() const override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return The spatial shape of the output.
const PartialShape get_convolution_output_shape() const;
void set_output_shape(const ngraph::Shape& output_shape);
/// \return The strides from the forward prop.
const Strides& get_strides() const {
return m_strides;
}
void set_strides(const Strides& strides) {
m_strides = strides;
}
/// \return The dilations from the forward prop.
const Strides& get_dilations() const {
return m_dilations;
}
void set_dilations(const Strides& dilations) {
m_dilations = dilations;
}
/// \return The number of pixels to add to the beginning along each axis.
const CoordinateDiff& get_pads_begin() const {
return m_pads_begin;
}
void set_pads_begin(const CoordinateDiff& pads_begin) {
m_pads_begin = pads_begin;
}
/// \return The number of pixels to add to the ending along each axis.
const CoordinateDiff& get_pads_end() const {
return m_pads_end;
}
void set_pads_end(const CoordinateDiff& pads_end) {
m_pads_end = pads_end;
}
/// \return The auto pad.
const PadType& get_auto_pad() const {
return m_auto_pad;
}
void set_auto_pad(const PadType& auto_pad) {
m_auto_pad = auto_pad;
}
/// \return The output padding.
const CoordinateDiff& get_output_padding() const {
return m_output_padding;
}
void set_output_padding(const CoordinateDiff& output_padding) {
m_output_padding = output_padding;
}
protected:
Strides m_strides;
Strides m_dilations;
CoordinateDiff m_pads_begin;
CoordinateDiff m_pads_end;
PadType m_auto_pad;
CoordinateDiff m_output_padding;
};
} // namespace v1
} // namespace op
} // namespace ov
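
A shape sketch for the layouts documented above, with GROUPS = 2, FC_IN = 8, FC_OUT = 16; all values are illustrative assumptions:

auto data = std::make_shared<ov::op::v0::Parameter>(
    ov::element::f32, ov::Shape{1, 16, 28, 28});     // C_IN = GROUPS * FC_IN = 16
auto filters = std::make_shared<ov::op::v0::Parameter>(
    ov::element::f32, ov::Shape{2, 16, 8, 3, 3});    // [GROUPS, FC_OUT, FC_IN, 3, 3]
auto conv = std::make_shared<ov::op::v1::GroupConvolution>(data,
                                                           filters,
                                                           ov::Strides{1, 1},
                                                           ov::CoordinateDiff{1, 1},
                                                           ov::CoordinateDiff{1, 1},
                                                           ov::Strides{1, 1});
// With this padding/stride choice the output is [1, GROUPS * FC_OUT = 32, 28, 28].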

View File

@ -0,0 +1,160 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstddef>
#include <memory>
#include <string>
#include <vector>
#include "openvino/op/op.hpp"
#include "openvino/op/util/activation_functions.hpp"
#include "openvino/op/util/rnn_cell_base.hpp"
namespace ov {
namespace op {
namespace v3 {
///
/// \brief Class for GRU cell node.
///
/// \note This class represents only a single *cell* and not a whole GRU *layer*.
///
class OPENVINO_API GRUCell : public util::RNNCellBase {
public:
OPENVINO_RTTI_DECLARATION;
GRUCell();
///
/// \brief Constructs GRUCell node.
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] W The weight tensor with shape:
/// [gates_count * hidden_size, input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [gates_count * hidden_size, hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
///
GRUCell(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& W,
const Output<Node>& R,
std::size_t hidden_size);
///
/// \brief Constructs GRUCell node.
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] W The weight tensor with shape:
/// [gates_count * hidden_size, input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [gates_count * hidden_size, hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on input of activation functions.
///
GRUCell(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& W,
const Output<Node>& R,
std::size_t hidden_size,
const std::vector<std::string>& activations,
const std::vector<float>& activations_alpha,
const std::vector<float>& activations_beta,
float clip,
bool linear_before_reset);
///
/// \brief Constructs GRUCell node.
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] W The weight tensor with shape: [gates_count *
/// hidden_size, input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [gates_count * hidden_size, hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] B The sum of biases (weight and recurrence) for
/// update, reset and hidden gates.
/// If linear_before_reset := true then biases for
/// hidden gates are
/// placed separately (weight and recurrence).
/// Shape: [gates_count * hidden_size] if
/// linear_before_reset := false
/// Shape: [(gates_count + 1) * hidden_size] if
/// linear_before_reset := true
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on input of activation functions.
/// \param[in] linear_before_reset Whether or not to apply the linear
/// transformation before multiplying by the
/// output of the reset gate.
///
GRUCell(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& W,
const Output<Node>& R,
const Output<Node>& B,
std::size_t hidden_size,
const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh"},
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
float clip = 0.f,
bool linear_before_reset = false);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool get_linear_before_reset() const {
return m_linear_before_reset;
}
private:
/// \brief Add and initialize bias input to all zeros.
void add_default_bias_input();
///
/// \brief The Activation function f.
///
util::ActivationFunction m_activation_f;
///
/// \brief The Activation function g.
///
util::ActivationFunction m_activation_g;
static constexpr std::size_t s_gates_count{3};
///
/// \brief Controls whether or not to apply the linear transformation.
///
/// \note The linear transformation may be applied when computing the output of
/// hidden gate. It's done before multiplying by the output of the reset gate.
///
bool m_linear_before_reset;
};
} // namespace v3
} // namespace op
} // namespace ov
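A minimal construction sketch for the five-argument constructor above (a hedged example: the header paths, ov::element::f32 and the illustrative shapes are assumptions, not part of this commit):

#include <memory>
#include "openvino/op/gru_cell.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Node> make_gru_cell() {
    const std::size_t hidden_size = 4;  // gates_count = 3, so W/R have 3 * hidden_size = 12 rows
    auto X = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 3});   // [batch, input_size]
    auto H = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 4});   // [batch, hidden_size]
    auto W = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{12, 3});
    auto R = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{12, 4});
    return std::make_shared<ov::op::v3::GRUCell>(X, H, W, R, hidden_size);
}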

View File

@ -0,0 +1,54 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include <string>
#include <vector>
#include "openvino/op/op.hpp"
#include "openvino/op/util/rnn_cell_base.hpp"
namespace ov {
namespace op {
namespace v5 {
class OPENVINO_API GRUSequence : public util::RNNCellBase {
public:
OPENVINO_RTTI_DECLARATION;
GRUSequence();
GRUSequence(const Output<Node>& X,
const Output<Node>& H_t,
const Output<Node>& sequence_lengths,
const Output<Node>& W,
const Output<Node>& R,
const Output<Node>& B,
size_t hidden_size,
op::RecurrentSequenceDirection direction,
const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh"},
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
float clip = 0.f,
bool linear_before_reset = false);
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
bool get_linear_before_reset() const {
return m_linear_before_reset;
}
op::RecurrentSequenceDirection get_direction() const {
return m_direction;
}
protected:
op::RecurrentSequenceDirection m_direction;
bool m_linear_before_reset;
};
} // namespace v5
} // namespace op
} // namespace ov

View File

@ -0,0 +1,35 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/op.hpp"
namespace ov {
namespace op {
namespace v0 {
/// \brief Parameterized, bounded sigmoid-like, piecewise linear
/// function: f(x) = min(max(alpha*x + beta, 0), 1)
///
class OPENVINO_API HardSigmoid : public Op {
public:
OPENVINO_RTTI_DECLARATION;
HardSigmoid();
/// \brief Constructs a HardSigmoid operation.
///
/// \param data Input tensor.
/// \param[in] alpha A scalar value representing the alpha parameter.
/// \param[in] beta A scalar value representing the beta parameter.
///
HardSigmoid(const Output<Node>& data, const Output<Node>& alpha, const Output<Node>& beta);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
};
} // namespace v0
} // namespace op
} // namespace ov
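Since alpha and beta are scalar inputs rather than attributes, they are typically fed as Constants; a hedged sketch (the include path and the 0.2/0.5 values are illustrative assumptions):

#include <memory>
#include "openvino/op/constant.hpp"

std::shared_ptr<ov::Node> make_hard_sigmoid(const ov::Output<ov::Node>& data) {
    auto alpha = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.2f});  // scalar alpha
    auto beta  = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.5f});  // scalar beta
    return std::make_shared<ov::op::v0::HardSigmoid>(data, alpha, beta);
}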

View File

@ -0,0 +1,35 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/op.hpp"
#include "openvino/op/util/unary_elementwise_arithmetic.hpp"
namespace ov {
namespace op {
namespace v5 {
/// \brief An HSigmoid Activation Function
/// f(x) = min(max(x + 3, 0), 6) / 6 or
/// f(x) = min(ReLU(x + 3), 6) / 6
///
class OPENVINO_API HSigmoid : public util::UnaryElementwiseArithmetic {
public:
OPENVINO_RTTI_DECLARATION;
HSigmoid() = default;
/// \brief Constructs an HSigmoid operation.
///
/// \param data Input tensor
HSigmoid(const Output<Node>& arg);
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v5
} // namespace op
} // namespace ov

View File

@ -0,0 +1,35 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/op.hpp"
#include "openvino/op/util/unary_elementwise_arithmetic.hpp"
namespace ov {
namespace op {
namespace v4 {
/// \brief An HSwish Activation Function
/// f(x) = x * min(max(x + 3, 0), 6) / 6 or
/// f(x) = x * min(ReLU(x + 3), 6) / 6
///
class OPENVINO_API HSwish : public util::UnaryElementwiseArithmetic {
public:
OPENVINO_RTTI_DECLARATION;
HSwish() = default;
/// \brief Constructs an HSwish (hard version of Swish) operation.
///
/// \param data Input tensor
HSwish(const Output<Node>& arg);
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v4
} // namespace op
} // namespace ov
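The two formulas above agree because ReLU(x) = max(x, 0); a scalar reference for sanity-checking (illustrative only, not library code):

#include <algorithm>

inline float hswish_ref(float x) {
    // f(x) = x * min(max(x + 3, 0), 6) / 6
    return x * std::min(std::max(x + 3.0f, 0.0f), 6.0f) / 6.0f;
}
// hswish_ref(-3.f) == 0.f, hswish_ref(1.f) == 4.f / 6.f, hswish_ref(3.f) == 3.f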

View File

@ -0,0 +1,41 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstddef>
#include <vector>
#include "openvino/op/op.hpp"
#include "openvino/op/util/fft_base.hpp"
namespace ov {
namespace op {
namespace v7 {
/// \brief An operation IDFT that computes the inverse discrete Fourier transformation.
class OPENVINO_API IDFT : public util::FFTBase {
public:
OPENVINO_RTTI_DECLARATION;
IDFT() = default;
/// \brief Constructs an IDFT operation. IDFT is performed for the full size of the axes.
///
/// \param data Input data
/// \param axes Axes to perform IDFT
IDFT(const Output<Node>& data, const Output<Node>& axes);
/// \brief Constructs an IDFT operation.
///
/// \param data Input data
/// \param axes Axes to perform IDFT
/// \param signal_size Signal sizes for 'axes'
IDFT(const Output<Node>& data, const Output<Node>& axes, const Output<Node>& signal_size);
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
};
} // namespace v7
} // namespace op
} // namespace ov
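A hedged construction sketch for the two-input form above (the complex input carries a trailing dimension of 2 for the real/imaginary parts; include paths and shapes are assumptions):

#include <memory>
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Node> make_idft() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 320, 320, 2});
    auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {1, 2});  // transform axes 1 and 2
    return std::make_shared<ov::op::v7::IDFT>(data, axes);
}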

View File

@ -0,0 +1,94 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include "openvino/core/function.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/util/multi_subgraph_base.hpp"
namespace ov {
namespace op {
namespace v8 {
/// \brief If operation.
class OPENVINO_API If : public util::MultiSubGraphOp {
public:
enum BodyIndexes { THEN_BODY_INDEX = 0, ELSE_BODY_INDEX = 1 };
OPENVINO_RTTI_DECLARATION;
bool visit_attributes(AttributeVisitor& visitor) override;
/// \brief Constructs If with condition
///
/// \param execution_condition condition node.
If(const Output<Node>& execution_condition);
If();
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
/// \brief gets then_body as ngraph::Function.
///
/// \return then_body as ngraph::Function.
const std::shared_ptr<Function>& get_then_body() const {
return m_bodies[THEN_BODY_INDEX];
}
/// \brief gets else_body as ngraph::Function.
///
/// \return else_body as ngraph::Function.
const std::shared_ptr<Function>& get_else_body() const {
return m_bodies[ELSE_BODY_INDEX];
}
/// \brief sets new ngraph::Function as new then_body.
///
/// \param body new body for 'then' branch.
void set_then_body(const std::shared_ptr<Function>& body) {
m_bodies[THEN_BODY_INDEX] = body;
}
/// \brief sets new ngraph::Function as new else_body.
///
/// \param body new body for 'else' branch.
void set_else_body(const std::shared_ptr<Function>& body) {
m_bodies[ELSE_BODY_INDEX] = body;
}
/// \brief sets new input to the operation associated with parameters
/// of each sub-graphs
///
/// \param value input to operation
/// \param then_parameter parameter for then_body or nullptr
/// \param else_parameter parameter for else_body or nullptr
void set_input(const Output<Node>& value,
const std::shared_ptr<v0::Parameter>& then_parameter,
const std::shared_ptr<v0::Parameter>& else_parameter);
/// \brief sets new output from the operation associated with results
/// of each sub-graphs
///
/// \param then_result result from then_body
/// \param else_result result from else_body
/// \return output from operation
Output<Node> set_output(const std::shared_ptr<v0::Result>& then_result,
const std::shared_ptr<v0::Result>& else_result);
void validate_and_infer_types() override;
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
private:
using OutputMap = std::map<int64_t, std::shared_ptr<MultiSubGraphOp::OutputDescription>>;
void validate_and_infer_type_body(const std::shared_ptr<Function>& body,
const MultiSubgraphInputDescriptionVector& input_descriptors);
OutputMap get_mapping_outputs_on_body_description(const MultiSubgraphOutputDescriptionVector& output_descriptors);
};
} // namespace v8
} // namespace op
} // namespace ov
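A minimal wiring sketch for the API above, assuming Function, Parameter, Result and the Log op from this same series are in scope (the else-branch body is arbitrary; any single-output sub-graph works):

#include <memory>

std::shared_ptr<ov::Node> make_if(const ov::Output<ov::Node>& x, const ov::Output<ov::Node>& cond) {
    // then-branch: identity
    auto then_param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
    auto then_res   = std::make_shared<ov::op::v0::Result>(then_param);
    auto then_body  = std::make_shared<ov::Function>(ov::ResultVector{then_res}, ov::ParameterVector{then_param});
    // else-branch: natural log
    auto else_param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
    auto else_res   = std::make_shared<ov::op::v0::Result>(std::make_shared<ov::op::v0::Log>(else_param));
    auto else_body  = std::make_shared<ov::Function>(ov::ResultVector{else_res}, ov::ParameterVector{else_param});

    auto if_op = std::make_shared<ov::op::v8::If>(cond);
    if_op->set_then_body(then_body);
    if_op->set_else_body(else_body);
    if_op->set_input(x, then_param, else_param);  // x feeds the parameter of each sub-graph
    if_op->set_output(then_res, else_res);        // merges the two results into one output port
    return if_op;
}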

View File

@ -0,0 +1,360 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstdint>
#include <vector>
#include "openvino/core/attribute_adapter.hpp"
#include "openvino/op/op.hpp"
#include "openvino/op/util/attr_types.hpp"
namespace ov {
namespace op {
namespace v0 {
/// \brief Layer which performs bilinear interpolation
class OPENVINO_API Interpolate : public Op {
public:
OPENVINO_RTTI_DECLARATION;
/// \brief Structure that specifies attributes for interpolation
struct Attributes {
// specify dimension indices where interpolation is applied, and `axes` is any
// unordered list of indices of different dimensions of input tensor. Required.
AxisSet axes;
// specifies type of interpolation
// one of `nearest`, `linear`, `cubic`, `area`. Required.
std::string mode;
// a flag that specifies whether to align corners or not.
// `true` (default) means the alignment is applied,
// `false` means the alignment isn't applied.
bool align_corners = true;
// a flag that specifies whether to perform anti-aliasing. default is `false`
bool antialias = false;
// specify the number of pixels to add to the beginning of the image being
// interpolated. This addition of pixels is done before interpolation calculation.
std::vector<size_t> pads_begin;
// specify the number of pixels to add to the end of the image being interpolated.
// This addition of pixels is done before interpolation calculation.
std::vector<size_t> pads_end;
};
enum class InterpolateMode {
NEAREST,
LINEAR,
CUBIC,
AREA,
nearest OPENVINO_ENUM_DEPRECATED("Please use NEAREST instead") = NEAREST,
linear OPENVINO_ENUM_DEPRECATED("Please use LINEAR instead") = LINEAR,
cubic OPENVINO_ENUM_DEPRECATED("Please use CUBIC instead") = CUBIC,
area OPENVINO_ENUM_DEPRECATED("Please use AREA instead") = AREA
};
Interpolate() = default;
/// \brief Constructs an Interpolate operation
///
/// \param image Input image
/// \param output_shape Output shape of spatial axes
/// \param attrs Interpolation attributes
Interpolate(const Output<Node>& image, const Output<Node>& output_shape, const Attributes& attrs);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
const Attributes& get_attrs() const {
return m_attrs;
}
private:
Attributes m_attrs;
};
} // namespace v0
namespace v4 {
class OPENVINO_API Interpolate : public Op {
public:
OPENVINO_RTTI_DECLARATION;
/// \brief Shape calculation mode
///
/// sizes - output shape for interpolated axes is calculated using input `sizes`
/// scales - output shape for interpolated axes is calculated using input `scales`
enum class ShapeCalcMode {
SIZES,
SCALES,
sizes OPENVINO_ENUM_DEPRECATED("Please use SIZES instead") = SIZES,
scales OPENVINO_ENUM_DEPRECATED("Please use SCALES instead") = SCALES
};
/// \brief Interpolation mode
///
/// nearest - nearest interpolation
/// linear - linear interpolation as in TensorFlow
/// linear_onnx - linear interpolation as in ONNX
/// cubic - cubic interpolation
enum class InterpolateMode {
NEAREST,
LINEAR,
LINEAR_ONNX,
CUBIC,
nearest OPENVINO_ENUM_DEPRECATED("Please use NEAREST instead") = NEAREST,
linear OPENVINO_ENUM_DEPRECATED("Please use LINEAR instead") = LINEAR,
linear_onnx OPENVINO_ENUM_DEPRECATED("Please use LINEAR_ONNX instead") = LINEAR_ONNX,
cubic OPENVINO_ENUM_DEPRECATED("Please use CUBIC instead") = CUBIC
};
/// \brief Mode of the calculation of the source coordinate from resized one
///
/// These modes are modes from ONNX runtime.
enum class CoordinateTransformMode {
HALF_PIXEL,
PYTORCH_HALF_PIXEL,
ASYMMETRIC,
TF_HALF_PIXEL_FOR_NN,
ALIGN_CORNERS,
half_pixel OPENVINO_ENUM_DEPRECATED("Please use HALF_PIXEL instead") = HALF_PIXEL,
pytorch_half_pixel OPENVINO_ENUM_DEPRECATED("Please use PYTORCH_HALF_PIXEL instead") = PYTORCH_HALF_PIXEL,
asymmetric OPENVINO_ENUM_DEPRECATED("Please use ASYMMETRIC instead") = ASYMMETRIC,
tf_half_pixel_for_nn OPENVINO_ENUM_DEPRECATED("Please use TF_HALF_PIXEL_FOR_NN instead") = TF_HALF_PIXEL_FOR_NN,
align_corners OPENVINO_ENUM_DEPRECATED("Please use ALIGN_CORNERS instead") = ALIGN_CORNERS
};
/// \brief Round modes for the nearest interpolation.
enum class NearestMode {
ROUND_PREFER_FLOOR,
ROUND_PREFER_CEIL,
FLOOR,
CEIL,
SIMPLE,
round_prefer_floor OPENVINO_ENUM_DEPRECATED("Please use ROUND_PREFER_FLOOR instead") = ROUND_PREFER_FLOOR,
round_prefer_ceil OPENVINO_ENUM_DEPRECATED("Please use ROUND_PREFER_CEIL instead") = ROUND_PREFER_CEIL,
floor OPENVINO_ENUM_DEPRECATED("Please use FLOOR instead") = FLOOR,
ceil OPENVINO_ENUM_DEPRECATED("Please use CEIL instead") = CEIL,
simple OPENVINO_ENUM_DEPRECATED("Please use SIMPLE instead") = SIMPLE
};
struct InterpolateAttrs {
// specifies type of interpolation
// one of `nearest`, `linear`, `linear_onnx`, `cubic` Required.
InterpolateMode mode = InterpolateMode::NEAREST;
// specifies shape calculation mode
// one of `sizes`, `scales` Required
ShapeCalcMode shape_calculation_mode = ShapeCalcMode::SIZES;
// specify the number of pixels to add to the beginning of the image being
// interpolated. This addition of pixels is done before interpolation
// calculation.
std::vector<size_t> pads_begin;
// specify the number of pixels to add to the end of the image being
// interpolated. This addition of pixels is done before interpolation
// calculation.
std::vector<size_t> pads_end;
// specifies how to transform the coordinate in the resized tensor to the
// coordinate in the original tensor. one of `half_pixel`, `pytorch_half_pixel`,
// `asymmetric`, `tf_half_pixel_for_nn`, `align_corners`
CoordinateTransformMode coordinate_transformation_mode = CoordinateTransformMode::HALF_PIXEL;
// specifies round mode when `mode == nearest` and is used only when `mode ==
// nearest`. one of `round_prefer_floor`, `round_prefer_ceil`, `floor`, `ceil`,
// `simple`
NearestMode nearest_mode = NearestMode::ROUND_PREFER_FLOOR;
// a flag that specifies whether to perform anti-aliasing. default is `false`
bool antialias = false;
// specifies the parameter *a* for cubic interpolation (see, e.g.
// [article](https://ieeexplore.ieee.org/document/1163711/)). *cube_coeff* is
// used only when `mode == cubic`
double cube_coeff = -0.75;
InterpolateAttrs() = default;
InterpolateAttrs(InterpolateMode mode,
ShapeCalcMode shape_calculation_mode,
const std::vector<size_t>& pads_begin,
const std::vector<size_t>& pads_end,
CoordinateTransformMode coordinate_transformation_mode = CoordinateTransformMode::HALF_PIXEL,
NearestMode nearest_mode = NearestMode::ROUND_PREFER_FLOOR,
bool antialias = false,
double cube_coeff = -0.75)
: mode(mode),
shape_calculation_mode(shape_calculation_mode),
pads_begin(pads_begin),
pads_end(pads_end),
coordinate_transformation_mode(coordinate_transformation_mode),
nearest_mode(nearest_mode),
antialias(antialias),
cube_coeff(cube_coeff) {}
};
Interpolate() = default;
/// \brief Constructs an Interpolate operation without 'axes' input.
///
/// \param image Input image
/// \param output_shape Output shape of spatial axes
/// \param scales Scales of spatial axes, i.e. output_shape / input_shape
/// \param attrs Interpolation attributes
Interpolate(const Output<Node>& image,
const Output<Node>& output_shape,
const Output<Node>& scales,
const InterpolateAttrs& attrs);
/// \brief Constructs an Interpolate operation with 'axes' input.
///
/// \param image Input image
/// \param output_shape Output shape of spatial axes
/// \param scales Scales of spatial axes, i.e. output_shape / input_shape
/// \param axes Interpolation axes
/// \param attrs Interpolation attributes
Interpolate(const Output<Node>& image,
const Output<Node>& output_shape,
const Output<Node>& scales,
const Output<Node>& axes,
const InterpolateAttrs& attrs);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
const InterpolateAttrs& get_attrs() const {
return m_attrs;
}
protected:
/// \return The interpolation axes.
std::vector<int64_t> get_axes() const;
private:
bool evaluate_interpolate(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
InterpolateAttrs m_attrs;
/// \brief Corrects pads_begin and pads_end attributes.
///
/// \details When Interpolate-4 is a result of some transformation, it is possible
/// that pads_begin.size() != pads_end.size() or
/// pads_begin.size() != input_rank. In such case, we should correct
/// pads_begin and pads_end, using padding of pads_begin and pads_end by
/// zeros or using pads_begin[0 : input_rank], pads_end[0 : input_rank].
///
/// Padding of pads_begin is performed when pads_begin.size() < input_rank,
/// and pads_begin[0 : input_rank] is used when
/// pads_begin.size() > input_rank.
///
/// Similarly for pads_end.
void correct_pads();
/// \brief Calculates input shape after padding.
///
/// \param input_shape Shape of input data.
///
/// \return Padded input shape, i.e. input_shape + pads_begin + pads_end
PartialShape get_padded_input_shape(const PartialShape& input_shape) const;
/// \brief Infers output shape using scales.
///
/// \param[in,out] output_shape output shape
/// \param axes Interpolation axes
/// \param scales Scales for interpolated axes
/// \param padded_input_shape input shape after padding
void infer_using_scales(PartialShape& output_shape,
const std::vector<int64_t>& axes,
const std::vector<float>& scales,
const PartialShape& padded_input_shape) const;
/// \brief Infers output shape using sizes.
///
/// \param[in,out] output_shape output shape
/// \param axes Interpolation axes
/// \param sizes sizes for interpolated axes
void infer_using_shapes(PartialShape& output_shape,
const std::vector<int64_t>& axes,
const std::vector<int64_t>& sizes) const;
};
} // namespace v4
} // namespace op
//---------------------------------------- v0 --------------------------------------------------
OPENVINO_API
std::ostream& operator<<(std::ostream& s, const op::v0::Interpolate::InterpolateMode& type);
//---------------------------------------- v4 --------------------------------------------------
OPENVINO_API
std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::InterpolateMode& type);
OPENVINO_API
std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::CoordinateTransformMode& type);
OPENVINO_API
std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::NearestMode& type);
OPENVINO_API
std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::ShapeCalcMode& type);
template <>
class OPENVINO_API AttributeAdapter<op::v0::Interpolate::InterpolateMode>
: public EnumAttributeAdapterBase<op::v0::Interpolate::InterpolateMode> {
public:
AttributeAdapter(op::v0::Interpolate::InterpolateMode& value)
: EnumAttributeAdapterBase<op::v0::Interpolate::InterpolateMode>(value) {}
static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v0::Interpolate::InterpolateMode>", 0};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
};
template <>
class OPENVINO_API AttributeAdapter<op::v4::Interpolate::InterpolateMode>
: public EnumAttributeAdapterBase<op::v4::Interpolate::InterpolateMode> {
public:
AttributeAdapter(op::v4::Interpolate::InterpolateMode& value)
: EnumAttributeAdapterBase<op::v4::Interpolate::InterpolateMode>(value) {}
static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v4::Interpolate::InterpolateMode>", 4};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
};
template <>
class OPENVINO_API AttributeAdapter<op::v4::Interpolate::CoordinateTransformMode>
: public EnumAttributeAdapterBase<op::v4::Interpolate::CoordinateTransformMode> {
public:
AttributeAdapter(op::v4::Interpolate::CoordinateTransformMode& value)
: EnumAttributeAdapterBase<op::v4::Interpolate::CoordinateTransformMode>(value) {}
static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v4::Interpolate::CoordinateTransformMode>", 4};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
};
template <>
class OPENVINO_API AttributeAdapter<op::v4::Interpolate::NearestMode>
: public EnumAttributeAdapterBase<op::v4::Interpolate::NearestMode> {
public:
AttributeAdapter(op::v4::Interpolate::NearestMode& value)
: EnumAttributeAdapterBase<op::v4::Interpolate::NearestMode>(value) {}
static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v4::Interpolate::NearestMode>", 4};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
};
template <>
class OPENVINO_API AttributeAdapter<op::v4::Interpolate::ShapeCalcMode>
: public EnumAttributeAdapterBase<op::v4::Interpolate::ShapeCalcMode> {
public:
AttributeAdapter(op::v4::Interpolate::ShapeCalcMode& value)
: EnumAttributeAdapterBase<op::v4::Interpolate::ShapeCalcMode>(value) {}
static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v4::Interpolate::ShapeCalcMode>", 4};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
};
} // namespace ov
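A hedged construction sketch for the five-input v4 form, using the InterpolateAttrs convenience constructor declared above (NCHW layout, values and include paths are illustrative assumptions; with ShapeCalcMode::SCALES the `sizes` input is still required but `scales` drives the output shape):

#include <memory>
#include "openvino/op/constant.hpp"

std::shared_ptr<ov::Node> make_interpolate(const ov::Output<ov::Node>& image) {
    using Interp = ov::op::v4::Interpolate;
    Interp::InterpolateAttrs attrs(Interp::InterpolateMode::LINEAR,
                                   Interp::ShapeCalcMode::SCALES,
                                   {0, 0, 0, 0},   // pads_begin
                                   {0, 0, 0, 0});  // pads_end
    auto sizes  = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {64, 64});
    auto scales = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{2}, {2.0f, 2.0f});
    auto axes   = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {2, 3});  // spatial H, W
    return std::make_shared<Interp>(image, sizes, scales, axes, attrs);
}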

View File

@ -0,0 +1,33 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/util/binary_elementwise_comparison.hpp"
namespace ov {
namespace op {
namespace v1 {
/// \brief Elementwise less-than operation.
class OPENVINO_API Less : public util::BinaryElementwiseComparison {
public:
OPENVINO_RTTI_DECLARATION;
/// \brief Constructs a less-than operation.
Less() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
/// \brief Constructs a less-than operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
/// \param auto_broadcast Auto broadcast specification
Less(const Output<Node>& arg0,
const Output<Node>& arg1,
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -0,0 +1,34 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/util/binary_elementwise_comparison.hpp"
namespace ov {
namespace op {
namespace v1 {
/// \brief Elementwise less-than-or-equal operation.
class OPENVINO_API LessEqual : public util::BinaryElementwiseComparison {
public:
OPENVINO_RTTI_DECLARATION;
/// \brief Constructs a less-than-or-equal operation.
LessEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
/// \brief Constructs a less-than-or-equal operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
/// \param auto_broadcast Auto broadcast specification
LessEqual(const Output<Node>& arg0,
const Output<Node>& arg1,
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v1
} // namespace op
} // namespace ov
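Both comparison ops above default to NUMPY auto-broadcast, so compatible but unequal shapes are accepted; a short sketch (Parameter include and shapes are illustrative assumptions):

#include <memory>

std::shared_ptr<ov::Node> make_less_equal() {
    auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 3});
    auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 3});  // broadcast along dim 0
    return std::make_shared<ov::op::v1::LessEqual>(a, b);  // boolean output of shape [2, 3]
}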

View File

@ -0,0 +1,30 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
namespace ov {
namespace op {
namespace v0 {
/// \brief Elementwise natural log operation.
class OPENVINO_API Log : public util::UnaryElementwiseArithmetic {
public:
OPENVINO_RTTI_DECLARATION;
/// \brief Constructs a natural log operation.
Log() = default;
/// \brief Constructs a natural log operation.
///
/// \param arg Node that produces the input tensor.
Log(const Output<Node>& arg);
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v0
} // namespace op
} // namespace ov

View File

@ -0,0 +1,43 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ngraph/op/op.hpp"
namespace ov {
namespace op {
namespace v5 {
class OPENVINO_API LogSoftmax : public Op {
public:
OPENVINO_RTTI_DECLARATION;
LogSoftmax() = default;
/// \brief Constructs a LogSoftmax operation.
///
/// \param arg Node that produces the first input tensor.<br>
/// `[d0, ...]`
/// \param axis The axis position (0-based) on which to calculate the LogSoftmax.
///
/// Output `[d0, ...]`
///
LogSoftmax(const Output<Node>& arg, const int64_t axis);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
int64_t get_axis() const {
return m_axis;
}
void set_axis(const int64_t axis) {
m_axis = axis;
}
private:
int64_t m_axis = 1;
};
} // namespace v5
} // namespace op
} // namespace ov
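For reference, the numerically stable per-slice computation the op performs along `axis`, sketched in plain C++ (illustrative, not library code):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// log_softmax(x)_i = x_i - log(sum_j exp(x_j)); subtracting the max keeps exp() in range.
std::vector<float> log_softmax_ref(const std::vector<float>& x) {
    const float m = *std::max_element(x.begin(), x.end());
    float sum = 0.0f;
    for (float v : x)
        sum += std::exp(v - m);
    std::vector<float> y(x.size());
    for (std::size_t i = 0; i < x.size(); ++i)
        y[i] = (x[i] - m) - std::log(sum);
    return y;
}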

View File

@ -0,0 +1,90 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include "openvino/core/function.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/util/sub_graph_base.hpp"
namespace ov {
namespace op {
namespace v5 {
/// \brief Iterate a body over tensors, accumulating into tensors.
class NGRAPH_API Loop : public op::util::SubGraphOp {
public:
/// \brief Allows to define the purpose of inputs/outputs in the body
struct SpecialBodyPorts {
SpecialBodyPorts() = default;
SpecialBodyPorts(int64_t in_current_iteration_input_idx, int64_t in_body_condition_output_idx)
: current_iteration_input_idx(in_current_iteration_input_idx),
body_condition_output_idx(in_body_condition_output_idx) {}
// -1 means the input is not provided; this input is optional
int64_t current_iteration_input_idx = -1;
// -1 means the output is not provided;
// this output is required, so an exception is thrown if it is not provided
int64_t body_condition_output_idx = -1;
};
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs a Loop operation.
Loop() = default;
/// \brief Constructs a Loop operation.
///
/// \param trip_count Node specifies the maximum number of iterations.
/// \param execution_condition Node determines whether to execute the first
/// iteration or not.
Loop(const Output<Node>& trip_count, const Output<Node>& execution_condition);
Output<Node> get_concatenated_slices(const Output<Node>& value,
int64_t start,
int64_t stride,
int64_t part_size,
int64_t end,
int64_t axis) override;
void set_special_body_ports(const SpecialBodyPorts& special_body_ports) {
m_special_body_ports = special_body_ports;
}
SpecialBodyPorts get_special_body_ports() const {
return m_special_body_ports;
}
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
protected:
Loop(const Loop&);
private:
void clone_to(Loop& dst, const OutputVector& new_args) const;
SpecialBodyPorts m_special_body_ports;
};
} // namespace v5
} // namespace op
template <>
class NGRAPH_API AttributeAdapter<op::v5::Loop::SpecialBodyPorts>
: public DirectValueAccessor<op::v5::Loop::SpecialBodyPorts> {
public:
AttributeAdapter(op::v5::Loop::SpecialBodyPorts& value)
: DirectValueAccessor<op::v5::Loop::SpecialBodyPorts>(value) {}
static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v5::Loop::SpecialBodyPorts>", 0};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
};
} // namespace ov
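A wiring sketch for the special body ports (set_function is the SubGraphOp base API, as TensorIterator::set_body further below also shows; body preparation and the per-input/per-output descriptions of SubGraphOp are assumed and omitted here):

#include <memory>

std::shared_ptr<ov::Node> make_loop(const ov::Output<ov::Node>& trip_count,
                                    const ov::Output<ov::Node>& exec_cond,
                                    const std::shared_ptr<ov::Function>& body) {
    auto loop = std::make_shared<ov::op::v5::Loop>(trip_count, exec_cond);
    loop->set_function(body);
    // No current-iteration input (-1, optional); body output 0 is the required continue condition.
    loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0});
    return loop;
}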

View File

@ -0,0 +1,78 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ngraph/op/op.hpp"
namespace ov {
namespace op {
namespace v0 {
// clang-format off
/// \brief Elementwise Local Response Normalization (LRN) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[n, c, d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$N[n, c, d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[n, c, d_1,\dots,d_n] = \frac{N[n,c,d_1,\dots,d_n]}{\left(bias + \frac{alpha}{nsize} \cdot \sum_{i=\max(0,\,c-(nsize-1)/2)}^{\min(C-1,\,c+(nsize-1)/2)} N[n,i,d_1,\dots,d_n]^{2}\right)^{beta}}\f$ |
// clang-format on
class NGRAPH_API LRN : public Op {
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs a LRN operation.
LRN() = default;
/// \brief Constructs a LRN operation.
///
/// \param arg Node that produces the input tensor.
LRN(const Output<Node>& arg, double alpha, double beta, double bias, size_t size);
LRN(const Output<Node>& arg, const Output<Node>& axes, double alpha, double beta, double bias, size_t size);
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
double get_alpha() const {
return m_alpha;
}
void set_alpha(double alpha) {
m_alpha = alpha;
}
double get_beta() const {
return m_beta;
}
void set_beta(double beta) {
m_beta = beta;
}
double get_bias() const {
return m_bias;
}
void set_bias(double bias) {
m_bias = bias;
}
size_t get_nsize() const {
return m_size;
}
void set_nsize(size_t size) {
m_size = size;
}
AxisSet get_reduction_axes() const;
protected:
double m_alpha;
double m_beta;
double m_bias;
size_t m_size;
};
} // namespace v0
} // namespace op
} // namespace ov
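A construction sketch with classic AlexNet-style hyper-parameters (values illustrative; the single-input constructor reduces over the channel axis by default):

#include <memory>

std::shared_ptr<ov::Node> make_lrn(const ov::Output<ov::Node>& arg) {
    // LRN(arg, alpha, beta, bias, size)
    return std::make_shared<ov::op::v0::LRN>(arg, 1e-4, 0.75, 1.0, 5);
}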

View File

@ -0,0 +1,397 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstddef>
#include <memory>
#include <string>
#include <vector>
#include "openvino/op/op.hpp"
#include "openvino/op/util/activation_functions.hpp"
#include "openvino/op/util/rnn_cell_base.hpp"
namespace ov {
namespace op {
enum class LSTMWeightsFormat {
FICO, // IE
ICOF, // PyTorch
IFCO, // DNNL, TF, MxNet
IFOC, // Caffe
IOFC, // ONNX
};
namespace v0 {
///
/// \brief Class for single lstm cell node.
///
/// \note The following implementation supports:
/// \li \c peepholes Gers & Schmidhuber (2000)
/// https://ieeexplore.ieee.org/document/861302
/// \li Coupling input and forget gates.
///
/// \note It calculates the following equations:
///
/// it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)
/// ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)
/// ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)
/// Ct = ft (.) Ct-1 + it (.) ct
/// ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)
/// Ht = ot (.) h(Ct)
///
/// * - Is a dot product,
/// (.) - is a Hadamard product (element-wise),
/// f, g, h - are activation functions.
///
/// \note This class represents only single *cell* (for current time step) and not
/// the whole LSTM Sequence layer
///
/// \sa LSTMSequence, RNNCell, GRUCell
///
class OPENVINO_API LSTMCell : public util::RNNCellBase {
public:
OPENVINO_RTTI_DECLARATION;
LSTMCell();
///
/// \brief Constructs LSTMCell node.
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] initial_cell_state The cell state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] W The gate weights tensor with shape:
/// [4*hidden_size, input_size].
/// \param[in] R The recurrence weights tensor with shape:
/// [4*hidden_size, hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] weights_format The order of gates in weights tensors. The
/// default format is IFCO since it is used by
/// DNNL.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on input of activation functions.
/// \param[in] input_forget Controls coupling input and forget gates.
///
LSTMCell(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& W,
const Output<Node>& R,
std::size_t hidden_size,
LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh", "tanh"},
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
float clip = 0.f,
bool input_forget = false);
///
/// \brief Constructs LSTMCell node.
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] initial_cell_state The cell state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] W The weight tensor with shape: [4*hidden_size,
/// input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [4*hidden_size, hidden_size].
/// \param[in] B The bias tensor for gates with shape:
/// [4*hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] weights_format The order of gates in weights tensors. The
/// default format is IFCO since it is used by
/// DNNL.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on input of activation functions.
/// \param[in] input_forget Controls coupling input and forget gates.
///
LSTMCell(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& W,
const Output<Node>& R,
const Output<Node>& B,
std::size_t hidden_size,
LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh", "tanh"},
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
float clip = 0.f,
bool input_forget = false);
///
/// \brief Constructs LSTMCell node.
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] initial_cell_state The cell state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] W The weight tensor with shape: [4*hidden_size,
/// input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [4*hidden_size, hidden_size].
/// \param[in] B The bias tensor for gates with shape:
/// [4*hidden_size].
/// \param[in] P The weight tensor for peepholes with shape:
/// [3*hidden_size] - 3 corresponds to the iof gates only.
/// The order is: input, output, forget gates.
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] weights_format The order of gates in weights tensors. The
/// default format is IFCO since it is used by
/// DNNL.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on input of activation functions.
/// \param[in] input_forget Controls coupling input and forget gates.
///
LSTMCell(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& W,
const Output<Node>& R,
const Output<Node>& B,
const Output<Node>& P,
std::size_t hidden_size,
LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh", "tanh"},
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
float clip = 0.f,
bool input_forget = false);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool get_input_forget() const {
return m_input_forget;
}
LSTMWeightsFormat get_weights_format() const {
return m_weights_format;
}
private:
///
/// \brief Creates the default bias input initialized with zeros.
///
/// \return The object of Output class.
///
Output<Node> get_default_bias_input() const;
///
/// \brief Creates the default peepholes input initialized with zeros.
///
/// \return The object of Output class.
///
Output<Node> get_default_peepholes_input() const;
///
/// \brief The Activation function f.
///
util::ActivationFunction m_activation_f;
///
/// \brief The Activation function g.
///
util::ActivationFunction m_activation_g;
///
/// \brief The Activation function h.
///
util::ActivationFunction m_activation_h;
///
/// \brief Controls whether to couple input and forget gates.
///
bool m_input_forget = false;
///
/// \brief The order of gates in weights tensors.
///
LSTMWeightsFormat m_weights_format;
static constexpr std::size_t s_gates_count{4};
static constexpr std::size_t s_peepholes_count{3};
};
} // namespace v0
namespace v4 {
///
/// \brief Class for single lstm cell node.
///
/// \note Unlike v0, this implementation supports neither \c peepholes
/// (Gers & Schmidhuber (2000), https://ieeexplore.ieee.org/document/861302)
/// nor coupling of the input and forget gates.
///
/// \note It calculates the following equations:
///
/// it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)
/// ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Wbf + Rbf)
/// ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)
/// Ct = ft (.) Ct-1 + it (.) ct
/// ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Wbo + Rbo)
/// Ht = ot (.) h(Ct)
///
/// * - Is a dot product,
/// (.) - is a Hadamard product (element-wise),
/// f, g, h - are activation functions.
///
/// \note This class represents only single *cell* (for current time step) and not
/// the whole LSTM Sequence layer
///
/// \sa LSTMSequence, RNNCell, GRUCell
///
class OPENVINO_API LSTMCell : public util::RNNCellBase {
public:
OPENVINO_RTTI_DECLARATION;
LSTMCell();
///
/// \brief Constructs LSTMCell node.
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] initial_cell_state The cell state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] W The gate weights tensor with shape:
/// [4*hidden_size, input_size].
/// \param[in] R The recurrence weights tensor with shape:
/// [4*hidden_size, hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on input of activation functions.
LSTMCell(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& W,
const Output<Node>& R,
std::size_t hidden_size,
const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh", "tanh"},
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
float clip = 0.f);
///
/// \brief Constructs LSTMCell node.
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] initial_cell_state The cell state tensor at current time step
/// with shape: [batch_size, hidden_size].
/// \param[in] W The weight tensor with shape: [4*hidden_size,
/// input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [4*hidden_size, hidden_size].
/// \param[in] B The bias tensor for gates with shape:
/// [4*hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on input of activation functions.
///
LSTMCell(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& W,
const Output<Node>& R,
const Output<Node>& B,
std::size_t hidden_size,
const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh", "tanh"},
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
float clip = 0.f);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
private:
///
/// \brief Creates the default bias input initialized with zeros.
///
/// \return The object of Output class.
///
Output<Node> get_default_bias_input() const;
///
/// \brief The Activation function f.
///
util::ActivationFunction m_activation_f;
///
/// \brief The Activation function g.
///
util::ActivationFunction m_activation_g;
///
/// \brief The Activation function h.
///
util::ActivationFunction m_activation_h;
static constexpr std::size_t s_gates_count{4};
};
} // namespace v4
} // namespace op
OPENVINO_API
std::ostream& operator<<(std::ostream& s, const op::LSTMWeightsFormat& type);
template <>
class OPENVINO_API AttributeAdapter<op::LSTMWeightsFormat> : public EnumAttributeAdapterBase<op::LSTMWeightsFormat> {
public:
AttributeAdapter(op::LSTMWeightsFormat& value) : EnumAttributeAdapterBase<op::LSTMWeightsFormat>(value) {}
static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::LSTMWeightsFormat>", 1};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
};
} // namespace ov
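A construction sketch for the v4 cell above, which drops the peephole and weights-format machinery of v0 (shapes illustrative; gates_count is 4, so W/R have 4 * hidden_size rows; the bias input defaults to zeros, per get_default_bias_input):

#include <memory>
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Node> make_lstm_cell() {
    const std::size_t hidden_size = 4;  // weight rows = 4 * hidden_size = 16
    auto X = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 3});   // [batch, input_size]
    auto H = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 4});   // [batch, hidden_size]
    auto C = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 4});   // [batch, hidden_size]
    auto W = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{16, 3});
    auto R = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{16, 4});
    return std::make_shared<ov::op::v4::LSTMCell>(X, H, C, W, R, hidden_size);
}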

View File

@ -0,0 +1,196 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "openvino/op/constant.hpp"
#include "openvino/op/lstm_cell.hpp"
#include "openvino/op/util/attr_types.hpp"
#include "openvino/op/util/rnn_cell_base.hpp"
namespace ov {
namespace op {
namespace v0 {
///
/// \brief Class for lstm sequence node.
///
/// \note It follows the notation and equations defined in the ONNX standard:
/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM
///
/// \sa LSTMCell, RNNCell, GRUCell
///
///
class NGRAPH_API LSTMSequence : public Op {
public:
NGRAPH_RTTI_DECLARATION;
LSTMSequence();
using direction = RecurrentSequenceDirection;
size_t get_default_output_index() const override {
return no_default_index();
}
explicit LSTMSequence(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& sequence_lengths,
const Output<Node>& W,
const Output<Node>& R,
const Output<Node>& B,
const Output<Node>& P,
const std::int64_t hidden_size,
const direction lstm_direction,
LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
const std::vector<float> activations_alpha = {},
const std::vector<float> activations_beta = {},
const std::vector<std::string> activations = {"sigmoid", "tanh", "tanh"},
const float clip_threshold = 0,
const bool input_forget = false);
explicit LSTMSequence(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& sequence_lengths,
const Output<Node>& W,
const Output<Node>& R,
const Output<Node>& B,
const std::int64_t hidden_size,
const direction lstm_direction,
LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
const std::vector<std::string>& activations = {"sigmoid", "tanh", "tanh"},
const float clip_threshold = 0,
const bool input_forget = false);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
std::vector<float> get_activations_alpha() const {
return m_activations_alpha;
}
std::vector<float> get_activations_beta() const {
return m_activations_beta;
}
std::vector<std::string> get_activations() const {
return m_activations;
}
float get_clip_threshold() const {
return m_clip_threshold;
}
direction get_direction() const {
return m_direction;
}
std::int64_t get_hidden_size() const {
return m_hidden_size;
}
bool get_input_forget() const {
return m_input_forget;
}
LSTMWeightsFormat get_weights_format() const {
return m_weights_format;
}
private:
///
/// \brief Gets the masked value according to sequence length in a batch.
///
/// \note Zeros out values or sets them to default value for inputs with
/// sequence length shorter than the currently processed time step.
///
/// \param[in] data The input value.
/// \param[in] time_step The current time step denoting sequence length.
/// \param[in] batch_axis The batch axis index of data tensor.
/// \param[in] default_value The default value for masked elements.
///
/// \return The masked value.
///
std::shared_ptr<Node> get_masked_node(const Output<Node>& data,
std::int32_t time_step,
std::size_t batch_axis = 0,
const Output<Node>& default_value = Output<Node>()) const;
OutputVector lstm_pass(bool is_reverse = false) const;
// Split (bi-directional) and squeeze input data to remove the 'num_direction' dimension.
std::shared_ptr<Node> prepare_input(Output<Node> node, bool is_reverse, size_t num_direction_axis = 0) const;
std::vector<float> m_activations_alpha;
std::vector<float> m_activations_beta;
std::vector<std::string> m_activations;
float m_clip_threshold;
direction m_direction;
std::int64_t m_hidden_size;
bool m_input_forget;
LSTMWeightsFormat m_weights_format;
};
} // namespace v0
namespace v5 {
///
/// \brief Class for lstm sequence node.
///
/// \note It follows the notation and equations defined in the ONNX standard:
/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM
///
/// \sa LSTMCell, RNNCell, GRUCell
///
///
class NGRAPH_API LSTMSequence : public util::RNNCellBase {
public:
NGRAPH_RTTI_DECLARATION;
LSTMSequence() = default;
using direction = RecurrentSequenceDirection;
size_t get_default_output_index() const override {
return no_default_index();
}
explicit LSTMSequence(const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& initial_cell_state,
const Output<Node>& sequence_lengths,
const Output<Node>& W,
const Output<Node>& R,
const Output<Node>& B,
const std::int64_t hidden_size,
const direction lstm_direction,
const std::vector<float>& activations_alpha = {},
const std::vector<float>& activations_beta = {},
const std::vector<std::string>& activations = {"sigmoid", "tanh", "tanh"},
const float clip = 0.f)
: RNNCellBase({X, initial_hidden_state, initial_cell_state, sequence_lengths, W, R, B},
hidden_size,
clip,
activations,
activations_alpha,
activations_beta),
m_direction(lstm_direction) {
constructor_validate_and_infer_types();
}
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
direction get_direction() const {
return m_direction;
}
private:
direction m_direction;
};
} // namespace v5
} // namespace op
} // namespace ov

View File

@ -0,0 +1,78 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/op.hpp"
namespace ov {
namespace op {
namespace v0 {
/// \brief A function parameter.
///
/// Parameters are nodes that represent the arguments that will be passed to
/// user-defined functions. Function creation requires a sequence of parameters.
/// Basic graph operations do not need parameters attached to a function.
class OPENVINO_API Parameter : public op::Op {
public:
OPENVINO_RTTI_DECLARATION;
/// \brief Constructs a tensor-typed parameter node.
Parameter() = default;
/// \brief Constructs a tensor-typed parameter node.
///
/// \param element_type The element type of the parameter.
/// \param pshape The partial shape of the parameter.
Parameter(const ngraph::element::Type& element_type, const PartialShape& pshape);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool is_relevant_to_shapes() const;
void set_is_relevant_to_shapes(bool is_relevant);
const PartialShape& get_partial_shape() const {
return m_partial_shape;
}
PartialShape& get_partial_shape() {
return m_partial_shape;
}
void set_partial_shape(const PartialShape& partial_shape) {
m_partial_shape = partial_shape;
}
const element::Type& get_element_type() const {
return m_element_type;
}
void set_element_type(const element::Type& element_type) {
m_element_type = element_type;
}
protected:
PartialShape m_partial_shape;
element::Type m_element_type;
bool m_is_relevant_to_shapes{false};
};
} // namespace v0
} // namespace op
using ParameterVector = std::vector<std::shared_ptr<op::v0::Parameter>>;
template <>
class OPENVINO_API AttributeAdapter<ParameterVector> : public VisitorAdapter {
public:
AttributeAdapter(ParameterVector& ref);
bool visit_attributes(AttributeVisitor& visitor) override;
static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<ParameterVector>", 0};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
protected:
ParameterVector& m_ref;
};
} // namespace ov

View File

@ -0,0 +1,61 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/op.hpp"
namespace ov {
namespace op {
namespace v0 {
class OPENVINO_API Result : public Op {
public:
OPENVINO_RTTI_DECLARATION;
/// \brief Allows a value to be used as a function result.
Result() = default;
/// \brief Allows a value to be used as a function result.
///
/// \param arg Node that produces the input tensor.
Result(const Output<Node>& arg, bool needs_default_layout = false);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void set_needs_default_layout(bool val) {
m_needs_default_layout = val;
}
bool needs_default_layout() const {
return m_needs_default_layout;
}
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override;
private:
bool m_needs_default_layout{false};
};
} // namespace v0
} // namespace op
using ResultVector = std::vector<std::shared_ptr<op::v0::Result>>;
template <>
class OPENVINO_API AttributeAdapter<ResultVector> : public VisitorAdapter {
public:
AttributeAdapter(ResultVector& ref);
bool visit_attributes(AttributeVisitor& visitor) override;
static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<ResultVector>", 0};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
protected:
ResultVector& m_ref;
};
} // namespace ov

View File

@ -0,0 +1,43 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include "openvino/core/function.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/util/sub_graph_base.hpp"
namespace ov {
namespace op {
namespace v0 {
/// \brief Iterate a body over tensors, accumulating into tensors.
class OPENVINO_API TensorIterator : public op::util::SubGraphOp {
public:
OPENVINO_RTTI_DECLARATION;
bool visit_attributes(AttributeVisitor& visitor) override;
TensorIterator() = default;
explicit TensorIterator(const OutputVector& values);
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return the body of the iteration
std::shared_ptr<Function> get_body() const {
return m_bodies[0];
}
/// \param body The body of the iteration to set
void set_body(const std::shared_ptr<Function>& body) {
set_function(body);
}
void validate_and_infer_types() override;
void revalidate_and_infer_types_for_body_ops();
private:
void try_to_set_num_iterations_if_no_slice_inputs();
};
} // namespace v0
} // namespace op
} // namespace ov
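A sketch of driving the relocated TensorIterator through the op::util::SubGraphOp API it inherits (illustrative; set_sliced_input and get_iter_value are assumed unchanged by this move):

#include <memory>

#include "openvino/core/function.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/tensor_iterator.hpp"

std::shared_ptr<ov::op::v0::TensorIterator> make_loop(const ov::Output<ov::Node>& data) {
    // The body consumes one [1, 16] slice of `data` per iteration.
    auto body_param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 16});
    auto body_result = std::make_shared<ov::op::v0::Result>(body_param);

    auto ti = std::make_shared<ov::op::v0::TensorIterator>();
    ti->set_body(std::make_shared<ov::Function>(ov::ResultVector{body_result},
                                                ov::ParameterVector{body_param}));
    // Slice `data` along axis 0: start 0, stride 1, part_size 1, end -1 (whole axis).
    ti->set_sliced_input(body_param, data, 0, 1, 1, -1, 0);
    ti->get_iter_value(body_result, -1);  // expose the last iteration's value
    return ti;
}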

View File

@ -12,7 +12,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v1::Gather, "Gather", 1, op::util::GatherBase); OPENVINO_RTTI_DEFINITION(op::v1::Gather, "Gather", 1, op::util::GatherBase);
op::v1::Gather::Gather(const Output<Node>& params, const Output<Node>& indices, const Output<Node>& axes) op::v1::Gather::Gather(const Output<Node>& params, const Output<Node>& indices, const Output<Node>& axes)
: GatherBase(params, indices, axes) { : GatherBase(params, indices, axes) {
@ -36,7 +36,7 @@ shared_ptr<Node> op::v1::Gather::clone_with_new_inputs(const OutputVector& new_a
return make_shared<v1::Gather>(new_args.at(0), new_args.at(1), new_args.at(2)); return make_shared<v1::Gather>(new_args.at(0), new_args.at(1), new_args.at(2));
} }
NGRAPH_RTTI_DEFINITION(op::v7::Gather, "Gather", 7, op::util::GatherBase); OPENVINO_RTTI_DEFINITION(op::v7::Gather, "Gather", 7, op::util::GatherBase);
op::v7::Gather::Gather(const Output<Node>& data, op::v7::Gather::Gather(const Output<Node>& data,
const Output<Node>& indices, const Output<Node>& indices,
@ -78,7 +78,7 @@ shared_ptr<Node> op::v7::Gather::clone_with_new_inputs(const OutputVector& new_a
return make_shared<v7::Gather>(new_args.at(0), new_args.at(1), new_args.at(2), m_batch_dims); return make_shared<v7::Gather>(new_args.at(0), new_args.at(1), new_args.at(2), m_batch_dims);
} }
NGRAPH_RTTI_DEFINITION(op::v8::Gather, "Gather", 8, op::util::GatherBase); OPENVINO_RTTI_DEFINITION(op::v8::Gather, "Gather", 8, op::util::GatherBase);
op::v8::Gather::Gather(const Output<Node>& data, op::v8::Gather::Gather(const Output<Node>& data,
const Output<Node>& indices, const Output<Node>& indices,
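The NGRAPH_RTTI_DEFINITION -> OPENVINO_RTTI_DEFINITION swap above repeats mechanically through the rest of this diff, with the (class, type name, version, optional base) arguments left untouched. A sketch of how the renamed declaration/definition pair fits together, for a hypothetical MyOp that is not part of this change:

// my_op.hpp
#include "openvino/op/op.hpp"

namespace ov {
namespace op {
namespace v1 {
class OPENVINO_API MyOp : public Op {
public:
    OPENVINO_RTTI_DECLARATION;  // declares type_info / get_type_info() for RTTI
    MyOp() = default;
};
}  // namespace v1
}  // namespace op
}  // namespace ov

// my_op.cpp: the three-argument form registers no parent; the four-argument
// form names a base class, as the Gather definitions above do with GatherBase.
OPENVINO_RTTI_DEFINITION(ov::op::v1::MyOp, "MyOp", 1);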

View File

@ -12,7 +12,7 @@ using namespace ngraph;
// ------------------------------ V6 ------------------------------ // ------------------------------ V6 ------------------------------
NGRAPH_RTTI_DEFINITION(op::v6::GatherElements, "GatherElements", 6); OPENVINO_RTTI_DEFINITION(op::v6::GatherElements, "GatherElements", 6);
op::v6::GatherElements::GatherElements(const Output<Node>& data, const Output<Node>& indices, const int64_t axis) op::v6::GatherElements::GatherElements(const Output<Node>& data, const Output<Node>& indices, const int64_t axis)
: Op({data, indices}), : Op({data, indices}),

View File

@ -12,7 +12,7 @@ using namespace ngraph;
// ------------------------------ V5 ------------------------------ // ------------------------------ V5 ------------------------------
NGRAPH_RTTI_DEFINITION(op::v5::GatherND, "GatherND", 5); OPENVINO_RTTI_DEFINITION(op::v5::GatherND, "GatherND", 5);
op::v5::GatherND::GatherND(const Output<Node>& data, const Output<Node>& indices, const size_t batch_dims) op::v5::GatherND::GatherND(const Output<Node>& data, const Output<Node>& indices, const size_t batch_dims)
: Op({data, indices}), : Op({data, indices}),

View File

@ -10,7 +10,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v1::GatherTree, "GatherTree", 1); OPENVINO_RTTI_DEFINITION(op::v1::GatherTree, "GatherTree", 1);
op::v1::GatherTree::GatherTree(const Output<Node>& step_ids, op::v1::GatherTree::GatherTree(const Output<Node>& step_ids,
const Output<Node>& parent_idx, const Output<Node>& parent_idx,

View File

@ -14,7 +14,7 @@ using namespace std;
using namespace ngraph; using namespace ngraph;
// ------------------------------ V0 ------------------------------ // ------------------------------ V0 ------------------------------
NGRAPH_RTTI_DEFINITION(op::v0::Gelu, "Gelu", 0); OPENVINO_RTTI_DEFINITION(op::v0::Gelu, "Gelu", 0);
op::v0::Gelu::Gelu() : Op() {} op::v0::Gelu::Gelu() : Op() {}
@ -60,13 +60,13 @@ NGRAPH_API EnumNames<ngraph::op::GeluApproximationMode>& EnumNames<ngraph::op::G
return enum_names; return enum_names;
} }
constexpr DiscreteTypeInfo AttributeAdapter<ngraph::op::GeluApproximationMode>::type_info;
} // namespace ov
std::ostream& op::operator<<(std::ostream& s, const op::GeluApproximationMode& type) { std::ostream& op::operator<<(std::ostream& s, const op::GeluApproximationMode& type) {
return s << as_string(type); return s << as_string(type);
} }
NGRAPH_RTTI_DEFINITION(op::v7::Gelu, "Gelu", 7); constexpr DiscreteTypeInfo AttributeAdapter<ngraph::op::GeluApproximationMode>::type_info;
} // namespace ov
OPENVINO_RTTI_DEFINITION(op::v7::Gelu, "Gelu", 7);
op::v7::Gelu::Gelu(const Output<Node>& data, GeluApproximationMode mode) op::v7::Gelu::Gelu(const Output<Node>& data, GeluApproximationMode mode)
: UnaryElementwiseArithmetic(data), : UnaryElementwiseArithmetic(data),

View File

@ -50,7 +50,7 @@ bool evaluate_greater(const HostTensorPtr& arg0,
//-------------------------------------- v1 ------------------------------------ //-------------------------------------- v1 ------------------------------------
NGRAPH_RTTI_DEFINITION(op::v1::Greater, "Greater", 1, op::util::BinaryElementwiseComparison); OPENVINO_RTTI_DEFINITION(op::v1::Greater, "Greater", 1, op::util::BinaryElementwiseComparison);
op::v1::Greater::Greater(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast) op::v1::Greater::Greater(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
: BinaryElementwiseComparison(arg0, arg1, auto_broadcast) { : BinaryElementwiseComparison(arg0, arg1, auto_broadcast) {

View File

@ -51,7 +51,7 @@ bool evaluate_greater_equal(const HostTensorPtr& arg0,
//---------------------------------- v1 ---------------------------------------- //---------------------------------- v1 ----------------------------------------
NGRAPH_RTTI_DEFINITION(op::v1::GreaterEqual, "GreaterEqual", 1, op::util::BinaryElementwiseComparison); OPENVINO_RTTI_DEFINITION(op::v1::GreaterEqual, "GreaterEqual", 1, op::util::BinaryElementwiseComparison);
op::v1::GreaterEqual::GreaterEqual(const Output<Node>& arg0, op::v1::GreaterEqual::GreaterEqual(const Output<Node>& arg0,
const Output<Node>& arg1, const Output<Node>& arg1,

View File

@ -16,7 +16,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v0::GRN, "GRN", 0); OPENVINO_RTTI_DEFINITION(op::v0::GRN, "GRN", 0);
op::v0::GRN::GRN(const Output<Node>& data, float bias) : Op({data}), m_bias(bias) { op::v0::GRN::GRN(const Output<Node>& data, float bias) : Op({data}), m_bias(bias) {
constructor_validate_and_infer_types(); constructor_validate_and_infer_types();

View File

@ -22,10 +22,10 @@ using namespace ngraph;
// v1::GroupConvolution // v1::GroupConvolution
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
NGRAPH_RTTI_DEFINITION(op::v1::GroupConvolution, "GroupConvolution", 1); OPENVINO_RTTI_DEFINITION(op::v1::GroupConvolution, "GroupConvolution", 1);
shared_ptr<Node> op::v1::GroupConvolution::get_default_value() const { shared_ptr<Node> op::v1::GroupConvolution::get_default_value() const {
return op::Constant::create(get_element_type(), get_shape(), {0}); return op::v0::Constant::create(get_element_type(), get_shape(), {0});
} }
op::v1::GroupConvolution::GroupConvolution(const Output<Node>& data_batch, op::v1::GroupConvolution::GroupConvolution(const Output<Node>& data_batch,
@ -249,7 +249,7 @@ shared_ptr<Node> op::v1::GroupConvolution::clone_with_new_inputs(const OutputVec
// v1::GroupConvolutionBackpropData // v1::GroupConvolutionBackpropData
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
NGRAPH_RTTI_DEFINITION(op::v1::GroupConvolutionBackpropData, "GroupConvolutionBackpropData", 1); OPENVINO_RTTI_DEFINITION(op::v1::GroupConvolutionBackpropData, "GroupConvolutionBackpropData", 1);
op::v1::GroupConvolutionBackpropData::GroupConvolutionBackpropData() op::v1::GroupConvolutionBackpropData::GroupConvolutionBackpropData()
: Op(), : Op(),
@ -371,7 +371,7 @@ const PartialShape op::v1::GroupConvolutionBackpropData::get_convolution_output_
void op::v1::GroupConvolutionBackpropData::set_output_shape(const Shape& shape) { void op::v1::GroupConvolutionBackpropData::set_output_shape(const Shape& shape) {
this->input(2).replace_source_output( this->input(2).replace_source_output(
op::Constant::create(this->get_input_element_type(2), Shape{shape.size()}, shape)->output(0)); op::v0::Constant::create(this->get_input_element_type(2), Shape{shape.size()}, shape)->output(0));
} }
void op::v1::GroupConvolutionBackpropData::infer_conv_backprop_output_spatial_shape( void op::v1::GroupConvolutionBackpropData::infer_conv_backprop_output_spatial_shape(
@ -393,7 +393,7 @@ void op::v1::GroupConvolutionBackpropData::infer_conv_backprop_output_spatial_sh
int64_t val = strides[i] * (input_data_shape[i].get_length() - 1) + int64_t val = strides[i] * (input_data_shape[i].get_length() - 1) +
dilations[i] * (filters_shape[i].get_length() - 1) + 1 - pads_begin[i] - pads_end[i] + dilations[i] * (filters_shape[i].get_length() - 1) + 1 - pads_begin[i] - pads_end[i] +
output_padding[i]; output_padding[i];
output_spatial_shape.push_back(val); output_spatial_shape.emplace_back(val);
} else { } else {
output_spatial_shape.push_back(Dimension::dynamic()); output_spatial_shape.push_back(Dimension::dynamic());
} }

View File

@ -14,7 +14,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
constexpr NodeTypeInfo op::v3::GRUCell::type_info; OPENVINO_RTTI_DEFINITION(op::v3::GRUCell, "GRUCell", 3, op::util::RNNCellBase);
op::v3::GRUCell::GRUCell() : m_linear_before_reset(false) { op::v3::GRUCell::GRUCell() : m_linear_before_reset(false) {
m_activations = {"sigmoid", "tanh"}; m_activations = {"sigmoid", "tanh"};
@ -172,9 +172,9 @@ void op::v3::GRUCell::validate_and_infer_types() {
void op::v3::GRUCell::add_default_bias_input() { void op::v3::GRUCell::add_default_bias_input() {
Output<Node> B = Output<Node> B =
op::Constant::create(get_input_element_type(0), op::v0::Constant::create(get_input_element_type(0),
Shape{(s_gates_count + m_linear_before_reset) * get_hidden_size()}, Shape{(s_gates_count + m_linear_before_reset) * get_hidden_size()},
vector<float>((s_gates_count + m_linear_before_reset) * get_hidden_size(), 0.f)); vector<float>((s_gates_count + m_linear_before_reset) * get_hidden_size(), 0.f));
set_argument(4, B); set_argument(4, B);
} }

View File

@ -15,7 +15,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v5::GRUSequence, "GRUSequence", 5); OPENVINO_RTTI_DEFINITION(op::v5::GRUSequence, "GRUSequence", 5);
op::v5::GRUSequence::GRUSequence() op::v5::GRUSequence::GRUSequence()
: m_direction(op::RecurrentSequenceDirection::FORWARD), : m_direction(op::RecurrentSequenceDirection::FORWARD),

View File

@ -12,7 +12,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v0::HardSigmoid, "HardSigmoid", 0); OPENVINO_RTTI_DEFINITION(op::v0::HardSigmoid, "HardSigmoid", 0);
op::v0::HardSigmoid::HardSigmoid() : Op() {} op::v0::HardSigmoid::HardSigmoid() : Op() {}
@ -34,7 +34,7 @@ void op::v0::HardSigmoid::validate_and_infer_types() {
if (alpha_pshape.is_static()) { if (alpha_pshape.is_static()) {
const auto alpha_shape = alpha_pshape.to_shape(); const auto alpha_shape = alpha_pshape.to_shape();
NODE_VALIDATION_CHECK(this, NODE_VALIDATION_CHECK(this,
is_scalar(alpha_shape), ngraph::is_scalar(alpha_shape),
"A scalar is expected for the 'alpha' input. Got: ", "A scalar is expected for the 'alpha' input. Got: ",
alpha_shape); alpha_shape);
} }
@ -42,7 +42,7 @@ void op::v0::HardSigmoid::validate_and_infer_types() {
if (beta_pshape.is_static()) { if (beta_pshape.is_static()) {
const auto beta_shape = beta_pshape.to_shape(); const auto beta_shape = beta_pshape.to_shape();
NODE_VALIDATION_CHECK(this, NODE_VALIDATION_CHECK(this,
is_scalar(beta_shape), ngraph::is_scalar(beta_shape),
"A scalar is expected for the 'beta' input. Got: ", "A scalar is expected for the 'beta' input. Got: ",
beta_shape); beta_shape);
} }

View File

@ -15,7 +15,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v5::HSigmoid, "HSigmoid", 5); OPENVINO_RTTI_DEFINITION(op::v5::HSigmoid, "HSigmoid", 5, op::util::UnaryElementwiseArithmetic);
op::v5::HSigmoid::HSigmoid(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) { op::v5::HSigmoid::HSigmoid(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types(); constructor_validate_and_infer_types();

View File

@ -14,7 +14,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v4::HSwish, "HSwish", 4); OPENVINO_RTTI_DEFINITION(op::v4::HSwish, "HSwish", 4, op::util::UnaryElementwiseArithmetic);
op::v4::HSwish::HSwish(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) { op::v4::HSwish::HSwish(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types(); constructor_validate_and_infer_types();

View File

@ -18,7 +18,7 @@
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v7::IDFT, "IDFT", 7, util::FFTBase); OPENVINO_RTTI_DEFINITION(op::v7::IDFT, "IDFT", 7, util::FFTBase);
op::v7::IDFT::IDFT(const Output<Node>& data, const Output<Node>& axes) : FFTBase(data, axes) { op::v7::IDFT::IDFT(const Output<Node>& data, const Output<Node>& axes) : FFTBase(data, axes) {
constructor_validate_and_infer_types(); constructor_validate_and_infer_types();

View File

@ -18,7 +18,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(ngraph::op::v8::If, "If", 8, MultiSubGraphOp); OPENVINO_RTTI_DEFINITION(ngraph::op::v8::If, "If", 8, MultiSubGraphOp);
op::v8::If::If() : MultiSubGraphOp(2) {} op::v8::If::If() : MultiSubGraphOp(2) {}
@ -49,11 +49,11 @@ static ngraph::PartialShape resolve_shape(const ngraph::PartialShape& then_pshap
if ((*then_it).is_dynamic() || (*else_it).is_dynamic()) { if ((*then_it).is_dynamic() || (*else_it).is_dynamic()) {
new_dims.push_back(Dimension::dynamic()); new_dims.push_back(Dimension::dynamic());
} else if (*then_it == *else_it) { } else if (*then_it == *else_it) {
new_dims.push_back(Dimension(*then_it)); new_dims.emplace_back(*then_it);
} else { } else {
auto dim_min = std::min((*then_it).get_min_length(), (*else_it).get_min_length()); auto dim_min = std::min((*then_it).get_min_length(), (*else_it).get_min_length());
auto dim_max = std::max((*then_it).get_min_length(), (*else_it).get_min_length()); auto dim_max = std::max((*then_it).get_min_length(), (*else_it).get_min_length());
new_dims.push_back(Dimension(dim_min, dim_max)); new_dims.emplace_back(dim_min, dim_max);
} }
} }
@ -125,7 +125,7 @@ void op::v8::If::validate_and_infer_types() {
// shape and type inference for outputs from If operations // shape and type inference for outputs from If operations
for (const auto& output_descr : m_output_descriptions[cond_index]) { for (const auto& output_descr : m_output_descriptions[cond_index]) {
auto body_value = body->get_results().at(output_descr->m_body_value_index)->input_value(0); auto body_value = body->get_results().at(output_descr->m_body_value_index)->input_value(0);
auto body_value_partial_shape = body_value.get_partial_shape(); const auto& body_value_partial_shape = body_value.get_partial_shape();
set_output_type(output_descr->m_output_index, body_value.get_element_type(), body_value_partial_shape); set_output_type(output_descr->m_output_index, body_value.get_element_type(), body_value_partial_shape);
} }
} else // condition is non constant } else // condition is non constant
@ -236,8 +236,8 @@ bool op::v8::If::has_evaluate() const {
} }
void op::v8::If::set_input(const Output<Node>& value, void op::v8::If::set_input(const Output<Node>& value,
const std::shared_ptr<Parameter>& then_parameter, const std::shared_ptr<v0::Parameter>& then_parameter,
const std::shared_ptr<Parameter>& else_parameter) { const std::shared_ptr<v0::Parameter>& else_parameter) {
NGRAPH_CHECK(then_parameter != nullptr || else_parameter != nullptr, NGRAPH_CHECK(then_parameter != nullptr || else_parameter != nullptr,
"Missing parameters! Both parameters are nullptr!"); "Missing parameters! Both parameters are nullptr!");
auto then_param_index = m_bodies[THEN_BODY_INDEX]->get_parameter_index(then_parameter); auto then_param_index = m_bodies[THEN_BODY_INDEX]->get_parameter_index(then_parameter);
@ -253,8 +253,8 @@ void op::v8::If::set_input(const Output<Node>& value,
set_invariant_inputs(value, {then_parameter, else_parameter}); set_invariant_inputs(value, {then_parameter, else_parameter});
} }
Output<Node> op::v8::If::set_output(const std::shared_ptr<Result>& then_result, Output<Node> op::v8::If::set_output(const std::shared_ptr<v0::Result>& then_result,
const std::shared_ptr<Result>& else_result) { const std::shared_ptr<v0::Result>& else_result) {
NGRAPH_CHECK(then_result != nullptr, "Incorrect result in \"then_body\"! Result cant be \'nullptr\'"); NGRAPH_CHECK(then_result != nullptr, "Incorrect result in \"then_body\"! Result cant be \'nullptr\'");
NGRAPH_CHECK(else_result != nullptr, "Incorrect result in \"else_body\"! Result cant be \'nullptr\'"); NGRAPH_CHECK(else_result != nullptr, "Incorrect result in \"else_body\"! Result cant be \'nullptr\'");
auto then_result_id = m_bodies[THEN_BODY_INDEX]->get_result_index(then_result); auto then_result_id = m_bodies[THEN_BODY_INDEX]->get_result_index(then_result);

View File

@ -17,11 +17,9 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v0::Interpolate, "Interpolate", 0); OPENVINO_RTTI_DEFINITION(op::v0::Interpolate, "Interpolate", 0);
op::v0::Interpolate::Interpolate(const Output<Node>& image, op::v0::Interpolate::Interpolate(const Output<Node>& image, const Output<Node>& output_shape, const Attributes& attrs)
const Output<Node>& output_shape,
const op::v0::InterpolateAttrs& attrs)
: Op({image, output_shape}), : Op({image, output_shape}),
m_attrs(attrs) { m_attrs(attrs) {
constructor_validate_and_infer_types(); constructor_validate_and_infer_types();
@ -69,7 +67,7 @@ shared_ptr<Node> op::v0::Interpolate::clone_with_new_inputs(const OutputVector&
return make_shared<op::v0::Interpolate>(new_args.at(0), new_args.at(1), m_attrs); return make_shared<op::v0::Interpolate>(new_args.at(0), new_args.at(1), m_attrs);
} }
std::ostream& ngraph::operator<<(std::ostream& s, const op::v0::Interpolate::InterpolateMode& type) { std::ostream& ov::operator<<(std::ostream& s, const op::v0::Interpolate::InterpolateMode& type) {
return s << as_string(type); return s << as_string(type);
} }
@ -92,7 +90,7 @@ constexpr DiscreteTypeInfo AttributeAdapter<ngraph::op::v0::Interpolate::Interpo
// Interpolate v4 // Interpolate v4
NGRAPH_RTTI_DEFINITION(op::v4::Interpolate, "Interpolate", 4); OPENVINO_RTTI_DEFINITION(op::v4::Interpolate, "Interpolate", 4);
op::v4::Interpolate::Interpolate(const Output<Node>& image, op::v4::Interpolate::Interpolate(const Output<Node>& image,
const Output<Node>& output_shape, const Output<Node>& output_shape,
@ -481,22 +479,6 @@ bool op::v4::Interpolate::has_evaluate() const {
return false; return false;
} }
std::ostream& ngraph::operator<<(std::ostream& s, const op::v4::Interpolate::InterpolateMode& type) {
return s << as_string(type);
}
std::ostream& ngraph::operator<<(std::ostream& s, const op::v4::Interpolate::ShapeCalcMode& type) {
return s << as_string(type);
}
std::ostream& ngraph::operator<<(std::ostream& s, const op::v4::Interpolate::CoordinateTransformMode& type) {
return s << as_string(type);
}
std::ostream& ngraph::operator<<(std::ostream& s, const op::v4::Interpolate::NearestMode& type) {
return s << as_string(type);
}
namespace ov { namespace ov {
template <> template <>
NGRAPH_API EnumNames<ngraph::op::v4::Interpolate::InterpolateMode>& NGRAPH_API EnumNames<ngraph::op::v4::Interpolate::InterpolateMode>&
@ -553,4 +535,20 @@ EnumNames<ngraph::op::v4::Interpolate::NearestMode>::get() {
} }
constexpr DiscreteTypeInfo AttributeAdapter<ngraph::op::v4::Interpolate::NearestMode>::type_info; constexpr DiscreteTypeInfo AttributeAdapter<ngraph::op::v4::Interpolate::NearestMode>::type_info;
std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::InterpolateMode& type) {
return s << as_string(type);
}
std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::ShapeCalcMode& type) {
return s << as_string(type);
}
std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::CoordinateTransformMode& type) {
return s << as_string(type);
}
std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::NearestMode& type) {
return s << as_string(type);
}
} // namespace ov } // namespace ov
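With the enum types now compiled under namespace ov, keeping these operator<< overloads in the same namespace lets callers that already have a using-directive for ov (as the test suites did for ngraph) stream the enums directly. A small sketch, assuming the lowercase v4 enumerators of this era:

#include <iostream>

#include "ngraph/op/interpolate.hpp"

int main() {
    using namespace ov;  // brings ov::operator<< into ordinary lookup
    auto mode = op::v4::Interpolate::InterpolateMode::nearest;
    std::cout << mode << std::endl;  // prints "nearest" via as_string()
    return 0;
}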

View File

@ -50,7 +50,7 @@ bool evaluate_less(const HostTensorPtr& arg0,
// ----------------------------- v1 -------------------------------------------- // ----------------------------- v1 --------------------------------------------
NGRAPH_RTTI_DEFINITION(op::v1::Less, "Less", 1, op::util::BinaryElementwiseComparison); OPENVINO_RTTI_DEFINITION(op::v1::Less, "Less", 1, op::util::BinaryElementwiseComparison);
op::v1::Less::Less(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast) op::v1::Less::Less(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
: BinaryElementwiseComparison(arg0, arg1, auto_broadcast) { : BinaryElementwiseComparison(arg0, arg1, auto_broadcast) {

View File

@ -13,7 +13,7 @@ using namespace ngraph;
// ---------------------------------- v1 --------------------------------------- // ---------------------------------- v1 ---------------------------------------
NGRAPH_RTTI_DEFINITION(op::v1::LessEqual, "LessEqual", 1, op::util::BinaryElementwiseComparison); OPENVINO_RTTI_DEFINITION(op::v1::LessEqual, "LessEqual", 1, op::util::BinaryElementwiseComparison);
op::v1::LessEqual::LessEqual(const Output<Node>& arg0, op::v1::LessEqual::LessEqual(const Output<Node>& arg0,
const Output<Node>& arg1, const Output<Node>& arg1,

View File

@ -12,7 +12,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v0::Log, "Log", 0); OPENVINO_RTTI_DEFINITION(op::v0::Log, "Log", 0, op::util::UnaryElementwiseArithmetic);
op::Log::Log(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) { op::Log::Log(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types(); constructor_validate_and_infer_types();

View File

@ -10,7 +10,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v5::LogSoftmax, "LogSoftmax", 5); OPENVINO_RTTI_DEFINITION(op::v5::LogSoftmax, "LogSoftmax", 5);
op::v5::LogSoftmax::LogSoftmax(const Output<Node>& arg, const int64_t axis) : Op({arg}), m_axis(axis) { op::v5::LogSoftmax::LogSoftmax(const Output<Node>& arg, const int64_t axis) : Op({arg}), m_axis(axis) {
constructor_validate_and_infer_types(); constructor_validate_and_infer_types();

View File

@ -15,7 +15,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v5::Loop, "Loop", 5); OPENVINO_RTTI_DEFINITION(op::v5::Loop, "Loop", 5, op::util::SubGraphOp);
op::v5::Loop::Loop(const Output<Node>& trip_count, const Output<Node>& execution_condition) : SubGraphOp() { op::v5::Loop::Loop(const Output<Node>& trip_count, const Output<Node>& execution_condition) : SubGraphOp() {
set_argument(0, trip_count); set_argument(0, trip_count);
@ -178,7 +178,7 @@ void op::v5::Loop::validate_and_infer_types() {
body_parameter->set_partial_shape(input_partial_shape); body_parameter->set_partial_shape(input_partial_shape);
} else if (auto invariant_input_description = } else if (auto invariant_input_description =
ov::as_type_ptr<TensorIterator::InvariantInputDescription>(input_description)) { ov::as_type_ptr<v0::TensorIterator::InvariantInputDescription>(input_description)) {
auto body_parameter = m_bodies[0]->get_parameters().at(invariant_input_description->m_body_parameter_index); auto body_parameter = m_bodies[0]->get_parameters().at(invariant_input_description->m_body_parameter_index);
auto body_param_partial_shape = body_parameter->get_partial_shape(); auto body_param_partial_shape = body_parameter->get_partial_shape();
@ -198,7 +198,7 @@ void op::v5::Loop::validate_and_infer_types() {
auto body_value = m_bodies[0]->get_results().at(output_description->m_body_value_index)->input_value(0); auto body_value = m_bodies[0]->get_results().at(output_description->m_body_value_index)->input_value(0);
if (auto concat_output_description = if (auto concat_output_description =
ov::as_type_ptr<TensorIterator::ConcatOutputDescription>(output_description)) { ov::as_type_ptr<v0::TensorIterator::ConcatOutputDescription>(output_description)) {
const auto& body_value_partial_shape = body_value.get_partial_shape(); const auto& body_value_partial_shape = body_value.get_partial_shape();
auto out_shape = body_value_partial_shape; auto out_shape = body_value_partial_shape;
if (zero_number_of_iter) { if (zero_number_of_iter) {
@ -220,7 +220,7 @@ void op::v5::Loop::validate_and_infer_types() {
} }
else if (auto body_output_description = else if (auto body_output_description =
ov::as_type_ptr<TensorIterator::BodyOutputDescription>(output_description)) { ov::as_type_ptr<v0::TensorIterator::BodyOutputDescription>(output_description)) {
const PartialShape& ps = body_value.get_partial_shape(); const PartialShape& ps = body_value.get_partial_shape();
if (ps.is_dynamic()) { if (ps.is_dynamic()) {
set_output_type(index, body_value.get_element_type(), ps); set_output_type(index, body_value.get_element_type(), ps);

View File

@ -14,10 +14,10 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::LRN, "LRN", 0); OPENVINO_RTTI_DEFINITION(op::v0::LRN, "LRN", 0);
op::LRN::LRN(const Output<Node>& arg, double alpha, double beta, double bias, size_t size) op::LRN::LRN(const Output<Node>& arg, double alpha, double beta, double bias, size_t size)
: LRN(arg, op::Constant::create(element::i64, Shape{1}, {1}), alpha, beta, bias, size) { : LRN(arg, op::v0::Constant::create(element::i64, Shape{1}, {1}), alpha, beta, bias, size) {
add_provenance_group_member(input_value(1).get_node_shared_ptr()); add_provenance_group_member(input_value(1).get_node_shared_ptr());
} }
@ -102,5 +102,5 @@ bool ngraph::op::v0::LRN::visit_attributes(AttributeVisitor& visitor) {
shared_ptr<Node> op::LRN::clone_with_new_inputs(const OutputVector& new_args) const { shared_ptr<Node> op::LRN::clone_with_new_inputs(const OutputVector& new_args) const {
NGRAPH_OP_SCOPE(v0_LRN_clone_with_new_inputs); NGRAPH_OP_SCOPE(v0_LRN_clone_with_new_inputs);
check_new_args_count(this, new_args); check_new_args_count(this, new_args);
return make_shared<op::LRN>(new_args.at(0), new_args.at(1), m_alpha, m_beta, m_bias, m_size); return make_shared<op::v0::LRN>(new_args.at(0), new_args.at(1), m_alpha, m_beta, m_bias, m_size);
} }

View File

@ -17,8 +17,8 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v0::LSTMCell, "LSTMCell", 0, op::util::RNNCellBase); OPENVINO_RTTI_DEFINITION(op::v0::LSTMCell, "LSTMCell", 0, op::util::RNNCellBase);
NGRAPH_RTTI_DEFINITION(op::v4::LSTMCell, "LSTMCell", 4, op::util::RNNCellBase); OPENVINO_RTTI_DEFINITION(op::v4::LSTMCell, "LSTMCell", 4, op::util::RNNCellBase);
op::v0::LSTMCell::LSTMCell() : m_input_forget(false), m_weights_format(LSTMWeightsFormat::IFCO) { op::v0::LSTMCell::LSTMCell() : m_input_forget(false), m_weights_format(LSTMWeightsFormat::IFCO) {
m_activations = {"sigmoid", "tanh", "tanh"}; m_activations = {"sigmoid", "tanh", "tanh"};
@ -273,14 +273,15 @@ void op::v0::LSTMCell::validate_and_infer_types() {
} }
Output<Node> op::v0::LSTMCell::get_default_bias_input() const { Output<Node> op::v0::LSTMCell::get_default_bias_input() const {
return Output<Node>{ return Output<Node>{op::v0::Constant::create(get_input_element_type(0),
op::Constant::create(get_input_element_type(0), Shape{s_gates_count * get_hidden_size()}, vector<float>{0.f})}; Shape{s_gates_count * get_hidden_size()},
vector<float>{0.f})};
} }
Output<Node> op::v0::LSTMCell::get_default_peepholes_input() const { Output<Node> op::v0::LSTMCell::get_default_peepholes_input() const {
return Output<Node>{op::Constant::create(get_input_element_type(0), return Output<Node>{op::v0::Constant::create(get_input_element_type(0),
Shape{s_peepholes_count * get_hidden_size()}, Shape{s_peepholes_count * get_hidden_size()},
vector<float>{0.f})}; vector<float>{0.f})};
} }
shared_ptr<Node> op::v0::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const { shared_ptr<Node> op::v0::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const {
@ -511,8 +512,9 @@ void op::v4::LSTMCell::validate_and_infer_types() {
} }
Output<Node> op::v4::LSTMCell::get_default_bias_input() const { Output<Node> op::v4::LSTMCell::get_default_bias_input() const {
return Output<Node>{ return Output<Node>{op::v0::Constant::create(get_input_element_type(0),
op::Constant::create(get_input_element_type(0), Shape{s_gates_count * get_hidden_size()}, vector<float>{0.f})}; Shape{s_gates_count * get_hidden_size()},
vector<float>{0.f})};
} }
shared_ptr<Node> op::v4::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const { shared_ptr<Node> op::v4::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const {

View File

@ -16,8 +16,8 @@
using namespace ngraph; using namespace ngraph;
using namespace std; using namespace std;
NGRAPH_RTTI_DEFINITION(op::v0::LSTMSequence, "LSTMSequence", 0); OPENVINO_RTTI_DEFINITION(op::v0::LSTMSequence, "LSTMSequence", 0);
NGRAPH_RTTI_DEFINITION(op::v5::LSTMSequence, "LSTMSequence", 5); OPENVINO_RTTI_DEFINITION(op::v5::LSTMSequence, "LSTMSequence", 5);
op::v0::LSTMSequence::LSTMSequence() op::v0::LSTMSequence::LSTMSequence()
: Op(), : Op(),

View File

@ -12,7 +12,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v0::Parameter, "Parameter", 0); OPENVINO_RTTI_DEFINITION(op::v0::Parameter, "Parameter", 0);
op::Parameter::Parameter(const element::Type& element_type, const PartialShape& pshape) op::Parameter::Parameter(const element::Type& element_type, const PartialShape& pshape)
: m_partial_shape(pshape), : m_partial_shape(pshape),

View File

@ -15,7 +15,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v0::Result, "Result", 0); OPENVINO_RTTI_DEFINITION(op::v0::Result, "Result", 0);
op::Result::Result(const Output<Node>& arg, bool needs_default_layout) op::Result::Result(const Output<Node>& arg, bool needs_default_layout)
: Op({arg}), : Op({arg}),

View File

@ -12,7 +12,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v0::TensorIterator, "TensorIterator", 0, op::util::SubGraphOp); OPENVINO_RTTI_DEFINITION(op::v0::TensorIterator, "TensorIterator", 0, op::util::SubGraphOp);
op::v0::TensorIterator::TensorIterator(const OutputVector& values) : op::util::SubGraphOp(values) {} op::v0::TensorIterator::TensorIterator(const OutputVector& values) : op::util::SubGraphOp(values) {}