diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.cpp index ba314cebea5..e4950732ab6 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.cpp @@ -15,6 +15,9 @@ using namespace InferenceEngine::details; MKLDNNReferenceNode::MKLDNNReferenceNode(const std::shared_ptr& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache, const std::string& errorMessage) : MKLDNNNode(op, eng, cache), ngraphOp(op), additionalErrorMessage(errorMessage) { + if (!op->has_evaluate()) { + IE_THROW(NotImplemented) << "Cannot fallback on ngraph reference implementation (Ngraph::Node::evaluate() is not implemented)"; + } setType(Reference); setTypeStr("Reference"); } @@ -69,14 +72,7 @@ void MKLDNNReferenceNode::execute(mkldnn::stream strm) { } if (!ngraphOp->evaluate(outputs, inputs)) { - std::string errorDetails = "Unsupported operation of type: " + std::string(ngraphOp->get_type_name()) + - " name: " + std::string(ngraphOp->get_friendly_name()); - errorDetails += "\nDetails: \n"; - if (!additionalErrorMessage.empty()) { - errorDetails += additionalErrorMessage + "\n"; - } - errorDetails += "Cannot fallback on ngraph reference implementation (Ngraph::Node::evaluate() is not implemented)"; - IE_THROW(NotImplemented) << errorDetails; + IE_THROW() << "Evaluation failed on node of type: " << std::string(ngraphOp->get_type_name()) << " name: " << getName(); } } diff --git a/ngraph/core/include/ngraph/op/abs.hpp b/ngraph/core/include/ngraph/op/abs.hpp index c40daeeb027..38034f42316 100644 --- a/ngraph/core/include/ngraph/op/abs.hpp +++ b/ngraph/core/include/ngraph/op/abs.hpp @@ -38,6 +38,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Abs; diff --git 
a/ngraph/core/include/ngraph/op/acos.hpp b/ngraph/core/include/ngraph/op/acos.hpp index 329717b8874..3175f822401 100644 --- a/ngraph/core/include/ngraph/op/acos.hpp +++ b/ngraph/core/include/ngraph/op/acos.hpp @@ -36,6 +36,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Acos; diff --git a/ngraph/core/include/ngraph/op/acosh.hpp b/ngraph/core/include/ngraph/op/acosh.hpp index 4e6f4384702..6937e8faf97 100644 --- a/ngraph/core/include/ngraph/op/acosh.hpp +++ b/ngraph/core/include/ngraph/op/acosh.hpp @@ -37,6 +37,7 @@ namespace ngraph bool visit_attributes(AttributeVisitor& visitor) override { return true; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v3 using v3::Acosh; diff --git a/ngraph/core/include/ngraph/op/add.hpp b/ngraph/core/include/ngraph/op/add.hpp index b9ca97ea6d6..c16c8baac7b 100644 --- a/ngraph/core/include/ngraph/op/add.hpp +++ b/ngraph/core/include/ngraph/op/add.hpp @@ -51,6 +51,7 @@ namespace ngraph size_t get_version() const override { return 1; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/and.hpp b/ngraph/core/include/ngraph/op/and.hpp index 1eb15511939..2dea0365e58 100644 --- a/ngraph/core/include/ngraph/op/and.hpp +++ b/ngraph/core/include/ngraph/op/and.hpp @@ -43,6 +43,7 @@ namespace ngraph bool visit_attributes(AttributeVisitor& visitor) override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/asin.hpp 
b/ngraph/core/include/ngraph/op/asin.hpp index 449b19fca3f..7fc0b3786c5 100644 --- a/ngraph/core/include/ngraph/op/asin.hpp +++ b/ngraph/core/include/ngraph/op/asin.hpp @@ -37,6 +37,7 @@ namespace ngraph bool visit_attributes(AttributeVisitor& visitor) override { return true; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Asin; diff --git a/ngraph/core/include/ngraph/op/asinh.hpp b/ngraph/core/include/ngraph/op/asinh.hpp index 34fc2cbe890..1f781862b9d 100644 --- a/ngraph/core/include/ngraph/op/asinh.hpp +++ b/ngraph/core/include/ngraph/op/asinh.hpp @@ -37,6 +37,7 @@ namespace ngraph bool visit_attributes(AttributeVisitor& visitor) override { return true; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v3 using v3::Asinh; diff --git a/ngraph/core/include/ngraph/op/assign.hpp b/ngraph/core/include/ngraph/op/assign.hpp index 071f3d367a8..fddbf1edd4a 100644 --- a/ngraph/core/include/ngraph/op/assign.hpp +++ b/ngraph/core/include/ngraph/op/assign.hpp @@ -84,6 +84,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs, const EvaluationContext& evaluation_context) const override; + bool has_evaluate() const override; bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; }; diff --git a/ngraph/core/include/ngraph/op/atan.hpp b/ngraph/core/include/ngraph/op/atan.hpp index 4644d2870af..fc388cc228c 100644 --- a/ngraph/core/include/ngraph/op/atan.hpp +++ b/ngraph/core/include/ngraph/op/atan.hpp @@ -38,6 +38,7 @@ namespace ngraph bool visit_attributes(AttributeVisitor& visitor) override { return true; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Atan; diff 
--git a/ngraph/core/include/ngraph/op/atanh.hpp b/ngraph/core/include/ngraph/op/atanh.hpp index edb8f965d0d..50cece8cfe4 100644 --- a/ngraph/core/include/ngraph/op/atanh.hpp +++ b/ngraph/core/include/ngraph/op/atanh.hpp @@ -37,6 +37,7 @@ namespace ngraph bool visit_attributes(AttributeVisitor& visitor) override { return true; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v3 using v3::Atanh; diff --git a/ngraph/core/include/ngraph/op/batch_to_space.hpp b/ngraph/core/include/ngraph/op/batch_to_space.hpp index a9c42e4ded0..240ed2ba38e 100644 --- a/ngraph/core/include/ngraph/op/batch_to_space.hpp +++ b/ngraph/core/include/ngraph/op/batch_to_space.hpp @@ -44,6 +44,7 @@ namespace ngraph const Output& crops_end); bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; void validate_and_infer_types() override; std::shared_ptr diff --git a/ngraph/core/include/ngraph/op/broadcast.hpp b/ngraph/core/include/ngraph/op/broadcast.hpp index 30e0b6e490a..17fc97b43a2 100644 --- a/ngraph/core/include/ngraph/op/broadcast.hpp +++ b/ngraph/core/include/ngraph/op/broadcast.hpp @@ -70,6 +70,7 @@ namespace ngraph std::pair get_broadcast_axes() const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; private: bool broadcast_evaluate(const HostTensorVector& outputs, @@ -132,6 +133,7 @@ namespace ngraph void validate_and_infer_types() override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; protected: AutoBroadcastSpec m_broadcast_spec; diff --git a/ngraph/core/include/ngraph/op/ceiling.hpp b/ngraph/core/include/ngraph/op/ceiling.hpp index 3e8f071e873..491ca099b2a 100644 --- a/ngraph/core/include/ngraph/op/ceiling.hpp +++ 
b/ngraph/core/include/ngraph/op/ceiling.hpp @@ -30,6 +30,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Ceiling; diff --git a/ngraph/core/include/ngraph/op/clamp.hpp b/ngraph/core/include/ngraph/op/clamp.hpp index 22b41fb4305..faca96a44d0 100644 --- a/ngraph/core/include/ngraph/op/clamp.hpp +++ b/ngraph/core/include/ngraph/op/clamp.hpp @@ -42,6 +42,7 @@ namespace ngraph double get_max() const { return m_max; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; private: double m_min; diff --git a/ngraph/core/include/ngraph/op/concat.hpp b/ngraph/core/include/ngraph/op/concat.hpp index b168045d577..49810d02e10 100644 --- a/ngraph/core/include/ngraph/op/concat.hpp +++ b/ngraph/core/include/ngraph/op/concat.hpp @@ -51,6 +51,7 @@ namespace ngraph void set_axis(int64_t axis) { m_axis = axis; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; bool evaluate_lower(const HostTensorVector& output_values) const override; bool evaluate_upper(const HostTensorVector& output_values) const override; diff --git a/ngraph/core/include/ngraph/op/constant.hpp b/ngraph/core/include/ngraph/op/constant.hpp index 650651a37a7..760f9a3df06 100644 --- a/ngraph/core/include/ngraph/op/constant.hpp +++ b/ngraph/core/include/ngraph/op/constant.hpp @@ -171,6 +171,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; bool evaluate_lower(const HostTensorVector& outputs) const override; bool evaluate_upper(const HostTensorVector& outputs) const override; diff --git a/ngraph/core/include/ngraph/op/convert.hpp 
b/ngraph/core/include/ngraph/op/convert.hpp index 8007b81dede..9f79e8761d6 100644 --- a/ngraph/core/include/ngraph/op/convert.hpp +++ b/ngraph/core/include/ngraph/op/convert.hpp @@ -44,6 +44,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; bool evaluate_lower(const HostTensorVector& outputs) const override; bool evaluate_upper(const HostTensorVector& outputs) const override; diff --git a/ngraph/core/include/ngraph/op/cos.hpp b/ngraph/core/include/ngraph/op/cos.hpp index e310dec7554..5e5e2df07ee 100644 --- a/ngraph/core/include/ngraph/op/cos.hpp +++ b/ngraph/core/include/ngraph/op/cos.hpp @@ -30,6 +30,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Cos; diff --git a/ngraph/core/include/ngraph/op/cosh.hpp b/ngraph/core/include/ngraph/op/cosh.hpp index f71433c8b5e..7e52bf1679f 100644 --- a/ngraph/core/include/ngraph/op/cosh.hpp +++ b/ngraph/core/include/ngraph/op/cosh.hpp @@ -30,6 +30,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Cosh; diff --git a/ngraph/core/include/ngraph/op/depth_to_space.hpp b/ngraph/core/include/ngraph/op/depth_to_space.hpp index c978587ed50..782833b96eb 100644 --- a/ngraph/core/include/ngraph/op/depth_to_space.hpp +++ b/ngraph/core/include/ngraph/op/depth_to_space.hpp @@ -60,6 +60,7 @@ namespace ngraph void validate_and_infer_types() override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; protected: std::size_t m_blocksize; diff --git 
a/ngraph/core/include/ngraph/op/divide.hpp b/ngraph/core/include/ngraph/op/divide.hpp index 51993eb12c4..0c33fe6774e 100644 --- a/ngraph/core/include/ngraph/op/divide.hpp +++ b/ngraph/core/include/ngraph/op/divide.hpp @@ -53,6 +53,7 @@ namespace ngraph size_t get_version() const override { return 1; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; protected: bool m_pythondiv{true}; diff --git a/ngraph/core/include/ngraph/op/equal.hpp b/ngraph/core/include/ngraph/op/equal.hpp index 283beca984a..1392acae36c 100644 --- a/ngraph/core/include/ngraph/op/equal.hpp +++ b/ngraph/core/include/ngraph/op/equal.hpp @@ -54,6 +54,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/erf.hpp b/ngraph/core/include/ngraph/op/erf.hpp index 7c4deb41c2b..473914823b4 100644 --- a/ngraph/core/include/ngraph/op/erf.hpp +++ b/ngraph/core/include/ngraph/op/erf.hpp @@ -25,6 +25,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Erf; diff --git a/ngraph/core/include/ngraph/op/exp.hpp b/ngraph/core/include/ngraph/op/exp.hpp index 3a0a5692edc..78cd37ea158 100644 --- a/ngraph/core/include/ngraph/op/exp.hpp +++ b/ngraph/core/include/ngraph/op/exp.hpp @@ -31,6 +31,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Exp; diff --git a/ngraph/core/include/ngraph/op/floor.hpp b/ngraph/core/include/ngraph/op/floor.hpp index 237cd68a4e7..62704b16005 100644 --- a/ngraph/core/include/ngraph/op/floor.hpp +++ 
b/ngraph/core/include/ngraph/op/floor.hpp @@ -29,6 +29,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Floor; diff --git a/ngraph/core/include/ngraph/op/floor_mod.hpp b/ngraph/core/include/ngraph/op/floor_mod.hpp index 6191af7af7b..9cc1d25557d 100644 --- a/ngraph/core/include/ngraph/op/floor_mod.hpp +++ b/ngraph/core/include/ngraph/op/floor_mod.hpp @@ -44,6 +44,7 @@ namespace ngraph bool visit_attributes(AttributeVisitor& visitor) override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 diff --git a/ngraph/core/include/ngraph/op/gelu.hpp b/ngraph/core/include/ngraph/op/gelu.hpp index 824ddadfc90..ae9d5d33e96 100644 --- a/ngraph/core/include/ngraph/op/gelu.hpp +++ b/ngraph/core/include/ngraph/op/gelu.hpp @@ -74,6 +74,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; diff --git a/ngraph/core/include/ngraph/op/greater.hpp b/ngraph/core/include/ngraph/op/greater.hpp index eb8884e060a..859f6b6d121 100644 --- a/ngraph/core/include/ngraph/op/greater.hpp +++ b/ngraph/core/include/ngraph/op/greater.hpp @@ -36,6 +36,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/greater_eq.hpp b/ngraph/core/include/ngraph/op/greater_eq.hpp index 813677b6481..eceb3abe6e9 100644 --- a/ngraph/core/include/ngraph/op/greater_eq.hpp +++ 
b/ngraph/core/include/ngraph/op/greater_eq.hpp @@ -37,6 +37,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/hsigmoid.hpp b/ngraph/core/include/ngraph/op/hsigmoid.hpp index 150163d413d..eaebf659b78 100644 --- a/ngraph/core/include/ngraph/op/hsigmoid.hpp +++ b/ngraph/core/include/ngraph/op/hsigmoid.hpp @@ -35,6 +35,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v5 } // namespace op diff --git a/ngraph/core/include/ngraph/op/hswish.hpp b/ngraph/core/include/ngraph/op/hswish.hpp index 767c638d127..bed0bd033be 100644 --- a/ngraph/core/include/ngraph/op/hswish.hpp +++ b/ngraph/core/include/ngraph/op/hswish.hpp @@ -35,6 +35,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v4 } // namespace op diff --git a/ngraph/core/include/ngraph/op/interpolate.hpp b/ngraph/core/include/ngraph/op/interpolate.hpp index 6d6265ec84f..b64941ba8cb 100644 --- a/ngraph/core/include/ngraph/op/interpolate.hpp +++ b/ngraph/core/include/ngraph/op/interpolate.hpp @@ -216,6 +216,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; const InterpolateAttrs& get_attrs() const { return m_attrs; } diff --git a/ngraph/core/include/ngraph/op/less.hpp b/ngraph/core/include/ngraph/op/less.hpp index 
2c6a7462569..05892a1b1d8 100644 --- a/ngraph/core/include/ngraph/op/less.hpp +++ b/ngraph/core/include/ngraph/op/less.hpp @@ -36,6 +36,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/less_eq.hpp b/ngraph/core/include/ngraph/op/less_eq.hpp index ab7f8deba53..abd367978aa 100644 --- a/ngraph/core/include/ngraph/op/less_eq.hpp +++ b/ngraph/core/include/ngraph/op/less_eq.hpp @@ -37,6 +37,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/log.hpp b/ngraph/core/include/ngraph/op/log.hpp index 39a0404ea8f..6c147d43314 100644 --- a/ngraph/core/include/ngraph/op/log.hpp +++ b/ngraph/core/include/ngraph/op/log.hpp @@ -30,6 +30,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Log; diff --git a/ngraph/core/include/ngraph/op/loop.hpp b/ngraph/core/include/ngraph/op/loop.hpp index aa669ae8c9e..54189956923 100644 --- a/ngraph/core/include/ngraph/op/loop.hpp +++ b/ngraph/core/include/ngraph/op/loop.hpp @@ -72,6 +72,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; protected: Loop(const Loop&); diff --git a/ngraph/core/include/ngraph/op/matmul.hpp b/ngraph/core/include/ngraph/op/matmul.hpp index 35971c34da9..a6689420e81 100644 --- a/ngraph/core/include/ngraph/op/matmul.hpp +++ 
b/ngraph/core/include/ngraph/op/matmul.hpp @@ -38,6 +38,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; bool get_transpose_a() const { return m_transpose_a; } bool get_transpose_b() const { return m_transpose_b; } diff --git a/ngraph/core/include/ngraph/op/max.hpp b/ngraph/core/include/ngraph/op/max.hpp index 7f98124dcda..8c8bdcbc341 100644 --- a/ngraph/core/include/ngraph/op/max.hpp +++ b/ngraph/core/include/ngraph/op/max.hpp @@ -33,6 +33,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/max_pool.hpp b/ngraph/core/include/ngraph/op/max_pool.hpp index 3dbd80fd864..9ca644cae75 100644 --- a/ngraph/core/include/ngraph/op/max_pool.hpp +++ b/ngraph/core/include/ngraph/op/max_pool.hpp @@ -73,6 +73,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; protected: Shape m_kernel; diff --git a/ngraph/core/include/ngraph/op/maximum.hpp b/ngraph/core/include/ngraph/op/maximum.hpp index 901bec51433..154801336e0 100644 --- a/ngraph/core/include/ngraph/op/maximum.hpp +++ b/ngraph/core/include/ngraph/op/maximum.hpp @@ -39,6 +39,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/min.hpp b/ngraph/core/include/ngraph/op/min.hpp index eec35ad9961..a1031f828cb 100644 --- a/ngraph/core/include/ngraph/op/min.hpp +++ b/ngraph/core/include/ngraph/op/min.hpp @@ -33,6 +33,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const 
override; bool evaluate_lower(const HostTensorVector& outputs) const override; bool evaluate_upper(const HostTensorVector& outputs) const override; }; diff --git a/ngraph/core/include/ngraph/op/mish.hpp b/ngraph/core/include/ngraph/op/mish.hpp index de1fea59ef5..5632c0dd761 100644 --- a/ngraph/core/include/ngraph/op/mish.hpp +++ b/ngraph/core/include/ngraph/op/mish.hpp @@ -34,6 +34,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v4 } // namespace op diff --git a/ngraph/core/include/ngraph/op/multiply.hpp b/ngraph/core/include/ngraph/op/multiply.hpp index afae521d775..a24ac7a70ce 100644 --- a/ngraph/core/include/ngraph/op/multiply.hpp +++ b/ngraph/core/include/ngraph/op/multiply.hpp @@ -39,6 +39,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/negative.hpp b/ngraph/core/include/ngraph/op/negative.hpp index 8d73425597a..69c7d39bdc0 100644 --- a/ngraph/core/include/ngraph/op/negative.hpp +++ b/ngraph/core/include/ngraph/op/negative.hpp @@ -30,6 +30,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Negative; diff --git a/ngraph/core/include/ngraph/op/non_zero.hpp b/ngraph/core/include/ngraph/op/non_zero.hpp index b81b9a64141..8d626da67e2 100644 --- a/ngraph/core/include/ngraph/op/non_zero.hpp +++ b/ngraph/core/include/ngraph/op/non_zero.hpp @@ -60,6 +60,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; protected: element::Type m_output_type = element::i64; 
diff --git a/ngraph/core/include/ngraph/op/not.hpp b/ngraph/core/include/ngraph/op/not.hpp index d25fe1aafa0..d934f26f8e8 100644 --- a/ngraph/core/include/ngraph/op/not.hpp +++ b/ngraph/core/include/ngraph/op/not.hpp @@ -31,6 +31,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/not_equal.hpp b/ngraph/core/include/ngraph/op/not_equal.hpp index d8bbaf32f19..b268af55f90 100644 --- a/ngraph/core/include/ngraph/op/not_equal.hpp +++ b/ngraph/core/include/ngraph/op/not_equal.hpp @@ -37,6 +37,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; bool visit_attributes(AttributeVisitor& visitor) override; }; } // namespace v1 diff --git a/ngraph/core/include/ngraph/op/one_hot.hpp b/ngraph/core/include/ngraph/op/one_hot.hpp index 065e20bf25b..604b9db202c 100644 --- a/ngraph/core/include/ngraph/op/one_hot.hpp +++ b/ngraph/core/include/ngraph/op/one_hot.hpp @@ -42,6 +42,7 @@ namespace ngraph virtual bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; + bool has_evaluate() const override; /// \return The index of the one-hot axis. 
int64_t get_axis() const { return m_axis; } diff --git a/ngraph/core/include/ngraph/op/or.hpp b/ngraph/core/include/ngraph/op/or.hpp index 45e891f6050..0b990a9a5a9 100644 --- a/ngraph/core/include/ngraph/op/or.hpp +++ b/ngraph/core/include/ngraph/op/or.hpp @@ -41,6 +41,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/pad.hpp b/ngraph/core/include/ngraph/op/pad.hpp index 73adca8a03b..93ef2740b60 100644 --- a/ngraph/core/include/ngraph/op/pad.hpp +++ b/ngraph/core/include/ngraph/op/pad.hpp @@ -74,6 +74,7 @@ namespace ngraph void set_pad_mode(PadMode pad_mode) { m_pad_mode = pad_mode; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; private: PadMode m_pad_mode; diff --git a/ngraph/core/include/ngraph/op/power.hpp b/ngraph/core/include/ngraph/op/power.hpp index ce7b37f59d8..0d800f538e5 100644 --- a/ngraph/core/include/ngraph/op/power.hpp +++ b/ngraph/core/include/ngraph/op/power.hpp @@ -52,6 +52,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/prelu.hpp b/ngraph/core/include/ngraph/op/prelu.hpp index f3ffca22510..fd0810d4c7e 100644 --- a/ngraph/core/include/ngraph/op/prelu.hpp +++ b/ngraph/core/include/ngraph/op/prelu.hpp @@ -37,6 +37,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::PRelu; diff --git a/ngraph/core/include/ngraph/op/prior_box.hpp b/ngraph/core/include/ngraph/op/prior_box.hpp index 
5267da03b87..02c29a46a1b 100644 --- a/ngraph/core/include/ngraph/op/prior_box.hpp +++ b/ngraph/core/include/ngraph/op/prior_box.hpp @@ -66,6 +66,7 @@ namespace ngraph virtual bool visit_attributes(AttributeVisitor& visitor) override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; private: PriorBoxAttrs m_attrs; diff --git a/ngraph/core/include/ngraph/op/prior_box_clustered.hpp b/ngraph/core/include/ngraph/op/prior_box_clustered.hpp index b519b5427e3..47dd59584a5 100644 --- a/ngraph/core/include/ngraph/op/prior_box_clustered.hpp +++ b/ngraph/core/include/ngraph/op/prior_box_clustered.hpp @@ -54,6 +54,7 @@ namespace ngraph virtual bool visit_attributes(AttributeVisitor& visitor) override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; private: PriorBoxClusteredAttrs m_attrs; diff --git a/ngraph/core/include/ngraph/op/range.hpp b/ngraph/core/include/ngraph/op/range.hpp index f40347021d5..fe09eecdf77 100644 --- a/ngraph/core/include/ngraph/op/range.hpp +++ b/ngraph/core/include/ngraph/op/range.hpp @@ -42,6 +42,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; private: element::Type m_output_type; @@ -77,6 +78,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Range; diff --git a/ngraph/core/include/ngraph/op/read_value.hpp b/ngraph/core/include/ngraph/op/read_value.hpp index 4816d0d9e3b..d5edeaf3d7e 100644 --- a/ngraph/core/include/ngraph/op/read_value.hpp +++ b/ngraph/core/include/ngraph/op/read_value.hpp @@ -94,6 +94,7 @@ 
namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs, const EvaluationContext& evaluation_context) const override; + bool has_evaluate() const override; bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; diff --git a/ngraph/core/include/ngraph/op/reduce_l1.hpp b/ngraph/core/include/ngraph/op/reduce_l1.hpp index 1d09233ec6a..5760f855727 100644 --- a/ngraph/core/include/ngraph/op/reduce_l1.hpp +++ b/ngraph/core/include/ngraph/op/reduce_l1.hpp @@ -40,6 +40,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v4 } // namespace op diff --git a/ngraph/core/include/ngraph/op/reduce_l2.hpp b/ngraph/core/include/ngraph/op/reduce_l2.hpp index 547db7ff783..4cd629f22aa 100644 --- a/ngraph/core/include/ngraph/op/reduce_l2.hpp +++ b/ngraph/core/include/ngraph/op/reduce_l2.hpp @@ -39,6 +39,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v4 } // namespace op diff --git a/ngraph/core/include/ngraph/op/reduce_logical_and.hpp b/ngraph/core/include/ngraph/op/reduce_logical_and.hpp index 09050dfd2fe..7f290f45f3a 100644 --- a/ngraph/core/include/ngraph/op/reduce_logical_and.hpp +++ b/ngraph/core/include/ngraph/op/reduce_logical_and.hpp @@ -36,6 +36,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/reduce_logical_or.hpp b/ngraph/core/include/ngraph/op/reduce_logical_or.hpp index cb3b668eaf3..e5344b4d2e1 100644 --- a/ngraph/core/include/ngraph/op/reduce_logical_or.hpp +++ b/ngraph/core/include/ngraph/op/reduce_logical_or.hpp @@ -36,6 +36,7 @@ namespace ngraph bool evaluate(const 
HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/reduce_mean.hpp b/ngraph/core/include/ngraph/op/reduce_mean.hpp index c5437a3f622..fb4c393b3d5 100644 --- a/ngraph/core/include/ngraph/op/reduce_mean.hpp +++ b/ngraph/core/include/ngraph/op/reduce_mean.hpp @@ -32,6 +32,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/reduce_prod.hpp b/ngraph/core/include/ngraph/op/reduce_prod.hpp index 367d4cddd1c..12ded1656d1 100644 --- a/ngraph/core/include/ngraph/op/reduce_prod.hpp +++ b/ngraph/core/include/ngraph/op/reduce_prod.hpp @@ -39,6 +39,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; bool evaluate_lower(const HostTensorVector& outputs) const override; bool evaluate_upper(const HostTensorVector& outputs) const override; }; diff --git a/ngraph/core/include/ngraph/op/reduce_sum.hpp b/ngraph/core/include/ngraph/op/reduce_sum.hpp index 2de81ee71ff..5bef40536b1 100644 --- a/ngraph/core/include/ngraph/op/reduce_sum.hpp +++ b/ngraph/core/include/ngraph/op/reduce_sum.hpp @@ -87,6 +87,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } } diff --git a/ngraph/core/include/ngraph/op/relu.hpp b/ngraph/core/include/ngraph/op/relu.hpp index 89c603e107e..aa15105a6cb 100644 --- a/ngraph/core/include/ngraph/op/relu.hpp +++ b/ngraph/core/include/ngraph/op/relu.hpp @@ -34,6 +34,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; bool 
visit_attributes(AttributeVisitor& visitor) override; }; } // namespace v0 diff --git a/ngraph/core/include/ngraph/op/reshape.hpp b/ngraph/core/include/ngraph/op/reshape.hpp index 0f3bf028153..be32248ec09 100644 --- a/ngraph/core/include/ngraph/op/reshape.hpp +++ b/ngraph/core/include/ngraph/op/reshape.hpp @@ -54,6 +54,7 @@ namespace ngraph void set_special_zero(bool special_zero) { m_special_zero = special_zero; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; bool evaluate_lower(const HostTensorVector& outputs) const override; bool evaluate_upper(const HostTensorVector& outputs) const override; bool constant_fold(OutputVector& output_values, diff --git a/ngraph/core/include/ngraph/op/result.hpp b/ngraph/core/include/ngraph/op/result.hpp index 9dc73b50616..60c3eef6c78 100644 --- a/ngraph/core/include/ngraph/op/result.hpp +++ b/ngraph/core/include/ngraph/op/result.hpp @@ -36,6 +36,7 @@ namespace ngraph bool needs_default_layout() const { return m_needs_default_layout; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; diff --git a/ngraph/core/include/ngraph/op/reverse.hpp b/ngraph/core/include/ngraph/op/reverse.hpp index 9c0ea573a99..2a71ec75df2 100644 --- a/ngraph/core/include/ngraph/op/reverse.hpp +++ b/ngraph/core/include/ngraph/op/reverse.hpp @@ -50,6 +50,7 @@ namespace ngraph virtual size_t get_version() const override { return 1; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; protected: Mode mode_from_string(const std::string& mode) const; diff --git a/ngraph/core/include/ngraph/op/roi_align.hpp b/ngraph/core/include/ngraph/op/roi_align.hpp index a208876935b..caf1fe2a71b 100644 --- 
a/ngraph/core/include/ngraph/op/roi_align.hpp +++ b/ngraph/core/include/ngraph/op/roi_align.hpp @@ -66,6 +66,7 @@ namespace ngraph PoolingMode get_mode() const { return m_mode; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; private: PoolingMode mode_from_string(const std::string& mode) const; diff --git a/ngraph/core/include/ngraph/op/round.hpp b/ngraph/core/include/ngraph/op/round.hpp index f9bfb630a27..998a54d95de 100644 --- a/ngraph/core/include/ngraph/op/round.hpp +++ b/ngraph/core/include/ngraph/op/round.hpp @@ -47,6 +47,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; RoundMode get_mode() const { return m_mode; } diff --git a/ngraph/core/include/ngraph/op/scatter_elements_update.hpp b/ngraph/core/include/ngraph/op/scatter_elements_update.hpp index 38188cc4260..457f863e79e 100644 --- a/ngraph/core/include/ngraph/op/scatter_elements_update.hpp +++ b/ngraph/core/include/ngraph/op/scatter_elements_update.hpp @@ -40,6 +40,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& inputs) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; private: bool evaluate_scatter_element_update(const HostTensorVector& outputs, diff --git a/ngraph/core/include/ngraph/op/scatter_nd_update.hpp b/ngraph/core/include/ngraph/op/scatter_nd_update.hpp index a144e455859..1b8f6de7ffa 100644 --- a/ngraph/core/include/ngraph/op/scatter_nd_update.hpp +++ b/ngraph/core/include/ngraph/op/scatter_nd_update.hpp @@ -34,6 +34,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v3 using v3::ScatterNDUpdate; diff --git 
a/ngraph/core/include/ngraph/op/scatter_update.hpp b/ngraph/core/include/ngraph/op/scatter_update.hpp index a3989ecd92a..4021f37adfb 100644 --- a/ngraph/core/include/ngraph/op/scatter_update.hpp +++ b/ngraph/core/include/ngraph/op/scatter_update.hpp @@ -41,6 +41,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; private: bool evaluate_scatter_update(const HostTensorVector& outputs, diff --git a/ngraph/core/include/ngraph/op/select.hpp b/ngraph/core/include/ngraph/op/select.hpp index 78cd06eb3e0..f2a9fe14911 100644 --- a/ngraph/core/include/ngraph/op/select.hpp +++ b/ngraph/core/include/ngraph/op/select.hpp @@ -67,6 +67,7 @@ namespace ngraph const AutoBroadcastSpec& get_autob() const override { return m_auto_broadcast; } virtual bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; + bool has_evaluate() const override; private: AutoBroadcastSpec m_auto_broadcast; diff --git a/ngraph/core/include/ngraph/op/shape_of.hpp b/ngraph/core/include/ngraph/op/shape_of.hpp index e7b6c9c565a..1fb26548008 100644 --- a/ngraph/core/include/ngraph/op/shape_of.hpp +++ b/ngraph/core/include/ngraph/op/shape_of.hpp @@ -43,6 +43,7 @@ namespace ngraph bool get_is_foldable() const { return m_is_foldable; } bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; + bool has_evaluate() const override; bool evaluate_lower(const HostTensorVector& output_values) const override; bool evaluate_upper(const HostTensorVector& output_values) const override; bool constant_fold(OutputVector& output_values, @@ -81,6 +82,7 @@ namespace ngraph bool get_is_foldable() const { return m_is_foldable; } bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; + bool has_evaluate() const override; bool evaluate_lower(const HostTensorVector& output_values) 
const override; bool evaluate_upper(const HostTensorVector& output_values) const override; bool constant_fold(OutputVector& output_values, diff --git a/ngraph/core/include/ngraph/op/shuffle_channels.hpp b/ngraph/core/include/ngraph/op/shuffle_channels.hpp index 4bccd9c231a..fd4539b0b3a 100644 --- a/ngraph/core/include/ngraph/op/shuffle_channels.hpp +++ b/ngraph/core/include/ngraph/op/shuffle_channels.hpp @@ -48,6 +48,7 @@ namespace ngraph int64_t get_group() const { return m_group; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; private: /// \brief Generates a shape required to permute the data diff --git a/ngraph/core/include/ngraph/op/sigmoid.hpp b/ngraph/core/include/ngraph/op/sigmoid.hpp index 9518678a467..93601d65eb7 100644 --- a/ngraph/core/include/ngraph/op/sigmoid.hpp +++ b/ngraph/core/include/ngraph/op/sigmoid.hpp @@ -26,6 +26,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Sigmoid; diff --git a/ngraph/core/include/ngraph/op/sign.hpp b/ngraph/core/include/ngraph/op/sign.hpp index 2c6e38d353b..8a984ee7294 100644 --- a/ngraph/core/include/ngraph/op/sign.hpp +++ b/ngraph/core/include/ngraph/op/sign.hpp @@ -30,6 +30,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Sign; diff --git a/ngraph/core/include/ngraph/op/sin.hpp b/ngraph/core/include/ngraph/op/sin.hpp index a04e62d4322..46708b7ddfb 100644 --- a/ngraph/core/include/ngraph/op/sin.hpp +++ b/ngraph/core/include/ngraph/op/sin.hpp @@ -43,6 +43,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) 
const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Sin; diff --git a/ngraph/core/include/ngraph/op/sinh.hpp b/ngraph/core/include/ngraph/op/sinh.hpp index df38da42364..6715e0cf682 100644 --- a/ngraph/core/include/ngraph/op/sinh.hpp +++ b/ngraph/core/include/ngraph/op/sinh.hpp @@ -29,6 +29,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Sinh; diff --git a/ngraph/core/include/ngraph/op/softmax.hpp b/ngraph/core/include/ngraph/op/softmax.hpp index 0790fb0d772..34537618153 100644 --- a/ngraph/core/include/ngraph/op/softmax.hpp +++ b/ngraph/core/include/ngraph/op/softmax.hpp @@ -42,6 +42,7 @@ namespace ngraph void set_axis(const size_t axis) { m_axis = axis; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; private: size_t m_axis; diff --git a/ngraph/core/include/ngraph/op/softplus.hpp b/ngraph/core/include/ngraph/op/softplus.hpp index 4f775d38907..0f8854c61dd 100644 --- a/ngraph/core/include/ngraph/op/softplus.hpp +++ b/ngraph/core/include/ngraph/op/softplus.hpp @@ -34,6 +34,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v4 } // namespace op diff --git a/ngraph/core/include/ngraph/op/space_to_batch.hpp b/ngraph/core/include/ngraph/op/space_to_batch.hpp index 82e052d97ec..35587652384 100644 --- a/ngraph/core/include/ngraph/op/space_to_batch.hpp +++ b/ngraph/core/include/ngraph/op/space_to_batch.hpp @@ -51,6 +51,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool 
has_evaluate() const override; private: bool evaluate_space_to_batch(const HostTensorVector& outputs, diff --git a/ngraph/core/include/ngraph/op/space_to_depth.hpp b/ngraph/core/include/ngraph/op/space_to_depth.hpp index 536027213a2..f6ae3bd9b01 100644 --- a/ngraph/core/include/ngraph/op/space_to_depth.hpp +++ b/ngraph/core/include/ngraph/op/space_to_depth.hpp @@ -58,6 +58,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; protected: std::size_t m_blocksize; diff --git a/ngraph/core/include/ngraph/op/split.hpp b/ngraph/core/include/ngraph/op/split.hpp index 90851cadd3d..d5a399172d1 100644 --- a/ngraph/core/include/ngraph/op/split.hpp +++ b/ngraph/core/include/ngraph/op/split.hpp @@ -41,6 +41,7 @@ namespace ngraph void set_num_splits(const size_t num_splits) { m_num_splits = num_splits; } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; protected: size_t m_num_splits; diff --git a/ngraph/core/include/ngraph/op/sqrt.hpp b/ngraph/core/include/ngraph/op/sqrt.hpp index ca135616b60..bf3cc0b42bd 100644 --- a/ngraph/core/include/ngraph/op/sqrt.hpp +++ b/ngraph/core/include/ngraph/op/sqrt.hpp @@ -43,6 +43,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Sqrt; diff --git a/ngraph/core/include/ngraph/op/squeeze.hpp b/ngraph/core/include/ngraph/op/squeeze.hpp index 4bfeb78bbf9..8612231be59 100644 --- a/ngraph/core/include/ngraph/op/squeeze.hpp +++ b/ngraph/core/include/ngraph/op/squeeze.hpp @@ -29,6 +29,7 @@ namespace ngraph void validate_and_infer_types() override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const 
override; bool evaluate_lower(const HostTensorVector& outputs) const override; bool evaluate_upper(const HostTensorVector& outputs) const override; bool constant_fold(OutputVector& output_values, diff --git a/ngraph/core/include/ngraph/op/strided_slice.hpp b/ngraph/core/include/ngraph/op/strided_slice.hpp index c3b78d2d56c..33144706b74 100644 --- a/ngraph/core/include/ngraph/op/strided_slice.hpp +++ b/ngraph/core/include/ngraph/op/strided_slice.hpp @@ -93,6 +93,7 @@ namespace ngraph size_t get_version() const override { return 1; } bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; + bool has_evaluate() const override; bool evaluate_lower(const HostTensorVector& outputs) const override; bool evaluate_upper(const HostTensorVector& outputs) const override; diff --git a/ngraph/core/include/ngraph/op/subtract.hpp b/ngraph/core/include/ngraph/op/subtract.hpp index c3c1b43e48e..625028348b6 100644 --- a/ngraph/core/include/ngraph/op/subtract.hpp +++ b/ngraph/core/include/ngraph/op/subtract.hpp @@ -37,6 +37,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 } // namespace op diff --git a/ngraph/core/include/ngraph/op/swish.hpp b/ngraph/core/include/ngraph/op/swish.hpp index 8ef0b1d021c..a9fed2785e6 100644 --- a/ngraph/core/include/ngraph/op/swish.hpp +++ b/ngraph/core/include/ngraph/op/swish.hpp @@ -39,6 +39,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v4 } // namespace op diff --git a/ngraph/core/include/ngraph/op/tan.hpp b/ngraph/core/include/ngraph/op/tan.hpp index 16d55f0eaa6..3a6fe15d6e9 100644 --- 
a/ngraph/core/include/ngraph/op/tan.hpp +++ b/ngraph/core/include/ngraph/op/tan.hpp @@ -43,6 +43,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Tan; diff --git a/ngraph/core/include/ngraph/op/tanh.hpp b/ngraph/core/include/ngraph/op/tanh.hpp index f3eb2513f71..b67849ed20c 100644 --- a/ngraph/core/include/ngraph/op/tanh.hpp +++ b/ngraph/core/include/ngraph/op/tanh.hpp @@ -29,6 +29,7 @@ namespace ngraph clone_with_new_inputs(const OutputVector& new_args) const override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 using v0::Tanh; diff --git a/ngraph/core/include/ngraph/op/tile.hpp b/ngraph/core/include/ngraph/op/tile.hpp index d05b97a97ea..5ea0fe343ad 100644 --- a/ngraph/core/include/ngraph/op/tile.hpp +++ b/ngraph/core/include/ngraph/op/tile.hpp @@ -35,6 +35,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; private: bool evaluate_tile(const HostTensorVector& outputs, diff --git a/ngraph/core/include/ngraph/op/topk.hpp b/ngraph/core/include/ngraph/op/topk.hpp index 6f59e259ca6..6d03190b6f6 100644 --- a/ngraph/core/include/ngraph/op/topk.hpp +++ b/ngraph/core/include/ngraph/op/topk.hpp @@ -86,6 +86,7 @@ namespace ngraph size_t get_default_output_index() const override { return no_default_index(); } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; protected: int64_t m_axis; @@ -149,6 +150,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; protected: virtual size_t diff 
--git a/ngraph/core/include/ngraph/op/transpose.hpp b/ngraph/core/include/ngraph/op/transpose.hpp index 8e261d576f8..53152870d25 100644 --- a/ngraph/core/include/ngraph/op/transpose.hpp +++ b/ngraph/core/include/ngraph/op/transpose.hpp @@ -40,6 +40,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 using v1::Transpose; diff --git a/ngraph/core/include/ngraph/op/unsqueeze.hpp b/ngraph/core/include/ngraph/op/unsqueeze.hpp index 51dc8a5512d..8382b170d84 100644 --- a/ngraph/core/include/ngraph/op/unsqueeze.hpp +++ b/ngraph/core/include/ngraph/op/unsqueeze.hpp @@ -28,6 +28,7 @@ namespace ngraph bool visit_attributes(AttributeVisitor& visitor) override; bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; bool evaluate_lower(const HostTensorVector& output_values) const override; bool evaluate_upper(const HostTensorVector& output_values) const override; diff --git a/ngraph/core/include/ngraph/op/variadic_split.hpp b/ngraph/core/include/ngraph/op/variadic_split.hpp index 9c2926d1b56..4293270fcbe 100644 --- a/ngraph/core/include/ngraph/op/variadic_split.hpp +++ b/ngraph/core/include/ngraph/op/variadic_split.hpp @@ -44,6 +44,7 @@ namespace ngraph size_t get_default_output_index() const override { return no_default_index(); } bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; private: bool evaluate_variadic_split(const HostTensorVector& outputs, diff --git a/ngraph/core/include/ngraph/op/xor.hpp b/ngraph/core/include/ngraph/op/xor.hpp index b415df28fd9..d119ae9525c 100644 --- a/ngraph/core/include/ngraph/op/xor.hpp +++ b/ngraph/core/include/ngraph/op/xor.hpp @@ -42,6 +42,7 @@ namespace ngraph bool visit_attributes(AttributeVisitor& visitor) override; bool evaluate(const HostTensorVector& 
outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v1 namespace v0 @@ -73,6 +74,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 diff --git a/ngraph/core/src/op/abs.cpp b/ngraph/core/src/op/abs.cpp index 1608f92b26c..f83ca7d9da3 100644 --- a/ngraph/core/src/op/abs.cpp +++ b/ngraph/core/src/op/abs.cpp @@ -65,3 +65,21 @@ bool op::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_OP_SCOPE(v0_Abs_evaluate); return absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Abs::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Abs_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: + case ngraph::element::bf16: + case ngraph::element::boolean: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/acos.cpp b/ngraph/core/src/op/acos.cpp index e5643137828..4605e51df2c 100644 --- a/ngraph/core/src/op/acos.cpp +++ b/ngraph/core/src/op/acos.cpp @@ -73,3 +73,20 @@ bool op::Acos::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_OP_SCOPE(v0_Acos_evaluate); return acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Acos::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Acos_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: + case ngraph::element::boolean: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/acosh.cpp b/ngraph/core/src/op/acosh.cpp index 
851925eec31..cc8d72a8f08 100644 --- a/ngraph/core/src/op/acosh.cpp +++ b/ngraph/core/src/op/acosh.cpp @@ -62,3 +62,19 @@ bool op::v3::Acosh::evaluate(const HostTensorVector& outputs, const HostTensorVe NGRAPH_OP_SCOPE(v3_Acosh_evaluate); return acoshop::evaluate_acosh(inputs[0], outputs[0]); } + +bool op::v3::Acosh::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v3_Acosh_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/add.cpp b/ngraph/core/src/op/add.cpp index 974cbe47685..9f2cbad8f6d 100644 --- a/ngraph/core/src/op/add.cpp +++ b/ngraph/core/src/op/add.cpp @@ -84,3 +84,24 @@ bool op::v1::Add::evaluate(const HostTensorVector& outputs, const HostTensorVect NGRAPH_OP_SCOPE(v1_Add_evaluate); return add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob()); } + +bool op::v1::Add::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Add_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/and.cpp b/ngraph/core/src/op/and.cpp index 5b3912fa6e0..4c81190083b 100644 --- a/ngraph/core/src/op/and.cpp +++ b/ngraph/core/src/op/and.cpp @@ -79,3 +79,20 @@ bool op::v1::LogicalAnd::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_LogicalAnd_evaluate); return logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob()); } + +bool op::v1::LogicalAnd::has_evaluate() const +{ + 
NGRAPH_OP_SCOPE(v1_LogicalAnd_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/asin.cpp b/ngraph/core/src/op/asin.cpp index 40640994433..7ddc97a3e04 100644 --- a/ngraph/core/src/op/asin.cpp +++ b/ngraph/core/src/op/asin.cpp @@ -74,3 +74,20 @@ bool op::Asin::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_OP_SCOPE(v0_Asin_evaluate); return asinop::evaluate_asin(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Asin::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Asin_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/asinh.cpp b/ngraph/core/src/op/asinh.cpp index c8f12bbdf99..ed7b2191bd6 100644 --- a/ngraph/core/src/op/asinh.cpp +++ b/ngraph/core/src/op/asinh.cpp @@ -62,3 +62,19 @@ bool op::v3::Asinh::evaluate(const HostTensorVector& outputs, const HostTensorVe NGRAPH_OP_SCOPE(v3_Asinh_evaluate); return asinhop::evaluate_asinh(inputs[0], outputs[0]); } + +bool op::v3::Asinh::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v3_Asinh_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/assign.cpp b/ngraph/core/src/op/assign.cpp index 1c1cb9883b0..529bbb5af4f 100644 ---
a/ngraph/core/src/op/assign.cpp +++ b/ngraph/core/src/op/assign.cpp @@ -151,6 +151,12 @@ bool op::v6::Assign::evaluate(const HostTensorVector& outputs, return true; } +bool op::v6::Assign::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v6_Assign_has_evaluate); + return true; +} + bool op::v6::Assign::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) { return false; diff --git a/ngraph/core/src/op/atan.cpp b/ngraph/core/src/op/atan.cpp index 623e52a51f8..c3f1cce9fb4 100644 --- a/ngraph/core/src/op/atan.cpp +++ b/ngraph/core/src/op/atan.cpp @@ -73,3 +73,20 @@ bool op::Atan::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_OP_SCOPE(v0_Atan_evaluate); return atanop::evaluate_atan(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Atan::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Atan_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/atanh.cpp b/ngraph/core/src/op/atanh.cpp index 96a0dd3720d..a14e6aaebe1 100644 --- a/ngraph/core/src/op/atanh.cpp +++ b/ngraph/core/src/op/atanh.cpp @@ -62,3 +62,19 @@ bool op::v3::Atanh::evaluate(const HostTensorVector& outputs, const HostTensorVe NGRAPH_OP_SCOPE(v3_Atanh_evaluate); return atanhop::evaluate_atanh(inputs[0], outputs[0]); } + +bool op::v3::Atanh::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v3_Atanh_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/batch_to_space.cpp b/ngraph/core/src/op/batch_to_space.cpp
index 25f8b0bf13c..40feec46384 100644 --- a/ngraph/core/src/op/batch_to_space.cpp +++ b/ngraph/core/src/op/batch_to_space.cpp @@ -252,3 +252,11 @@ bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_BatchToSpace); return batch_to_space_evaluate(outputs, inputs); } + +bool ngraph::op::v1::BatchToSpace::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_BatchToSpace_has_evaluate); + return !get_input_partial_shape(0).is_dynamic() && + (get_input_shape(0).size() == 4 || get_input_shape(0).size() == 5) && + get_input_shape(0).size() <= shape_size(get_input_shape(1)); +} diff --git a/ngraph/core/src/op/broadcast.cpp b/ngraph/core/src/op/broadcast.cpp index 33ff84fd6c4..2df01eb9075 100644 --- a/ngraph/core/src/op/broadcast.cpp +++ b/ngraph/core/src/op/broadcast.cpp @@ -221,6 +221,13 @@ bool op::v3::Broadcast::evaluate(const HostTensorVector& outputs, return broadcast_evaluate(outputs, inputs); } +bool op::v3::Broadcast::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v3_Broadcast_has_evaluate); + return m_mode.m_type == BroadcastType::NONE || m_mode.m_type == BroadcastType::PDPD || + m_mode.m_type == BroadcastType::NUMPY || m_mode.m_type == BroadcastType::BIDIRECTIONAL; +} + namespace { using namespace op; @@ -313,3 +320,10 @@ bool op::v1::Broadcast::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_Broadcast_evaluate); return op::util::BroadcastBase::evaluate(outputs, inputs); } + +bool op::v1::Broadcast::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Broadcast_has_evaluate); + return m_mode.m_type == BroadcastType::NONE || m_mode.m_type == BroadcastType::PDPD || + m_mode.m_type == BroadcastType::NUMPY; +} diff --git a/ngraph/core/src/op/ceiling.cpp b/ngraph/core/src/op/ceiling.cpp index e866fb1326a..7ef0e8a6bec 100644 --- a/ngraph/core/src/op/ceiling.cpp +++ b/ngraph/core/src/op/ceiling.cpp @@ -75,3 +75,24 @@ bool op::Ceiling::evaluate(const HostTensorVector& outputs, const HostTensorVect NGRAPH_OP_SCOPE(v0_Ceiling_evaluate); 
return ceiling::evaluate_ceiling(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Ceiling::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Ceiling_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/clamp.cpp b/ngraph/core/src/op/clamp.cpp index 62fa86d503b..1f67f003bcf 100644 --- a/ngraph/core/src/op/clamp.cpp +++ b/ngraph/core/src/op/clamp.cpp @@ -107,6 +107,27 @@ bool op::v0::Clamp::evaluate(const HostTensorVector& outputs, const HostTensorVe return clamp::evaluate_clamp(inputs[0], outputs[0], get_min(), get_max()); } +bool op::v0::Clamp::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Clamp_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::bf16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} + NGRAPH_RTTI_DEFINITION(op::v0::Clamp, "Clamp", 0); op::Clamp::Clamp() diff --git a/ngraph/core/src/op/concat.cpp b/ngraph/core/src/op/concat.cpp index a1232eec1ce..625592fb908 100644 --- a/ngraph/core/src/op/concat.cpp +++ b/ngraph/core/src/op/concat.cpp @@ -147,6 +147,13 @@ bool op::Concat::evaluate(const HostTensorVector& outputs, const HostTensorVecto auto concat_axis = get_axis() < 0 ? 
get_axis() + inputs[0]->get_shape().size() : get_axis(); return evaluate_concat(inputs, outputs[0], concat_axis); } + +bool op::Concat::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Concat_has_evaluate); + return true; +} + bool op::Concat::evaluate_lower(const HostTensorVector& output_values) const { return default_lower_bound_evaluator(this, output_values); diff --git a/ngraph/core/src/op/constant.cpp b/ngraph/core/src/op/constant.cpp index ef3134c95c9..fb295566f48 100644 --- a/ngraph/core/src/op/constant.cpp +++ b/ngraph/core/src/op/constant.cpp @@ -488,6 +488,12 @@ bool op::v0::Constant::evaluate(const HostTensorVector& outputs, return true; } +bool op::v0::Constant::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Constant_has_evaluate); + return true; +} + bool op::v0::Constant::evaluate_lower(const HostTensorVector& outputs) const { return evaluate(outputs, {}); diff --git a/ngraph/core/src/op/convert.cpp b/ngraph/core/src/op/convert.cpp index 3bd81768761..4230c3a9afb 100644 --- a/ngraph/core/src/op/convert.cpp +++ b/ngraph/core/src/op/convert.cpp @@ -180,6 +180,51 @@ bool op::v0::Convert::evaluate(const HostTensorVector& output_values, return convert::evaluate_convert(input_values[0], output_values[0]); } +bool op::v0::Convert::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Convert_has_evaluate); + + switch (get_input_element_type(0)) + { + case ngraph::element::u1: + case ngraph::element::u4: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::i4: + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: + case ngraph::element::boolean: break; + default: return false; + } + switch (get_output_element_type(0)) + { + case ngraph::element::i4: + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case 
ngraph::element::i64: + case ngraph::element::u1: + case ngraph::element::u4: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: + case ngraph::element::boolean: break; + default: return false; + } + return true; +} + bool op::v0::Convert::evaluate_lower(const HostTensorVector& output_values) const { return convert::evaluate_bound(this, output_values, false); diff --git a/ngraph/core/src/op/cos.cpp b/ngraph/core/src/op/cos.cpp index 79c28e3f23c..aa3ac962123 100644 --- a/ngraph/core/src/op/cos.cpp +++ b/ngraph/core/src/op/cos.cpp @@ -71,3 +71,20 @@ bool op::Cos::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_OP_SCOPE(v0_Cos_evaluate); return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Cos::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Cos_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/cosh.cpp b/ngraph/core/src/op/cosh.cpp index b2c0b673e3e..d4f22bc6f46 100644 --- a/ngraph/core/src/op/cosh.cpp +++ b/ngraph/core/src/op/cosh.cpp @@ -70,3 +70,20 @@ bool op::Cosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_OP_SCOPE(v0_Cosh_evaluate); return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Cosh::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Cosh_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case 
ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/depth_to_space.cpp b/ngraph/core/src/op/depth_to_space.cpp index 761256d627e..4905904614d 100644 --- a/ngraph/core/src/op/depth_to_space.cpp +++ b/ngraph/core/src/op/depth_to_space.cpp @@ -235,6 +235,13 @@ bool op::DepthToSpace::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v0_DepthToSpace_evaluate); return evaluate_depth_to_space(outputs, inputs); } + +bool op::DepthToSpace::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_DepthToSpace_has_evaluate); + return !get_input_partial_shape(0).is_dynamic(); +} + namespace ngraph { template <> diff --git a/ngraph/core/src/op/divide.cpp b/ngraph/core/src/op/divide.cpp index 2e7e1893080..18200c88934 100644 --- a/ngraph/core/src/op/divide.cpp +++ b/ngraph/core/src/op/divide.cpp @@ -95,3 +95,20 @@ bool op::v1::Divide::evaluate(const HostTensorVector& outputs, const HostTensorV NGRAPH_OP_SCOPE(v1_Divide_evaluate); return divide::evaluate_divide(inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv()); } + +bool op::v1::Divide::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Divide_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::bf16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/equal.cpp b/ngraph/core/src/op/equal.cpp index 02373ea4735..d70abc1537c 100644 --- a/ngraph/core/src/op/equal.cpp +++ b/ngraph/core/src/op/equal.cpp @@ -74,6 +74,23 @@ bool op::v1::Equal::evaluate(const HostTensorVector& outputs, const HostTensorVe return equal::evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob()); } +bool op::v1::Equal::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Equal_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case 
ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} + bool op::v1::Equal::visit_attributes(AttributeVisitor& visitor) { NGRAPH_OP_SCOPE(v1_Equal_visit_attributes); diff --git a/ngraph/core/src/op/erf.cpp b/ngraph/core/src/op/erf.cpp index 39851520d91..90f64dbe9df 100644 --- a/ngraph/core/src/op/erf.cpp +++ b/ngraph/core/src/op/erf.cpp @@ -69,3 +69,20 @@ bool op::Erf::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_OP_SCOPE(v0_Erf_evaluate); return erfop::evaluate_erf(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Erf::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Erf_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/exp.cpp b/ngraph/core/src/op/exp.cpp index 5cb36c024d7..b366f24491b 100644 --- a/ngraph/core/src/op/exp.cpp +++ b/ngraph/core/src/op/exp.cpp @@ -71,3 +71,20 @@ bool op::Exp::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); return expop::evaluate_exp(inputs[0], outputs[0]); } + +bool op::Exp::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Exp_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/floor.cpp b/ngraph/core/src/op/floor.cpp index 
89a6a5efda8..815b482766a 100644 --- a/ngraph/core/src/op/floor.cpp +++ b/ngraph/core/src/op/floor.cpp @@ -81,3 +81,24 @@ bool op::Floor::evaluate(const HostTensorVector& outputs, const HostTensorVector NGRAPH_OP_SCOPE(v0_Floor_evaluate); return floorop::evaluate_floor(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Floor::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Floor_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/floor_mod.cpp b/ngraph/core/src/op/floor_mod.cpp index a3575642d28..3ccb7a29524 100644 --- a/ngraph/core/src/op/floor_mod.cpp +++ b/ngraph/core/src/op/floor_mod.cpp @@ -75,6 +75,25 @@ bool op::v1::FloorMod::evaluate(const HostTensorVector& outputs, return floor_mod::evaluate_floor_mod(inputs[0], inputs[1], outputs[0], get_autob()); } +bool op::v1::FloorMod::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_FloorMod_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i8: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} + bool op::v1::FloorMod::visit_attributes(AttributeVisitor& visitor) { NGRAPH_OP_SCOPE(v1_FloorMod_visit_attributes); diff --git a/ngraph/core/src/op/gelu.cpp b/ngraph/core/src/op/gelu.cpp index c2b07a50ef5..e1beb2b36a3 100644 --- a/ngraph/core/src/op/gelu.cpp +++ b/ngraph/core/src/op/gelu.cpp @@ -190,3 +190,15 @@ bool op::v7::Gelu::evaluate(const 
HostTensorVector& outputs, const HostTensorVec NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); return gelu::evaluate_gelu(inputs[0], outputs[0], m_approximation_mode); } + +bool op::v7::Gelu::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v7_Gelu_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/greater.cpp b/ngraph/core/src/op/greater.cpp index ed2dde4ddc3..bbc28493346 100644 --- a/ngraph/core/src/op/greater.cpp +++ b/ngraph/core/src/op/greater.cpp @@ -74,3 +74,20 @@ bool op::v1::Greater::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_Greater_evaluate); return greaterop::evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob()); } + +bool op::v1::Greater::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Greater_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/greater_eq.cpp b/ngraph/core/src/op/greater_eq.cpp index b7bd0adaa52..3db1d4155a5 100644 --- a/ngraph/core/src/op/greater_eq.cpp +++ b/ngraph/core/src/op/greater_eq.cpp @@ -75,6 +75,23 @@ bool op::v1::GreaterEqual::evaluate(const HostTensorVector& outputs, return greater_equalop::evaluate_greater_equal(inputs[0], inputs[1], outputs[0], get_autob()); } +bool op::v1::GreaterEqual::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_GreaterEqual_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + 
default: break; + } + return false; +} + bool op::v1::GreaterEqual::visit_attributes(AttributeVisitor& visitor) { NGRAPH_OP_SCOPE(v1_GreaterEqual_visit_attributes); diff --git a/ngraph/core/src/op/hsigmoid.cpp b/ngraph/core/src/op/hsigmoid.cpp index 2f7ee0f93a7..5156dc4b444 100644 --- a/ngraph/core/src/op/hsigmoid.cpp +++ b/ngraph/core/src/op/hsigmoid.cpp @@ -69,3 +69,16 @@ bool op::v5::HSigmoid::evaluate(const HostTensorVector& outputs, NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); return evaluate_hsigmoid(inputs[0], outputs[0]); } + +bool op::v5::HSigmoid::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v5_HSigmoid_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/hswish.cpp b/ngraph/core/src/op/hswish.cpp index 9ef892c46c6..c5d683285a0 100644 --- a/ngraph/core/src/op/hswish.cpp +++ b/ngraph/core/src/op/hswish.cpp @@ -67,3 +67,16 @@ bool op::v4::HSwish::evaluate(const HostTensorVector& outputs, const HostTensorV NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); return hswish::evaluate_hswish(inputs[0], outputs[0]); } + +bool op::v4::HSwish::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v4_HSwish_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/interpolate.cpp b/ngraph/core/src/op/interpolate.cpp index c5d77d33922..b39c0cfc23d 100644 --- a/ngraph/core/src/op/interpolate.cpp +++ b/ngraph/core/src/op/interpolate.cpp @@ -540,6 +540,19 @@ bool op::v4::Interpolate::evaluate(const HostTensorVector& outputs, return evaluate_interpolate(outputs, inputs); } +bool op::v4::Interpolate::has_evaluate() const +{ + 
NGRAPH_OP_SCOPE(v4_Interpolate_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::u8: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} + namespace ngraph { template <> diff --git a/ngraph/core/src/op/less.cpp b/ngraph/core/src/op/less.cpp index 8447d13c73c..d9b4e8dfeb9 100644 --- a/ngraph/core/src/op/less.cpp +++ b/ngraph/core/src/op/less.cpp @@ -73,3 +73,20 @@ bool op::v1::Less::evaluate(const HostTensorVector& outputs, const HostTensorVec NGRAPH_OP_SCOPE(v1_Less_evaluate); return lessop::evaluate_less(inputs[0], inputs[1], outputs[0], get_autob()); } + +bool op::v1::Less::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Less_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/less_eq.cpp b/ngraph/core/src/op/less_eq.cpp index af83b483a2a..3528090de46 100644 --- a/ngraph/core/src/op/less_eq.cpp +++ b/ngraph/core/src/op/less_eq.cpp @@ -74,3 +74,20 @@ bool op::v1::LessEqual::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_LessEqual_evaluate); return less_equalop::evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob()); } + +bool op::v1::LessEqual::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_LessEqual_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/log.cpp b/ngraph/core/src/op/log.cpp index 222b5f12e10..49a41c0d931 100644 --- a/ngraph/core/src/op/log.cpp +++ 
b/ngraph/core/src/op/log.cpp @@ -69,3 +69,20 @@ bool op::Log::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_OP_SCOPE(v0_Log_evaluate); return logop::evaluate_log(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Log::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Log_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/loop.cpp b/ngraph/core/src/op/loop.cpp index 1ce0179b670..2941d46d2e2 100644 --- a/ngraph/core/src/op/loop.cpp +++ b/ngraph/core/src/op/loop.cpp @@ -327,6 +327,18 @@ bool op::v5::Loop::evaluate(const HostTensorVector& outputs, const HostTensorVec return true; } +bool op::v5::Loop::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v5_Loop_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: return true; + default: break; + } + return false; +} + void op::v5::Loop::clone_to(op::v5::Loop& dst, const OutputVector& new_args) const { dst.set_arguments(new_args); diff --git a/ngraph/core/src/op/matmul.cpp b/ngraph/core/src/op/matmul.cpp index 2252183c092..109e4eea1d8 100644 --- a/ngraph/core/src/op/matmul.cpp +++ b/ngraph/core/src/op/matmul.cpp @@ -255,6 +255,22 @@ bool op::MatMul::evaluate(const HostTensorVector& outputs, const HostTensorVecto inputs[0], inputs[1], outputs[0], get_transpose_a(), get_transpose_b()); } +bool op::MatMul::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_MatMul_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} + 
void ngraph::op::v0::MatMul::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_MatMul_validate_and_infer_types); diff --git a/ngraph/core/src/op/max.cpp b/ngraph/core/src/op/max.cpp index 0c93434c451..318910aa70c 100644 --- a/ngraph/core/src/op/max.cpp +++ b/ngraph/core/src/op/max.cpp @@ -69,3 +69,19 @@ bool op::v1::ReduceMax::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_ReduceMax_evaluate); return maxop::evaluate_max(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); } + +bool op::v1::ReduceMax::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_ReduceMax_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/max_pool.cpp b/ngraph/core/src/op/max_pool.cpp index fbb6c3fa3c2..eb41510dabb 100644 --- a/ngraph/core/src/op/max_pool.cpp +++ b/ngraph/core/src/op/max_pool.cpp @@ -263,3 +263,19 @@ bool op::v1::MaxPool::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_MaxPool_evaluate); return evaluate_maxpool(outputs, inputs); } + +bool op::v1::MaxPool::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_MaxPool_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/maximum.cpp b/ngraph/core/src/op/maximum.cpp index b15c6c0b20c..0733759c2b5 100644 --- a/ngraph/core/src/op/maximum.cpp +++ b/ngraph/core/src/op/maximum.cpp @@ -81,3 +81,19 @@ bool op::v1::Maximum::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_Maximum_evaluate); return maximumop::evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob()); } + +bool 
op::v1::Maximum::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Maximum_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/min.cpp b/ngraph/core/src/op/min.cpp index 39439ca762c..37f3512413b 100644 --- a/ngraph/core/src/op/min.cpp +++ b/ngraph/core/src/op/min.cpp @@ -72,6 +72,22 @@ bool op::v1::ReduceMin::evaluate(const HostTensorVector& outputs, return minop::evaluate_min(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); } +bool op::v1::ReduceMin::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_ReduceMin_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} + bool op::v1::ReduceMin::evaluate_lower(const HostTensorVector& output_values) const { if (!input_value(1).get_tensor().has_and_set_bound()) diff --git a/ngraph/core/src/op/mish.cpp b/ngraph/core/src/op/mish.cpp index 339466b7709..e9a9aed5b0b 100644 --- a/ngraph/core/src/op/mish.cpp +++ b/ngraph/core/src/op/mish.cpp @@ -74,3 +74,15 @@ bool op::v4::Mish::evaluate(const HostTensorVector& outputs, const HostTensorVec NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); return mish::evaluate_mish(inputs[0], outputs[0]); } + +bool op::v4::Mish::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v4_Mish_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/multiply.cpp b/ngraph/core/src/op/multiply.cpp index e40bd3bfebe..eae9cd1ac59 100644 --- 
a/ngraph/core/src/op/multiply.cpp +++ b/ngraph/core/src/op/multiply.cpp @@ -74,3 +74,20 @@ bool op::v1::Multiply::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_Multiply_evaluate); return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob()); } + +bool op::v1::Multiply::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Multiply_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: + case ngraph::element::bf16: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/negative.cpp b/ngraph/core/src/op/negative.cpp index 6fc9857fc20..f670ac19a2e 100644 --- a/ngraph/core/src/op/negative.cpp +++ b/ngraph/core/src/op/negative.cpp @@ -67,6 +67,23 @@ bool op::Negative::evaluate(const HostTensorVector& outputs, const HostTensorVec return negativeop::evaluate_negative(inputs[0], outputs[0], shape_size(get_output_shape(0))); } +bool op::Negative::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Negative_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} + shared_ptr ngraph::operator-(const Output& arg0) { return make_shared(arg0); diff --git a/ngraph/core/src/op/non_zero.cpp b/ngraph/core/src/op/non_zero.cpp index 1e11aad1b2a..35211a902c4 100644 --- a/ngraph/core/src/op/non_zero.cpp +++ b/ngraph/core/src/op/non_zero.cpp @@ -174,3 +174,25 @@ bool op::v3::NonZero::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v3_NonZero_evaluate); return nonzero::evaluate_nonzero(inputs[0], outputs[0]); } + +bool op::v3::NonZero::has_evaluate() const +{ + 
NGRAPH_OP_SCOPE(v3_NonZero_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: + case ngraph::element::f64: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/not.cpp b/ngraph/core/src/op/not.cpp index 30192544a1f..e731898a4e8 100644 --- a/ngraph/core/src/op/not.cpp +++ b/ngraph/core/src/op/not.cpp @@ -83,3 +83,20 @@ bool op::v1::LogicalNot::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_LogicalNot_evaluate); return notop::evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::v1::LogicalNot::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_LogicalNot_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/not_equal.cpp b/ngraph/core/src/op/not_equal.cpp index 0db7617bc1a..a53ea2ee74f 100644 --- a/ngraph/core/src/op/not_equal.cpp +++ b/ngraph/core/src/op/not_equal.cpp @@ -75,6 +75,23 @@ bool op::v1::NotEqual::evaluate(const HostTensorVector& outputs, return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob()); } +bool op::v1::NotEqual::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_NotEqual_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + 
default: break; + } + return false; +} + bool op::v1::NotEqual::visit_attributes(AttributeVisitor& visitor) { NGRAPH_OP_SCOPE(v1_NotEqual_visit_attributes); diff --git a/ngraph/core/src/op/one_hot.cpp b/ngraph/core/src/op/one_hot.cpp index c1d6b8e149a..d70e9847428 100644 --- a/ngraph/core/src/op/one_hot.cpp +++ b/ngraph/core/src/op/one_hot.cpp @@ -175,3 +175,15 @@ bool op::v1::OneHot::evaluate(const HostTensorVector& output_values, "Incompatible axis and depth values."); return one_hot::evaluate_onehot(output_values, input_values, axis); } + +bool op::v1::OneHot::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_OneHot_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/or.cpp b/ngraph/core/src/op/or.cpp index 8412d1df044..f5b1deafdd6 100644 --- a/ngraph/core/src/op/or.cpp +++ b/ngraph/core/src/op/or.cpp @@ -72,3 +72,20 @@ bool op::v1::LogicalOr::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_LogicalOr_evaluate); return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob()); } + +bool op::v1::LogicalOr::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_LogicalOr_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/pad.cpp b/ngraph/core/src/op/pad.cpp index 1dee8e3355e..cc1f8ecb206 100644 --- a/ngraph/core/src/op/pad.cpp +++ b/ngraph/core/src/op/pad.cpp @@ -233,3 +233,9 @@ bool op::v1::Pad::evaluate(const HostTensorVector& outputs, const HostTensorVect NGRAPH_OP_SCOPE(v1_Pad_evaluate); return evaluate_pad(outputs, inputs); } + +bool op::v1::Pad::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Pad_has_evaluate); + 
return true; +} diff --git a/ngraph/core/src/op/power.cpp b/ngraph/core/src/op/power.cpp index b62356566f2..10695c755b5 100644 --- a/ngraph/core/src/op/power.cpp +++ b/ngraph/core/src/op/power.cpp @@ -76,3 +76,20 @@ bool op::v1::Power::evaluate(const HostTensorVector& outputs, const HostTensorVe NGRAPH_OP_SCOPE(v1_Power_evaluate); return power::evaluate_power(inputs[0], inputs[1], outputs[0], get_autob()); } + +bool op::v1::Power::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Power_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::bf16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/prelu.cpp b/ngraph/core/src/op/prelu.cpp index c7d5038e809..4cdbfd1500c 100644 --- a/ngraph/core/src/op/prelu.cpp +++ b/ngraph/core/src/op/prelu.cpp @@ -81,3 +81,17 @@ bool op::PRelu::evaluate(const HostTensorVector& outputs, const HostTensorVector NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2)); return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]); } + +bool op::PRelu::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_PRelu_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i8: + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/prior_box.cpp b/ngraph/core/src/op/prior_box.cpp index d1db0539734..52f12330a65 100644 --- a/ngraph/core/src/op/prior_box.cpp +++ b/ngraph/core/src/op/prior_box.cpp @@ -187,3 +187,21 @@ bool op::v0::PriorBox::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v0_PriorBox_evaluate); return prior_box::evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs()); } + +bool op::v0::PriorBox::has_evaluate() const +{ 
+ NGRAPH_OP_SCOPE(v0_PriorBox_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/prior_box_clustered.cpp b/ngraph/core/src/op/prior_box_clustered.cpp index 21a5ad12a87..1b146005a75 100644 --- a/ngraph/core/src/op/prior_box_clustered.cpp +++ b/ngraph/core/src/op/prior_box_clustered.cpp @@ -160,3 +160,21 @@ bool op::v0::PriorBoxClustered::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v0_PriorBoxClustered_evaluate); return prior_box_clustered::evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs()); } + +bool op::v0::PriorBoxClustered::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_PriorBoxClustered_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/range.cpp b/ngraph/core/src/op/range.cpp index 7e040b3a280..5990ef3dcbd 100644 --- a/ngraph/core/src/op/range.cpp +++ b/ngraph/core/src/op/range.cpp @@ -300,6 +300,28 @@ bool op::v4::Range::evaluate(const HostTensorVector& outputs, const HostTensorVe return rangeop::evaluate_power(out, start, stop, step, m_output_type, 4); } +bool op::v4::Range::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v4_Range_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: + case ngraph::element::f64: + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case 
ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: return true; + default: break; + } + return false; +} + constexpr NodeTypeInfo op::v0::Range::type_info; op::v0::Range::Range(const Output& start, const Output& stop, const Output& step) @@ -500,3 +522,25 @@ bool op::v0::Range::evaluate(const HostTensorVector& outputs, const HostTensorVe HostTensorPtr step = inputs[2]; return rangeop::evaluate_power(out, start, stop, step, start->get_element_type(), 0); } + +bool op::v0::Range::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Range_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: + case ngraph::element::f64: + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/read_value.cpp b/ngraph/core/src/op/read_value.cpp index 1e1d51c292c..d62c46a28a6 100644 --- a/ngraph/core/src/op/read_value.cpp +++ b/ngraph/core/src/op/read_value.cpp @@ -127,6 +127,12 @@ bool op::v6::ReadValue::evaluate(const HostTensorVector& outputs, return true; } +bool op::v6::ReadValue::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v6_ReadValue_has_evaluate); + return true; +} + bool op::v6::ReadValue::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) { diff --git a/ngraph/core/src/op/reduce_l1.cpp b/ngraph/core/src/op/reduce_l1.cpp index 351cbea426d..29de7e4e03f 100644 --- a/ngraph/core/src/op/reduce_l1.cpp +++ b/ngraph/core/src/op/reduce_l1.cpp @@ -73,3 +73,18 @@ bool op::v4::ReduceL1::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v4_ReduceL1_evaluate); return reduce_l1::evaluate_sum(inputs[0], outputs[0], get_reduction_axes(), 
get_keep_dims()); } + +bool op::v4::ReduceL1::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v4_ReduceL1_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/reduce_l2.cpp b/ngraph/core/src/op/reduce_l2.cpp index 1f5ff694a64..e3ee81b1875 100644 --- a/ngraph/core/src/op/reduce_l2.cpp +++ b/ngraph/core/src/op/reduce_l2.cpp @@ -72,3 +72,16 @@ bool op::v4::ReduceL2::evaluate(const HostTensorVector& outputs, return reduce_l2::evaluate_reduce_l2( inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); } + +bool op::v4::ReduceL2::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v4_ReduceL2_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/reduce_logical_and.cpp b/ngraph/core/src/op/reduce_logical_and.cpp index a522131a7d3..c75c244d590 100644 --- a/ngraph/core/src/op/reduce_logical_and.cpp +++ b/ngraph/core/src/op/reduce_logical_and.cpp @@ -73,3 +73,10 @@ bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs, const auto& out = outputs[0]; return evaluate_reduce_logical_and(data, axes, out, get_keep_dims()); } + +bool op::v1::ReduceLogicalAnd::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_ReduceLogicalAnd_has_evaluate); + return get_input_element_type(0) == element::boolean && + get_input_element_type(1).is_integral_number(); +} diff --git a/ngraph/core/src/op/reduce_logical_or.cpp b/ngraph/core/src/op/reduce_logical_or.cpp index cc09e5c42bf..a6afa5f77c8 100644 --- a/ngraph/core/src/op/reduce_logical_or.cpp +++ b/ngraph/core/src/op/reduce_logical_or.cpp @@ -73,3 +73,10 @@ bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs, const auto& 
out = outputs[0]; return evaluate_reduce_logical_or(data, axes, out, get_keep_dims()); } + +bool op::v1::ReduceLogicalOr::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_ReduceLogicalOr_has_evaluate); + return get_input_element_type(0) == element::boolean && + get_input_element_type(1).is_integral_number(); +} diff --git a/ngraph/core/src/op/reduce_mean.cpp b/ngraph/core/src/op/reduce_mean.cpp index 01bb0ddcde4..6c41fcba07e 100644 --- a/ngraph/core/src/op/reduce_mean.cpp +++ b/ngraph/core/src/op/reduce_mean.cpp @@ -70,3 +70,19 @@ bool op::v1::ReduceMean::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_ReduceMean_evaluate); return mean::evaluate_mean(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); } + +bool op::v1::ReduceMean::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_ReduceMean_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/reduce_prod.cpp b/ngraph/core/src/op/reduce_prod.cpp index 53f31945725..bbacf23cc7e 100644 --- a/ngraph/core/src/op/reduce_prod.cpp +++ b/ngraph/core/src/op/reduce_prod.cpp @@ -79,6 +79,22 @@ bool op::v1::ReduceProd::evaluate(const HostTensorVector& outputs, inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); } +bool op::v1::ReduceProd::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_ReduceProd_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} + bool op::v1::ReduceProd::evaluate_lower(const HostTensorVector& output_values) const { if (!input_value(1).get_tensor().has_and_set_bound()) diff --git 
a/ngraph/core/src/op/reduce_sum.cpp b/ngraph/core/src/op/reduce_sum.cpp index 8da6ba369a3..1eab2dcefb8 100644 --- a/ngraph/core/src/op/reduce_sum.cpp +++ b/ngraph/core/src/op/reduce_sum.cpp @@ -75,3 +75,19 @@ bool op::v1::ReduceSum::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_ReduceSum_evaluate); return reduce_sum::evaluate_sum(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); } + +bool op::v1::ReduceSum::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_ReduceSum_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/relu.cpp b/ngraph/core/src/op/relu.cpp index e698406dde4..e86cd0e66d2 100644 --- a/ngraph/core/src/op/relu.cpp +++ b/ngraph/core/src/op/relu.cpp @@ -67,6 +67,23 @@ bool op::Relu::evaluate(const HostTensorVector& outputs, const HostTensorVector& return relu::evaluate_relu(inputs[0], outputs[0]); } +bool op::Relu::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Relu_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} + bool op::Relu::visit_attributes(AttributeVisitor& visitor) { NGRAPH_OP_SCOPE(v0_Relu_visit_attributes); diff --git a/ngraph/core/src/op/reshape.cpp b/ngraph/core/src/op/reshape.cpp index 4ee131587de..26fa7393797 100644 --- a/ngraph/core/src/op/reshape.cpp +++ b/ngraph/core/src/op/reshape.cpp @@ -329,6 +329,24 @@ bool op::v1::Reshape::evaluate(const HostTensorVector& outputs, return evaluate_reshape(outputs, inputs); } +bool op::v1::Reshape::has_evaluate() const +{ + 
NGRAPH_OP_SCOPE(v1_Reshape_has_evaluate); + switch (get_input_element_type(1)) + { + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: return true; + default: break; + } + return false; +} + bool op::v1::Reshape::evaluate_lower(const HostTensorVector& output_values) const { if (!input_value(1).get_tensor().has_and_set_bound()) diff --git a/ngraph/core/src/op/result.cpp b/ngraph/core/src/op/result.cpp index 92afc6d71da..9c018855235 100644 --- a/ngraph/core/src/op/result.cpp +++ b/ngraph/core/src/op/result.cpp @@ -58,6 +58,12 @@ bool op::Result::evaluate(const HostTensorVector& outputs, const HostTensorVecto return true; } +bool op::Result::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Result_has_evaluate); + return true; +} + bool op::Result::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) { return false; diff --git a/ngraph/core/src/op/reverse.cpp b/ngraph/core/src/op/reverse.cpp index e9f1c85eb20..68571a94a95 100644 --- a/ngraph/core/src/op/reverse.cpp +++ b/ngraph/core/src/op/reverse.cpp @@ -203,6 +203,31 @@ bool op::v1::Reverse::evaluate(const HostTensorVector& outputs, return evaluate_reverse(outputs, inputs); } +bool op::v1::Reverse::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Reverse_has_evaluate); + + if (get_mode() == op::v1::Reverse::Mode::INDEX) + { + switch (get_input_element_type(1)) + { + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: return true; + default: return false; + } + } + else + { + return true; + } +} + namespace ngraph { template <> diff --git a/ngraph/core/src/op/roi_align.cpp b/ngraph/core/src/op/roi_align.cpp index 75aadc45b45..43642b70cf7 ---
a/ngraph/core/src/op/roi_align.cpp +++ b/ngraph/core/src/op/roi_align.cpp @@ -295,3 +295,16 @@ bool op::v3::ROIAlign::evaluate(const HostTensorVector& outputs, return roi_alinop::evaluate_roi_align( inputs, outputs[0], m_pooled_h, m_pooled_w, m_sampling_ratio, m_spatial_scale, m_mode); } + +bool op::v3::ROIAlign::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v3_ROIAlign_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/round.cpp b/ngraph/core/src/op/round.cpp index 85a50cdf002..dde7a19d81b 100644 --- a/ngraph/core/src/op/round.cpp +++ b/ngraph/core/src/op/round.cpp @@ -101,6 +101,28 @@ bool op::v5::Round::evaluate(const HostTensorVector& outputs, const HostTensorVe inputs[0], outputs[0], shape_size(get_output_shape(0)), get_mode()); } +bool op::v5::Round::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v5_Round_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: + case ngraph::element::bf16: return true; + default: break; + } + return false; +} + namespace ngraph { template <> diff --git a/ngraph/core/src/op/scatter_elements_update.cpp b/ngraph/core/src/op/scatter_elements_update.cpp index d66cde3dec9..460d84ccdfe 100644 --- a/ngraph/core/src/op/scatter_elements_update.cpp +++ b/ngraph/core/src/op/scatter_elements_update.cpp @@ -288,3 +288,33 @@ bool op::v3::ScatterElementsUpdate::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v3_ScatterElementsUpdate_evaluate); return evaluate_scatter_element_update(outputs, inputs); } + +bool 
op::v3::ScatterElementsUpdate::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v3_ScatterElementsUpdate_has_evaluate); + + switch (get_output_element_type(0)) + { + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: break; + default: return false; + } + switch (get_input_element_type(1)) + { + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: break; + default: return false; + } + return true; +} diff --git a/ngraph/core/src/op/scatter_nd_update.cpp b/ngraph/core/src/op/scatter_nd_update.cpp index 915984418f9..855267369cb 100644 --- a/ngraph/core/src/op/scatter_nd_update.cpp +++ b/ngraph/core/src/op/scatter_nd_update.cpp @@ -97,3 +97,27 @@ bool op::v3::ScatterNDUpdate::evaluate(const HostTensorVector& outputs, return scatter::evaluate_scatter(inputs[0], inputs[1], inputs[2], outputs[0]); } + +bool op::v3::ScatterNDUpdate::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v3_ScatterNDUpdate_has_evaluate); + + switch (get_output_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: + case ngraph::element::boolean: break; + default: return false; + } + switch (get_input_element_type(1)) + { + case ngraph::element::i32: + case ngraph::element::i64: break; + default: return false; + } + return true; +} diff --git a/ngraph/core/src/op/scatter_update.cpp b/ngraph/core/src/op/scatter_update.cpp index 4c5b3d21390..d9792cdffcd 100644 --- a/ngraph/core/src/op/scatter_update.cpp +++ b/ngraph/core/src/op/scatter_update.cpp @@ -104,3 +104,22 @@ bool op::v3::ScatterUpdate::evaluate(const HostTensorVector& outputs, 
NGRAPH_OP_SCOPE(v3_ScatterUpdate_evaluate); return evaluate_scatter_update(outputs, inputs); } + +bool op::v3::ScatterUpdate::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v3_ScatterUpdate_has_evaluate); + + switch (get_input_element_type(1)) + { + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/select.cpp b/ngraph/core/src/op/select.cpp index d5fc2d7ef33..657845b4cc9 100644 --- a/ngraph/core/src/op/select.cpp +++ b/ngraph/core/src/op/select.cpp @@ -152,3 +152,26 @@ bool op::v1::Select::evaluate(const HostTensorVector& output_values, return detail::evaluate_select( output_values, input_values, autob, output_values[0]->get_element_type()); } + +bool op::v1::Select::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Select_has_evaluate); + switch (get_output_element_type(0)) + { + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: + case ngraph::element::f64: + case ngraph::element::boolean: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/shape_of.cpp b/ngraph/core/src/op/shape_of.cpp index ca480339777..d929eb0c1c0 100644 --- a/ngraph/core/src/op/shape_of.cpp +++ b/ngraph/core/src/op/shape_of.cpp @@ -221,6 +221,20 @@ bool op::v3::ShapeOf::evaluate(const HostTensorVector& output_values, return shape_of::evaluate_shape_of(output_values[0], input_values[0]); } +bool op::v3::ShapeOf::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v3_ShapeOf_has_evaluate); + switch (get_output_element_type(0)) + { + case ngraph::element::i32: + 
case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: return true; + default: break; + } + return false; +} + bool op::v3::ShapeOf::evaluate_lower(const HostTensorVector& output_values) const { return shape_of::evaluate_bound_shape(this, output_values, false); @@ -285,6 +299,20 @@ bool op::v0::ShapeOf::evaluate(const HostTensorVector& output_values, return shape_of::evaluate_shape_of(output_values[0], input_values[0]); } +bool op::v0::ShapeOf::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_ShapeOf_has_evaluate); + switch (get_output_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: return true; + default: break; + } + return false; +} + bool op::v0::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values) { OV_ITT_SCOPED_TASK(itt::domains::nGraph, "op::v0::ShapeOf::constant_fold"); diff --git a/ngraph/core/src/op/shuffle_channels.cpp b/ngraph/core/src/op/shuffle_channels.cpp index 5c2e2cb7837..03859b3cb60 100644 --- a/ngraph/core/src/op/shuffle_channels.cpp +++ b/ngraph/core/src/op/shuffle_channels.cpp @@ -181,3 +181,9 @@ bool op::ShuffleChannels::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v0_ShuffleChannels_evaluate); return evaluate_shuffle_channels(outputs, inputs); } + +bool op::ShuffleChannels::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_ShuffleChannels_has_evaluate); + return true; +} diff --git a/ngraph/core/src/op/sigmoid.cpp b/ngraph/core/src/op/sigmoid.cpp index c3051c6929e..e751ec317dc 100644 --- a/ngraph/core/src/op/sigmoid.cpp +++ b/ngraph/core/src/op/sigmoid.cpp @@ -67,3 +67,20 @@ bool op::Sigmoid::evaluate(const HostTensorVector& outputs, const HostTensorVect NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); return sigmoid::evaluate_sigmoid(inputs[0], outputs[0]); } + +bool op::Sigmoid::has_evaluate() const +{ + 
NGRAPH_OP_SCOPE(v0_Sigmoid_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/sign.cpp b/ngraph/core/src/op/sign.cpp index fb7749014f5..9d0e3e27332 100644 --- a/ngraph/core/src/op/sign.cpp +++ b/ngraph/core/src/op/sign.cpp @@ -68,3 +68,20 @@ bool op::Sign::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_OP_SCOPE(v0_Sign_evaluate); return signop::evaluate_sign(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Sign::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Sign_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/sin.cpp b/ngraph/core/src/op/sin.cpp index 47dff773364..7db1d0549bd 100644 --- a/ngraph/core/src/op/sin.cpp +++ b/ngraph/core/src/op/sin.cpp @@ -70,3 +70,20 @@ bool op::Sin::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_OP_SCOPE(v0_Sin_evaluate); return sinop::evaluate_sin(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Sin::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Sin_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/sinh.cpp b/ngraph/core/src/op/sinh.cpp index 
9d6416e5079..d19d6cbe493 100644 --- a/ngraph/core/src/op/sinh.cpp +++ b/ngraph/core/src/op/sinh.cpp @@ -70,3 +70,20 @@ bool op::Sinh::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_OP_SCOPE(v0_Sinh_evaluate); return sinhop::evaluate_sinh(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Sinh::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Sinh_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/softmax.cpp b/ngraph/core/src/op/softmax.cpp index 2517133f763..ae4317a9c12 100644 --- a/ngraph/core/src/op/softmax.cpp +++ b/ngraph/core/src/op/softmax.cpp @@ -98,3 +98,17 @@ bool op::v1::Softmax::evaluate(const HostTensorVector& outputs, outputs[0]->set_unary(inputs[0]); return evaluate_softmax(inputs[0], outputs[0], AxisSet{m_axis}); } + +bool op::v1::Softmax::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Softmax_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::bf16: + case ngraph::element::f16: + case ngraph::element::f32: + case ngraph::element::f64: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/softplus.cpp b/ngraph/core/src/op/softplus.cpp index af6049d1b30..f37008220a4 100644 --- a/ngraph/core/src/op/softplus.cpp +++ b/ngraph/core/src/op/softplus.cpp @@ -74,3 +74,16 @@ bool op::v4::SoftPlus::evaluate(const HostTensorVector& outputs, NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); return softplus::evaluate_softplus(inputs[0], outputs[0]); } + +bool op::v4::SoftPlus::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v4_SoftPlus_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::bf16: + case 
ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/space_to_batch.cpp b/ngraph/core/src/op/space_to_batch.cpp index 3b918feaeda..75c6ad435b0 100644 --- a/ngraph/core/src/op/space_to_batch.cpp +++ b/ngraph/core/src/op/space_to_batch.cpp @@ -263,6 +263,13 @@ bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVecto bool ngraph::op::v1::SpaceToBatch::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_SpaceToBatch); + NGRAPH_OP_SCOPE(v1_SpaceToBatch_evaluate); return evaluate_space_to_batch(outputs, inputs); } + +bool ngraph::op::v1::SpaceToBatch::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_SpaceToBatch_has_evaluate); + return !get_input_partial_shape(0).is_dynamic() && + (get_input_shape(0).size() == 4 || get_input_shape(0).size() == 5); +} diff --git a/ngraph/core/src/op/space_to_depth.cpp b/ngraph/core/src/op/space_to_depth.cpp index 2532946d581..8847c6efbce 100644 --- a/ngraph/core/src/op/space_to_depth.cpp +++ b/ngraph/core/src/op/space_to_depth.cpp @@ -223,6 +223,12 @@ bool ngraph::op::v0::SpaceToDepth::evaluate(const HostTensorVector& outputs, return evaluate_space_to_depth(outputs, inputs); } +bool ngraph::op::v0::SpaceToDepth::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_SpaceToDepth_has_evaluate); + return !get_input_partial_shape(0).is_dynamic(); +} + namespace ngraph { template <> diff --git a/ngraph/core/src/op/split.cpp b/ngraph/core/src/op/split.cpp index 82c886bd9ed..0c26bf8699d 100644 --- a/ngraph/core/src/op/split.cpp +++ b/ngraph/core/src/op/split.cpp @@ -167,3 +167,9 @@ bool op::v1::Split::evaluate(const HostTensorVector& outputs, const HostTensorVe const auto& axis = inputs[1]; return split::evaluate_split(data, axis, outputs, m_num_splits, this); } + +bool op::v1::Split::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Split_has_evaluate); + return 
get_input_element_type(1).is_integral_number(); +} diff --git a/ngraph/core/src/op/sqrt.cpp b/ngraph/core/src/op/sqrt.cpp index 80e36b7775e..e706e4ae7c2 100644 --- a/ngraph/core/src/op/sqrt.cpp +++ b/ngraph/core/src/op/sqrt.cpp @@ -68,3 +68,19 @@ bool op::Sqrt::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_OP_SCOPE(v0_Sqrt_evaluate); return sqrtop::evaluate_sqrt(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Sqrt::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Sqrt_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/squeeze.cpp b/ngraph/core/src/op/squeeze.cpp index 481595b2ea6..3b4732f8729 100644 --- a/ngraph/core/src/op/squeeze.cpp +++ b/ngraph/core/src/op/squeeze.cpp @@ -264,6 +264,36 @@ bool op::v0::Squeeze::evaluate(const HostTensorVector& outputs, return squeeze::evaluate_squeeze(inputs[0], inputs[1], outputs[0]); } +bool op::v0::Squeeze::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Squeeze_has_evaluate); + + if (get_input_size() == 2) + { + switch (get_input_element_type(1)) + { + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: return true; + default: break; + } + return false; + } + else if (get_input_size() == 1) + { + return true; + } + else + { + return false; + } +} + bool op::v0::Squeeze::evaluate_lower(const HostTensorVector& output_values) const { NGRAPH_OP_SCOPE(v0_Squeeze_evaluate_lower); diff --git a/ngraph/core/src/op/strided_slice.cpp b/ngraph/core/src/op/strided_slice.cpp index be2036f0abf..f402c1f02de 100644 --- a/ngraph/core/src/op/strided_slice.cpp +++ 
b/ngraph/core/src/op/strided_slice.cpp @@ -288,6 +288,12 @@ bool op::v1::StridedSlice::evaluate(const HostTensorVector& output_values, output_values[0]); } +bool op::v1::StridedSlice::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_StridedSlice_has_evaluate); + return get_input_size() == 4; +} + bool op::v1::StridedSlice::evaluate_lower(const HostTensorVector& output_values) const { if (!input_value(1).get_tensor().has_and_set_bound() || diff --git a/ngraph/core/src/op/subtract.cpp b/ngraph/core/src/op/subtract.cpp index 630904e4a4f..a570a226df7 100644 --- a/ngraph/core/src/op/subtract.cpp +++ b/ngraph/core/src/op/subtract.cpp @@ -75,3 +75,20 @@ bool op::v1::Subtract::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_Subtract_evaluate); return subtract::evaluate_subtract(inputs[0], inputs[1], outputs[0], get_autob()); } + +bool op::v1::Subtract::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Subtract_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: + case ngraph::element::bf16: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/swish.cpp b/ngraph/core/src/op/swish.cpp index f29284e5874..5b3b7175950 100644 --- a/ngraph/core/src/op/swish.cpp +++ b/ngraph/core/src/op/swish.cpp @@ -127,3 +127,15 @@ bool op::v4::Swish::evaluate(const HostTensorVector& outputs, const HostTensorVe (validate_host_tensor_vector(inputs, 2) || validate_host_tensor_vector(inputs, 1))); return swish::evaluate_swish(inputs, outputs[0]); } + +bool op::v4::Swish::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v4_Swish_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/tan.cpp b/ngraph/core/src/op/tan.cpp index a3dae283193..d81d23db097 
100644 --- a/ngraph/core/src/op/tan.cpp +++ b/ngraph/core/src/op/tan.cpp @@ -71,3 +71,20 @@ bool op::Tan::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_OP_SCOPE(v0_Tan_evaluate); return tanop::evaluate_tan(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Tan::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Tan_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/tanh.cpp b/ngraph/core/src/op/tanh.cpp index fe4fac69dd3..c251f1c63a5 100644 --- a/ngraph/core/src/op/tanh.cpp +++ b/ngraph/core/src/op/tanh.cpp @@ -69,3 +69,19 @@ bool op::Tanh::evaluate(const HostTensorVector& outputs, const HostTensorVector& NGRAPH_OP_SCOPE(v0_Tanh_evaluate); return tanhop::evaluate_tanh(inputs[0], outputs[0], shape_size(get_output_shape(0))); } + +bool op::Tanh::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Tanh_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} diff --git a/ngraph/core/src/op/tile.cpp b/ngraph/core/src/op/tile.cpp index a85be43025a..87b56353b9c 100644 --- a/ngraph/core/src/op/tile.cpp +++ b/ngraph/core/src/op/tile.cpp @@ -120,3 +120,9 @@ bool op::v0::Tile::evaluate(const HostTensorVector& outputs, const HostTensorVec NGRAPH_OP_SCOPE(v0_Tile_evaluate); return evaluate_tile(outputs, inputs); } + +bool op::v0::Tile::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Tile_has_evaluate); + return true; +} diff --git a/ngraph/core/src/op/topk.cpp b/ngraph/core/src/op/topk.cpp index 8d924e2d12b..6b91fd13339 100644 --- 
a/ngraph/core/src/op/topk.cpp +++ b/ngraph/core/src/op/topk.cpp @@ -492,6 +492,50 @@ bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVec get_index_element_type()); } +bool op::v1::TopK::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_TopK_has_evaluate); + + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: break; + default: return false; + } + + if (op::is_constant(input_value(1).get_node())) + { + switch (get_input_element_type(1)) + { + case ngraph::element::i8: + case ngraph::element::i32: + case ngraph::element::i64: break; + default: return false; + } + } + else + { + switch (get_input_element_type(1)) + { + case ngraph::element::i8: + case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: break; + default: return false; + } + } + + return true; +} + // v3 version starts constexpr NodeTypeInfo op::v3::TopK::type_info; @@ -575,3 +619,47 @@ bool op::v3::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVec NGRAPH_OP_SCOPE(v3_TopK_evaluate); return op::v1::TopK::evaluate(outputs, inputs); } + +bool op::v3::TopK::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v3_TopK_has_evaluate); + + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: break; + default: return false; + } + + if (op::is_constant(input_value(1).get_node())) + { + switch (get_input_element_type(1)) + { + case ngraph::element::i8: + case ngraph::element::i32: + case ngraph::element::i64: break; + default: return false; + } + } + else + { + switch (get_input_element_type(1)) + { + case ngraph::element::i8: 
+ case ngraph::element::i16: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u8: + case ngraph::element::u16: + case ngraph::element::u32: + case ngraph::element::u64: break; + default: return false; + } + } + + return true; +} diff --git a/ngraph/core/src/op/transpose.cpp b/ngraph/core/src/op/transpose.cpp index 8e8ff0c1181..1420d5b3748 100644 --- a/ngraph/core/src/op/transpose.cpp +++ b/ngraph/core/src/op/transpose.cpp @@ -126,3 +126,9 @@ bool op::v1::Transpose::evaluate(const HostTensorVector& output_values, NGRAPH_OP_SCOPE(v1_Transpose_evaluate); return transpose::evaluate_transpose(input_values[0], input_values[1], output_values[0]); } + +bool op::v1::Transpose::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_Transpose_has_evaluate); + return get_input_element_type(1).is_integral_number(); +} diff --git a/ngraph/core/src/op/unsqueeze.cpp b/ngraph/core/src/op/unsqueeze.cpp index cc1cfa9da0a..a043d59a660 100644 --- a/ngraph/core/src/op/unsqueeze.cpp +++ b/ngraph/core/src/op/unsqueeze.cpp @@ -147,6 +147,22 @@ bool op::v0::Unsqueeze::evaluate(const HostTensorVector& outputs, return unsqueeze::evaluate_unsqueeze(inputs[0], inputs[1], outputs[0]); } +bool op::v0::Unsqueeze::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Unsqueeze_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} + bool op::v0::Unsqueeze::evaluate_lower(const HostTensorVector& output_values) const { if (!input_value(1).get_tensor().has_and_set_bound()) diff --git a/ngraph/core/src/op/variadic_split.cpp b/ngraph/core/src/op/variadic_split.cpp index 9b0fea4ede9..505584ec6e1 100644 --- a/ngraph/core/src/op/variadic_split.cpp +++ b/ngraph/core/src/op/variadic_split.cpp @@ -208,3 +208,10 @@ bool op::v1::VariadicSplit::evaluate(const 
HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_VariadicSplit_evaluate); return evaluate_variadic_split(inputs, outputs); } + +bool op::v1::VariadicSplit::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_VariadicSplit_has_evaluate); + return get_input_element_type(1).is_integral_number() && + get_input_element_type(2).is_integral_number(); +} diff --git a/ngraph/core/src/op/xor.cpp b/ngraph/core/src/op/xor.cpp index d70c7a59ced..9f5dffe31a5 100644 --- a/ngraph/core/src/op/xor.cpp +++ b/ngraph/core/src/op/xor.cpp @@ -80,6 +80,23 @@ bool op::v1::LogicalXor::evaluate(const HostTensorVector& outputs, return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob()); } +bool op::v1::LogicalXor::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v1_LogicalXor_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +} + constexpr NodeTypeInfo op::v0::Xor::type_info; op::v0::Xor::Xor(const Output& arg0, @@ -102,3 +119,20 @@ bool op::v0::Xor::evaluate(const HostTensorVector& outputs, const HostTensorVect NGRAPH_OP_SCOPE(v0_Xor_evaluate); return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob()); } + +bool op::v0::Xor::has_evaluate() const +{ + NGRAPH_OP_SCOPE(v0_Xor_has_evaluate); + switch (get_input_element_type(0)) + { + case ngraph::element::boolean: + case ngraph::element::i32: + case ngraph::element::i64: + case ngraph::element::u32: + case ngraph::element::u64: + case ngraph::element::f16: + case ngraph::element::f32: return true; + default: break; + } + return false; +}