[Core] Use API 2.0 in evaluate for trigonometric operators (#19414)

* Use API 2.0 in operators' evaluate methods
- Drop the ngraph namespace in ops
- Refactor reference implementations of the modified ops

* Apply code style

* Fix build issue in reference impl

* Fix code style

* Fix compile warnings

* Add input checks and set output shapes in evaluate methods
Author: Pawel Raasz, 2023-08-29 08:16:07 +02:00 (committed by GitHub)
parent 915de21626
commit f9aa624099
42 changed files with 797 additions and 829 deletions
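
Condensed, for orientation: the per-operator pattern this commit applies, stitched together from the diffs below. The helper name evaluate_unary and the sketch namespace are illustrative only and not part of the change; element_visitor.hpp is the internal dev header that provides NoAction and IfTypeOf.

#include "element_visitor.hpp"
#include "openvino/core/except.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/reference/acos.hpp"
#include "openvino/runtime/tensor.hpp"

namespace sketch {
// Element-type dispatcher: replaces the old hand-written switch over NGRAPH_TYPE_CASE macros.
struct Evaluate : ov::element::NoAction<bool> {
    using ov::element::NoAction<bool>::visit;

    template <ov::element::Type_t ET>
    static result_type visit(const ov::Tensor& arg0, ov::Tensor& out, const size_t count) {
        using T = typename ov::element_type_traits<ET>::value_type;
        ov::reference::acos(arg0.data<T>(), out.data<T>(), count);
        return true;
    }
};

// Shape of the new Tensor-based evaluate(): validate I/O counts, set the output shape,
// then dispatch on the input element type (the old overload took HostTensorVector).
inline bool evaluate_unary(ov::TensorVector& outputs, const ov::TensorVector& inputs) {
    OPENVINO_ASSERT(inputs.size() == 1 && outputs.size() == 1);
    outputs[0].set_shape(inputs[0].get_shape());
    using namespace ov::element;
    return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<Evaluate>(inputs[0].get_element_type(),
                                                                   inputs[0],
                                                                   outputs[0],
                                                                   ov::shape_size(inputs[0].get_shape()));
}
}  // namespace sketch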


@ -28,9 +28,7 @@ public:
///
Acos(const Output<Node>& arg);
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v0


@ -30,9 +30,7 @@ public:
Acosh(const Output<Node>& arg);
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v3


@ -32,9 +32,7 @@ public:
bool visit_attributes(AttributeVisitor&) override {
return true;
}
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v0


@ -32,9 +32,7 @@ public:
bool visit_attributes(AttributeVisitor&) override {
return true;
}
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v3


@ -33,9 +33,7 @@ public:
bool visit_attributes(AttributeVisitor&) override {
return true;
}
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v0


@ -33,9 +33,7 @@ public:
bool visit_attributes(AttributeVisitor&) override {
return true;
}
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v3


@ -24,9 +24,7 @@ public:
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v0


@ -24,9 +24,7 @@ public:
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v0


@ -36,9 +36,7 @@ public:
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v0


@ -22,9 +22,7 @@ public:
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v0


@ -36,9 +36,7 @@ public:
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v0


@ -23,9 +23,7 @@ public:
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v0


@ -4,18 +4,24 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace ngraph {
namespace runtime {
namespace ov {
namespace reference {
/**
* @brief Reference implementation of Acos operator.
*
* @param arg Input buffer pointer with input data.
* @param out Output buffer pointer with results.
* @param count Number of elements in input buffer.
*/
template <typename T>
void acos(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::acos(arg[i]));
}
void acos(const T* arg, T* out, const size_t count) {
std::transform(arg, arg + count, out, [](T in) {
return static_cast<T>(std::acos(in));
});
}
} // namespace reference
} // namespace runtime
} // namespace ngraph
} // namespace ov
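
For context, a minimal usage sketch of the kernel above; the test values are arbitrary and the snippet assumes only the header shown in the diff.

#include <vector>
#include "openvino/reference/acos.hpp"

int main() {
    const std::vector<float> in{-1.0f, 0.0f, 1.0f};
    std::vector<float> out(in.size());
    // Element-wise acos: expected results are approximately pi, pi/2 and 0.
    ov::reference::acos(in.data(), out.data(), in.size());
    return 0;
}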


@ -4,25 +4,35 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace ngraph {
namespace runtime {
#include "openvino/reference/utils/type_util.hpp"
namespace ov {
namespace reference {
template <typename T, typename std::enable_if<!std::is_integral<T>::value, bool>::type = true>
void acosh(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::acosh(arg[i]));
}
namespace func {
template <class T, typename std::enable_if<ov::is_floating_point<T>()>::type* = nullptr>
T acosh(const T in) {
return std::acosh(in);
}
template <typename T, typename std::enable_if<std::is_integral<T>::value, bool>::type = true>
void acosh(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::roundl(std::acosh(arg[i])));
}
template <class T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
T acosh(const T in) {
return static_cast<T>(std::round(std::acosh(in)));
}
} // namespace func
/**
* @brief Reference implementation of Acosh operator.
*
* @param arg Input buffer pointer with input data.
* @param out Output buffer pointer with results.
* @param count Number of elements in input buffer.
*/
template <class T>
void acosh(const T* arg, T* out, const size_t count) {
std::transform(arg, arg + count, out, &func::acosh<T>);
}
} // namespace reference
} // namespace runtime
} // namespace ngraph
} // namespace ov
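
A small sketch of the integral path added above, with values worked out by hand (std::acosh(10) is about 2.993, so the integral overload rounds it to 3); the input values are arbitrary.

#include <cstdint>
#include <vector>
#include "openvino/reference/acosh.hpp"

int main() {
    const std::vector<int32_t> in{1, 10};
    std::vector<int32_t> out(in.size());
    // Integral types go through func::acosh<T>: acosh(1) == 0, acosh(10) ~= 2.993 -> rounded to 3.
    ov::reference::acosh(in.data(), out.data(), in.size());
    return 0;
}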


@ -4,18 +4,24 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace ngraph {
namespace runtime {
namespace ov {
namespace reference {
/**
* @brief Reference implementation of Asin operator.
*
* @param arg Input buffer pointer with input data.
* @param out Output buffer pointer with results.
* @param count Number of elements in input buffer.
*/
template <typename T>
void asin(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::asin(arg[i]));
}
void asin(const T* arg, T* out, const size_t count) {
std::transform(arg, arg + count, out, [](T in) {
return static_cast<T>(std::asin(in));
});
}
} // namespace reference
} // namespace runtime
} // namespace ngraph
} // namespace ov


@ -4,25 +4,35 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace ngraph {
namespace runtime {
#include "openvino/reference/utils/type_util.hpp"
namespace ov {
namespace reference {
template <typename T, typename std::enable_if<!std::is_integral<T>::value, bool>::type = true>
void asinh(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = std::asinh(arg[i]);
}
namespace func {
template <class T, typename std::enable_if<ov::is_floating_point<T>()>::type* = nullptr>
T asinh(const T in) {
return std::asinh(in);
}
template <typename T, typename std::enable_if<std::is_integral<T>::value, bool>::type = true>
void asinh(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::roundl(std::asinh(arg[i])));
}
template <class T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
T asinh(const T in) {
return static_cast<T>(std::round(std::asinh(in)));
}
} // namespace func
/**
* @brief Reference implementation of Asinh operator.
*
* @param arg Input buffer pointer with input data.
* @param out Output buffer pointer with results.
* @param count Number of elements in input buffer.
*/
template <class T>
void asinh(const T* arg, T* out, const size_t count) {
std::transform(arg, arg + count, out, &func::asinh<T>);
}
} // namespace reference
} // namespace runtime
} // namespace ngraph
} // namespace ov


@ -4,25 +4,35 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace ngraph {
namespace runtime {
#include "openvino/reference/utils/type_util.hpp"
namespace ov {
namespace reference {
template <typename T, typename std::enable_if<!std::is_integral<T>::value, bool>::type = true>
void atan(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::atan(arg[i]));
}
namespace func {
template <class T, typename std::enable_if<ov::is_floating_point<T>()>::type* = nullptr>
T atan(const T in) {
return std::atan(in);
}
template <typename T, typename std::enable_if<std::is_integral<T>::value, bool>::type = true>
void atan(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::roundl(std::atan(arg[i])));
}
template <class T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
T atan(const T in) {
return static_cast<T>(std::round(std::atan(in)));
}
} // namespace func
/**
* @brief Reference implementation of Atan operator.
*
* @param arg Input buffer pointer with input data.
* @param out Output buffer pointer with results.
* @param count Number of elements in input buffer.
*/
template <class T>
void atan(const T* arg, T* out, const size_t count) {
std::transform(arg, arg + count, out, &func::atan<T>);
}
} // namespace reference
} // namespace runtime
} // namespace ngraph
} // namespace ov


@ -6,42 +6,41 @@
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <numeric>
namespace ngraph {
namespace runtime {
#include "openvino/reference/utils/type_util.hpp"
namespace ov {
namespace reference {
template <typename T, typename std::enable_if<!std::is_integral<T>::value, bool>::type = true>
void atanh(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = std::atanh(arg[i]);
}
namespace func {
template <class T, typename std::enable_if<ov::is_floating_point<T>()>::type* = nullptr>
T atanh(const T in) {
return std::atanh(in);
}
template <typename T, typename std::enable_if<std::is_integral<T>::value, bool>::type = true>
void atanh(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
/**
* Intgral type don't support: NAN and INFINITY.
* So we clip input value, and make sure return avaiable value.
*/
if (std::is_same<T, uint8_t>::value || std::is_same<T, uint32_t>::value || std::is_same<T, uint64_t>::value) {
if (arg[i] > 0) {
out[i] = std::numeric_limits<T>::max();
} else {
out[i] = static_cast<T>(std::roundl(std::atanh(arg[i])));
}
} else {
if (arg[i] <= -1) {
out[i] = std::numeric_limits<T>::min();
} else if (arg[i] >= 1) {
out[i] = std::numeric_limits<T>::max();
} else {
out[i] = static_cast<T>(std::roundl(std::atanh(arg[i])));
}
}
template <class T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
T atanh(const T in) {
// Integral types do not support NaN and INFINITY, so clamp the special values to the integral limits.
if (in > 0) {
return std::numeric_limits<T>::max();
} else if (in < 0) {
return std::numeric_limits<T>::min();
} else {
return 0;
}
}
} // namespace func
/**
* @brief Reference implementation of Atanh operator.
*
* @param arg Input buffer pointer with input data.
* @param out Output buffer pointer with results.
* @param count Number of elements in input buffer.
*/
template <class T>
void atanh(const T* arg, T* out, const size_t count) {
std::transform(arg, arg + count, out, &func::atanh<T>);
}
} // namespace reference
} // namespace runtime
} // namespace ngraph
} // namespace ov
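
A sketch of the clamping behaviour described in the comment above, assuming i32 data (one of the types the op dispatches on); the input values are arbitrary.

#include <cstdint>
#include <vector>
#include "openvino/reference/atanh.hpp"

int main() {
    const std::vector<int32_t> in{-1, 0, 1};
    std::vector<int32_t> out(in.size());
    // Integral types cannot represent NaN/Inf, so the special values are clamped:
    // atanh(-1) -> numeric_limits<int32_t>::min(), atanh(0) -> 0, atanh(1) -> numeric_limits<int32_t>::max().
    ov::reference::atanh(in.data(), out.data(), in.size());
    return 0;
}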


@ -4,25 +4,35 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace ngraph {
namespace runtime {
#include "openvino/reference/utils/type_util.hpp"
namespace ov {
namespace reference {
template <typename T, typename std::enable_if<!std::is_integral<T>::value, bool>::type = true>
void cos(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = std::cos(arg[i]);
}
namespace func {
template <class T, typename std::enable_if<ov::is_floating_point<T>()>::type* = nullptr>
T cos(const T in) {
return std::cos(in);
}
template <typename T, typename std::enable_if<std::is_integral<T>::value, bool>::type = true>
void cos(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::roundl(std::cos(arg[i])));
}
template <class T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
T cos(const T in) {
return static_cast<T>(std::round(std::cos(in)));
}
} // namespace func
/**
* @brief Reference implementation of Cos operator.
*
* @param arg Input buffer pointer with input data.
* @param out Output buffer pointer with results.
* @param count Number of elements in input buffer.
*/
template <class T>
void cos(const T* arg, T* out, const size_t count) {
std::transform(arg, arg + count, out, &func::cos<T>);
}
} // namespace reference
} // namespace runtime
} // namespace ngraph
} // namespace ov


@ -4,25 +4,35 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace ngraph {
namespace runtime {
#include "openvino/reference/utils/type_util.hpp"
namespace ov {
namespace reference {
template <typename T, typename std::enable_if<!std::is_integral<T>::value, bool>::type = true>
void cosh(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::cosh(arg[i]));
}
namespace func {
template <class T, typename std::enable_if<ov::is_floating_point<T>()>::type* = nullptr>
T cosh(const T in) {
return std::cosh(in);
}
template <typename T, typename std::enable_if<std::is_integral<T>::value, bool>::type = true>
template <class T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
T cosh(const T in) {
return static_cast<T>(std::round(std::cosh(in)));
}
} // namespace func
/**
* @brief Reference implementation of Cosh operator.
*
* @param arg Input buffer pointer with input data.
* @param out Output buffer pointer with results.
* @param count Number of elements in input buffer.
*/
template <class T>
void cosh(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::roundl(std::cosh(arg[i])));
}
std::transform(arg, arg + count, out, &func::cosh<T>);
}
} // namespace reference
} // namespace runtime
} // namespace ngraph
} // namespace ov


@ -130,7 +130,7 @@ void gru_cell(const T* X,
} else if (activation == "sigmoid") {
reference::sigmoid(gate.data(), gate.data(), gate.size());
} else if (activation == "tanh") {
reference::tanh(gate.data(), gate.data(), gate.size());
ov::reference::tanh(gate.data(), gate.data(), gate.size());
} else {
OPENVINO_THROW("Activation function " + activation + " is not supported.");
}


@ -121,7 +121,7 @@ void lstm_cell(const T* X,
} else if (activation == "sigmoid") {
reference::sigmoid(gate.data(), gate.data(), gate.size());
} else if (activation == "tanh") {
reference::tanh(gate.data(), gate.data(), gate.size());
ov::reference::tanh(gate.data(), gate.data(), gate.size());
} else {
OPENVINO_THROW("Activation function " + activation + " is not supported.");
}
@ -269,7 +269,7 @@ void lstm_cell_v1(const T* X,
} else if (activation == "sigmoid") {
reference::sigmoid(gate.data(), gate.data(), gate.size());
} else if (activation == "tanh") {
reference::tanh(gate.data(), gate.data(), gate.size());
ov::reference::tanh(gate.data(), gate.data(), gate.size());
} else {
OPENVINO_THROW("Activation function " + activation + " is not supported.");
}


@ -87,7 +87,7 @@ void rnn_cell(const T* X,
} else if (activation_f == "sigmoid") {
reference::sigmoid(i_t.data(), dst_data, i_t.size());
} else if (activation_f == "tanh") {
reference::tanh(i_t.data(), dst_data, i_t.size());
ov::reference::tanh(i_t.data(), dst_data, i_t.size());
} else {
OPENVINO_THROW("Activation function " + activation_f + " is not supported.");
}


@ -10,14 +10,9 @@
#include "openvino/op/round.hpp"
#include "openvino/reference/round_guard.hpp"
#include "openvino/reference/utils/type_util.hpp"
namespace ov {
template <class T>
constexpr bool is_floating_point() {
using U = typename std::decay<T>::type;
return std::is_floating_point<U>::value || std::is_same<float16, U>::value || std::is_same<bfloat16, U>::value;
}
namespace reference {
/**
* @brief Rounding algorithm for ov::op::v5::Round::RoundMode::HALF_TO_EVEN.


@ -4,18 +4,24 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace ngraph {
namespace runtime {
namespace ov {
namespace reference {
/**
* @brief Reference implementation of Sin operator.
*
* @param arg Input buffer pointer with input data.
* @param out Output buffer pointer with results.
* @param count Number of elements in input buffer.
*/
template <typename T>
void sin(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::sin(arg[i]));
}
void sin(const T* arg, T* out, const size_t count) {
std::transform(arg, arg + count, out, [](T in) {
return static_cast<T>(std::sin(in));
});
}
} // namespace reference
} // namespace runtime
} // namespace ngraph
} // namespace ov

View File

@ -4,24 +4,35 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace ngraph {
namespace runtime {
#include "openvino/reference/utils/type_util.hpp"
namespace ov {
namespace reference {
template <typename T, typename std::enable_if<!std::is_integral<T>::value, bool>::type = true>
void sinh(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::sinh(arg[i]));
}
namespace func {
template <class T, typename std::enable_if<ov::is_floating_point<T>()>::type* = nullptr>
T sinh(const T in) {
return std::sinh(in);
}
template <typename T, typename std::enable_if<std::is_integral<T>::value, bool>::type = true>
template <class T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
T sinh(const T in) {
return static_cast<T>(std::round(std::sinh(in)));
}
} // namespace func
/**
* @brief Reference implementation of Sinh operator.
*
* @param arg Input buffer pointer with input data.
* @param out Output buffer pointer with results.
* @param count Number of elements in input buffer.
*/
template <class T>
void sinh(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::roundl(std::sinh(arg[i])));
}
std::transform(arg, arg + count, out, &func::sinh<T>);
}
} // namespace reference
} // namespace runtime
} // namespace ngraph
} // namespace ov


@ -4,24 +4,35 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace ngraph {
namespace runtime {
#include "openvino/reference/utils/type_util.hpp"
namespace ov {
namespace reference {
template <typename T, typename std::enable_if<!std::is_integral<T>::value, bool>::type = true>
void tan(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::tan(arg[i]));
}
namespace func {
template <class T, typename std::enable_if<ov::is_floating_point<T>()>::type* = nullptr>
T tan(const T in) {
return std::tan(in);
}
template <typename T, typename std::enable_if<std::is_integral<T>::value, bool>::type = true>
void tan(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::roundl(std::tan(arg[i])));
}
template <class T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
T tan(const T in) {
return static_cast<T>(std::round(std::tan(in)));
}
} // namespace func
/**
* @brief Reference implementation of Tan operator.
*
* @param arg Input buffer pointer with input data.
* @param out Output buffer pointer with results.
* @param count Number of elements in input buffer.
*/
template <class T>
void tan(const T* arg, T* out, const size_t count) {
std::transform(arg, arg + count, out, &func::tan<T>);
}
} // namespace reference
} // namespace runtime
} // namespace ngraph
} // namespace ov


@ -4,24 +4,36 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace ngraph {
namespace runtime {
#include "openvino/reference/utils/type_util.hpp"
namespace ov {
namespace reference {
template <typename T, typename std::enable_if<!std::is_integral<T>::value, bool>::type = true>
void tanh(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::tanh(arg[i]));
}
namespace func {
template <class T, typename std::enable_if<ov::is_floating_point<T>()>::type* = nullptr>
T tanh(const T in) {
return std::tanh(in);
}
template <typename T, typename std::enable_if<std::is_integral<T>::value, bool>::type = true>
void tanh(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = static_cast<T>(std::roundl(std::tanh(arg[i])));
}
template <class T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
T tanh(const T in) {
return static_cast<T>(std::round(std::tanh(in)));
}
} // namespace func
/**
* @brief Reference implementation of Tanh operator.
*
* @param arg Input buffer pointer with input data.
* @param out Output buffer pointer with results.
* @param count Number of elements in input buffer.
*/
template <class T>
void tanh(const T* arg, T* out, const size_t count) {
std::transform(arg, arg + count, out, &func::tanh<T>);
}
} // namespace reference
} // namespace runtime
} // namespace ngraph
} // namespace ov


@ -0,0 +1,23 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <type_traits>
#include "openvino/core/type/bfloat16.hpp"
#include "openvino/core/type/float16.hpp"
namespace ov {
/**
* @brief Check if T is OpenVINO floating point precision.
*
* @return True if T is an OpenVINO floating point precision.
*/
template <class T>
constexpr bool is_floating_point() {
using U = typename std::decay<T>::type;
return std::is_floating_point<U>::value || std::is_same<float16, U>::value || std::is_same<bfloat16, U>::value;
}
} // namespace ov
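
A brief sketch of how the trait above is typically used; it is constexpr, which is what lets the func:: overloads in the reference headers select a code path at compile time.

#include <cstdint>
#include "openvino/reference/utils/type_util.hpp"

// Usable in constant expressions, e.g. inside enable_if as in the func:: overloads above.
static_assert(ov::is_floating_point<float>(), "float is floating point");
static_assert(ov::is_floating_point<ov::float16>(), "float16 (and bfloat16) also count as floating point");
static_assert(!ov::is_floating_point<int32_t>(), "integral types are excluded");

int main() {
    return 0;
}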


@ -2,22 +2,29 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/acos.hpp"
#include <string>
#include "openvino/op/acos.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/axis_set.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/negative.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/subtract.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "openvino/reference/acos.hpp"
namespace ov {
namespace op {
namespace acos {
struct Evaluate : ov::element::NoAction<bool> {
using ov::element::NoAction<bool>::visit;
template <element::Type_t ET>
static result_type visit(const Tensor& arg0, Tensor& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
reference::acos(arg0.data<T>(), out.data<T>(), count);
return true;
}
};
} // namespace acos
} // namespace op
} // namespace ov
ov::op::v0::Acos::Acos(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
@ -28,53 +35,29 @@ std::shared_ptr<ov::Node> ov::op::v0::Acos::clone_with_new_inputs(const OutputVe
return std::make_shared<Acos>(new_args.at(0));
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace acosop {
namespace {
template <ov::element::Type_t ET>
inline bool evaluate(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out, const size_t count) {
using T = typename ov::element_type_traits<ET>::value_type;
ngraph::runtime::reference::acos<T>(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
return true;
}
bool evaluate_acos(const ov::HostTensorPtr& arg0, const ov::HostTensorPtr& out, const size_t count) {
bool rc = true;
out->set_unary(arg0);
switch (arg0->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_acos, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, f32, arg0, out, count);
default:
rc = false;
break;
}
return rc;
}
} // namespace
} // namespace acosop
bool ov::op::v0::Acos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool ov::op::v0::Acos::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v0_Acos_evaluate);
return acosop::evaluate_acos(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
OPENVINO_ASSERT(inputs.size() == 1 && outputs.size() == 1);
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<acos::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool ov::op::v0::Acos::has_evaluate() const {
OV_OP_SCOPE(v0_Acos_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
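
A minimal sketch of exercising the new Tensor-based overload directly; the shapes and values are arbitrary and only public OpenVINO 2.0 headers are assumed.

#include <memory>
#include "openvino/op/acos.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/runtime/tensor.hpp"

int main() {
    const auto arg = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{3});
    const auto acos = std::make_shared<ov::op::v0::Acos>(arg);

    ov::TensorVector inputs{ov::Tensor(ov::element::f32, ov::Shape{3})};
    ov::TensorVector outputs{ov::Tensor(ov::element::f32, ov::Shape{3})};
    inputs[0].data<float>()[0] = -1.0f;
    inputs[0].data<float>()[1] = 0.0f;
    inputs[0].data<float>()[2] = 1.0f;

    // The new overload asserts on I/O counts, sets the output shape and dispatches by element type.
    const bool ok = acos->evaluate(outputs, inputs);
    return ok ? 0 : 1;
}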


@ -2,16 +2,29 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/acosh.hpp"
#include <string>
#include <vector>
#include "openvino/op/acosh.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/type/element_type.hpp"
#include "openvino/reference/acosh.hpp"
namespace ov {
namespace op {
namespace acosh {
struct Evaluate : ov::element::NoAction<bool> {
using ov::element::NoAction<bool>::visit;
template <element::Type_t ET>
static result_type visit(const Tensor& arg0, Tensor& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
reference::acosh(arg0.data<T>(), out.data<T>(), count);
return true;
}
};
} // namespace acosh
} // namespace op
} // namespace ov
ov::op::v3::Acosh::Acosh(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
@ -22,51 +35,29 @@ std::shared_ptr<ov::Node> ov::op::v3::Acosh::clone_with_new_inputs(const OutputV
return std::make_shared<Acosh>(new_args.at(0));
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace acoshop {
namespace {
template <ov::element::Type_t ET>
bool evaluate(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out) {
ngraph::runtime::reference::acosh(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), shape_size(arg0->get_shape()));
return true;
}
bool evaluate_acosh(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out) {
bool rc = true;
out->set_unary(arg0);
switch (arg0->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_acosh, i32, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, i64, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, u32, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, u64, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, f16, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, f32, arg0, out);
default:
rc = false;
break;
}
return rc;
}
} // namespace
} // namespace acoshop
bool ov::op::v3::Acosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool ov::op::v3::Acosh::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v3_Acosh_evaluate);
return acoshop::evaluate_acosh(inputs[0], outputs[0]);
OPENVINO_ASSERT(inputs.size() == 1 && outputs.size() == 1);
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<acosh::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool ov::op::v3::Acosh::has_evaluate() const {
OV_OP_SCOPE(v3_Acosh_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}


@ -2,83 +2,65 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/asin.hpp"
#include <string>
#include <vector>
#include "openvino/op/asin.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/axis_set.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/subtract.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/shape.hpp"
#include "openvino/reference/asin.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
namespace op {
namespace asin {
struct Evaluate : ov::element::NoAction<bool> {
using ov::element::NoAction<bool>::visit;
op::Asin::Asin(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
template <element::Type_t ET>
static result_type visit(const Tensor& arg0, Tensor& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
reference::asin(arg0.data<T>(), out.data<T>(), count);
return true;
}
};
} // namespace asin
namespace v0 {
Asin::Asin(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::Asin::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> Asin::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v0_Asin_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Asin>(new_args.at(0));
return std::make_shared<Asin>(new_args.at(0));
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace asinop {
namespace {
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
runtime::reference::asin<T>(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
return true;
}
bool evaluate_asin(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
bool rc = true;
out->set_unary(arg0);
switch (arg0->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_asin, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asin, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asin, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asin, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asin, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asin, f32, arg0, out, count);
default:
rc = false;
break;
}
return rc;
}
} // namespace
} // namespace asinop
bool op::Asin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool Asin::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v0_Asin_evaluate);
return asinop::evaluate_asin(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
OPENVINO_ASSERT(inputs.size() == 1 && outputs.size() == 1);
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<asin::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool op::Asin::has_evaluate() const {
bool Asin::has_evaluate() const {
OV_OP_SCOPE(v1_Asin_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
} // namespace v0
} // namespace op
} // namespace ov


@ -2,77 +2,64 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/asinh.hpp"
#include <string>
#include <vector>
#include "openvino/op/asinh.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/op/util/elementwise_args.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/type/element_type.hpp"
#include "openvino/reference/asinh.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
namespace op {
namespace asinh {
struct Evaluate : ov::element::NoAction<bool> {
using ov::element::NoAction<bool>::visit;
op::v3::Asinh::Asinh(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
template <element::Type_t ET>
static result_type visit(const Tensor& arg0, Tensor& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
reference::asinh(arg0.data<T>(), out.data<T>(), count);
return true;
}
};
} // namespace asinh
namespace v3 {
Asinh::Asinh(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v3::Asinh::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> Asinh::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v3_Asinh_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Asinh>(new_args.at(0));
return std::make_shared<Asinh>(new_args.at(0));
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace asinhop {
namespace {
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
runtime::reference::asinh(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
return true;
}
bool evaluate_asinh(const HostTensorPtr& arg0, const HostTensorPtr& out) {
bool rc = true;
size_t count = shape_size(arg0->get_shape());
out->set_unary(arg0);
switch (arg0->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_asinh, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asinh, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asinh, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asinh, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asinh, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_asinh, f32, arg0, out, count);
default:
rc = false;
break;
}
return rc;
}
} // namespace
} // namespace asinhop
bool op::v3::Asinh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool Asinh::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v3_Asinh_evaluate);
return asinhop::evaluate_asinh(inputs[0], outputs[0]);
OPENVINO_ASSERT(inputs.size() == 1 && outputs.size() == 1);
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<asinh::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool op::v3::Asinh::has_evaluate() const {
bool Asinh::has_evaluate() const {
OV_OP_SCOPE(v3_Asinh_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
} // namespace v3
} // namespace op
} // namespace ov


@ -2,81 +2,67 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/atan.hpp"
#include "openvino/op/atan.hpp"
#include <string>
#include <vector>
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/axis_set.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/shape.hpp"
#include "openvino/reference/atan.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
namespace op {
namespace atan {
struct Evaluate : ov::element::NoAction<bool> {
using ov::element::NoAction<bool>::visit;
op::Atan::Atan(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
template <element::Type_t ET>
static result_type visit(const Tensor& arg0, Tensor& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
reference::atan(arg0.data<T>(), out.data<T>(), count);
return true;
}
};
} // namespace atan
namespace v0 {
Atan::Atan(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::Atan::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> Atan::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v0_Atan_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Atan>(new_args.at(0));
return std::make_shared<Atan>(new_args.at(0));
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace atanop {
namespace {
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
runtime::reference::atan<T>(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
return true;
}
bool evaluate_atan(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
bool rc = true;
out->set_unary(arg0);
switch (arg0->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_atan, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_atan, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_atan, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_atan, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_atan, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_atan, f32, arg0, out, count);
default:
rc = false;
break;
}
return rc;
}
} // namespace
} // namespace atanop
bool op::Atan::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool Atan::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v0_Atan_evaluate);
return atanop::evaluate_atan(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
OPENVINO_ASSERT(inputs.size() == 1 && outputs.size() == 1);
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<atan::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool op::Atan::has_evaluate() const {
bool Atan::has_evaluate() const {
OV_OP_SCOPE(v1_Atan_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
} // namespace v0
} // namespace op
} // namespace ov


@ -2,74 +2,62 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/atanh.hpp"
#include <string>
#include <vector>
#include "openvino/op/atanh.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/type/element_type.hpp"
#include "openvino/reference/atanh.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
namespace op {
namespace atanh {
struct Evaluate : ov::element::NoAction<bool> {
using ov::element::NoAction<bool>::visit;
template <element::Type_t ET>
static result_type visit(const Tensor& arg0, Tensor& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
reference::atanh(arg0.data<T>(), out.data<T>(), count);
return true;
}
};
} // namespace atanh
} // namespace op
op::v3::Atanh::Atanh(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v3::Atanh::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> op::v3::Atanh::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v3_Atanh_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Atanh>(new_args.at(0));
return std::make_shared<Atanh>(new_args.at(0));
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace atanhop {
namespace {
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out) {
runtime::reference::atanh(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), shape_size(arg0->get_shape()));
return true;
}
bool evaluate_atanh(const HostTensorPtr& arg0, const HostTensorPtr& out) {
bool rc = true;
out->set_unary(arg0);
switch (arg0->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_atanh, i32, arg0, out);
NGRAPH_TYPE_CASE(evaluate_atanh, i64, arg0, out);
NGRAPH_TYPE_CASE(evaluate_atanh, u32, arg0, out);
NGRAPH_TYPE_CASE(evaluate_atanh, u64, arg0, out);
NGRAPH_TYPE_CASE(evaluate_atanh, f16, arg0, out);
NGRAPH_TYPE_CASE(evaluate_atanh, f32, arg0, out);
default:
rc = false;
break;
}
return rc;
}
} // namespace
} // namespace atanhop
bool op::v3::Atanh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool op::v3::Atanh::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v3_Atanh_evaluate);
return atanhop::evaluate_atanh(inputs[0], outputs[0]);
OPENVINO_ASSERT(inputs.size() == 1 && outputs.size() == 1);
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<atanh::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool op::v3::Atanh::has_evaluate() const {
OV_OP_SCOPE(v1_Atanh_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
} // namespace ov


@ -2,81 +2,71 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/cos.hpp"
#include "openvino/op/cos.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/validation_util.hpp"
#include "openvino/reference/cos.hpp"
using namespace std;
using namespace ngraph;
op::Cos::Cos(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
namespace ov {
namespace op {
namespace cos {
struct Evaluate : ov::element::NoAction<bool> {
using ov::element::NoAction<bool>::visit;
template <element::Type_t ET>
static result_type visit(const Tensor& arg0, Tensor& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
reference::cos(arg0.data<T>(), out.data<T>(), count);
return true;
}
};
} // namespace cos
namespace v0 {
Cos::Cos(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
bool op::Cos::visit_attributes(AttributeVisitor& visitor) {
bool Cos::visit_attributes(AttributeVisitor& visitor) {
OV_OP_SCOPE(v0_Cos_visit_attributes);
return true;
}
shared_ptr<Node> op::Cos::clone_with_new_inputs(const OutputVector& new_args) const {
shared_ptr<Node> Cos::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v0_Cos_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Cos>(new_args.at(0));
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace cosop {
namespace {
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
runtime::reference::cos<T>(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
return true;
}
bool evaluate_cos(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
bool rc = true;
out->set_unary(arg0);
switch (arg0->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_cos, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cos, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cos, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cos, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cos, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cos, f32, arg0, out, count);
default:
rc = false;
break;
}
return rc;
}
} // namespace
} // namespace cosop
bool op::Cos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool Cos::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v0_Cos_evaluate);
OPENVINO_SUPPRESS_DEPRECATED_START
NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1));
OPENVINO_SUPPRESS_DEPRECATED_END
return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
OPENVINO_ASSERT(inputs.size() == 1 && outputs.size() == 1);
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<cos::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool op::Cos::has_evaluate() const {
bool Cos::has_evaluate() const {
OV_OP_SCOPE(v0_Cos_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
} // namespace v0
} // namespace op
} // namespace ov


@ -2,81 +2,71 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/cosh.hpp"
#include "openvino/op/cosh.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/validation_util.hpp"
#include "openvino/reference/cosh.hpp"
using namespace std;
using namespace ngraph;
op::Cosh::Cosh(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
namespace ov {
namespace op {
namespace cosh {
struct Evaluate : ov::element::NoAction<bool> {
using ov::element::NoAction<bool>::visit;
template <element::Type_t ET>
static result_type visit(const Tensor& arg0, Tensor& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
reference::cosh(arg0.data<T>(), out.data<T>(), count);
return true;
}
};
} // namespace cosh
namespace v0 {
Cosh::Cosh(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
bool op::Cosh::visit_attributes(AttributeVisitor& visitor) {
bool Cosh::visit_attributes(AttributeVisitor& visitor) {
OV_OP_SCOPE(v0_Cosh_visit_attributes);
return true;
}
shared_ptr<Node> op::Cosh::clone_with_new_inputs(const OutputVector& new_args) const {
shared_ptr<Node> Cosh::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v0_Cosh_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Cosh>(new_args.at(0));
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace coshop {
namespace {
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
runtime::reference::cosh<T>(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
return true;
}
bool evaluate_cosh(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
bool rc = true;
out->set_unary(arg0);
switch (arg0->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_cosh, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cosh, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cosh, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cosh, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cosh, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_cosh, f32, arg0, out, count);
default:
rc = false;
break;
}
return rc;
}
} // namespace
} // namespace coshop
bool op::Cosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool Cosh::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v0_Cosh_evaluate);
OPENVINO_SUPPRESS_DEPRECATED_START
NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1));
OPENVINO_SUPPRESS_DEPRECATED_END
return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
OPENVINO_ASSERT(inputs.size() == 1 && outputs.size() == 1);
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<cosh::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool op::Cosh::has_evaluate() const {
bool Cosh::has_evaluate() const {
OV_OP_SCOPE(v0_Cosh_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
} // namespace v0
} // namespace op
} // namespace ov


@ -2,79 +2,71 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/sin.hpp"
#include "openvino/op/sin.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/op/cos.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "openvino/reference/sin.hpp"
using namespace std;
using namespace ngraph;
op::Sin::Sin(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
namespace ov {
namespace op {
namespace sin {
struct Evaluate : ov::element::NoAction<bool> {
using ov::element::NoAction<bool>::visit;
template <element::Type_t ET>
static result_type visit(const Tensor& arg0, Tensor& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
reference::sin(arg0.data<T>(), out.data<T>(), count);
return true;
}
};
} // namespace sin
namespace v0 {
Sin::Sin(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
bool ngraph::op::v0::Sin::visit_attributes(AttributeVisitor& visitor) {
bool op::v0::Sin::visit_attributes(AttributeVisitor& visitor) {
OV_OP_SCOPE(v0_Sin_visit_attributes);
return true;
}
shared_ptr<Node> op::Sin::clone_with_new_inputs(const OutputVector& new_args) const {
shared_ptr<Node> Sin::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v0_Sin_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Sin>(new_args.at(0));
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace sinop {
namespace {
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
runtime::reference::sin<T>(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
return true;
}
bool evaluate_sin(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
bool rc = true;
out->set_unary(arg0);
switch (arg0->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_sin, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sin, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sin, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sin, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sin, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sin, f32, arg0, out, count);
default:
rc = false;
break;
}
return rc;
}
} // namespace
} // namespace sinop
bool op::Sin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool Sin::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v0_Sin_evaluate);
return sinop::evaluate_sin(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
OPENVINO_ASSERT(inputs.size() == 1 && outputs.size() == 1);
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<sin::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool op::Sin::has_evaluate() const {
bool Sin::has_evaluate() const {
OV_OP_SCOPE(v0_Sin_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
} // namespace v0
} // namespace op
} // namespace ov


@ -2,81 +2,69 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/sinh.hpp"
#include <ngraph/validation_util.hpp>
#include "openvino/op/sinh.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "openvino/reference/sinh.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
namespace op {
namespace sinh {
struct Evaluate : ov::element::NoAction<bool> {
using ov::element::NoAction<bool>::visit;
op::Sinh::Sinh(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
template <element::Type_t ET>
static result_type visit(const Tensor& arg0, Tensor& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
reference::sinh(arg0.data<T>(), out.data<T>(), count);
return true;
}
};
} // namespace sinh
namespace v0 {
Sinh::Sinh(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
bool ngraph::op::v0::Sinh::visit_attributes(AttributeVisitor& visitor) {
bool Sinh::visit_attributes(AttributeVisitor& visitor) {
OV_OP_SCOPE(v0_Sinh_visit_attributes);
return true;
}
shared_ptr<Node> op::Sinh::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> Sinh::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v0_Sinh_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Sinh>(new_args.at(0));
return std::make_shared<Sinh>(new_args.at(0));
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace sinhop {
namespace {
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
runtime::reference::sinh<T>(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
return true;
}
bool evaluate_sinh(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
bool rc = true;
out->set_unary(arg0);
switch (arg0->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_sinh, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sinh, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sinh, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sinh, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sinh, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_sinh, f32, arg0, out, count);
default:
rc = false;
break;
}
return rc;
}
} // namespace
} // namespace sinhop
bool op::Sinh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool Sinh::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v0_Sinh_evaluate);
OPENVINO_SUPPRESS_DEPRECATED_START
NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1));
OPENVINO_SUPPRESS_DEPRECATED_END
return sinhop::evaluate_sinh(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
OPENVINO_ASSERT(inputs.size() == 1 && outputs.size() == 1);
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<sinh::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool op::Sinh::has_evaluate() const {
bool Sinh::has_evaluate() const {
OV_OP_SCOPE(v0_Sinh_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
} // namespace v0
} // namespace op
} // namespace ov
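The NGRAPH_TYPE_CASE switches are replaced by the IfTypeOf/NoAction pair from element_visitor.hpp. Below is a simplified, self-contained illustration of that dispatch idea only; it is not the real element_visitor.hpp, and everything in it (the Et enum, Negate, the two-type list) is invented for the sketch:

#include <cstddef>
#include <iostream>
#include <utility>

// Stand-in for ov::element::Type_t, reduced to two entries for brevity.
enum class Et { i32, f32 };

// Default action: returned for any element type not in the compile-time list.
struct NoAction {
    using result_type = bool;
    static result_type visit() {
        return false;
    }
};

// Walk the compile-time type list; call Action::visit<ET>() on the first match.
template <Et... Types>
struct IfTypeOf;

template <>
struct IfTypeOf<> {
    template <class Action, class... Args>
    static typename Action::result_type apply(Et, Args&&...) {
        return Action::visit();  // list exhausted -> default action
    }
};

template <Et First, Et... Rest>
struct IfTypeOf<First, Rest...> {
    template <class Action, class... Args>
    static typename Action::result_type apply(Et et, Args&&... args) {
        return et == First ? Action::template visit<First>(std::forward<Args>(args)...)
                           : IfTypeOf<Rest...>::template apply<Action>(et, std::forward<Args>(args)...);
    }
};

// Analogue of the Evaluate functors above: one templated visit per element type.
struct Negate : NoAction {
    using NoAction::visit;
    template <Et ET>
    static result_type visit(const float* in, float* out, std::size_t n) {
        for (std::size_t i = 0; i < n; ++i)
            out[i] = -in[i];
        return true;
    }
};

int main() {
    float in[2] = {1.0f, 2.0f};
    float out[2] = {};
    const bool handled = IfTypeOf<Et::i32, Et::f32>::apply<Negate>(Et::f32, in, out, 2);
    std::cout << handled << " " << out[0] << " " << out[1] << "\n";  // 1 -2 -2
    return 0;
}

The first listed type that matches the runtime element type selects Action::visit<ET>(); if none matches, the NoAction default (false) is returned, which is exactly what the old default: branch of the switch provided.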


@@ -2,73 +2,69 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/tan.hpp"
#include "openvino/op/tan.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/op/cos.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "openvino/reference/tan.hpp"
using namespace std;
using namespace ngraph;
op::Tan::Tan(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
bool ngraph::op::v0::Tan::visit_attributes(AttributeVisitor& visitor) {
OV_OP_SCOPE(v0_Tan_visit_attributes);
return true;
}
shared_ptr<Node> op::Tan::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v0_Tan_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Tan>(new_args.at(0));
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace tanop {
namespace {
namespace ov {
namespace op {
namespace tan {
struct Evaluate : ov::element::NoAction<bool> {
using ov::element::NoAction<bool>::visit;
template <element::Type_t ET>
static result_type visit(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
ngraph::runtime::reference::tan(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
static result_type visit(const Tensor& arg0, Tensor& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
reference::tan(arg0.data<T>(), out.data<T>(), count);
return true;
}
};
} // namespace tan
bool evaluate_tan(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
out->set_unary(arg0);
namespace v0 {
Tan::Tan(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
bool Tan::visit_attributes(AttributeVisitor& visitor) {
OV_OP_SCOPE(v0_Tan_visit_attributes);
return true;
}
std::shared_ptr<Node> Tan::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v0_Tan_clone_with_new_inputs);
check_new_args_count(this, new_args);
return std::make_shared<Tan>(new_args.at(0));
}
bool Tan::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v0_Tan_evaluate);
OPENVINO_ASSERT(inputs.size() == 1 && outputs.size() == 1);
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<Evaluate>(arg0->get_element_type(), arg0, out, count);
}
} // namespace
} // namespace tanop
bool op::Tan::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
OV_OP_SCOPE(v0_Tan_evaluate);
return tanop::evaluate_tan(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<tan::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool op::Tan::has_evaluate() const {
bool Tan::has_evaluate() const {
OV_OP_SCOPE(v0_Tan_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
} // namespace v0
} // namespace op
} // namespace ov
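The Evaluate functor only forwards typed pointers and an element count to the reference kernel. As a rough sketch of what such a kernel looks like (an assumption that mirrors the (arg, out, count) signature used above, not a copy of openvino/reference/tan.hpp):

#include <cmath>
#include <cstddef>

namespace sketch {
// Element-wise tangent over a flat buffer: the only information the op-level
// evaluate() has to supply is a typed pointer pair and shape_size(input).
template <typename T>
void tan(const T* arg, T* out, std::size_t count) {
    for (std::size_t i = 0; i < count; ++i) {
        out[i] = static_cast<T>(std::tan(arg[i]));
    }
}
}  // namespace sketch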


@@ -2,71 +2,71 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/tanh.hpp"
#include "openvino/op/tanh.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/subtract.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "openvino/reference/tanh.hpp"
using namespace std;
using namespace ngraph;
op::Tanh::Tanh(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
bool ngraph::op::v0::Tanh::visit_attributes(AttributeVisitor& visitor) {
OV_OP_SCOPE(v0_Tanh_visit_attributes);
return true;
}
shared_ptr<Node> op::Tanh::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v0_Tanh_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Tanh>(new_args.at(0));
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace tanhop {
namespace {
namespace ov {
namespace op {
namespace tanh {
struct Evaluate : ov::element::NoAction<bool> {
using ov::element::NoAction<bool>::visit;
template <element::Type_t ET>
static result_type visit(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
ngraph::runtime::reference::tanh(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
static result_type visit(const Tensor& arg0, Tensor& out, const size_t count) {
using T = typename element_type_traits<ET>::value_type;
reference::tanh(arg0.data<T>(), out.data<T>(), count);
return true;
}
};
} // namespace tanh
bool evaluate_tanh(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) {
out->set_unary(arg0);
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<Evaluate>(arg0->get_element_type(), arg0, out, count);
namespace v0 {
Tanh::Tanh(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
} // namespace
} // namespace tanhop
bool op::Tanh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool Tanh::visit_attributes(AttributeVisitor& visitor) {
OV_OP_SCOPE(v0_Tanh_visit_attributes);
return true;
}
std::shared_ptr<Node> Tanh::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v0_Tanh_clone_with_new_inputs);
check_new_args_count(this, new_args);
return std::make_shared<Tanh>(new_args.at(0));
}
bool Tanh::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v0_Tanh_evaluate);
return tanhop::evaluate_tanh(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
OPENVINO_ASSERT(inputs.size() == 1 && outputs.size() == 1);
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<tanh::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool op::Tanh::has_evaluate() const {
bool Tanh::has_evaluate() const {
OV_OP_SCOPE(v0_Tanh_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
return false;
break;
}
return false;
}
} // namespace v0
} // namespace op
} // namespace ov
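has_evaluate() is the advertised counterpart of evaluate(): callers such as constant folding probe it before evaluating, which is why its type list must stay in sync with the IfTypeOf list. A hedged sketch of that guard (the helper name is illustrative):

#include <memory>

#include "openvino/core/node.hpp"
#include "openvino/runtime/tensor.hpp"

// Only call evaluate() when the node advertises support for its input element type.
bool try_evaluate(const std::shared_ptr<ov::Node>& node,
                  ov::TensorVector& outputs,
                  const ov::TensorVector& inputs) {
    return node->has_evaluate() && node->evaluate(outputs, inputs);
}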


@@ -12,9 +12,9 @@ bool evaluate(const std::shared_ptr<ngraph::op::v0::Tanh>& op,
const ngraph::HostTensorVector& outputs,
const ngraph::HostTensorVector& inputs) {
using T = typename ngraph::element_type_traits<ET>::value_type;
ngraph::runtime::reference::tanh<T>(inputs[0]->get_data_ptr<T>(),
outputs[0]->get_data_ptr<T>(),
ngraph::shape_size(inputs[0]->get_shape()));
ov::reference::tanh<T>(inputs[0]->get_data_ptr<T>(),
outputs[0]->get_data_ptr<T>(),
ngraph::shape_size(inputs[0]->get_shape()));
return true;
}
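On the plugin side the change is purely a namespace move: ngraph::runtime::reference::tanh becomes ov::reference::tanh with the same (arg, out, count) signature. A minimal stand-alone call, assuming only that signature and the openvino/reference/tanh.hpp header already used earlier in the patch:

#include <vector>

#include "openvino/reference/tanh.hpp"

int main() {
    const std::vector<float> in{-1.0f, 0.0f, 1.0f};
    std::vector<float> out(in.size());
    ov::reference::tanh<float>(in.data(), out.data(), in.size());
    return out[1] == 0.0f ? 0 : 1;  // tanh(0) == 0
}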