Tq/revise asinh (#6498)

* update spec

Signed-off-by: Li, Tingqian <tingqian.li@intel.com>

* use RTTI macro

Signed-off-by: Li, Tingqian <tingqian.li@intel.com>

* fix op scope typo

Signed-off-by: Li, Tingqian <tingqian.li@intel.com>

* simplify evaluate template function

Signed-off-by: Li, Tingqian <tingqian.li@intel.com>

* override validate_and_infer_types() to align with spec

Signed-off-by: Li, Tingqian <tingqian.li@intel.com>

* Add visitor API test

Signed-off-by: Li, Tingqian <tingqian.li@intel.com>

* Add type_prop test

Signed-off-by: Li, Tingqian <tingqian.li@intel.com>

* Add SLT asinh test in cpu/gpu plugin

Signed-off-by: Li, Tingqian <tingqian.li@intel.com>

* change `T` in the spec to *T*

Signed-off-by: Li, Tingqian <tingqian.li@intel.com>

* refactor spec according to Cos

Signed-off-by: Li, Tingqian <tingqian.li@intel.com>

* Support integral element types

Signed-off-by: Li, Tingqian <tingqian.li@intel.com>

* Add new cpp files into CMakeLists.txt

Signed-off-by: Li, Tingqian <tingqian.li@intel.com>
Tingqian Li 2021-07-08 12:18:09 +08:00 committed by GitHub
parent 35d9bd0f63
commit 340583fa35
15 changed files with 89 additions and 34 deletions

@@ -4,33 +4,29 @@
 **Category**: Arithmetic unary operation
-**Short description**: *Asinh* performs element-wise hyperbolic inverse sine (arcsinh) operation with given tensor.
+**Short description**: *Asinh* performs element-wise inverse hyperbolic sine operation (arcsinh) on a given input tensor.
-**Attributes**:
-No attributes available.
-**Inputs**
-* **1**: A tensor of type *T*. **Required.**
-**Outputs**
-* **1**: The result of element-wise asinh operation. A tensor of type *T*.
-**Types**
-* *T*: any floating point type.
-*Asinh* does the following with the input tensor *a*:
+**Detailed description**: *Asinh* performs element-wise inverse hyperbolic sine operation on a given input tensor, based on the following mathematical formula:
 \f[
 a_{i} = asinh(a_{i})
 \f]
-**Examples**
-*Example 1*
+**Attributes**: *Asinh* operation has no attributes.
+**Inputs**
+* **1**: A tensor of type *T* and arbitrary shape. **Required.**
+**Outputs**
+* **1**: The result of element-wise *Asinh* operation. A tensor of type *T* and the same shape as input tensor.
+**Types**
+* *T*: any numeric type.
+**Example**
 ```xml
 <layer ... type="Asinh">
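For reference, the inverse hyperbolic sine in the formula above has the closed form \f[ asinh(x) = \ln\left(x + \sqrt{x^{2} + 1}\right) \f] so, for example, asinh(2) = ln(2 + √5) ≈ 1.4436 — the value the integer tests below round to 1.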

@@ -37,6 +37,7 @@ const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes
 {Negative, {}},
 {Acos, {}},
 {Asin, {}},
+{Asinh, {}},
 {Atan, {}},
 {Cos, {}},
 {Cosh, {}},

@@ -38,6 +38,7 @@ const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes
 {Negative, {}},
 {Acos, {}},
 {Asin, {}},
+{Asinh, {}},
 {Atan, {}},
 {Cos, {}},
 {Cosh, {}},
@@ -65,6 +66,7 @@ const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes
 // List of operations that should be tested also with integer precision
 const std::map<ActivationTypes, std::vector<std::vector<float>>> intActivationTypes = {
+{Asinh, {}},
 {Atan, {}},
 {Negative, {}},
 {Ceiling, {}},

@@ -35,6 +35,7 @@ const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes
 {Negative, {}},
 {Acos, {}},
 {Asin, {}},
+{Asinh, {}},
 {Atan, {}},
 {Cos, {}},
 {Cosh, {}},

@@ -40,6 +40,7 @@ static std::map<ngraph::helpers::ActivationTypes, std::string> activationNames =
 {ngraph::helpers::ActivationTypes::Negative, "Negative"},
 {ngraph::helpers::ActivationTypes::Acos, "Acos"},
 {ngraph::helpers::ActivationTypes::Asin, "Asin"},
+{ngraph::helpers::ActivationTypes::Asinh, "Asinh"},
 {ngraph::helpers::ActivationTypes::Atan, "Atan"},
 {ngraph::helpers::ActivationTypes::Cos, "Cos"},
 {ngraph::helpers::ActivationTypes::Cosh, "Cosh"},

@@ -6,6 +6,7 @@ VERIFIED_OP_REFERENCES = [
 'Acos-1',
 'Add-1',
 'Asin-1',
+'Asinh-3',
 'Assign-6',
 'AvgPool-1',
 'BatchNormInference-5',

@@ -100,6 +100,7 @@ enum ActivationTypes {
 Negative,
 Acos,
 Asin,
+Asinh,
 Atan,
 Cos,
 Cosh,

@@ -48,6 +48,8 @@ std::shared_ptr<ngraph::Node> makeActivation(const ngraph::Output<Node> &in,
 return std::make_shared<ngraph::op::Acos>(in);
 case ngraph::helpers::ActivationTypes::Asin:
 return std::make_shared<ngraph::op::Asin>(in);
+case ngraph::helpers::ActivationTypes::Asinh:
+return std::make_shared<ngraph::op::Asinh>(in);
 case ngraph::helpers::ActivationTypes::Atan:
 return std::make_shared<ngraph::op::Atan>(in);
 case ngraph::helpers::ActivationTypes::Cos:

@@ -19,8 +19,8 @@ namespace ngraph
 class NGRAPH_API Asinh : public util::UnaryElementwiseArithmetic
 {
 public:
-static constexpr NodeTypeInfo type_info{"Asinh", 3};
-const NodeTypeInfo& get_type_info() const override { return type_info; }
+NGRAPH_RTTI_DECLARATION;
 /// \brief Constructs an Asinh operation.
 Asinh() = default;
 /// \brief Constructs an Asinh operation.
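The hand-written type_info member and get_type_info() override are replaced by the NGRAPH_RTTI_DECLARATION macro; its companion NGRAPH_RTTI_DEFINITION in the .cpp file (further down in this diff) supplies the type name, opset version, and parent class in one place. A minimal sketch of the pattern as used here:

```cpp
// Header side: one macro declares the op's RTTI members.
class NGRAPH_API Asinh : public util::UnaryElementwiseArithmetic
{
public:
    NGRAPH_RTTI_DECLARATION;
    // ... constructors, validate_and_infer_types(), etc. ...
};

// Source side: name "Asinh", opset version 3, parent class for RTTI chaining.
NGRAPH_RTTI_DEFINITION(op::v3::Asinh, "Asinh", 3, util::UnaryElementwiseArithmetic);
```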

@@ -13,7 +13,8 @@ namespace ngraph
 {
 namespace reference
 {
-template <typename T>
+template <typename T,
+          typename std::enable_if<!std::is_integral<T>::value, bool>::type = true>
 void asinh(const T* arg, T* out, size_t count)
 {
 for (size_t i = 0; i < count; i++)
@@ -21,6 +22,16 @@ namespace ngraph
 out[i] = std::asinh(arg[i]);
 }
 }
+
+template <typename T,
+          typename std::enable_if<std::is_integral<T>::value, bool>::type = true>
+void asinh(const T* arg, T* out, size_t count)
+{
+for (size_t i = 0; i < count; i++)
+{
+out[i] = std::roundl(std::asinh(arg[i]));
+}
+}
 } // namespace reference
 } // namespace runtime
 } // namespace ngraph
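The enable_if pair above selects between two overloads at compile time: floating-point types apply std::asinh directly, while integral types compute in floating point and round the result back to the integer type. A self-contained sketch of the same dispatch outside ngraph (asinh_elem is a hypothetical helper name, and std::lround stands in for the std::roundl call above):

```cpp
#include <cmath>
#include <cstdio>
#include <type_traits>

// Floating-point overload: apply std::asinh directly.
template <typename T,
          typename std::enable_if<!std::is_integral<T>::value, bool>::type = true>
T asinh_elem(T x)
{
    return std::asinh(x);
}

// Integral overload: compute in floating point, round to the nearest integer.
template <typename T,
          typename std::enable_if<std::is_integral<T>::value, bool>::type = true>
T asinh_elem(T x)
{
    return static_cast<T>(std::lround(std::asinh(static_cast<double>(x))));
}

int main()
{
    std::printf("asinh(2.0f) = %f\n", asinh_elem(2.0f)); // ~1.4436
    std::printf("asinh(2)    = %d\n", asinh_elem(2));    // rounds to 1
    std::printf("asinh(3)    = %d\n", asinh_elem(3));    // ~1.8184 -> 2
}
```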

@@ -7,6 +7,7 @@
 #include "itt.hpp"
 #include "ngraph/op/asinh.hpp"
 #include "ngraph/op/util/elementwise_args.hpp"
+#include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/runtime/reference/asinh.hpp"
 #include "ngraph/type/element_type.hpp"
@@ -14,7 +15,7 @@
 using namespace std;
 using namespace ngraph;
-constexpr NodeTypeInfo op::v3::Asinh::type_info;
+NGRAPH_RTTI_DEFINITION(op::v3::Asinh, "Asinh", 3, util::UnaryElementwiseArithmetic);
 op::v3::Asinh::Asinh(const Output<Node>& arg)
 : UnaryElementwiseArithmetic(arg)
@@ -32,25 +33,26 @@ shared_ptr<Node> op::v3::Asinh::clone_with_new_inputs(const OutputVector& new_ar
 namespace asinhop
 {
 template <element::Type_t ET>
-bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out)
+inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
 {
-runtime::reference::asinh(
-arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), shape_size(arg0->get_shape()));
+runtime::reference::asinh(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
 return true;
 }
 bool evaluate_asinh(const HostTensorPtr& arg0, const HostTensorPtr& out)
 {
 bool rc = true;
+size_t count = shape_size(arg0->get_shape());
 out->set_unary(arg0);
 switch (arg0->get_element_type())
 {
-NGRAPH_TYPE_CASE(evaluate_asinh, i32, arg0, out);
-NGRAPH_TYPE_CASE(evaluate_asinh, i64, arg0, out);
-NGRAPH_TYPE_CASE(evaluate_asinh, u32, arg0, out);
-NGRAPH_TYPE_CASE(evaluate_asinh, u64, arg0, out);
-NGRAPH_TYPE_CASE(evaluate_asinh, f16, arg0, out);
-NGRAPH_TYPE_CASE(evaluate_asinh, f32, arg0, out);
+NGRAPH_TYPE_CASE(evaluate_asinh, i32, arg0, out, count);
+NGRAPH_TYPE_CASE(evaluate_asinh, i64, arg0, out, count);
+NGRAPH_TYPE_CASE(evaluate_asinh, u32, arg0, out, count);
+NGRAPH_TYPE_CASE(evaluate_asinh, u64, arg0, out, count);
+NGRAPH_TYPE_CASE(evaluate_asinh, f16, arg0, out, count);
+NGRAPH_TYPE_CASE(evaluate_asinh, f32, arg0, out, count);
 default: rc = false; break;
 }
 return rc;
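Each NGRAPH_TYPE_CASE line generates one case of the switch, calling the evaluate template instantiated for that element type and forwarding the trailing arguments — which is why the new count argument is simply appended to every macro invocation. Conceptually (an approximation; the real macro in ngraph's headers also opens a profiling scope), one invocation expands to roughly:

```cpp
// Approximate expansion of NGRAPH_TYPE_CASE(evaluate_asinh, i32, arg0, out, count):
case element::Type_t::i32:
    rc = evaluate<element::Type_t::i32>(arg0, out, count);
    break;
```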
@@ -65,7 +67,7 @@ bool op::v3::Asinh::evaluate(const HostTensorVector& outputs, const HostTensorVe
 bool op::v3::Asinh::has_evaluate() const
 {
-NGRAPH_OP_SCOPE(v1_Asinh_has_evaluate);
+NGRAPH_OP_SCOPE(v3_Asinh_has_evaluate);
 switch (get_input_element_type(0))
 {
 case ngraph::element::i32:

@@ -95,6 +95,7 @@ set(SRC
 type_prop/adaptive_avg_pool.cpp
 type_prop/adaptive_max_pool.cpp
 type_prop/asin.cpp
+type_prop/asinh.cpp
 type_prop/assign.cpp
 type_prop/atan.cpp
 type_prop/avg_pool.cpp
@@ -231,6 +232,7 @@ set(SRC
 visitors/value_map.cpp
 visitors/op/adaptive_avg_pool.cpp
 visitors/op/adaptive_max_pool.cpp
+visitors/op/asinh.cpp
 visitors/op/atan.cpp
 visitors/op/batch_norm.cpp
 visitors/op/batch_to_space.cpp

@@ -49,3 +49,18 @@ NGRAPH_TEST(${BACKEND_NAME}, asinh)
 test_case.add_expected_output<float>(shape, expected);
 test_case.run();
 }
+
+NGRAPH_TEST(${BACKEND_NAME}, asinh_i32)
+{
+Shape shape{11};
+auto A = make_shared<op::Parameter>(element::i32, shape);
+auto f = make_shared<Function>(make_shared<op::Asinh>(A), ParameterVector{A});
+vector<int> input{-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5};
+vector<int> expected{-2, -2, -2, -1, -1, 0, 1, 1, 2, 2, 2};
+auto test_case = test::TestCase<TestEngine>(f);
+test_case.add_input<int>(input);
+test_case.add_expected_output<int>(shape, expected);
+test_case.run();
+}
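As a quick sanity check on the expected values: asinh(1) ≈ 0.8814, asinh(2) ≈ 1.4436, asinh(3) ≈ 1.8184, asinh(4) ≈ 2.0947, and asinh(5) ≈ 2.3124, which round to 1, 1, 2, 2, 2; asinh is an odd function, so the negative inputs mirror these.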

@@ -0,0 +1,9 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "unary_ops.hpp"
+
+using Type = ::testing::Types<ngraph::op::Asinh>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_asinh, UnaryOperator, Type);
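This new file reuses a shared typed-test fixture: INSTANTIATE_TYPED_TEST_SUITE_P is the googletest macro that instantiates a typed test suite for the listed types, so the common type-propagation checks presumably live in UnaryOperator (from unary_ops.hpp) and only the op type is supplied here. The visitor API test below follows the same pattern.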

@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "unary_ops.hpp"
+
+using Type = ::testing::Types<UnaryOperatorType<ngraph::op::Asinh, element::f32>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_attribute,
+                               UnaryOperatorVisitor,
+                               Type,
+                               UnaryOperatorTypeName);