* Added support for Gelu-6 to the MO
* Added Gelu-6 to ngraph and the Python API, plus some tests
* Fixed typo in the Gelu approximation mode
* Fixed Gelu-6 reference implementation for Tanh mode
* Added transformation to downgrade v6::Gelu to v2::Gelu
* Added specification for Gelu-6
* Code style fixes
* Updated the Gelu-6 operation specification
* Fixed compilation issue in the reference implementation for Gelu
* Fixed compilation issues for some OSs
* Code style fix
* Fixed one more cpplint issue
* Fixed Gelu6 reference implementation compilation on Windows
* Code style fix
* Fixed various ngraph unit tests
* Code style check
* Reverted Gelu-2 to be a fused op
* Fixed Gelu6 downgrade transformation
* Added unit test for the Gelu6Downgrade transformation
* Updated copyright year
* Updated copyright year
* Replaced tab characters with 4 spaces in IR reader tests
* Code style fixes
* Added default value for the GeluApproximation mode of the Gelu-6 op
* Fixed code style for Gelu-6
* Changed the order of parameters for the Gelu evaluate to potentially avoid backward compatibility issues with the ARM plugin
* Fixed code style
* Introduced opset7; moved Gelu-6 to opset7
* Fixed a non-updated transformation
* Fixed opset version in the ngraph Python API for the Gelu operation
* Fixed typo in the opset number in the documentation
* Reverted some changes related to Gelu-6
* Updated MO to produce Gelu-7
* Updated unit tests for Gelu
* Updated the Gelu-7 specification
* Changed the Gelu reference implementation; added opset7 to Python packages (a sketch of both approximation modes follows this list)
* Updated Python API tests for the Gelu operation
* Code style fix
* Marked the get_approximation_mode function as const
* Added missing "const" qualifier
* Fixed code style issues in tests
* Added extractor for the MxNet operation Gelu
* Fixed spelling issues
* Updated MxNet supported symbols
* Added NGRAPH_OP_SCOPE for Gelu-7 validate_and_infer_types
* Fixed a typo in a comment
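For context on the two approximation modes referenced above, here is a minimal stand-alone sketch of the element-wise math a Gelu-7 reference implementation computes. This is not the actual ngraph reference code, and the helper names gelu_erf and gelu_tanh are purely illustrative. The ERF mode evaluates the exact definition Gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))), while the TANH mode uses the common tanh approximation. The printed values line up with the expectations used in the op_eval tests reproduced below.

// Illustrative sketch of the two Gelu approximation modes
// (not the ngraph reference implementation).
#include <cmath>
#include <cstdio>

// ERF mode: exact definition Gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))).
float gelu_erf(float x)
{
    return 0.5f * x * (1.0f + std::erf(x / std::sqrt(2.0f)));
}

// TANH mode: Gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
float gelu_tanh(float x)
{
    const float pi = 3.14159265358979323846f;
    const float k = std::sqrt(2.0f / pi);
    return 0.5f * x * (1.0f + std::tanh(k * (x + 0.044715f * x * x * x)));
}

int main()
{
    // gelu_erf(1.0f) ~= 0.8413447 and gelu_tanh(1.0f) ~= 0.841192,
    // matching the expected values in the gelu_erf and gelu_tanh tests.
    std::printf("erf:  %f\n", gelu_erf(1.0f));
    std::printf("tanh: %f\n", gelu_tanh(1.0f));
    return 0;
}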
//*****************************************************************************
// Copyright 2017-2021 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <string>
#include <vector>

#include "gtest/gtest.h"

#include "ngraph/op/gelu.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/validation_util.hpp"
#include "util/test_tools.hpp"

using namespace std;
using namespace ngraph;

TEST(op_eval, gelu_tanh)
{
    auto p = make_shared<op::Parameter>(element::f32, Shape{});
    auto gelu = make_shared<op::v7::Gelu>(p, op::GeluApproximationMode::TANH);
    auto fun = make_shared<Function>(OutputVector{gelu}, ParameterVector{p});

    std::vector<std::vector<float>> inputs{{-1.0}, {-0.5}, {0}, {0.5}, {1.0}};
    std::vector<std::vector<float>> expected_result{
        {-0.15880796}, {-0.154286}, {0}, {0.345714}, {0.841192}};

    for (size_t i = 0; i < inputs.size(); i++)
    {
        auto result = make_shared<HostTensor>();
        ASSERT_TRUE(
            fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(Shape{}, inputs[i])}));
        EXPECT_EQ(result->get_element_type(), element::f32);
        EXPECT_EQ(result->get_shape(), (Shape{}));
        auto result_data = read_vector<float>(result);
        EXPECT_NEAR(result_data[0], expected_result[i][0], 0.000001);
    }
}

TEST(op_eval, gelu_erf)
{
    auto p = make_shared<op::Parameter>(element::f32, Shape{});
    auto gelu = make_shared<op::v7::Gelu>(p, op::GeluApproximationMode::ERF);
    auto fun = make_shared<Function>(OutputVector{gelu}, ParameterVector{p});

    std::vector<std::vector<float>> inputs{{-1.0}, {-0.5}, {0}, {0.5}, {1.0}};
    std::vector<std::vector<float>> expected_result{
        {-0.15865529}, {-0.15426877}, {0}, {0.34573123}, {0.8413447}};

    for (size_t i = 0; i < inputs.size(); i++)
    {
        auto result = make_shared<HostTensor>();
        ASSERT_TRUE(
            fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(Shape{}, inputs[i])}));
        EXPECT_EQ(result->get_element_type(), element::f32);
        EXPECT_EQ(result->get_shape(), (Shape{}));
        auto result_data = read_vector<float>(result);
        EXPECT_NEAR(result_data[0], expected_result[i][0], 0.000001);
    }
}