diff --git a/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp b/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp
index cbe8a4c7dba..dd5a5b24dd6 100644
--- a/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp
@@ -21,6 +21,7 @@ CommonReferenceTest::CommonReferenceTest(): targetDevice("TEMPLATE") {
 }
 
 void CommonReferenceTest::Exec() {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED();
     LoadNetwork();
     FillInputs();
     Infer();
@@ -97,6 +98,11 @@ void CommonReferenceTest::ValidateBlobs(const ov::runtime::Tensor& refBlob, cons
             refBlob.data<const float>(), outBlob.data<const float>(),
             refBlob.get_size(), threshold, abs_threshold);
         break;
+    case ov::element::f64:
+        LayerTestsUtils::LayerTestsCommon::Compare<double, double>(
+            refBlob.data<const double>(), outBlob.data<const double>(),
+            refBlob.get_size(), threshold, abs_threshold);
+        break;
     case ov::element::i8:
         LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(
             refBlob.data<const int8_t>(), outBlob.data<const int8_t>(),
diff --git a/docs/template_plugin/tests/functional/op_reference/clamp.cpp b/docs/template_plugin/tests/functional/op_reference/clamp.cpp
new file mode 100644
index 00000000000..da8b5320a5f
--- /dev/null
+++ b/docs/template_plugin/tests/functional/op_reference/clamp.cpp
@@ -0,0 +1,201 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include "openvino/op/clamp.hpp"
+#include "base_reference_test.hpp"
+
+using namespace reference_tests;
+using namespace ov;
+
+namespace {
+struct ClampParams {
+    template <typename T>
+    ClampParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<T>& iValues, const std::vector<T>& oValues,
+                const double min, const double max)
+        : min(min),
+          max(max),
+          pshape(shape),
+          inType(iType),
+          outType(iType),
+          inputData(CreateTensor(iType, iValues)),
+          refData(CreateTensor(iType, oValues)) {}
+
+    double min = 0;
+    double max = 0;
+
+    ov::PartialShape pshape;
+    ov::element::Type inType;
+    ov::element::Type outType;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
+};
+
+class ReferenceClampLayerTest : public testing::TestWithParam<ClampParams>, public CommonReferenceTest {
+public:
+    void SetUp() override {
+        auto params = GetParam();
+        function = CreateFunction(params.pshape, params.inType, params.outType, params.min, params.max);
+        inputData = {params.inputData};
+        refOutData = {params.refData};
+    }
+    static std::string getTestCaseName(const testing::TestParamInfo<ClampParams>& obj) {
+        auto param = obj.param;
+        std::ostringstream result;
+        result << "shape=" << param.pshape << "_";
+        result << "iType=" << param.inType << "_";
+        result << "oType=" << param.outType << "_";
+        result << "min=" << param.min << "_";
+        result << "max=" << param.max;
+        return result.str();
+    }
+
+private:
+    static std::shared_ptr<ov::Function> CreateFunction(const ov::PartialShape& input_shape, const ov::element::Type& input_type,
+                                                        const ov::element::Type& expected_output_type, const double min, const double max) {
+        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
+        const auto Clamp = std::make_shared<op::v0::Clamp>(in, min, max);
+        return std::make_shared<ov::Function>(NodeVector {Clamp}, ParameterVector {in});
+    }
+};
+
+TEST_P(ReferenceClampLayerTest, CompareWithRefs) {
+    Exec();
+}
+
+template <element::Type_t IN_ET>
+std::vector<ClampParams> generateClampFloatParams() {
+    using T = typename element_type_traits<IN_ET>::value_type;
+    auto min = std::numeric_limits<T>::min();
+    auto max = std::numeric_limits<T>::max();
+    auto pinf = std::numeric_limits<T>::infinity();
+    auto ninf = -std::numeric_limits<T>::infinity();
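+    // Boundary probes: min/max of T plus +/-inf exercise saturation at the
+    // clamp bounds, and an infinite bound leaves that side of the range
+    // unclamped (see the pinf/ninf cases below).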
+    std::vector<ClampParams> clampParams {
+        ClampParams(ov::PartialShape {5, 2},
+                    IN_ET,
+                    std::vector<T>{-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8},
+                    std::vector<T>{0.2, 0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.6, 0.6},
+                    0.2,
+                    0.6),
+        ClampParams(ov::PartialShape {5, 2},
+                    IN_ET,
+                    std::vector<T>{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001},
+                    std::vector<T>{10.0, 20.0, 10.0, 20.0, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.0},
+                    10.0,
+                    20.0),
+        ClampParams(ov::PartialShape {5, 2},
+                    IN_ET,
+                    std::vector<T>{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001},
+                    std::vector<T>{10.0, max, 10.0, pinf, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.000001},
+                    10.0,
+                    pinf),
+        ClampParams(ov::PartialShape {5, 2},
+                    IN_ET,
+                    std::vector<T>{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001},
+                    std::vector<T>{min, 20.0, ninf, 20.0, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.0},
+                    ninf,
+                    20.0)
+    };
+    return clampParams;
+}
+
+template <element::Type_t IN_ET>
+std::vector<ClampParams> generateClampIntParams() {
+    using T = typename element_type_traits<IN_ET>::value_type;
+    auto min = std::numeric_limits<T>::min();
+    auto max = std::numeric_limits<T>::max();
+    auto pinf = std::numeric_limits<double>::infinity();
+    auto ninf = -std::numeric_limits<double>::infinity();
+    std::vector<ClampParams> clampParams {
+        ClampParams(ov::PartialShape {6},
+                    IN_ET,
+                    std::vector<T>{-1, 3, -10, 20, 6, 2},
+                    std::vector<T>{1, 3, 1, 5, 5, 2},
+                    0.4,
+                    5.6),
+        ClampParams(ov::PartialShape {6},
+                    IN_ET,
+                    std::vector<T>{-6, 1, -2, 0, -1, 2},
+                    std::vector<T>{-5, -1, -2, -1, -1, -1},
+                    -5.6,
+                    -0.4),
+        ClampParams(ov::PartialShape {4, 2},
+                    IN_ET,
+                    std::vector<T>{min, max, 9, 10, 11, 19, 20, 21},
+                    std::vector<T>{10, 20, 10, 10, 11, 19, 20, 20},
+                    10.0,
+                    20.0),
+        ClampParams(ov::PartialShape {4, 2},
+                    IN_ET,
+                    std::vector<T>{min, max, 9, 10, 11, 19, 20, 21},
+                    std::vector<T>{10, max, 10, 10, 11, 19, 20, 21},
+                    10.0,
+                    pinf),
+        ClampParams(ov::PartialShape {4, 2},
+                    IN_ET,
+                    std::vector<T>{min, max, 9, 10, 11, 19, 20, 21},
+                    std::vector<T>{min, 20, 9, 10, 11, 19, 20, 20},
+                    ninf,
+                    20.0)
+    };
+    return clampParams;
+}
+
+template <element::Type_t IN_ET>
+std::vector<ClampParams> generateClampUintParams() {
+    using T = typename element_type_traits<IN_ET>::value_type;
+    auto min = std::numeric_limits<T>::min();
+    T max = (static_cast<T>(1) << (std::numeric_limits<T>::digits - 1)) - 1;
+    auto pinf = static_cast<double>(max);
+    auto ninf = -std::numeric_limits<double>::infinity();
+    std::vector<ClampParams> clampParams {
+        ClampParams(ov::PartialShape {4, 2},
+                    IN_ET,
+                    std::vector<T>{min, max, 9, 10, 11, 19, 20, 21},
+                    std::vector<T>{10, 20, 10, 10, 11, 19, 20, 20},
+                    10.0,
+                    20.0),
+        ClampParams(ov::PartialShape {4, 2},
+                    IN_ET,
+                    std::vector<T>{min, max, 9, 10, 11, 19, 20, 21},
+                    std::vector<T>{10, max, 10, 10, 11, 19, 20, 21},
+                    10.0,
+                    pinf),
+        ClampParams(ov::PartialShape {4, 2},
+                    IN_ET,
+                    std::vector<T>{min, max, 9, 10, 11, 19, 20, 21},
+                    std::vector<T>{min, 20, 9, 10, 11, 19, 20, 20},
+                    ninf,
+                    20.0)
+    };
+    return clampParams;
+}
+
+std::vector<ClampParams> generateClampCombinedParams() {
+    const std::vector<std::vector<ClampParams>> clampTypeParams {
+        generateClampFloatParams<element::Type_t::f32>(),
+        generateClampFloatParams<element::Type_t::f16>(),
+        generateClampFloatParams<element::Type_t::bf16>(),
+        generateClampIntParams<element::Type_t::i8>(),
+        generateClampIntParams<element::Type_t::i16>(),
+        generateClampIntParams<element::Type_t::i32>(),
+        generateClampIntParams<element::Type_t::i64>(),
+        generateClampUintParams<element::Type_t::u8>(),
+        generateClampUintParams<element::Type_t::u16>(),
+        generateClampUintParams<element::Type_t::u32>(),
+        generateClampUintParams<element::Type_t::u64>()
+    };
+    std::vector<ClampParams> combinedParams;
+
+    for (const auto& params : clampTypeParams) {
+        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
+    }
+    return combinedParams;
+}
+
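+// A minimal sketch (not part of the original test) of the elementwise rule the
+// expected vectors above encode. "clamp_ref" is a hypothetical helper name and
+// assumes <algorithm>, <cmath> and <type_traits> are available; for integral
+// element types the double bounds are rounded inward before clamping, which is
+// why [0.4, 5.6] behaves as [1, 5] in generateClampIntParams above:
+//
+// template <typename T>
+// T clamp_ref(T x, double lo, double hi) {
+//     if (std::is_integral<T>::value) {
+//         lo = std::ceil(lo);    // e.g. 0.4 -> 1
+//         hi = std::floor(hi);   // e.g. 5.6 -> 5
+//     }
+//     return static_cast<T>(std::min(std::max(static_cast<double>(x), lo), hi));
+// }
+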
+INSTANTIATE_TEST_SUITE_P(smoke_Clamp_With_Hardcoded_Refs, ReferenceClampLayerTest, + testing::ValuesIn(generateClampCombinedParams()), ReferenceClampLayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/op_reference/elu.cpp b/docs/template_plugin/tests/functional/op_reference/elu.cpp new file mode 100644 index 00000000000..387323cdf10 --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/elu.cpp @@ -0,0 +1,146 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "openvino/op/elu.hpp" +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ov; + +namespace { +struct EluParams { + template + EluParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector& iValues, const std::vector& oValues, + const double alpha) + : alpha(alpha), + pshape(shape), + inType(iType), + outType(iType), + inputData(CreateTensor(iType, iValues)), + refData(CreateTensor(iType, oValues)) {} + + double alpha = 0; + + ov::PartialShape pshape; + ov::element::Type inType; + ov::element::Type outType; + ov::runtime::Tensor inputData; + ov::runtime::Tensor refData; +}; + +class ReferenceEluLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.pshape, params.inType, params.outType, params.alpha); + inputData = {params.inputData}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType << "_"; + result << "alpha=" << param.alpha; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type, + const element::Type& expected_output_type, const double alpha) { + const auto in = std::make_shared(input_type, input_shape); + const auto Elu = std::make_shared(in, alpha); + return std::make_shared(NodeVector {Elu}, ParameterVector {in}); + } +}; + +TEST_P(ReferenceEluLayerTest, CompareWithRefs) { + Exec(); +} + +template +std::vector generateEluFloatParams() { + using T = typename element_type_traits::value_type; + + std::vector eluParams { + EluParams(ov::PartialShape {3, 2}, + IN_ET, + std::vector{-2.f, 3.f, -2.f, 1.f, -1.f, 0.f}, + std::vector{-0.432332358f, 3.f, -0.432332358f, 1.f, -0.316060279f, 0.f}, + 0.5f), + EluParams(ov::PartialShape {3, 2}, + IN_ET, + std::vector{-2.f, 3.f, -2.f, 1.f, -1.f, 0.f}, + std::vector{0.864664717f, 3.f, 0.864664717f, 1.f, 0.632120559f, 0.f}, + -1.f) + }; + return eluParams; +} + +template +std::vector generateEluIntParams() { + using T = typename element_type_traits::value_type; + + std::vector eluParams { + EluParams(ov::PartialShape {3, 2}, + IN_ET, + std::vector{-2, 3, -2, 1, -1, 0}, + std::vector{0, 3, 0, 1, 0, 0}, + 0.5f), + EluParams(ov::PartialShape {3, 2}, + IN_ET, + std::vector{-2, 3, -2, 1, -1, 0}, + std::vector{0, 3, 0, 1, 0, 0}, + -1.f) + }; + return eluParams; +} + +template +std::vector generateEluUintParams() { + using T = typename element_type_traits::value_type; + + std::vector eluParams { + EluParams(ov::PartialShape {3, 2}, + IN_ET, + std::vector{5, 4, 3, 2, 1, 0}, + std::vector{5, 4, 3, 2, 1, 0}, + 0.5f), + EluParams(ov::PartialShape {3, 2}, + 
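// Unsigned inputs are never negative, so Elu reduces to the identity on these
// vectors; the case below confirms this also holds for a negative alpha.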
IN_ET, + std::vector{5, 4, 3, 2, 1, 0}, + std::vector{5, 4, 3, 2, 1, 0}, + -1.f) + }; + return eluParams; +} +std::vector generateEluCombinedParams() { + const std::vector> eluTypeParams { + generateEluFloatParams(), + generateEluFloatParams(), + generateEluFloatParams(), + generateEluIntParams(), + generateEluIntParams(), + generateEluIntParams(), + generateEluIntParams(), + generateEluUintParams(), + generateEluUintParams(), + generateEluUintParams(), + generateEluUintParams() + }; + std::vector combinedParams; + + for (const auto& params : eluTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_Elu_With_Hardcoded_Refs, ReferenceEluLayerTest, + testing::ValuesIn(generateEluCombinedParams()), ReferenceEluLayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/op_reference/exp.cpp b/docs/template_plugin/tests/functional/op_reference/exp.cpp new file mode 100644 index 00000000000..258aa07a02d --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/exp.cpp @@ -0,0 +1,194 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "openvino/op/exp.hpp" +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ov; +using namespace InferenceEngine; + +namespace { +struct ExpParams { + template + ExpParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector& iValues, const std::vector& oValues) + : pshape(shape), + inType(iType), + outType(iType), + inputData(CreateTensor(iType, iValues)), + refData(CreateTensor(iType, oValues)) {} + + ov::PartialShape pshape; + ov::element::Type inType; + ov::element::Type outType; + ov::runtime::Tensor inputData; + ov::runtime::Tensor refData; +}; + +class ReferenceExpLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.pshape, params.inType, params.outType); + inputData = {params.inputData}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type, + const element::Type& expected_output_type) { + const auto in = std::make_shared(input_type, input_shape); + const auto Exp = std::make_shared(in); + return std::make_shared(NodeVector {Exp}, ParameterVector {in}); + } +}; + +class ReferenceExpInPlaceLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.pshape, params.inType, params.outType); + inputData = {params.inputData}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type, + const 
element::Type& expected_output_type) { + const auto in = std::make_shared(input_type, input_shape); + const auto Exp = std::make_shared(in); + const auto ExpInPlace = std::make_shared(Exp); + return std::make_shared(NodeVector {ExpInPlace}, ParameterVector {in}); + } +}; + +TEST_P(ReferenceExpLayerTest, CompareWithRefs) { + Exec(); +} + +TEST_P(ReferenceExpInPlaceLayerTest, CompareWithRefs) { + Exec(); +} + +template +std::vector generateExpFloatParams() { + using T = typename element_type_traits::value_type; + + std::vector expParams { + ExpParams(ov::PartialShape {8}, + IN_ET, + std::vector{-4, -3, -2, -1, 0, 1, 2, 3}, + std::vector{expf(-4), expf(-3), expf(-2), expf(-1), expf(0), expf(1), expf(2), expf(3)}), + ExpParams(ov::PartialShape {1}, + IN_ET, + std::vector{13}, + std::vector{expf(13)}) + }; + return expParams; +} + +template +std::vector generateExpIntParams() { + using T = typename element_type_traits::value_type; + + std::vector expParams { + ExpParams(ov::PartialShape {8}, + IN_ET, + std::vector{-4, -3, -2, -1, 0, 1, 2, 3}, + std::vector{static_cast(expf(-4)), static_cast(expf(-3)), static_cast(expf(-2)), static_cast(expf(-1)), + static_cast(expf(0)), static_cast(expf(1)), static_cast(expf(2)), static_cast(expf(3))}), + ExpParams(ov::PartialShape {1}, + IN_ET, + std::vector{13}, + std::vector{static_cast(expf(13))}) + }; + return expParams; +} + +template +std::vector generateExpUintParams() { + using T = typename element_type_traits::value_type; + + std::vector expParams { + ExpParams(ov::PartialShape {8}, + IN_ET, + std::vector{0, 1, 2, 3, 4, 5, 10, 100}, + std::vector{static_cast(expf(0)), static_cast(expf(1)), static_cast(expf(2)), static_cast(expf(3)), + static_cast(expf(4)), static_cast(expf(5)), static_cast(expf(10)), static_cast(expf(100))}), + ExpParams(ov::PartialShape {1}, + IN_ET, + std::vector{13}, + std::vector{static_cast(expf(13))}) + }; + return expParams; +} + +template +std::vector generateExpInPlaceFloatParams() { + using T = typename element_type_traits::value_type; + + std::vector expParams { + ExpParams(ov::PartialShape {2}, + IN_ET, + std::vector{1, 3}, + std::vector{expf(expf(1)), expf(expf(3))}) + }; + return expParams; +} + +std::vector generateExpCombinedParams() { + const std::vector> expTypeParams { + generateExpFloatParams(), + generateExpFloatParams(), + generateExpIntParams(), + generateExpIntParams(), + generateExpUintParams(), + generateExpUintParams() + }; + std::vector combinedParams; + + for (const auto& params : expTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +std::vector generateExpInPlaceCombinedParams() { + const std::vector> expTypeParams { + generateExpInPlaceFloatParams(), + generateExpInPlaceFloatParams() + }; + std::vector combinedParams; + + for (const auto& params : expTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_Exp_With_Hardcoded_Refs, ReferenceExpLayerTest, + testing::ValuesIn(generateExpCombinedParams()), ReferenceExpLayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Exp_In_Place_With_Hardcoded_Refs, ReferenceExpInPlaceLayerTest, + testing::ValuesIn(generateExpInPlaceCombinedParams()), ReferenceExpInPlaceLayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/op_reference/gelu.cpp b/docs/template_plugin/tests/functional/op_reference/gelu.cpp new file mode 
100644 index 00000000000..d02287ee5d2 --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/gelu.cpp @@ -0,0 +1,174 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "openvino/op/gelu.hpp" +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ov; +using namespace InferenceEngine; + +namespace { +struct GeluParams { + template + GeluParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector& iValues, const std::vector& oValues, + const ov::op::GeluApproximationMode mode) + : mode(mode), + pshape(shape), + inType(iType), + outType(iType), + inputData(CreateTensor(iType, iValues)), + refData(CreateTensor(iType, oValues)) {} + + ov::op::GeluApproximationMode mode = ov::op::GeluApproximationMode::ERF; + ov::PartialShape pshape; + ov::element::Type inType; + ov::element::Type outType; + ov::runtime::Tensor inputData; + ov::runtime::Tensor refData; +}; + +class ReferenceGeluV0LayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.pshape, params.inType, params.outType, params.mode); + inputData = {params.inputData}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type, + const element::Type& expected_output_type, const op::GeluApproximationMode mode) { + const auto in = std::make_shared(input_type, input_shape); + const auto Gelu = std::make_shared(in); + return std::make_shared(NodeVector {Gelu}, ParameterVector {in}); + } +}; + +class ReferenceGeluV7LayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.pshape, params.inType, params.outType, params.mode); + inputData = {params.inputData}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType << "_"; + result << "ApproxMode=" << param.mode; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type, + const element::Type& expected_output_type, const op::GeluApproximationMode mode) { + const auto in = std::make_shared(input_type, input_shape); + const auto Gelu = std::make_shared(in, mode); + return std::make_shared(NodeVector {Gelu}, ParameterVector {in}); + } +}; + +TEST_P(ReferenceGeluV0LayerTest, CompareWithRefs) { + Exec(); +} +TEST_P(ReferenceGeluV7LayerTest, CompareWithRefs) { + Exec(); +} + +template +std::vector generateGeluV0FloatParams() { + using T = typename element_type_traits::value_type; + + std::vector geluParams { + GeluParams(ov::PartialShape {8}, + IN_ET, + std::vector{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0}, + std::vector{-0.00012636185, -0.0040495098, -0.04550028, -0.15865529, 0.0, 0.8413447, 1.9544997, 2.9959507}, + op::GeluApproximationMode::ERF), + 
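// A second, near-zero case. Note that op::v0::Gelu implements only the exact
// erf-based formula, Gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))), so the mode
// carried in the params is not used when the v0 function is built.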
GeluParams(ov::PartialShape {3}, + IN_ET, + std::vector{-0.5, 0.1, 0.4}, + std::vector{-0.15426877, 0.05398279, 0.2621686}, + op::GeluApproximationMode::ERF) + }; + return geluParams; +} + +template +std::vector generateGeluV7FloatParams() { + using T = typename element_type_traits::value_type; + + std::vector geluParams { + GeluParams(ov::PartialShape {8}, + IN_ET, + std::vector{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0}, + std::vector{-0.00012636185, -0.0040495098, -0.04550028, -0.15865529, 0.0, 0.8413447, 1.9544997, 2.9959507}, + op::GeluApproximationMode::ERF), + GeluParams(ov::PartialShape {8}, + IN_ET, + std::vector{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0}, + std::vector{-0.00012636185, -0.0040495098, -0.04550028, -0.15865529, 0.0, 0.8413447, 1.9544997, 2.9959507}, + op::GeluApproximationMode::TANH), + GeluParams(ov::PartialShape {3}, + IN_ET, + std::vector{-0.5, 0.1, 0.4}, + std::vector{-0.15426877, 0.05398279, 0.2621686}, + op::GeluApproximationMode::ERF), + GeluParams(ov::PartialShape {3}, + IN_ET, + std::vector{-0.5, 0.1, 0.4}, + std::vector{-0.15428599, 0.053982753, 0.262161165}, + op::GeluApproximationMode::TANH) + }; + return geluParams; +} + +std::vector generateGeluV0CombinedParams() { + const std::vector> geluTypeParams { + generateGeluV0FloatParams(), + generateGeluV0FloatParams() + }; + std::vector combinedParams; + + for (const auto& params : geluTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +std::vector generateGeluV7CombinedParams() { + const std::vector> geluTypeParams { + generateGeluV7FloatParams(), + generateGeluV7FloatParams() + }; + std::vector combinedParams; + + for (const auto& params : geluTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_Gelu_v2_With_Hardcoded_Refs, ReferenceGeluV0LayerTest, + testing::ValuesIn(generateGeluV0CombinedParams()), ReferenceGeluV0LayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Gelu_v7_With_Hardcoded_Refs, ReferenceGeluV7LayerTest, + testing::ValuesIn(generateGeluV7CombinedParams()), ReferenceGeluV7LayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/op_reference/hard_sigmoid.cpp b/docs/template_plugin/tests/functional/op_reference/hard_sigmoid.cpp new file mode 100644 index 00000000000..fe57e28c260 --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/hard_sigmoid.cpp @@ -0,0 +1,111 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +#include "openvino/op/hard_sigmoid.hpp" +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ov; + +namespace { +struct HardSigmoidParams { + template + HardSigmoidParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector& iValues, const std::vector& oValues, + const float alpha, const float beta) + : pshape(shape), + inType(iType), + outType(iType), + inputData(CreateTensor(iType, iValues)), + refData(CreateTensor(iType, oValues)), + alpha(alpha), + beta(beta) {} + + ov::PartialShape pshape; + ov::element::Type inType; + ov::element::Type outType; + ov::runtime::Tensor inputData; + ov::runtime::Tensor refData; + float alpha; + float beta; +}; + +class ReferenceHardSigmoidLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto 
params = GetParam(); + function = CreateFunction(params.pshape, params.inType, params.outType, params.alpha, params.beta); + inputData = {params.inputData}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType << "_"; + result << "alpha=" << param.alpha << "_"; + result << "beta=" << param.beta; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type, + const element::Type& expected_output_type, const float alphaData, const float betaData) { + std::vector alphaArray; + std::vector betaArray; + alphaArray.push_back(alphaData); + betaArray.push_back(betaData); + const auto in = std::make_shared(input_type, input_shape); + const auto alpha = ngraph::op::Constant::create(input_type, Shape{}, {alphaData}); + const auto beta = ngraph::op::Constant::create(input_type, Shape{}, {betaData}); + const auto HardSigmoid = std::make_shared(in, alpha, beta); + return std::make_shared(NodeVector {HardSigmoid}, ParameterVector {in}); + } +}; + +TEST_P(ReferenceHardSigmoidLayerTest, CompareWithRefs) { + Exec(); +} + +template +std::vector generateHardSigmoidFloatParams() { + using T = typename element_type_traits::value_type; + + std::vector hardSigmoidParams { + HardSigmoidParams(ov::PartialShape {3}, + IN_ET, + std::vector{-1.0f, 0.0f, 1.0f}, + std::vector{0.1f, 0.6f, 1.f}, + 0.5, + 0.6), + HardSigmoidParams(ov::PartialShape {2, 5}, + IN_ET, + std::vector{-3.0f, -1.0f, 0.0f, 1.0f, 3.0f, 0.5f, -0.2f, 6.0f, 8.0f, 0.1f}, + std::vector{0.0f, 0.3f, 0.5f, 0.7f, 1.0f, 0.6f, 0.46f, 1.0f, 1.0f, 0.52f}, + 0.2, + 0.5) + }; + return hardSigmoidParams; +} + +std::vector generateHardSigmoidCombinedParams() { + const std::vector> hardSigmoidTypeParams { + generateHardSigmoidFloatParams(), + generateHardSigmoidFloatParams() + }; + std::vector combinedParams; + + for (const auto& params : hardSigmoidTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_HardSigmoid_With_Hardcoded_Refs, ReferenceHardSigmoidLayerTest, + testing::ValuesIn(generateHardSigmoidCombinedParams()), ReferenceHardSigmoidLayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/op_reference/hsigmoid.cpp b/docs/template_plugin/tests/functional/op_reference/hsigmoid.cpp new file mode 100644 index 00000000000..b3fbb29904c --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/hsigmoid.cpp @@ -0,0 +1,90 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "openvino/op/hsigmoid.hpp" +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ov; + +namespace { +struct HSigmoidParams { + template + HSigmoidParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector& iValues, const std::vector& oValues) + : pshape(shape), + inType(iType), + outType(iType), + inputData(CreateTensor(iType, iValues)), + refData(CreateTensor(iType, oValues)) {} + + ov::PartialShape pshape; + ov::element::Type inType; + ov::element::Type outType; + ov::runtime::Tensor inputData; + ov::runtime::Tensor refData; +}; + +class ReferenceHSigmoidLayerTest : public 
testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.pshape, params.inType, params.outType); + inputData = {params.inputData}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type, + const element::Type& HSigmoidected_output_type) { + const auto in = std::make_shared(input_type, input_shape); + const auto HSigmoid = std::make_shared(in); + return std::make_shared(NodeVector {HSigmoid}, ParameterVector {in}); + } +}; + +TEST_P(ReferenceHSigmoidLayerTest, CompareWithRefs) { + Exec(); +} + + +template +std::vector generateHSigmoidFloatParams() { + using T = typename element_type_traits::value_type; + + std::vector hSigmoidParams { + HSigmoidParams(ov::PartialShape {13}, + IN_ET, + std::vector{-10.f, -5.f, -4.f, -3.f, -2.f, -1.f, 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 10.f}, + std::vector{0.f, 0.f, 0.f, 0.f, 0.16666667f, 0.33333333f, 0.5f, 0.66666667f, 0.83333333f, 1.f, 1.f, 1.f, 1.f}) + }; + return hSigmoidParams; +} + +std::vector generateHSigmoidCombinedParams() { + const std::vector> hSigmoidTypeParams { + generateHSigmoidFloatParams(), + generateHSigmoidFloatParams() + }; + std::vector combinedParams; + + for (const auto& params : hSigmoidTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_HSigmoid_With_Hardcoded_Refs, ReferenceHSigmoidLayerTest, + testing::ValuesIn(generateHSigmoidCombinedParams()), ReferenceHSigmoidLayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/op_reference/hswish.cpp b/docs/template_plugin/tests/functional/op_reference/hswish.cpp new file mode 100644 index 00000000000..d88559837c3 --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/hswish.cpp @@ -0,0 +1,95 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "openvino/op/hswish.hpp" +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ov; +using namespace InferenceEngine; + +namespace { +struct HSwishParams { + template + HSwishParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector& iValues, const std::vector& oValues) + : pshape(shape), + inType(iType), + outType(iType), + inputData(CreateTensor(iType, iValues)), + refData(CreateTensor(iType, oValues)) {} + + ov::PartialShape pshape; + ov::element::Type inType; + ov::element::Type outType; + ov::runtime::Tensor inputData; + ov::runtime::Tensor refData; +}; + +class ReferenceHSwishLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.pshape, params.inType, params.outType); + inputData = {params.inputData}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + 
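// The assembled suffix (shape, input type, output type) keeps the f32 and
// f16 instantiations below distinguishable in the gtest report.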
result << "oType=" << param.outType; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type, + const element::Type& HSwishected_output_type) { + const auto in = std::make_shared(input_type, input_shape); + const auto HSwish = std::make_shared(in); + return std::make_shared(NodeVector {HSwish}, ParameterVector {in}); + } +}; + +TEST_P(ReferenceHSwishLayerTest, CompareWithRefs) { + Exec(); +} + + +template +std::vector generateHSwishFloatParams() { + using T = typename element_type_traits::value_type; + + std::vector hSwishParams { + HSwishParams(ov::PartialShape {2, 3}, + IN_ET, + std::vector{1.f, 8.f, -8.f, 17.f, -0.5f, -1.f}, + std::vector{0.66666667f, 8.f, 0.f, 17.f, -0.20833333f, -0.33333333f}), + HSwishParams(ov::PartialShape {2, 2, 1, 2}, + IN_ET, + std::vector{0.1f, 0.6f, 20.f, -7.f, -5.3f, 3.5f, -9.f, 11.f}, + std::vector{0.05166667f, 0.36f, 20.f, 0.f, 0.f, 3.5f, 0.f, 11.f}) + }; + return hSwishParams; +} + +std::vector generateHSwishCombinedParams() { + const std::vector> hSwishTypeParams { + generateHSwishFloatParams(), + generateHSwishFloatParams() + }; + std::vector combinedParams; + + for (const auto& params : hSwishTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_HSwish_With_Hardcoded_Refs, ReferenceHSwishLayerTest, + testing::ValuesIn(generateHSwishCombinedParams()), ReferenceHSwishLayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/op_reference/log_softmax.cpp b/docs/template_plugin/tests/functional/op_reference/log_softmax.cpp new file mode 100644 index 00000000000..d35c2054bbf --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/log_softmax.cpp @@ -0,0 +1,248 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "openvino/op/log_softmax.hpp" +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ov; +using namespace InferenceEngine; + +namespace { +struct LogSoftmaxParams { + template + LogSoftmaxParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector& iValues, const std::vector& oValues, + const int64_t axis) + : axis(axis), + pshape(shape), + inType(iType), + outType(iType), + inputData(CreateTensor(iType, iValues)), + refData(CreateTensor(iType, oValues)) {} + + int64_t axis = 0; + + ov::PartialShape pshape; + ov::element::Type inType; + ov::element::Type outType; + ov::runtime::Tensor inputData; + ov::runtime::Tensor refData; +}; + +class ReferenceLogSoftmaxLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.pshape, params.inType, params.outType, params.axis); + inputData = {params.inputData}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType << "_"; + result << "axis=" << param.axis; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type, + const element::Type& expected_output_type, const int64_t axis) { + const auto in 
= std::make_shared(input_type, input_shape); + const auto LogSoftmax = std::make_shared(in, axis); + return std::make_shared(NodeVector {LogSoftmax}, ParameterVector {in}); + } +}; + +TEST_P(ReferenceLogSoftmaxLayerTest, CompareWithRefs) { + Exec(); +} + +template +std::vector generateLogSoftmaxFloatParams() { + using T = typename element_type_traits::value_type; + + std::vector logSoftmaxParams { + LogSoftmaxParams(ov::PartialShape {1}, + IN_ET, + std::vector{1}, + std::vector{0}, + 0), + LogSoftmaxParams(ov::PartialShape {2, 4}, + IN_ET, + std::vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}, + std::vector{-10000., -10000., -10000., -10000., 0., 0., 0., 0.}, + 0), + LogSoftmaxParams(ov::PartialShape {2, 4}, + IN_ET, + std::vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}, + std::vector{-3.4401896, -2.4401896, -1.4401897, -0.4401897, -3.4401896, -2.4401896, -1.4401897, -0.4401897}, + 1), + LogSoftmaxParams(ov::PartialShape {2, 4}, + IN_ET, + std::vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}, + std::vector{-3.4401896, -2.4401896, -1.4401897, -0.4401897, -3.4401896, -2.4401896, -1.4401897, -0.4401897}, + -1), + LogSoftmaxParams(ov::PartialShape {2, 4}, + IN_ET, + std::vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}, + std::vector{-10000., -10000., -10000., -10000., 0., 0., 0., 0.}, + -2), + LogSoftmaxParams(ov::PartialShape {3, 2, 3}, + IN_ET, + std::vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}, + std::vector{-12.0024818, + -12.0024818, + -12.0024818, + -12.0024818, + -12.0024818, + -12.0024818, + -6.00248181, + -6.00248181, + -6.00248181, + -6.00248181, + -6.00248181, + -6.00248181, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03}, + 0), + LogSoftmaxParams(ov::PartialShape {3, 2, 3}, + IN_ET, + std::vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}, + std::vector{-3.04858735, + -3.04858735, + -3.04858735, + -0.04858735, + -0.04858735, + -0.04858735, + -3.04858735, + -3.04858735, + -3.04858735, + -0.04858735, + -0.04858735, + -0.04858735, + -3.04858735, + -3.04858735, + -3.04858735, + -0.04858735, + -0.04858735, + -0.04858735}, + 1), + LogSoftmaxParams(ov::PartialShape {3, 2, 3}, + IN_ET, + std::vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}, + std::vector{-2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596}, + 2), + LogSoftmaxParams(ov::PartialShape {3, 2, 3}, + IN_ET, + std::vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}, + std::vector{-2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596}, + -1), + LogSoftmaxParams(ov::PartialShape {3, 2, 3}, + IN_ET, + std::vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}, + std::vector{-3.04858735, + -3.04858735, + -3.04858735, + -0.04858735, + -0.04858735, + -0.04858735, + -3.04858735, + -3.04858735, + -3.04858735, + -0.04858735, + -0.04858735, + -0.04858735, + -3.04858735, + -3.04858735, + -3.04858735, + -0.04858735, + -0.04858735, + -0.04858735}, + -2), + LogSoftmaxParams(ov::PartialShape {3, 2, 3}, + IN_ET, + std::vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 
0, 1, 2, 3, 4, 5, 6, 7, 8}, + std::vector{-12.0024818, + -12.0024818, + -12.0024818, + -12.0024818, + -12.0024818, + -12.0024818, + -6.00248181, + -6.00248181, + -6.00248181, + -6.00248181, + -6.00248181, + -6.00248181, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03}, + -3) + }; + return logSoftmaxParams; +} + +std::vector generateLogSoftmaxCombinedParams() { + const std::vector> logSoftmaxTypeParams { + generateLogSoftmaxFloatParams(), + generateLogSoftmaxFloatParams() + }; + std::vector combinedParams; + + for (const auto& params : logSoftmaxTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_LogSoftmax_With_Hardcoded_Refs, ReferenceLogSoftmaxLayerTest, + testing::ValuesIn(generateLogSoftmaxCombinedParams()), ReferenceLogSoftmaxLayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/op_reference/mish.cpp b/docs/template_plugin/tests/functional/op_reference/mish.cpp new file mode 100644 index 00000000000..39b0375d19c --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/mish.cpp @@ -0,0 +1,128 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include "openvino/op/mish.hpp" +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ov; + +namespace { +struct MishParams { + template + MishParams(const ov::PartialShape& dynamicShape, const ov::Shape& inputShape, + const ov::element::Type& iType, const std::vector& iValues, const std::vector& oValues, + const std::string& test_name = "") + : dynamicShape(dynamicShape), + inputShape(inputShape), + inType(iType), + outType(iType), + inputData(CreateTensor(iType, iValues)), + refData(CreateTensor(iType, oValues)), + testcaseName(test_name) {} + + ov::PartialShape dynamicShape; + ov::PartialShape inputShape; + ov::element::Type inType; + ov::element::Type outType; + ov::runtime::Tensor inputData; + ov::runtime::Tensor refData; + std::string testcaseName; +}; + +class ReferenceMishLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.dynamicShape, params.inType); + inputData = {params.inputData}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "dShape=" << param.dynamicShape << "_"; + result << "iShape=" << param.inputShape << "_"; + result << "iType=" << param.inType << "_"; + if (param.testcaseName != "") { + result << "oType=" << param.outType << "_"; + result << param.testcaseName; + } else { + result << "oType=" << param.outType; + } + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type) { + const auto in = std::make_shared(input_type, input_shape); + const auto Mish = std::make_shared(in); + return std::make_shared(NodeVector {Mish}, ParameterVector {in}); + } +}; + +TEST_P(ReferenceMishLayerTest, CompareWithRefs) { + Exec(); +} + +template +std::vector generateMishFloatParams(const PartialShape& dynamicShape, const Shape& staticShape, const std::string& test_name = "") { + using T = typename element_type_traits::value_type; + + // generate input tensor (with 
possible type conversion) + auto staticSize = shape_size(staticShape); + std::vector expected; + std::vector input; + { + std::mt19937 gen{0}; // use fixed seed for reproducibility of the test + std::normal_distribution<> d{0.0, 20.0}; + + for (auto i = staticSize; i > 0; i--) { + auto x = static_cast(d(gen)); + auto y = static_cast(static_cast(x) * std::tanh(std::log(1.0 + std::exp(x)))); + input.push_back(x); + expected.push_back(y); + } + } + + std::vector mishParams; + + if (test_name != "") { + mishParams = { + MishParams(dynamicShape, staticShape, IN_ET, input, expected, test_name) + }; + } else { + mishParams = { + MishParams(dynamicShape, staticShape, IN_ET, input, expected) + }; + } + return mishParams; +} + +std::vector generateMishCombinedParams() { + const std::vector> mishTypeParams { + generateMishFloatParams({2, 5}, {2, 5}), + generateMishFloatParams({2, 3, 4, 5}, {2, 3, 4, 5}), + generateMishFloatParams(PartialShape::dynamic(), {2, 3, 4, 5}), + generateMishFloatParams({2, Dimension::dynamic(), 4, 5}, {2, 3, 4, 5}, "dimensionDynamic"), + generateMishFloatParams({2, 5}, {2, 5}), + generateMishFloatParams({2, 3, 4, 5}, {2, 3, 4, 5}), + generateMishFloatParams(PartialShape::dynamic(), {2, 3, 4, 5}), + generateMishFloatParams({2, Dimension::dynamic(), 4, 5}, {2, 3, 4, 5}, "dimensionDynamic") + }; + std::vector combinedParams; + + for (const auto& params : mishTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_Mish_With_Hardcoded_Refs, ReferenceMishLayerTest, + testing::ValuesIn(generateMishCombinedParams()), ReferenceMishLayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/op_reference/prelu.cpp b/docs/template_plugin/tests/functional/op_reference/prelu.cpp new file mode 100644 index 00000000000..9f2549ac037 --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/prelu.cpp @@ -0,0 +1,421 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include "openvino/op/prelu.hpp" +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ov; + +namespace { +struct PreluParams { + template + PreluParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector& iValues, const std::vector& oValues, + const ov::Shape& slopeShape, const std::vector& negativeSlopeValues, const std::string& test_name = "") + : pshape(shape), + inType(iType), + outType(iType), + inputData(CreateTensor(iType, iValues)), + refData(CreateTensor(iType, oValues)), + negativeSlopeShape(slopeShape), + negativeSlope(CreateTensor(iType, negativeSlopeValues)), + testcaseName(test_name) {} + + ov::PartialShape pshape; + ov::element::Type inType; + ov::element::Type outType; + ov::runtime::Tensor inputData; + ov::runtime::Tensor refData; + ov::Shape negativeSlopeShape; + ov::runtime::Tensor negativeSlope; + std::string testcaseName; +}; + +class ReferencePreluLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.pshape, params.negativeSlopeShape, params.inType); + inputData = {params.inputData, params.negativeSlope}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << 
param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType << "_"; + if (param.testcaseName != "") { + result << "slopeShape=" << param.negativeSlopeShape << "_"; + result << param.testcaseName; + } else { + result << "slopeShape=" << param.negativeSlopeShape; + } + + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const Shape& slope_shape, const element::Type& input_type) { + const auto in = std::make_shared(input_type, input_shape); + const auto SLOPE = std::make_shared(input_type, slope_shape); + const auto Prelu = std::make_shared(in, SLOPE); + return std::make_shared(NodeVector {Prelu}, ParameterVector {in, SLOPE}); + } +}; + +TEST_P(ReferencePreluLayerTest, CompareWithRefs) { + Exec(); +} + +template +std::vector generatePreluFloatParams() { + using T = typename element_type_traits::value_type; + + std::vector preluParams { + PreluParams(ov::PartialShape {6}, + IN_ET, + std::vector{1, 2, -3, -4, 5, 6}, + std::vector{1, 2, -6, -8, 5, 6}, + ov::Shape {1}, + std::vector{2}), + PreluParams(ov::PartialShape {6}, + IN_ET, + std::vector{1, 2, -3, -4, 5, 6}, + std::vector{1, 2, -12, -20, 5, 6}, + ov::Shape {6}, + std::vector{2, 3, 4, 5, 6, 7}), + PreluParams(ov::PartialShape {3, 2}, + IN_ET, + std::vector{-2, 3, -2, 1, -1, 0}, + std::vector{0, 3, 0, 1, 0, 0}, + ov::Shape {2}, + std::vector{0, 1}), + PreluParams(ov::PartialShape {2, 6}, + IN_ET, + std::vector{1, 2, -3, -4, 5, 6, 7, 8, -9, -10, 11, 12}, + std::vector{1, 2, -9, -16, 5, 6, 7, 8, -27, -40, 11, 12}, + ov::Shape {6}, + std::vector{1, 2, 3, 4, 5, 6}), + PreluParams(ov::PartialShape {3, 2}, + IN_ET, + std::vector{-1, -1, -1, -1, -1, -1}, + std::vector{-2, -0.5, -2, -0.5, -2, -0.5}, + ov::Shape {2}, + std::vector{2, 0.5}, + "C_2_const"), + PreluParams(ov::PartialShape {2, 2, 2}, + IN_ET, + std::vector{-0.5, -2, -3, -4, -5, -6, -7, -8}, + std::vector{0.25, 1, 6, 8, 2.5, 3, 14, 16}, + ov::Shape {2}, + std::vector{-0.5, -2}), + PreluParams(ov::PartialShape {3, 2}, + IN_ET, + std::vector{-2, 3, -2, 1, -1, 0}, + std::vector{1, 3, 1, 1, 0.5, 0}, + ov::Shape {2}, + std::vector{-0.5, -1}, + "negative_slope"), + PreluParams(ov::PartialShape {2, 6}, + IN_ET, + std::vector{1, 2, -3, -4, 5, 6, 1, 2, -3, -4, 5, 6}, + std::vector{1, 2, -6, -8, 5, 6, 1, 2, -12, -8, 5, 6}, + ov::Shape {2, 6}, + std::vector{2, 2, 2, 2, 2, 2, 1, 1, 4, 2, 1, 1}), + PreluParams(ov::PartialShape {2, 2, 2, 2}, + IN_ET, + std::vector{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4}, + std::vector{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16}, + ov::Shape {2, 1, 2}, + std::vector{1, 2, 3, 4}), + PreluParams(ov::PartialShape {2, 2, 2, 2}, + IN_ET, + std::vector{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4}, + std::vector{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16}, + ov::Shape {1, 2, 1, 2}, + std::vector{1, 2, 3, 4}), + PreluParams(ov::PartialShape {2, 2, 6}, + IN_ET, + std::vector{1, 2, -3, -4, -5, 6, -1, -2, -3, -4, -5, -6, 1, 2, -3, -4, 5, 6, -2, 4, -6, -8, 10, 12}, + std::vector{1, 2, -9, -16, -5, 6, -2, -2, -9, -16, -5, -42, 1, 2, -9, -16, 5, 6, -2, 4, -18, -32, 10, 12}, + ov::Shape {2, 1, 6}, + std::vector{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6}), + PreluParams(ov::PartialShape {2, 3, 2}, + IN_ET, + std::vector{1, 2, -3, -4, -5, 6, -1, -2, -3, -4, -5, -6}, + std::vector{1, 2, -9, -16, -5, 6, -1, -4, -9, -16, -25, -36}, + ov::Shape {2, 3, 2}, + std::vector{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6}), + PreluParams(ov::PartialShape {2, 
1, 2}, + IN_ET, + std::vector{-10, -10, -10, -10}, + std::vector{-1, -100, -1, -100}, + ov::Shape {2}, + std::vector{0.1, 10}), + PreluParams(ov::PartialShape {1, 2, 1, 2}, + IN_ET, + std::vector{-10, -10, -10, -10}, + std::vector{-1, -1, -100, -100}, + ov::Shape {2}, + std::vector{0.1, 10}), + PreluParams(ov::PartialShape {1, 5, 1, 1}, + IN_ET, + std::vector{-1, 0, -1, -1, -1}, + std::vector{-1, 0, -3, -4, -5}, + ov::Shape {5}, + std::vector{1, 2, 3, 4, 5}), + PreluParams(ov::PartialShape {2, 3, 4, 5}, + IN_ET, + std::vector{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.}, + std::vector{-0., -1., -2., -3., -4., -5., -6., -7., -8., -9., -10., -11., -12., -13., -14., + -15., -16., -17., -18., -19., -20., -21., -22., -23., -24., -25., -26., -27., -28., -29., + -30., -31., -32., -33., -34., -35., -36., -37., -38., -39., -40., -41., -42., -43., -44., + -45., -46., -47., -48., -49., -50., -51., -52., -53., -54., -55., -56., -57., -58., -59., + -60., -61., -62., -63., -64., -65., -66., -67., -68., -69., -70., -71., -72., -73., -74., + -75., -76., -77., -78., -79., -80., -81., -82., -83., -84., -85., -86., -87., -88., -89., + -90., -91., -92., -93., -94., -95., -96., -97., -98., -99., -100., -101., -102., -103., -104., + -105., -106., -107., -108., -109., -110., -111., -112., -113., -114., -115., -116., -117., -118., -119.}, + ov::Shape {2, 3, 4, 5}, + std::vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}), + PreluParams(ov::PartialShape {2, 3, 4, 5}, + IN_ET, + std::vector{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.}, + std::vector{-0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., + -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., + -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., + -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., + -2., -3., -4., -0., -1., -2., -3., -4., 
-0., -1., -2., -3., -4., -0., -1., -2., -3., -4., + -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., + -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4.}, + ov::Shape {5}, + std::vector{0, 1, 2, 3, 4}), + PreluParams(ov::PartialShape {2, 3, 4, 5}, + IN_ET, + std::vector{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.}, + std::vector{-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., + -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., + -2., -2., -2., -2., -2., -2., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., + -0., -0., -0., -0., -0., -0., -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., + -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.}, + ov::Shape {3}, + std::vector{0, 1, 2}), + PreluParams(ov::PartialShape {2, 3, 4, 5}, + IN_ET, + std::vector{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.}, + std::vector{-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., + -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., + -2., -2., -2., -2., -2., -2., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., + -0., -0., -0., -0., -0., -0., -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., + -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., + -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.}, + ov::Shape {3, 1, 1}, + std::vector{0, 1, 2}) + }; + return preluParams; +} + +template +std::vector generatePreluI8Params() { + using T = typename element_type_traits::value_type; + + std::vector preluParams { + PreluParams(ov::PartialShape {6}, + IN_ET, + std::vector{1, 2, -3, -4, 5, 6}, + std::vector{1, 2, -6, -8, 5, 6}, + ov::Shape {1}, + std::vector{2}), + PreluParams(ov::PartialShape {6}, + IN_ET, + std::vector{1, 2, -3, -4, 5, 6}, + std::vector{1, 2, -12, -20, 5, 6}, + ov::Shape {6}, + std::vector{2, 3, 4, 5, 6, 7}), + PreluParams(ov::PartialShape {3, 2}, + IN_ET, + std::vector{-2, 3, -2, 1, -1, 0}, + std::vector{0, 3, 0, 1, 0, 
0}, + ov::Shape {2}, + std::vector{0, 1}), + PreluParams(ov::PartialShape {2, 6}, + IN_ET, + std::vector{1, 2, -3, -4, 5, 6, 7, 8, -9, -10, 11, 12}, + std::vector{1, 2, -9, -16, 5, 6, 7, 8, -27, -40, 11, 12}, + ov::Shape {6}, + std::vector{1, 2, 3, 4, 5, 6}), + PreluParams(ov::PartialShape {2, 6}, + IN_ET, + std::vector{1, 2, -3, -4, 5, 6, 1, 2, -3, -4, 5, 6}, + std::vector{1, 2, -6, -8, 5, 6, 1, 2, -12, -8, 5, 6}, + ov::Shape {2, 6}, + std::vector{2, 2, 2, 2, 2, 2, 1, 1, 4, 2, 1, 1}), + PreluParams(ov::PartialShape {2, 2, 2, 2}, + IN_ET, + std::vector{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4}, + std::vector{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16}, + ov::Shape {2, 1, 2}, + std::vector{1, 2, 3, 4}), + PreluParams(ov::PartialShape {2, 2, 2, 2}, + IN_ET, + std::vector{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4}, + std::vector{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16}, + ov::Shape {1, 2, 1, 2}, + std::vector{1, 2, 3, 4}), + PreluParams(ov::PartialShape {2, 2, 6}, + IN_ET, + std::vector{1, 2, -3, -4, -5, 6, -1, -2, -3, -4, -5, -6, 1, 2, -3, -4, 5, 6, -2, 4, -6, -8, 10, 12}, + std::vector{1, 2, -9, -16, -5, 6, -2, -2, -9, -16, -5, -42, 1, 2, -9, -16, 5, 6, -2, 4, -18, -32, 10, 12}, + ov::Shape {2, 1, 6}, + std::vector{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6}), + PreluParams(ov::PartialShape {2, 3, 2}, + IN_ET, + std::vector{1, 2, -3, -4, -5, 6, -1, -2, -3, -4, -5, -6}, + std::vector{1, 2, -9, -16, -5, 6, -1, -4, -9, -16, -25, -36}, + ov::Shape {2, 3, 2}, + std::vector{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6}), + PreluParams(ov::PartialShape {1, 5, 1, 1}, + IN_ET, + std::vector{-1, 0, -1, -1, -1}, + std::vector{-1, 0, -3, -4, -5}, + ov::Shape {5}, + std::vector{1, 2, 3, 4, 5}), + PreluParams(ov::PartialShape {2, 3, 4, 5}, + IN_ET, + std::vector{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + std::vector{-0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, + -15, -16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27, -28, -29, + -30, -31, -32, -33, -34, -35, -36, -37, -38, -39, -40, -41, -42, -43, -44, + -45, -46, -47, -48, -49, -50, -51, -52, -53, -54, -55, -56, -57, -58, -59, + -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -74, + -75, -76, -77, -78, -79, -80, -81, -82, -83, -84, -85, -86, -87, -88, -89, + -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, + -105, -106, -107, -108, -109, -110, -111, -112, -113, -114, -115, -116, -117, -118, -119}, + ov::Shape {2, 3, 4, 5}, + std::vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}), + PreluParams(ov::PartialShape {2, 3, 4, 5}, + IN_ET, + 
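// 2x3x4x5 input filled with -1: the Shape{5} slope {0, 1, 2, 3, 4} broadcasts
// along the innermost axis, so the expected values cycle -0, -1, -2, -3, -4.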
std::vector{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + std::vector{-0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, + -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, + -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, + -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, + -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, + -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, + -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4}, + ov::Shape {5}, + std::vector{0, 1, 2, 3, 4}), + PreluParams(ov::PartialShape {2, 3, 4, 5}, + IN_ET, + std::vector{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + std::vector{-0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, + -0, -0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, + -2, -2, -2, -2, -2, -2, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, + -0, -0, -0, -0, -0, -0, -0, -0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2, -2, + -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2}, + ov::Shape {3}, + std::vector{0, 1, 2}), + PreluParams(ov::PartialShape {2, 3, 4, 5}, + IN_ET, + std::vector{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + std::vector{-0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, + -0, -0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, + -2, -2, -2, -2, -2, -2, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, + -0, -0, -0, -0, -0, -0, -0, -0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2, -2, + -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2}, + ov::Shape {3, 1, 1}, + std::vector{0, 1, 2}) + }; + return preluParams; +} + +std::vector generatePreluCombinedParams() { + const std::vector> preluTypeParams { + generatePreluFloatParams(), + generatePreluFloatParams(), + generatePreluFloatParams(), + generatePreluI8Params() + }; + std::vector combinedParams; + 
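+    // Flatten the per-element-type parameter vectors into one sequence so a single
+    // parameterized suite instantiation covers every tested precision.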
+    for (const auto& params : preluTypeParams) {
+        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
+    }
+    return combinedParams;
+}
+
+INSTANTIATE_TEST_SUITE_P(smoke_Prelu_With_Hardcoded_Refs, ReferencePreluLayerTest,
+                         testing::ValuesIn(generatePreluCombinedParams()), ReferencePreluLayerTest::getTestCaseName);
+
+} // namespace
\ No newline at end of file
diff --git a/docs/template_plugin/tests/functional/op_reference/relu.cpp b/docs/template_plugin/tests/functional/op_reference/relu.cpp
new file mode 100644
index 00000000000..ce790b11931
--- /dev/null
+++ b/docs/template_plugin/tests/functional/op_reference/relu.cpp
@@ -0,0 +1,124 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include "openvino/op/relu.hpp"
+#include "base_reference_test.hpp"
+
+using namespace reference_tests;
+using namespace ov;
+
+namespace {
+struct ReluParams {
+    template <class IT>
+    ReluParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues)
+        : pshape(shape),
+          inType(iType),
+          outType(iType),
+          inputData(CreateTensor(iType, iValues)),
+          refData(CreateTensor(iType, oValues)) {}
+
+    ov::PartialShape pshape;
+    ov::element::Type inType;
+    ov::element::Type outType;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
+};
+
+class ReferenceReluLayerTest : public testing::TestWithParam<ReluParams>, public CommonReferenceTest {
+public:
+    void SetUp() override {
+        auto params = GetParam();
+        function = CreateFunction(params.pshape, params.inType, params.outType);
+        inputData = {params.inputData};
+        refOutData = {params.refData};
+    }
+    static std::string getTestCaseName(const testing::TestParamInfo<ReluParams>& obj) {
+        auto param = obj.param;
+        std::ostringstream result;
+        result << "shape=" << param.pshape << "_";
+        result << "iType=" << param.inType << "_";
+        result << "oType=" << param.outType;
+        return result.str();
+    }
+
+private:
+    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
+                                                    const element::Type& expected_output_type) {
+        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
+        const auto Relu = std::make_shared<op::v0::Relu>(in);
+        return std::make_shared<ov::Function>(NodeVector {Relu}, ParameterVector {in});
+    }
+};
+
+TEST_P(ReferenceReluLayerTest, CompareWithRefs) {
+    Exec();
+}
+
+template <element::Type_t IN_ET>
+std::vector<ReluParams> generateReluFloatParams() {
+    using T = typename element_type_traits<IN_ET>::value_type;
+
+    std::vector<ReluParams> reluParams {
+        ReluParams(ov::PartialShape {2, 5},
+                   IN_ET,
+                   std::vector<T>{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5},
+                   std::vector<T>{1, 8, 0, 17, 0, 1, 8, 0, 17, 0}),
+        ReluParams(ov::PartialShape {2, 2, 2, 2},
+                   IN_ET,
+                   std::vector<T>{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1},
+                   std::vector<T>{1, 8, 0, 17, 0, 1, 8, 0, 17, 0, 1, 8, 0, 17, 0, 1})
+    };
+    return reluParams;
+}
+
+template <element::Type_t IN_ET>
+std::vector<ReluParams> generateReluIntParams() {
+    using T = typename element_type_traits<IN_ET>::value_type;
+
+    std::vector<ReluParams> reluParams {
+        ReluParams(ov::PartialShape {2, 5},
+                   IN_ET,
+                   std::vector<T>{1, 8, -8, 17, -2, 1, 8, -8, 17, -1},
+                   std::vector<T>{1, 8, 0, 17, 0, 1, 8, 0, 17, 0})
+    };
+    return reluParams;
+}
+
+template <element::Type_t IN_ET>
+std::vector<ReluParams> generateReluUintParams() {
+    using T = typename element_type_traits<IN_ET>::value_type;
+
+    std::vector<ReluParams> reluParams {
+        ReluParams(ov::PartialShape {2, 5},
+                   IN_ET,
+                   std::vector<T>{1, 8, 17, 1, 8, 17, 1, 8, 17, 0},
+                   std::vector<T>{1, 8, 17, 1, 8, 17, 1, 8, 17, 0})
+    };
+    return reluParams;
+}
+
+std::vector<ReluParams> generateReluCombinedParams() {
+    const
std::vector> reluTypeParams { + generateReluFloatParams(), + generateReluFloatParams(), + generateReluIntParams(), + generateReluIntParams(), + generateReluUintParams(), + generateReluUintParams() + }; + std::vector combinedParams; + + for (const auto& params : reluTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_Relu_With_Hardcoded_Refs, ReferenceReluLayerTest, + testing::ValuesIn(generateReluCombinedParams()), ReferenceReluLayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/op_reference/selu.cpp b/docs/template_plugin/tests/functional/op_reference/selu.cpp new file mode 100644 index 00000000000..7d060a537fb --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/selu.cpp @@ -0,0 +1,140 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "openvino/op/selu.hpp" +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ov; + +namespace { +struct SeluParams { + template + SeluParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector& iValues, const std::vector& oValues, + const ov::Shape& alphaShape, const ov::Shape& lambdaShape, + const std::vector& alphaValues, const std::vector& lambdaValues, + const std::string& test_name = "") + : pshape(shape), + inType(iType), + outType(iType), + inputData(CreateTensor(iType, iValues)), + refData(CreateTensor(iType, oValues)), + alphaShape(alphaShape), + lambdaShape(lambdaShape), + alpha(CreateTensor(iType, alphaValues)), + lambda(CreateTensor(iType, lambdaValues)), + testcaseName(test_name) {} + + ov::PartialShape pshape; + ov::element::Type inType; + ov::element::Type outType; + ov::runtime::Tensor inputData; + ov::runtime::Tensor refData; + ov::Shape alphaShape; + ov::Shape lambdaShape; + ov::runtime::Tensor alpha; + ov::runtime::Tensor lambda; + std::string testcaseName; +}; + +class ReferenceSeluLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params); + inputData = {params.inputData, params.alpha, params.lambda}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType << "_"; + result << "alpha=" << param.alpha.data() << "_"; + if (param.testcaseName != "") { + result << "lambda=" << param.lambda.data() << "_"; + result << param.testcaseName; + } else { + result << "lambda=" << param.lambda.data(); + } + + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const SeluParams& params) { + const auto in = std::make_shared(params.inType, params.pshape); + const auto alpha = std::make_shared(params.inType, params.alphaShape); + const auto lambda = std::make_shared(params.inType, params.lambdaShape); + const auto Selu = std::make_shared(in, alpha, lambda); + return std::make_shared(NodeVector {Selu}, ParameterVector {in, alpha, lambda}); + } +}; + +TEST_P(ReferenceSeluLayerTest, CompareWithRefs) { + Exec(); +} + +template +std::vector generateSeluFloatParams() { + using T = typename element_type_traits::value_type; + + std::vector seluParams { + 
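+        // SELU reference values below follow f(x) = lambda * x for x > 0 and
+        // lambda * alpha * (exp(x) - 1) otherwise, with alpha = 1.67326324 and
+        // lambda = 1.05070098 as passed in each entry (e.g. f(-1) ~= -1.1113307).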
+        SeluParams(ov::PartialShape {2},
+                   IN_ET,
+                   std::vector<T>{-1, 3},
+                   std::vector<T>{-1.1113307, 3.152103},
+                   ov::Shape {1},
+                   ov::Shape {1},
+                   std::vector<T>{1.67326324},
+                   std::vector<T>{1.05070098}),
+        SeluParams(ov::PartialShape {4},
+                   IN_ET,
+                   std::vector<T>{-1.0, 0.0, 1.0, 2.0},
+                   std::vector<T>{-1.1113307, 0., 1.050701, 2.101402},
+                   ov::Shape {1},
+                   ov::Shape {1},
+                   std::vector<T>{1.67326324},
+                   std::vector<T>{1.05070098}),
+        SeluParams(ov::PartialShape {1},
+                   IN_ET,
+                   std::vector<T>{112.0},
+                   std::vector<T>{117.67851},
+                   ov::Shape {1},
+                   ov::Shape {1},
+                   std::vector<T>{1.67326324},
+                   std::vector<T>{1.05070098}),
+        SeluParams(ov::PartialShape {3},
+                   IN_ET,
+                   std::vector<T>{-3.0, -12.5, -7.0},
+                   std::vector<T>{-1.6705687, -1.7580928, -1.7564961},
+                   ov::Shape {1},
+                   ov::Shape {1},
+                   std::vector<T>{1.67326324},
+                   std::vector<T>{1.05070098})
+    };
+    return seluParams;
+}
+
+std::vector<SeluParams> generateSeluCombinedParams() {
+    const std::vector<std::vector<SeluParams>> seluTypeParams {
+        generateSeluFloatParams(),
+        generateSeluFloatParams(),
+        generateSeluFloatParams()
+    };
+    std::vector<SeluParams> combinedParams;
+
+    for (const auto& params : seluTypeParams) {
+        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
+    }
+    return combinedParams;
+}
+
+INSTANTIATE_TEST_SUITE_P(smoke_Selu_With_Hardcoded_Refs, ReferenceSeluLayerTest,
+                         testing::ValuesIn(generateSeluCombinedParams()), ReferenceSeluLayerTest::getTestCaseName);
+
+} // namespace
\ No newline at end of file
diff --git a/docs/template_plugin/tests/functional/op_reference/sigmoid.cpp b/docs/template_plugin/tests/functional/op_reference/sigmoid.cpp
new file mode 100644
index 00000000000..fb7908f6d60
--- /dev/null
+++ b/docs/template_plugin/tests/functional/op_reference/sigmoid.cpp
@@ -0,0 +1,137 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include "openvino/op/sigmoid.hpp"
+#include "base_reference_test.hpp"
+
+using namespace reference_tests;
+using namespace ov;
+
+namespace {
+struct SigmoidParams {
+    template <class IT>
+    SigmoidParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues)
+        : pshape(shape),
+          inType(iType),
+          outType(iType),
+          inputData(CreateTensor(iType, iValues)),
+          refData(CreateTensor(iType, oValues)) {}
+
+    ov::PartialShape pshape;
+    ov::element::Type inType;
+    ov::element::Type outType;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
+};
+
+class ReferenceSigmoidLayerTest : public testing::TestWithParam<SigmoidParams>, public CommonReferenceTest {
+public:
+    void SetUp() override {
+        auto params = GetParam();
+        function = CreateFunction(params.pshape, params.inType, params.outType);
+        inputData = {params.inputData};
+        refOutData = {params.refData};
+    }
+    static std::string getTestCaseName(const testing::TestParamInfo<SigmoidParams>& obj) {
+        auto param = obj.param;
+        std::ostringstream result;
+        result << "shape=" << param.pshape << "_";
+        result << "iType=" << param.inType << "_";
+        result << "oType=" << param.outType;
+        return result.str();
+    }
+
+private:
+    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
+                                                    const element::Type& expected_output_type) {
+        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
+        const auto Sigmoid = std::make_shared<op::v0::Sigmoid>(in);
+        return std::make_shared<ov::Function>(NodeVector {Sigmoid}, ParameterVector {in});
+    }
+};
+
+TEST_P(ReferenceSigmoidLayerTest, CompareWithRefs) {
+    Exec();
+}
+
+template <element::Type_t IN_ET>
+std::vector<SigmoidParams> generateSigmoidFloatParams() {
+    using T = typename element_type_traits<IN_ET>::value_type;
+
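+    // Reference outputs are computed inline from the logistic function
+    // sigma(x) = 1 / (1 + exp(-x)); e.g. sigma(1) ~= 0.731 and sigma(4) ~= 0.982.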
float x1 = 1.0f; + float x2 = 4.0f; + float sigma1 = 1.0f / (1.0f + std::exp(-x1)); + float sigma2 = 1.0f / (1.0f + std::exp(-x2)); + + std::vector sigmoidParams { + SigmoidParams(ov::PartialShape {1, 1, 2, 2}, + IN_ET, + std::vector{x1, x2, x1, x2}, + std::vector{sigma1, sigma2, sigma1, sigma2}), + SigmoidParams(ov::PartialShape {1, 1, 4}, + IN_ET, + std::vector{x1, x2, x1, x2}, + std::vector{sigma1, sigma2, sigma1, sigma2}) + }; + return sigmoidParams; +} + +template +std::vector generateSigmoidIntParams() { + using T = typename element_type_traits::value_type; + + std::vector sigmoidParams { + SigmoidParams(ov::PartialShape {1, 1, 2, 2}, + IN_ET, + std::vector{1, 4, -1, -4}, + std::vector{1, 1, 0, 0}), + SigmoidParams(ov::PartialShape {1, 1, 4}, + IN_ET, + std::vector{1, 4, -1, -4}, + std::vector{1, 1, 0, 0}) + }; + return sigmoidParams; +} + +template +std::vector generateSigmoidUintParams() { + using T = typename element_type_traits::value_type; + + std::vector sigmoidParams { + SigmoidParams(ov::PartialShape {1, 1, 2, 2}, + IN_ET, + std::vector{1, 4, 1, 4}, + std::vector{1, 1, 1, 1}), + SigmoidParams(ov::PartialShape {1, 1, 4}, + IN_ET, + std::vector{1, 4, 1, 4}, + std::vector{1, 1, 1, 1}) + }; + return sigmoidParams; +} + +std::vector generateSigmoidCombinedParams() { + const std::vector> sigmoidTypeParams { + generateSigmoidFloatParams(), + generateSigmoidFloatParams(), + generateSigmoidIntParams(), + generateSigmoidIntParams(), + generateSigmoidUintParams(), + generateSigmoidUintParams() + }; + std::vector combinedParams; + + for (const auto& params : sigmoidTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_Sigmoid_With_Hardcoded_Refs, ReferenceSigmoidLayerTest, + testing::ValuesIn(generateSigmoidCombinedParams()), ReferenceSigmoidLayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/op_reference/softmax.cpp b/docs/template_plugin/tests/functional/op_reference/softmax.cpp new file mode 100644 index 00000000000..7f5f1d68f4c --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/softmax.cpp @@ -0,0 +1,191 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "openvino/op/softmax.hpp" +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ov; + +namespace { +struct SoftmaxParams { + template + SoftmaxParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector& iValues, const std::vector& oValues, + const int64_t axis, const std::string& test_name) + : axis(axis), + pshape(shape), + inType(iType), + outType(iType), + inputData(CreateTensor(iType, iValues)), + refData(CreateTensor(iType, oValues)), + test_case_name(test_name) {} + + int64_t axis = 0; + + ov::PartialShape pshape; + ov::element::Type inType; + ov::element::Type outType; + ov::runtime::Tensor inputData; + ov::runtime::Tensor refData; + std::string test_case_name; +}; + +class ReferenceSoftmaxLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.pshape, params.inType, params.outType, params.axis); + inputData = {params.inputData}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + 
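+        // Build a unique, filter-friendly test name from the shape, element types,
+        // softmax axis and the optional case label.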
result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType << "_"; + if (param.test_case_name != "") { + result << "axis=" << param.axis << "_"; + result << param.test_case_name; + } else { + result << "axis=" << param.axis; + } + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type, + const element::Type& expected_output_type, const int64_t axis) { + const auto in = std::make_shared(input_type, input_shape); + const auto Softmax = std::make_shared(in, axis); + return std::make_shared(NodeVector {Softmax}, ParameterVector {in}); + } +}; + +TEST_P(ReferenceSoftmaxLayerTest, CompareWithRefs) { + Exec(); +} + +template +std::vector generateSoftmaxFloatParams() { + using T = typename element_type_traits::value_type; + + auto d0 = expf(-10) + expf(-1); + auto d1 = expf(-20) + expf(-2); + auto d2 = expf(-30) + expf(-3); + auto d3 = expf(-40) + expf(-4); + auto d4 = expf(-50) + expf(-5); + auto d5 = expf(-60) + expf(-6); + + auto d0_a1 = expf(-10) + expf(-20) + expf(-30); + auto d1_a1 = expf(-40) + expf(-50) + expf(-60); + + auto d0_a0 = expf(-10) + expf(-40); + auto d1_a0 = expf(-20) + expf(-50); + auto d2_a0 = expf(-30) + expf(-60); + + auto low = std::numeric_limits::lowest(); + auto high = std::numeric_limits::max(); + + auto d0_uf = expf(low) + expf(3); + auto d1_uf = expf(1) + expf(4); + auto d2_uf = expf(2) + expf(5); + + auto d0_of = expf(high - high) + expf(3 - high); + auto d1_of = expf(1) + expf(4); + auto d2_of = expf(2) + expf(5); + + std::vector softmaxParams { + SoftmaxParams(ov::PartialShape {2, 2, 3}, + IN_ET, + std::vector{-10, -20, -30, -40, -50, -60, -1, -2, -3, -4, -5, -6}, + std::vector{expf(-10) / d0, + expf(-20) / d1, + expf(-30) / d2, + expf(-40) / d3, + expf(-50) / d4, + expf(-60) / d5, + expf(-1) / d0, + expf(-2) / d1, + expf(-3) / d2, + expf(-4) / d3, + expf(-5) / d4, + expf(-6) / d5}, + 0, + ""), + SoftmaxParams(ov::PartialShape {2, 3}, + IN_ET, + std::vector{-10, -20, -30, -40, -50, -60}, + std::vector{expf(-10) / d0_a1, + expf(-20) / d0_a1, + expf(-30) / d0_a1, + expf(-40) / d1_a1, + expf(-50) / d1_a1, + expf(-60) / d1_a1}, + 1, + ""), + SoftmaxParams(ov::PartialShape {2, 3}, + IN_ET, + std::vector{-10, -20, -30, -40, -50, -60}, + std::vector{expf(-10) / d0_a0, + expf(-20) / d1_a0, + expf(-30) / d2_a0, + expf(-40) / d0_a0, + expf(-50) / d1_a0, + expf(-60) / d2_a0}, + 0, + "test"), + SoftmaxParams(ov::PartialShape {1, 2, 3}, + IN_ET, + std::vector{-10, -20, -30, -40, -50, -60}, + std::vector{1, 1, 1, 1, 1, 1}, + 0, + "trivial"), + SoftmaxParams(ov::PartialShape {2, 3}, + IN_ET, + std::vector{low, 1, 2, 3, 4, 5}, + std::vector{expf(low) / d0_uf, + expf(1) / d1_uf, + expf(2) / d2_uf, + expf(3) / d0_uf, + expf(4) / d1_uf, + expf(5) / d2_uf}, + 0, + "underflow"), + SoftmaxParams(ov::PartialShape {2, 3}, + IN_ET, + std::vector{high, 1, 2, 3, 4, 5}, + std::vector{expf(high - high) / d0_of, + expf(1) / d1_of, + expf(2) / d2_of, + expf(3 - high) / d0_of, + expf(4) / d1_of, + expf(5) / d2_of}, + 0, + "overflow") + }; + return softmaxParams; +} + +std::vector generateSoftmaxCombinedParams() { + const std::vector> softmaxTypeParams { + generateSoftmaxFloatParams(), + generateSoftmaxFloatParams(), + generateSoftmaxFloatParams(), + generateSoftmaxFloatParams() + }; + std::vector combinedParams; + + for (const auto& params : softmaxTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + 
return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_Softmax_With_Hardcoded_Refs, ReferenceSoftmaxLayerTest, + testing::ValuesIn(generateSoftmaxCombinedParams()), ReferenceSoftmaxLayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/op_reference/softplus.cpp b/docs/template_plugin/tests/functional/op_reference/softplus.cpp new file mode 100644 index 00000000000..8a7f3e7ed7a --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/softplus.cpp @@ -0,0 +1,90 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "openvino/op/softplus.hpp" +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ov; + +namespace { +struct SoftPlusParams { + template + SoftPlusParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector& iValues, const std::vector& oValues) + : pshape(shape), + inType(iType), + outType(iType), + inputData(CreateTensor(iType, iValues)), + refData(CreateTensor(iType, oValues)) {} + + ov::PartialShape pshape; + ov::element::Type inType; + ov::element::Type outType; + ov::runtime::Tensor inputData; + ov::runtime::Tensor refData; +}; + +class ReferenceSoftPlusLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.pshape, params.inType, params.outType); + inputData = {params.inputData}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type, + const element::Type& SoftPlusected_output_type) { + const auto in = std::make_shared(input_type, input_shape); + const auto SoftPlus = std::make_shared(in); + return std::make_shared(NodeVector {SoftPlus}, ParameterVector {in}); + } +}; + +TEST_P(ReferenceSoftPlusLayerTest, CompareWithRefs) { + Exec(); +} + +template +std::vector generateSoftPlusFloatParams() { + using T = typename element_type_traits::value_type; + + std::vector softPlusParams { + SoftPlusParams(ov::PartialShape {4}, + IN_ET, + std::vector{-1.0, 0.0, 1.0, 20.0}, + std::vector{0.31326166, 0.69314718, 1.3132616, 20.0}) + }; + return softPlusParams; +} + +std::vector generateSoftPlusCombinedParams() { + const std::vector> softPlusTypeParams { + generateSoftPlusFloatParams(), + generateSoftPlusFloatParams(), + generateSoftPlusFloatParams() + }; + std::vector combinedParams; + + for (const auto& params : softPlusTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_SoftPlus_With_Hardcoded_Refs, ReferenceSoftPlusLayerTest, + testing::ValuesIn(generateSoftPlusCombinedParams()), ReferenceSoftPlusLayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/op_reference/swish.cpp b/docs/template_plugin/tests/functional/op_reference/swish.cpp new file mode 100644 index 00000000000..f6f2244f0ac --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/swish.cpp @@ -0,0 +1,133 @@ +// Copyright (C) 2021 Intel 
Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "openvino/op/swish.hpp" +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ov; + +namespace { +struct SwishParams { + template + SwishParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector& iValues, + const float beta = 1) + : pshape(shape), + inType(iType), + outType(iType), + inputData(CreateTensor(iType, iValues)), + beta(beta) { + std::vector oValues; + std::vector output; + std::vector betaVector; + + for (auto element : iValues) + output.push_back(static_cast(element)); + + std::transform(output.begin(), output.end(), output.begin(), [&beta](float x) -> float { + return (x / (1.0f + std::exp(x * beta * -1.0f))); + }); + + for (auto element : output) + oValues.push_back(static_cast(element)); + refData = CreateTensor(outType, oValues); + + betaVector.push_back(static_cast(beta)); + betaBlob = CreateTensor(inType, betaVector); + } + + ov::PartialShape pshape; + ov::element::Type inType; + ov::element::Type outType; + ov::runtime::Tensor inputData; + ov::runtime::Tensor refData; + ov::runtime::Tensor betaBlob; + + float beta; +}; + +class ReferenceSwishLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + threshold = 0.06; // 0.01 failed in fp32 test + + auto params = GetParam(); + function = CreateFunction(params.pshape, params.inType, params.outType, params.beta); + if (params.beta != 1) { + inputData = {params.inputData, params.betaBlob}; + refOutData = {params.refData}; + } else { + inputData = {params.inputData}; + refOutData = {params.refData}; + } + } + + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType << "_"; + result << "beta=" << param.beta; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type, + const element::Type& Swishected_output_type, const float beta) { + const auto in = std::make_shared(input_type, input_shape); + if (beta != 1) { + const auto BETA = std::make_shared(input_type, Shape {}); + const auto Swish = std::make_shared(in, BETA); + return std::make_shared(NodeVector {Swish}, ParameterVector {in, BETA}); + } else { + const auto Swish = std::make_shared(in); + return std::make_shared(NodeVector {Swish}, ParameterVector {in}); + } + } +}; + +TEST_P(ReferenceSwishLayerTest, CompareWithRefs) { + Exec(); +} + +template +std::vector generateSwishFloatParams() { + using T = typename element_type_traits::value_type; + + std::vector swishParams { + SwishParams(ov::PartialShape {2, 4}, + IN_ET, + std::vector{0.4, -5.7, -6, 3, -0.9, 23, 5, 3.3}, + 0.6f), + SwishParams(ov::PartialShape {2, 3}, + IN_ET, + std::vector{1, 8, -8, 17, -0.5, -1}), + SwishParams(ov::PartialShape {2, 2, 1, 2}, + IN_ET, + std::vector{0.1, 0.6, 20, -7, -5.3, 3.5, -9, 11}, + 0.33f) + }; + return swishParams; +} + +std::vector generateSwishCombinedParams() { + const std::vector> swishTypeParams { + generateSwishFloatParams(), + generateSwishFloatParams() + }; + std::vector combinedParams; + + for (const auto& params : swishTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_Swish_With_Hardcoded_Refs, 
ReferenceSwishLayerTest, + testing::ValuesIn(generateSwishCombinedParams()), ReferenceSwishLayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/skip_tests_config.cpp b/docs/template_plugin/tests/functional/skip_tests_config.cpp index 0781e0f2268..3803537984d 100644 --- a/docs/template_plugin/tests/functional/skip_tests_config.cpp +++ b/docs/template_plugin/tests/functional/skip_tests_config.cpp @@ -8,7 +8,7 @@ #include "functional_test_utils/skip_tests_config.hpp" std::vector disabledTestPatterns() { - return { + std::vector retVector{ // CVS-66280 R"(.*canLoadCorrectNetworkAndCheckConfig.*)", R"(.*canSetCorrectConfigLoadNetworkAndCheckConfig.*)", @@ -29,5 +29,16 @@ std::vector disabledTestPatterns() { // TODO: Round with f16 is not supported R"(.*smoke_Hetero_BehaviorTests.*OVExecNetwork.*readFromV10IR.*)", + + // CVS-64094 + R"(.*ReferenceLogSoftmaxLayerTest.*4.*iType=f16.*axis=.*1.*)", + // CVS-64080 + R"(.*ReferenceMishLayerTest.*dimensionDynamic.*)", }; + +#ifdef _WIN32 + // CVS-63989 + retVector.emplace_back(R"(.*ReferenceSigmoidLayerTest.*u64.*)"); +#endif + return retVector; } diff --git a/ngraph/core/src/op/exp.cpp b/ngraph/core/src/op/exp.cpp index 25b47ff58a4..a2ed45611c4 100644 --- a/ngraph/core/src/op/exp.cpp +++ b/ngraph/core/src/op/exp.cpp @@ -44,7 +44,6 @@ bool evaluate_exp(const HostTensorPtr& arg0, const HostTensorPtr& out) { out->set_unary(arg0); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_exp, boolean, arg0, out, count); NGRAPH_TYPE_CASE(evaluate_exp, i32, arg0, out, count); NGRAPH_TYPE_CASE(evaluate_exp, i64, arg0, out, count); NGRAPH_TYPE_CASE(evaluate_exp, u32, arg0, out, count); @@ -68,7 +67,6 @@ bool op::Exp::evaluate(const HostTensorVector& outputs, const HostTensorVector& bool op::Exp::has_evaluate() const { NGRAPH_OP_SCOPE(v0_Exp_has_evaluate); switch (get_input_element_type(0)) { - case ngraph::element::boolean: case ngraph::element::i32: case ngraph::element::i64: case ngraph::element::u32: diff --git a/ngraph/core/src/op/sigmoid.cpp b/ngraph/core/src/op/sigmoid.cpp index 170b9a2e014..785683f7484 100644 --- a/ngraph/core/src/op/sigmoid.cpp +++ b/ngraph/core/src/op/sigmoid.cpp @@ -41,7 +41,6 @@ bool evaluate_sigmoid(const HostTensorPtr& arg0, const HostTensorPtr& out) { out->set_unary(arg0); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_sigmoid, boolean, arg0, out, count); NGRAPH_TYPE_CASE(evaluate_sigmoid, i32, arg0, out, count); NGRAPH_TYPE_CASE(evaluate_sigmoid, i64, arg0, out, count); NGRAPH_TYPE_CASE(evaluate_sigmoid, u32, arg0, out, count); @@ -65,7 +64,6 @@ bool ov::op::v0::Sigmoid::evaluate(const HostTensorVector& outputs, const HostTe bool ov::op::v0::Sigmoid::has_evaluate() const { NGRAPH_OP_SCOPE(v0_Sigmoid_has_evaluate); switch (get_input_element_type(0)) { - case ngraph::element::boolean: case ngraph::element::i32: case ngraph::element::i64: case ngraph::element::u32: diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 64fedd6bcea..266faee4a52 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -210,6 +210,7 @@ set(SRC type_prop/reduce_min.cpp type_prop/reduce_prod.cpp type_prop/reduce_sum.cpp + type_prop/relu.cpp type_prop/reorg_yolo.cpp type_prop/reshape.cpp type_prop/result.cpp @@ -229,6 +230,7 @@ set(SRC type_prop/selu.cpp type_prop/shape_of.cpp type_prop/shuffle_channels.cpp + type_prop/sigmoid.cpp type_prop/sign.cpp type_prop/sin.cpp type_prop/sinh.cpp @@ -269,6 +271,7 @@ set(SRC 
visitors/op/broadcast.cpp visitors/op/bucketize.cpp visitors/op/ceiling.cpp + visitors/op/clamp.cpp visitors/op/constant.cpp visitors/op/convert.cpp visitors/op/convert_color_nv12.cpp @@ -286,6 +289,7 @@ set(SRC visitors/op/elu.cpp visitors/op/equal.cpp visitors/op/erf.cpp + visitors/op/exp.cpp visitors/op/extractimagepatches.cpp visitors/op/fake_quantize.cpp visitors/op/floor_mod.cpp @@ -297,11 +301,15 @@ set(SRC visitors/op/greater.cpp visitors/op/grn.cpp visitors/op/group_conv.cpp + visitors/op/hard_sigmoid.cpp + visitors/op/hsigmoid.cpp + visitors/op/hswish.cpp visitors/op/interpolate.cpp visitors/op/if.cpp visitors/op/less_equal.cpp visitors/op/less.cpp visitors/op/log.cpp + visitors/op/log_softmax.cpp visitors/op/logical_and.cpp visitors/op/logical_or.cpp visitors/op/logical_not.cpp @@ -328,6 +336,7 @@ set(SRC visitors/op/pad.cpp visitors/op/parameter.cpp visitors/op/power.cpp + visitors/op/prelu.cpp visitors/op/prior_box.cpp visitors/op/prior_box_clustered.cpp visitors/op/proposal.cpp @@ -343,6 +352,7 @@ set(SRC visitors/op/reduce_prod.cpp visitors/op/reduce_sum.cpp visitors/op/region_yolo.cpp + visitors/op/relu.cpp visitors/op/reorg_yolo.cpp visitors/op/reshape.cpp visitors/op/result.cpp @@ -358,6 +368,7 @@ set(SRC visitors/op/space_to_depth.cpp visitors/op/selu.cpp visitors/op/shuffle_channels.cpp + visitors/op/sigmoid.cpp visitors/op/sign.cpp visitors/op/sin.cpp visitors/op/sinh.cpp @@ -455,7 +466,6 @@ set(MULTI_TEST_SRC backend/constant.in.cpp backend/convolution_backprop.in.cpp backend/binary_convolution.in.cpp - backend/clamp.in.cpp backend/ctc_greedy_decoder.in.cpp backend/ctc_greedy_decoder_seq_len.in.cpp backend/deformable_psroi_pooling.in.cpp @@ -470,8 +480,6 @@ set(MULTI_TEST_SRC backend/experimental_detectron_topk_rois.in.cpp backend/strided_slice.in.cpp backend/dynamic.in.cpp - backend/elu.in.cpp - backend/exp.in.cpp backend/experimental_detectron_detection_output.in.cpp backend/experimental_detectron_prior_grid.in.cpp backend/fake_quantize.in.cpp @@ -481,20 +489,16 @@ set(MULTI_TEST_SRC backend/gather.in.cpp backend/gather_elements.in.cpp backend/gather_nd.in.cpp - backend/gelu.in.cpp backend/group_convolution.in.cpp backend/group_convolution_backprop_data.in.cpp - backend/hard_sigmoid.in.cpp backend/idft.in.cpp backend/interpolate.in.cpp backend/log.in.cpp - backend/log_softmax.in.cpp backend/lrn.in.cpp backend/matmul.in.cpp backend/matrix_nms.in.cpp backend/maximum.in.cpp backend/max_pool.in.cpp - backend/mish.in.cpp backend/mod.in.cpp backend/multiclass_nms.in.cpp backend/multiple_backends.in.cpp @@ -509,7 +513,6 @@ set(MULTI_TEST_SRC backend/pad.in.cpp backend/parameter_as_output.in.cpp backend/power.in.cpp - backend/prelu.in.cpp backend/prior_box_clustered.in.cpp backend/prior_box.in.cpp backend/proposal.in.cpp @@ -517,7 +520,6 @@ set(MULTI_TEST_SRC backend/range.in.cpp backend/recurrent_cells.in.cpp backend/region_yolo.in.cpp - backend/relu.in.cpp backend/reorg_yolo.in.cpp backend/reshape.in.cpp backend/result.in.cpp @@ -526,19 +528,14 @@ set(MULTI_TEST_SRC backend/round.in.cpp backend/scatter_nd_update.in.cpp backend/space_to_depth.in.cpp - backend/selu.in.cpp backend/shape_of.in.cpp backend/shuffle_channels.in.cpp - backend/sigmoid.in.cpp - backend/softmax.in.cpp - backend/softplus.in.cpp backend/space_to_batch.in.cpp backend/split.in.cpp backend/sqrt.in.cpp backend/squared_difference.in.cpp backend/squeeze.in.cpp backend/subtract.in.cpp - backend/swish.in.cpp backend/tile.in.cpp backend/topk.in.cpp backend/transpose.in.cpp diff --git 
a/ngraph/test/backend/clamp.in.cpp b/ngraph/test/backend/clamp.in.cpp deleted file mode 100644 index a4c86f026cc..00000000000 --- a/ngraph/test/backend/clamp.in.cpp +++ /dev/null @@ -1,403 +0,0 @@ -//***************************************************************************** -// Copyright 2021 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** - -#include "engines_util/test_case.hpp" -#include "engines_util/test_engines.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "util/test_control.hpp" - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; -using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); - -namespace { -template -void clamp_test(const element::Type& type, - const PartialShape& dynamic_shape, - const Shape& static_shape, - const std::vector& input, - double min, - double max, - const std::vector& output) { - auto data = make_shared(type, dynamic_shape); - auto clamp = make_shared(data, min, max); - auto function = make_shared(clamp, ParameterVector{data}); - - auto test_case = test::TestCase(function); - test_case.template add_input(static_shape, input); - test_case.template add_expected_output(static_shape, output); - return test_case.run(); -} -} // namespace - -NGRAPH_TEST(${BACKEND_NAME}, clamp_integral) { - Shape in_shape{6}; - element::Type et = element::i32; - - float min = 0.4; // ceiled to 1 - float max = 5.6; // floored to 5 - - auto input = make_shared(et, in_shape); - auto clamp = make_shared(input, min, max); - auto f = make_shared(clamp, ParameterVector{input}); - - vector in_vec{-1, 3, -10, 20, 6, 2}; - vector out_vec{1, 3, 1, 5, 5, 2}; - - auto test_case = test::TestCase(f); - test_case.add_input(in_shape, in_vec); - test_case.add_expected_output(in_shape, out_vec); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, clamp_integral_negative) { - Shape in_shape{6}; - element::Type et = element::i32; - - float min = -5.6; // ceiled to -5 - float max = -0.4; // floored to -1 - - auto input = make_shared(et, in_shape); - auto clamp = make_shared(input, min, max); - auto f = make_shared(clamp, ParameterVector{input}); - - vector in_vec{-6, 1, -2, 0, -1, 2}; - vector out_vec{-5, -1, -2, -1, -1, -1}; - - auto test_case = test::TestCase(f); - test_case.add_input(in_shape, in_vec); - test_case.add_expected_output(in_shape, out_vec); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, clamp_float) { - auto type = element::f32; - typedef float ctype; - - auto sshape = Shape{5, 2}; - auto dshape = PartialShape::dynamic(); - - auto min = numeric_limits::min(); - auto max = numeric_limits::max(); - auto pinf = numeric_limits::infinity(); - auto ninf = -numeric_limits::infinity(); - - vector input{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001}; - - // static shape - clamp_test(type, - sshape, - sshape, - {-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8}, - 0.2, - 0.6, - 
{0.2, 0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.6, 0.6}); - - clamp_test(type, - sshape, - sshape, - input, - 10.0, - 20.0, - {10.0, 20.0, 10.0, 20.0, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.0}); - - clamp_test(type, - sshape, - sshape, - input, - 10.0, - pinf, - {10.0, max, 10.0, pinf, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.000001}); - - clamp_test(type, - sshape, - sshape, - input, - ninf, - 20.0, - {min, 20.0, ninf, 20.0, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.0}); -} - -NGRAPH_TEST(${BACKEND_NAME}, clamp_int8) { - auto type = element::i8; - typedef int8_t ctype; - - auto sshape = Shape{4, 2}; - auto dshape = PartialShape::dynamic(); - - auto min = numeric_limits::min(); - auto max = numeric_limits::max(); - auto pinf = numeric_limits::infinity(); - auto ninf = -numeric_limits::infinity(); - - vector input{min, max, 9, 10, 11, 19, 20, 21}; - - // static shape - clamp_test(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20}); - clamp_test(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21}); - clamp_test(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20}); -} - -NGRAPH_TEST(${BACKEND_NAME}, clamp_int16) { - auto type = element::i16; - typedef int16_t ctype; - - auto sshape = Shape{4, 2}; - auto dshape = PartialShape::dynamic(); - - auto min = numeric_limits::min(); - auto max = numeric_limits::max(); - auto pinf = numeric_limits::infinity(); - auto ninf = -numeric_limits::infinity(); - - vector input{min, max, 9, 10, 11, 19, 20, 21}; - - // static shape - clamp_test(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20}); - clamp_test(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21}); - clamp_test(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20}); -} - -NGRAPH_TEST(${BACKEND_NAME}, clamp_int32) { - auto type = element::i32; - typedef int32_t ctype; - - auto sshape = Shape{4, 2}; - auto dshape = PartialShape::dynamic(); - - auto min = numeric_limits::min(); - auto max = numeric_limits::max(); - auto pinf = numeric_limits::infinity(); - auto ninf = -numeric_limits::infinity(); - - vector input{min, max, 9, 10, 11, 19, 20, 21}; - - // static shape - clamp_test(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20}); - clamp_test(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21}); - clamp_test(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20}); -} - -NGRAPH_TEST(${BACKEND_NAME}, clamp_int64) { - auto type = element::i64; - typedef int64_t ctype; - - auto sshape = Shape{4, 2}; - auto dshape = PartialShape::dynamic(); - - auto min = numeric_limits::min(); - auto max = numeric_limits::max(); - auto pinf = numeric_limits::infinity(); - auto ninf = -numeric_limits::infinity(); - - vector input{min, max, 9, 10, 11, 19, 20, 21}; - - // static shape - clamp_test(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20}); - clamp_test(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21}); - clamp_test(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20}); -} - -NGRAPH_TEST(${BACKEND_NAME}, clamp_uint8) { - auto type = element::u8; - typedef uint8_t ctype; - - auto sshape = Shape{4, 2}; - auto dshape = PartialShape::dynamic(); - - auto min = numeric_limits::min(); - // TODO: Fix CPU DEX / MLIR correctness bug: using signed comparison for unsigned ints - // auto max = numeric_limits::max(); - // auto pinf = 
numeric_limits::infinity(); - ctype max = (static_cast(1) << (numeric_limits::digits - 1)) - 1; - auto pinf = static_cast(max); - auto ninf = -numeric_limits::infinity(); - - vector input{min, max, 9, 10, 11, 19, 20, 21}; - - // static shape - clamp_test(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20}); - clamp_test(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21}); - clamp_test(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20}); -} - -NGRAPH_TEST(${BACKEND_NAME}, clamp_uint16) { - auto type = element::u16; - typedef uint16_t ctype; - - auto sshape = Shape{4, 2}; - auto dshape = PartialShape::dynamic(); - - auto min = numeric_limits::min(); - // TODO: Fix CPU DEX / MLIR correctness bug: using signed comparison for unsigned ints - // auto max = numeric_limits::max(); - // auto pinf = numeric_limits::infinity(); - ctype max = (static_cast(1) << (numeric_limits::digits - 1)) - 1; - auto pinf = static_cast(max); - auto ninf = -numeric_limits::infinity(); - - vector input{min, max, 9, 10, 11, 19, 20, 21}; - - // static shape - clamp_test(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20}); - clamp_test(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21}); - clamp_test(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20}); -} - -NGRAPH_TEST(${BACKEND_NAME}, clamp_uint32) { - auto type = element::u32; - typedef uint32_t ctype; - - auto sshape = Shape{4, 2}; - auto dshape = PartialShape::dynamic(); - - auto min = numeric_limits::min(); - // TODO: Fix CPU DEX / MLIR correctness bug: using signed comparison for unsigned ints - // auto max = numeric_limits::max(); - // auto pinf = numeric_limits::infinity(); - ctype max = (static_cast(1) << (numeric_limits::digits - 1)) - 1; - auto pinf = static_cast(max); - auto ninf = -numeric_limits::infinity(); - - vector input{min, max, 9, 10, 11, 19, 20, 21}; - - // static shape - clamp_test(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20}); - clamp_test(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21}); - clamp_test(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20}); -} - -NGRAPH_TEST(${BACKEND_NAME}, clamp_uint64) { - auto type = element::u64; - typedef uint64_t ctype; - - auto sshape = Shape{4, 2}; - auto dshape = PartialShape::dynamic(); - - auto min = numeric_limits::min(); - // TODO: Fix CPU DEX / MLIR correctness bug: using signed comparison for unsigned ints - // auto max = numeric_limits::max(); - // auto pinf = numeric_limits::infinity(); - ctype max = (static_cast(1) << (32 - 1)) - 1; - auto pinf = static_cast(max); - auto ninf = -numeric_limits::infinity(); - - vector input{min, max, 9, 10, 11, 19, 20, 21}; - - // static shape - clamp_test(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20}); - clamp_test(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21}); - clamp_test(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20}); -} - -NGRAPH_TEST(${BACKEND_NAME}, clamp_float16) { - auto type = element::f16; - typedef float16 ctype; - - auto sshape = Shape{5, 2}; - auto dshape = PartialShape::dynamic(); - - auto min = numeric_limits::min(); - auto max = numeric_limits::max(); - auto pinf = numeric_limits::infinity(); - auto ninf = -numeric_limits::infinity(); - - vector input{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001}; - - // static shape - 
clamp_test(type, - sshape, - sshape, - {-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8}, - 0.2, - 0.6, - {0.2, 0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.6, 0.6}); - - clamp_test(type, - sshape, - sshape, - input, - 10.0, - 20.0, - {10.0, 20.0, 10.0, 20.0, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.0}); - - clamp_test(type, - sshape, - sshape, - input, - 10.0, - pinf, - {10.0, max, 10.0, pinf, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.000001}); - - clamp_test(type, - sshape, - sshape, - input, - ninf, - 20.0, - {min, 20.0, ninf, 20.0, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.0}); -} - -NGRAPH_TEST(${BACKEND_NAME}, clamp_bfloat16) { - auto type = element::bf16; - typedef bfloat16 ctype; - - auto sshape = Shape{5, 2}; - auto dshape = PartialShape::dynamic(); - - auto min = numeric_limits::min(); - auto max = numeric_limits::max(); - auto pinf = numeric_limits::infinity(); - auto ninf = -numeric_limits::infinity(); - - vector input{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001}; - - // static shape - clamp_test(type, - sshape, - sshape, - {-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8}, - 0.2, - 0.6, - {0.2, 0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.6, 0.6}); - - clamp_test(type, - sshape, - sshape, - input, - 10.0, - 20.0, - {10.0, 20.0, 10.0, 20.0, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.0}); - - clamp_test(type, - sshape, - sshape, - input, - 10.0, - pinf, - {10.0, max, 10.0, pinf, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.000001}); - - clamp_test(type, - sshape, - sshape, - input, - ninf, - 20.0, - {min, 20.0, ninf, 20.0, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.0}); -} diff --git a/ngraph/test/backend/elu.in.cpp b/ngraph/test/backend/elu.in.cpp deleted file mode 100644 index 3d4c4eb1fd1..00000000000 --- a/ngraph/test/backend/elu.in.cpp +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include -#include -#include - -// clang-format off -#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS -#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS -#endif - -#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS -#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS -#endif -// clang-format on - -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "engines_util/test_engines.hpp" -#include "engines_util/test_case.hpp" -#include "util/test_control.hpp" - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; -using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); - -NGRAPH_TEST(${BACKEND_NAME}, elu) { - auto A = make_shared(element::f32, Shape{3, 2}); - auto elu = make_shared(A, 0.5f); - auto function = make_shared(NodeVector{elu}, ParameterVector{A}); - - auto test_case = test::TestCase(function); - test_case.add_input(vector{-2.f, 3.f, -2.f, 1.f, -1.f, 0.f}); - test_case.add_expected_output(vector{-0.432332358f, 3.f, -0.432332358f, 1.f, -0.316060279f, 0.f}); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, elu_negative_alpha) { - auto A = make_shared(element::f32, Shape{3, 2}); - auto elu = make_shared(A, -1.f); - auto function = make_shared(NodeVector{elu}, ParameterVector{A}); - - auto test_case = test::TestCase(function); - test_case.add_input(vector{-2.f, 3.f, -2.f, 1.f, -1.f, 0.f}); - test_case.add_expected_output(vector{0.864664717f, 3.f, 0.864664717f, 1.f, 0.632120559f, 0.f}); - test_case.run(); -} diff --git a/ngraph/test/backend/exp.in.cpp 
b/ngraph/test/backend/exp.in.cpp deleted file mode 100644 index 1f516a7b0a3..00000000000 --- a/ngraph/test/backend/exp.in.cpp +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include -#include -#include - -// clang-format off -#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS -#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS -#endif - -#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS -#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS -#endif -// clang-format on - -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "engines_util/test_engines.hpp" -#include "engines_util/test_case.hpp" -#include "util/test_control.hpp" - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; -using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); - -NGRAPH_TEST(${BACKEND_NAME}, exp) { - Shape shape{8}; - auto A = make_shared(element::f32, shape); - auto f = make_shared(make_shared(A), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_input({-4, -3, -2, -1, 0, 1, 2, 3}); - test_case.add_expected_output(shape, - {expf(-4), expf(-3), expf(-2), expf(-1), expf(0), expf(1), expf(2), expf(3)}); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, exp_negative) { - Shape shape{5}; - auto A = make_shared(element::f32, shape); - auto f = make_shared(make_shared(A), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_input({-4, -3, -2, -1, -5}); - test_case.add_expected_output(shape, {expf(-4), expf(-3), expf(-2), expf(-1), expf(-5)}); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, exp_scalar) { - Shape shape{}; - auto A = make_shared(element::f32, shape); - auto f = make_shared(make_shared(A), ParameterVector{A}); - - vector a{13}; - - auto test_case = test::TestCase(f); - test_case.add_input({a}); - test_case.add_expected_output(shape, {expf(13)}); - test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 2); -} - -NGRAPH_TEST(${BACKEND_NAME}, exp_in_place) { - Shape shape{2}; - auto A = make_shared(element::f32, shape); - ; - auto T = make_shared(A); - auto T2 = make_shared(T); - - auto f = make_shared(T2, ParameterVector{A}); - - vector a{1, 3}; - - auto test_case = test::TestCase(f); - test_case.add_input({a}); - test_case.add_expected_output(shape, {expf(expf(1)), expf(expf(3))}); - test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 2); -} diff --git a/ngraph/test/backend/gelu.in.cpp b/ngraph/test/backend/gelu.in.cpp deleted file mode 100644 index a2981dbd94e..00000000000 --- a/ngraph/test/backend/gelu.in.cpp +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "engines_util/test_case.hpp" -#include "engines_util/test_engines.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "util/test_control.hpp" - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; -using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); - -NGRAPH_TEST(${BACKEND_NAME}, gelu_erf_mode_inference_f32_8D) { - Shape in_shape{8}; - element::Type et = element::f32; - - auto param = make_shared(et, in_shape); - auto gelu = make_shared(param); - auto f = make_shared(gelu, ParameterVector{param}); - - vector in_vec{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0}; - vector - out_vec{-0.00012636185, -0.0040495098, -0.04550028, -0.15865529, 0.0, 0.8413447, 1.9544997, 
2.9959507}; - - auto test_case = test::TestCase(f); - test_case.add_input(in_shape, in_vec); - test_case.add_expected_output(in_shape, out_vec); - test_case.run_with_tolerance_as_fp(1e-4f); -} - -NGRAPH_TEST(${BACKEND_NAME}, gelu_tanh_mode_inference_f32_8D) { - Shape in_shape{8}; - element::Type et = element::f32; - - auto param = make_shared(et, in_shape); - auto gelu = make_shared(param, op::GeluApproximationMode::TANH); - auto f = make_shared(gelu, ParameterVector{param}); - - vector in_vec{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0}; - vector - out_vec{-0.00012636185, -0.0040495098, -0.04550028, -0.15865529, 0.0, 0.8413447, 1.9544997, 2.9959507}; - - auto test_case = test::TestCase(f); - test_case.add_input(in_shape, in_vec); - test_case.add_expected_output(in_shape, out_vec); - test_case.run_with_tolerance_as_fp(1e-3f); -} - -NGRAPH_TEST(${BACKEND_NAME}, gelu_erf_mode_inference_f32_3D) { - Shape in_shape{3}; - element::Type et = element::f32; - - auto param = make_shared(et, in_shape); - auto gelu = make_shared(param); - auto f = make_shared(gelu, ParameterVector{param}); - - vector in_vec{-0.5, 0.1, 0.4}; - vector out_vec{-0.15426877, 0.05398279, 0.2621686}; - - auto test_case = test::TestCase(f); - test_case.add_input(in_shape, in_vec); - test_case.add_expected_output(in_shape, out_vec); - test_case.run_with_tolerance_as_fp(1e-4f); -} - -NGRAPH_TEST(${BACKEND_NAME}, gelu_tanh_mode_inference_f32_3D) { - Shape in_shape{3}; - element::Type et = element::f32; - - auto param = make_shared(et, in_shape); - auto gelu = make_shared(param, op::GeluApproximationMode::TANH); - auto f = make_shared(gelu, ParameterVector{param}); - - vector in_vec{-0.5, 0.1, 0.4}; - vector out_vec{-0.15428599, 0.053982753, 0.262161165}; - - auto test_case = test::TestCase(f); - test_case.add_input(in_shape, in_vec); - test_case.add_expected_output(in_shape, out_vec); - test_case.run_with_tolerance_as_fp(1e-4f); -} diff --git a/ngraph/test/backend/hard_sigmoid.in.cpp b/ngraph/test/backend/hard_sigmoid.in.cpp deleted file mode 100644 index 495e16006c5..00000000000 --- a/ngraph/test/backend/hard_sigmoid.in.cpp +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "engines_util/test_case.hpp" -#include "engines_util/test_engines.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "util/test_control.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; -using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); - -NGRAPH_TEST(${BACKEND_NAME}, hard_sigmoid_1d) { - const Shape a_shape{3}; - const auto A = make_shared(element::f32, a_shape); - - const auto alpha = op::Constant::create(element::f32, Shape{}, {0.5f}); - const auto beta = op::Constant::create(element::f32, Shape{}, {0.6f}); - - const auto R = make_shared(A, alpha, beta); - const auto f = make_shared(R, ParameterVector{A}); - - std::vector a{-1.0f, 0.0f, 1.0f}; - - EXPECT_EQ(R->get_output_shape(0), a_shape); - - auto test_case = test::TestCase(f); - - test_case.add_input({a}); - test_case.add_expected_output({0.1f, 0.6f, 1.f}); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, hard_sigmoid_2d) { - const Shape a_shape{2, 5}; - const auto A = make_shared(element::f32, a_shape); - - const auto alpha = op::Constant::create(element::f32, Shape{}, {0.2f}); - const auto beta = op::Constant::create(element::f32, Shape{}, {0.5f}); - - const auto R = make_shared(A, alpha, 
beta); - const auto f = make_shared(R, ParameterVector{A}); - - std::vector a{-3.0f, -1.0f, 0.0f, 1.0f, 3.0f, 0.5f, -0.2f, 6.0f, 8.0f, 0.1f}; - - EXPECT_EQ(R->get_output_shape(0), a_shape); - - auto test_case = test::TestCase(f); - - test_case.add_input({a}); - test_case.add_expected_output(a_shape, {0.0f, 0.3f, 0.5f, 0.7f, 1.0f, 0.6f, 0.46f, 1.0f, 1.0f, 0.52f}); - test_case.run(); -} diff --git a/ngraph/test/backend/log_softmax.in.cpp b/ngraph/test/backend/log_softmax.in.cpp deleted file mode 100644 index fec292a24bd..00000000000 --- a/ngraph/test/backend/log_softmax.in.cpp +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// clang-format off -#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS -#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS -#endif - -#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS -#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS -#endif -// clang-format on - -#include "gtest/gtest.h" -#include "runtime/backend.hpp" -#include "ngraph/runtime/tensor.hpp" -#include "ngraph/ngraph.hpp" -#include "util/all_close.hpp" -#include "util/all_close_f.hpp" -#include "util/ndarray.hpp" -#include "util/test_control.hpp" -#include "engines_util/execute_tools.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; - -NGRAPH_TEST(${BACKEND_NAME}, log_softmax_1d_single_value) { - Shape shape{1}; - auto A = make_shared(element::f32, shape); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{1}); - auto result = backend->create_tensor(element::f32, shape); - - std::vector expected_result{0}; - - auto f = make_shared(make_shared(A, 0), ParameterVector{A}); - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis0) { - Shape shape{2, 4}; - auto A = make_shared(element::f32, shape); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}); - auto result = backend->create_tensor(element::f32, shape); - - std::vector expected_result{-10000., -10000., -10000., -10000., 0., 0., 0., 0.}; - - auto f = make_shared(make_shared(A, 0), ParameterVector{A}); - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis1) { - Shape shape{2, 4}; - auto A = make_shared(element::f32, shape); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}); - auto result = backend->create_tensor(element::f32, shape); - - std::vector - expected_result{-3.4401896, -2.4401896, -1.4401897, -0.4401897, -3.4401896, -2.4401896, -1.4401897, -0.4401897}; - - auto f = make_shared(make_shared(A, 1), ParameterVector{A}); - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis_neg1) { - Shape shape{2, 4}; - auto A = make_shared(element::f32, shape); - - 
auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}); - auto result = backend->create_tensor(element::f32, shape); - - std::vector - expected_result{-3.4401896, -2.4401896, -1.4401897, -0.4401897, -3.4401896, -2.4401896, -1.4401897, -0.4401897}; - - auto f = make_shared(make_shared(A, -1), ParameterVector{A}); - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis_neg2) { - Shape shape{2, 4}; - auto A = make_shared(element::f32, shape); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}); - auto result = backend->create_tensor(element::f32, shape); - - std::vector expected_result{-10000., -10000., -10000., -10000., 0., 0., 0., 0.}; - - auto f = make_shared(make_shared(A, -2), ParameterVector{A}); - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_0) { - Shape shape{3, 2, 3}; - auto A = make_shared(element::f32, shape); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); - auto result = backend->create_tensor(element::f32, shape); - - std::vector expected_result{-12.0024818, - -12.0024818, - -12.0024818, - -12.0024818, - -12.0024818, - -12.0024818, - -6.00248181, - -6.00248181, - -6.00248181, - -6.00248181, - -6.00248181, - -6.00248181, - -2.48181414e-03, - -2.48181414e-03, - -2.48181414e-03, - -2.48181414e-03, - -2.48181414e-03, - -2.48181414e-03}; - - auto f = make_shared(make_shared(A, 0), ParameterVector{A}); - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_1) { - Shape shape{3, 2, 3}; - auto A = make_shared(element::f32, shape); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); - auto result = backend->create_tensor(element::f32, shape); - - std::vector expected_result{-3.04858735, - -3.04858735, - -3.04858735, - -0.04858735, - -0.04858735, - -0.04858735, - -3.04858735, - -3.04858735, - -3.04858735, - -0.04858735, - -0.04858735, - -0.04858735, - -3.04858735, - -3.04858735, - -3.04858735, - -0.04858735, - -0.04858735, - -0.04858735}; - - auto f = make_shared(make_shared(A, 1), ParameterVector{A}); - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_2) { - Shape shape{3, 2, 3}; - auto A = make_shared(element::f32, shape); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); - auto result = backend->create_tensor(element::f32, shape); - - std::vector 
expected_result{-2.40760596, - -1.40760596, - -0.40760596, - -2.40760596, - -1.40760596, - -0.40760596, - -2.40760596, - -1.40760596, - -0.40760596, - -2.40760596, - -1.40760596, - -0.40760596, - -2.40760596, - -1.40760596, - -0.40760596, - -2.40760596, - -1.40760596, - -0.40760596}; - - auto f = make_shared(make_shared(A, 2), ParameterVector{A}); - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg1) { - Shape shape{3, 2, 3}; - auto A = make_shared(element::f32, shape); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); - auto result = backend->create_tensor(element::f32, shape); - - std::vector expected_result{-2.40760596, - -1.40760596, - -0.40760596, - -2.40760596, - -1.40760596, - -0.40760596, - -2.40760596, - -1.40760596, - -0.40760596, - -2.40760596, - -1.40760596, - -0.40760596, - -2.40760596, - -1.40760596, - -0.40760596, - -2.40760596, - -1.40760596, - -0.40760596}; - - auto f = make_shared(make_shared(A, -1), ParameterVector{A}); - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg2) { - Shape shape{3, 2, 3}; - auto A = make_shared(element::f32, shape); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); - auto result = backend->create_tensor(element::f32, shape); - - std::vector expected_result{-3.04858735, - -3.04858735, - -3.04858735, - -0.04858735, - -0.04858735, - -0.04858735, - -3.04858735, - -3.04858735, - -3.04858735, - -0.04858735, - -0.04858735, - -0.04858735, - -3.04858735, - -3.04858735, - -3.04858735, - -0.04858735, - -0.04858735, - -0.04858735}; - - auto f = make_shared(make_shared(A, -2), ParameterVector{A}); - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg3) { - Shape shape{3, 2, 3}; - auto A = make_shared(element::f32, shape); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); - auto result = backend->create_tensor(element::f32, shape); - - std::vector expected_result{-12.0024818, - -12.0024818, - -12.0024818, - -12.0024818, - -12.0024818, - -12.0024818, - -6.00248181, - -6.00248181, - -6.00248181, - -6.00248181, - -6.00248181, - -6.00248181, - -2.48181414e-03, - -2.48181414e-03, - -2.48181414e-03, - -2.48181414e-03, - -2.48181414e-03, - -2.48181414e-03}; - - auto f = make_shared(make_shared(A, -3), ParameterVector{A}); - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); -} diff --git a/ngraph/test/backend/mish.in.cpp b/ngraph/test/backend/mish.in.cpp deleted file mode 100644 index d4e989d59bc..00000000000 --- a/ngraph/test/backend/mish.in.cpp +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// 
SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include -#include -#include - -#include "engines_util/execute_tools.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "ngraph/runtime/tensor.hpp" -#include "runtime/backend.hpp" -#include "util/all_close.hpp" -#include "util/all_close_f.hpp" -#include "util/ndarray.hpp" -#include "util/test_control.hpp" - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; - -template > -static void mish_test(const PartialShape& dynamic_shape, const Shape& static_shape, const double fp_tolerance = 1e-5) { - bool must_support_dynamic = dynamic_shape.is_dynamic(); - auto data = make_shared(Type, dynamic_shape); - auto f = make_shared(make_shared(data), ParameterVector{data}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}", must_support_dynamic); - - auto create_output_tensor = [&]() { - if (must_support_dynamic) - return backend->create_dynamic_tensor(Type, dynamic_shape); - return backend->create_tensor(Type, dynamic_shape.get_shape()); - }; - - auto a = backend->create_tensor(Type, static_shape); - auto result = create_output_tensor(); - - // generate input tensor (with possible type conversion) - auto static_size = shape_size(static_shape); - std::vector expected; - std::vector input; - { - std::mt19937 gen{0}; // use fixed seed for reproducibility of the test - std::normal_distribution<> d{0.0, 20.0}; - - for (auto i = static_size; i > 0; i--) { - auto x = static_cast(d(gen)); - auto y = static_cast(static_cast(x) * std::tanh(std::log(1.0 + std::exp(x)))); - input.push_back(x); - expected.push_back(y); - } - - copy_data(a, input); - } - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - - auto actual = read_vector(result); - - // size equality test - EXPECT_EQ(actual.size(), static_size); - EXPECT_EQ(result->get_shape(), static_shape); - - // backend is allowed to trade off accuracy for performance - for (size_t i = 0; i < static_size; i++) - EXPECT_NEAR(actual[i], expected[i], fp_tolerance) << "input[i] is " << input[i]; -} - -NGRAPH_TEST(${BACKEND_NAME}, mish_f32) { - mish_test({2, 5}, {2, 5}); - mish_test({2, 3, 4, 5}, {2, 3, 4, 5}); -} - -NGRAPH_TEST(${BACKEND_NAME}, mish_f16) { - mish_test({2, 5}, {2, 5}); - mish_test({2, 3, 4, 5}, {2, 3, 4, 5}); -} - -NGRAPH_TEST(${BACKEND_NAME}, mish_dynamic) { - mish_test(PartialShape::dynamic(), {2, 3, 4, 5}); - mish_test({2, Dimension::dynamic(), 4, 5}, {2, 3, 4, 5}); -} diff --git a/ngraph/test/backend/prelu.in.cpp b/ngraph/test/backend/prelu.in.cpp deleted file mode 100644 index 0e0e937dbe1..00000000000 --- a/ngraph/test/backend/prelu.in.cpp +++ /dev/null @@ -1,850 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "engines_util/test_case.hpp" -#include "engines_util/test_engines.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "util/test_control.hpp" - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; -using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); - -NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_1_param) { - Shape shape_a{6}; - Shape shape_slope{1}; - - std::vector a{1, 2, -3, -4, 5, 6}; - std::vector slope{2}; - std::vector out{1, 2, -6, -8, 5, 6}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto 
test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_1_const) { - Shape shape_a{6}; - Shape shape_slope{1}; - - std::vector a{1, 2, -3, -4, 5, 6}; - std::vector slope{2}; - std::vector out{1, 2, -6, -8, 5, 6}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_param) { - Shape shape_a{6}; - Shape shape_slope{6}; - - std::vector a{1, 2, -3, -4, 5, 6}; - std::vector slope{2, 3, 4, 5, 6, 7}; - std::vector out{1, 2, -12, -20, 5, 6}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_6_const) { - Shape shape_a{6}; - Shape shape_slope{6}; - - std::vector a{1, 2, -3, -4, 5, 6}; - std::vector slope{2, 3, 4, 5, 6, 7}; - std::vector out{1, 2, -12, -20, 5, 6}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_2_W_param) { - Shape shape_a{3, 2}; - Shape shape_slope{2}; - - std::vector a{-2, 3, -2, 1, -1, 0}; - std::vector slope{0, 1}; - std::vector out{0, 3, 0, 1, 0, 0}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_2_W_const) { - Shape shape_a{3, 2}; - Shape shape_slope{2}; - - std::vector a{-2, 3, -2, 1, -1, 0}; - std::vector slope{0, 1}; - std::vector out{0, 3, 0, 1, 0, 0}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_6_W_param) { - Shape shape_a{2, 6}; - Shape shape_slope{6}; - - std::vector a{1, 2, -3, -4, 5, 6, 7, 8, -9, -10, 11, 12}; - std::vector slope{1, 2, 3, 4, 5, 6}; - std::vector out{1, 2, -9, -16, 5, 6, 7, 8, -27, -40, 11, 12}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - 
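// Every prelu_* case in this file reduces to the same scalar rule applied after
// numpy-style broadcasting of the slope tensor: out = x >= 0 ? x : x * slope.
// A minimal standalone sketch of that reference semantics, assuming the slope
// has already been broadcast to the input shape (prelu_reference is a
// hypothetical helper for illustration, not part of the deleted file):

#include <cstddef>
#include <vector>

// out[i] = a[i] >= 0 ? a[i] : a[i] * slope[i], element by element.
static std::vector<float> prelu_reference(const std::vector<float>& a, const std::vector<float>& slope) {
    std::vector<float> out(a.size());
    for (std::size_t i = 0; i < a.size(); ++i)
        out[i] = (a[i] >= 0.0f) ? a[i] : a[i] * slope[i];
    return out;
}

// For example, prelu_reference({1, 2, -3, -4, 5, 6}, {2, 2, 2, 2, 2, 2})
// yields {1, 2, -6, -8, 5, 6}, matching prelu_1d_1_param above.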
-NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_6_W_const) { - Shape shape_a{2, 6}; - Shape shape_slope{6}; - - std::vector a{1, 2, -3, -4, 5, 6, 7, 8, -9, -10, 11, 12}; - std::vector slope{1, 2, 3, 4, 5, 6}; - std::vector out{1, 2, -9, -16, 5, 6, 7, 8, -27, -40, 11, 12}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_C_2_param) { - Shape shape_a{3, 2}; - Shape shape_slope{2}; - - std::vector a{-1, -1, -1, -1, -1, -1}; - std::vector slope{2, 0.5}; - std::vector out{-2, -0.5, -2, -0.5, -2, -0.5}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_C_2_const) { - Shape shape_a{3, 2}; - Shape shape_slope{2}; - - std::vector a{-1, -1, -1, -1, -1, -1}; - std::vector slope{2, 0.5}; - std::vector out{-2, -0.5, -2, -0.5, -2, -0.5}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_negative_equal_dims_slope_param) { - Shape shape_a{2, 2, 2}; - Shape shape_slope{2}; - - std::vector a{-0.5, -2, -3, -4, -5, -6, -7, -8}; - std::vector slope{-0.5, -2}; - // std::vector out{0.25, 4, 1.5, 8, 2.5, 12, 3.5, 16}; // broadcast (1, 1, 2) - std::vector out{0.25, 1, 6, 8, 2.5, 3, 14, 16}; // broadcast (1, 2, 1) - // std::vector out{0.25, 1, 1.5, 2, 10, 12, 14, 16}; // broadcast (2, 1, 1) - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_negative_equal_dims_slope_const) { - Shape shape_a{2, 2, 2}; - Shape shape_slope{2}; - - std::vector a{-0.5, -2, -3, -4, -5, -6, -7, -8}; - std::vector slope{-0.5, -2}; - // std::vector out{0.25, 4, 1.5, 8, 2.5, 12, 3.5, 16}; // broadcast (1, 1, 2) - std::vector out{0.25, 1, 6, 8, 2.5, 3, 14, 16}; // broadcast (1, 2, 1) - // std::vector out{0.25, 1, 1.5, 2, 10, 12, 14, 16}; // broadcast (2, 1, 1) - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_negative_slope_param) { - Shape shape_a{3, 2}; - Shape shape_slope{2}; - - std::vector a{-2, 3, -2, 1, -1, 0}; - std::vector slope{-0.5, -1}; - std::vector out{1, 3, 1, 1, 0.5, 0}; - - const auto A = 
make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_negative_slope_const) { - Shape shape_a{3, 2}; - Shape shape_slope{2}; - - std::vector a{-2, 3, -2, 1, -1, 0}; - std::vector slope{-0.5, -1}; - std::vector out{1, 3, 1, 1, 0.5, 0}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_same_shape_param) { - Shape shape_a{2, 6}; - Shape shape_slope{2, 6}; - - std::vector a{1, 2, -3, -4, 5, 6, 1, 2, -3, -4, 5, 6}; - std::vector slope{2, 2, 2, 2, 2, 2, 1, 1, 4, 2, 1, 1}; - std::vector out{1, 2, -6, -8, 5, 6, 1, 2, -12, -8, 5, 6}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_same_shape_const) { - Shape shape_a{2, 6}; - Shape shape_slope{2, 6}; - - std::vector a{1, 2, -3, -4, 5, 6, 1, 2, -3, -4, 5, 6}; - std::vector slope{2, 2, 2, 2, 2, 2, 1, 1, 4, 2, 1, 1}; - std::vector out{1, 2, -6, -8, 5, 6, 1, 2, -12, -8, 5, 6}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_C_1_W_slope_param) { - Shape shape_a{2, 2, 2, 2}; - Shape shape_slope{2, 1, 2}; - - std::vector a{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4}; - std::vector slope{1, 2, 3, 4}; - std::vector out{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_C_1_W_slope_const) { - Shape shape_a{2, 2, 2, 2}; - Shape shape_slope{2, 1, 2}; - - std::vector a{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4}; - std::vector slope{1, 2, 3, 4}; - std::vector out{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_4d_slope_1_C_1_W_param) { - Shape shape_a{2, 2, 2, 2}; - 
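// The rank-4 slope shape below, {1, 2, 1, 2}, makes the broadcast unambiguous:
// out[n, c, h, w] = prelu(a[n, c, h, w], slope[0, c, 0, w]). With slope values
// {1, 2, 3, 4}, channel 0 applies per-width slopes {1, 2} and channel 1 applies
// {3, 4}, which is how the expected block {1, 2, -3, -8, 1, 2, -9, -16, ...}
// arises (for instance -4 * 4 = -16 at c = 1, w = 1).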
Shape shape_slope{1, 2, 1, 2}; - - std::vector a{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4}; - std::vector slope{1, 2, 3, 4}; - std::vector out{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_4d_slope_1_C_1_W_const) { - Shape shape_a{2, 2, 2, 2}; - Shape shape_slope{1, 2, 1, 2}; - - std::vector a{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4}; - std::vector slope{1, 2, 3, 4}; - std::vector out{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_3d_W_param) { - Shape shape_a{2, 2, 6}; - Shape shape_slope{2, 1, 6}; - - std::vector a{1, 2, -3, -4, -5, 6, -1, -2, -3, -4, -5, -6, 1, 2, -3, -4, 5, 6, -2, 4, -6, -8, 10, 12}; - std::vector slope{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6}; - std::vector out{1, 2, -9, -16, -5, 6, -2, -2, -9, -16, -5, -42, - 1, 2, -9, -16, 5, 6, -2, 4, -18, -32, 10, 12}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_3d_W_const) { - Shape shape_a{2, 2, 6}; - Shape shape_slope{2, 1, 6}; - - std::vector a{1, 2, -3, -4, -5, 6, -1, -2, -3, -4, -5, -6, 1, 2, -3, -4, 5, 6, -2, 4, -6, -8, 10, 12}; - std::vector slope{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6}; - std::vector out{1, 2, -9, -16, -5, 6, -2, -2, -9, -16, -5, -42, - 1, 2, -9, -16, 5, 6, -2, 4, -18, -32, 10, 12}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_3d_same_shape_param) { - Shape shape_a{2, 3, 2}; - Shape shape_slope{2, 3, 2}; - - std::vector a{ - 1, - 2, - -3, - -4, - -5, - 6, - -1, - -2, - -3, - -4, - -5, - -6, - }; - std::vector slope{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6}; - std::vector out{1, 2, -9, -16, -5, 6, -1, -4, -9, -16, -25, -36}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_3d_same_shape_const) { - Shape shape_a{2, 3, 2}; - Shape shape_slope{2, 3, 2}; - - std::vector a{ - 1, - 2, - -3, - -4, - -5, - 6, - -1, - -2, - 
-3, - -4, - -5, - -6, - }; - std::vector slope{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6}; - std::vector out{1, 2, -9, -16, -5, 6, -1, -4, -9, -16, -25, -36}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_3d_broadcast_C_W_slope_param) { - Shape shape_a{2, 1, 2}; - Shape shape_slope{2}; - - std::vector a{-10, -10, -10, -10}; - std::vector slope{0.1, 10}; - std::vector out{-1, -100, -1, -100}; // broadcast (1, 2, 1) - // std::vector out{-1, -1, -100, -100}; // broadcast (2, 1, 1) - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_3d_broadcast_C_W_slope_const) { - Shape shape_a{2, 1, 2}; - Shape shape_slope{2}; - - std::vector a{-10, -10, -10, -10}; - std::vector slope{0.1, 10}; - std::vector out{-1, -100, -1, -100}; // broadcast (1, 2, 1) - // std::vector out{-1, -1, -100, -100}; // broadcast (2, 1, 1) - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_4d_broadcast_C_W_slope_param) { - Shape shape_a{1, 2, 1, 2}; - Shape shape_slope{2}; - - std::vector a{-10, -10, -10, -10}; - std::vector slope{0.1, 10}; - // std::vector out{-1, -100, -1, -100}; // broadcast (1, 1, 1, 2) - std::vector out{-1, -1, -100, -100}; // broadcast (1, 2, 1, 1) - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_4d_broadcast_C_W_slope_const) { - Shape shape_a{1, 2, 1, 2}; - Shape shape_slope{2}; - - std::vector a{-10, -10, -10, -10}; - std::vector slope{0.1, 10}; - // std::vector out{-1, -100, -1, -100}; // broadcast (1, 1, 1, 2) - std::vector out{-1, -1, -100, -100}; // broadcast (1, 2, 1, 1) - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_4d_broadcast_C_slope_param) { - Shape shape_a{1, 5, 1, 1}; - Shape shape_slope{5}; - - std::vector a{-1, 0, -1, -1, -1}; - std::vector slope{1, 2, 3, 4, 5}; - std::vector out{-1, 0, -3, -4, -5}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = 
make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_4d_broadcast_C_slope_const) { - Shape shape_a{1, 5, 1, 1}; - Shape shape_slope{5}; - - std::vector a{-1, 0, -1, -1, -1}; - std::vector slope{1, 2, 3, 4, 5}; - std::vector out{-1, 0, -3, -4, -5}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_batch_nd_elementwise_param) { - Shape shape_a{2, 3, 4, 5}; - Shape shape_slope{2, 3, 4, 5}; - - std::vector a{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.}; - std::vector slope(shape_size(shape_slope)); - std::iota(std::begin(slope), std::end(slope), 0); - - std::vector out{ - -0., -1., -2., -3., -4., -5., -6., -7., -8., -9., -10., -11., -12., -13., -14., - -15., -16., -17., -18., -19., -20., -21., -22., -23., -24., -25., -26., -27., -28., -29., - -30., -31., -32., -33., -34., -35., -36., -37., -38., -39., -40., -41., -42., -43., -44., - -45., -46., -47., -48., -49., -50., -51., -52., -53., -54., -55., -56., -57., -58., -59., - -60., -61., -62., -63., -64., -65., -66., -67., -68., -69., -70., -71., -72., -73., -74., - -75., -76., -77., -78., -79., -80., -81., -82., -83., -84., -85., -86., -87., -88., -89., - -90., -91., -92., -93., -94., -95., -96., -97., -98., -99., -100., -101., -102., -103., -104., - -105., -106., -107., -108., -109., -110., -111., -112., -113., -114., -115., -116., -117., -118., -119.}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_batch_nd_elementwise_const) { - Shape shape_a{2, 3, 4, 5}; - Shape shape_slope{2, 3, 4, 5}; - - std::vector a{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.}; - std::vector 
slope(shape_size(shape_slope)); - std::iota(std::begin(slope), std::end(slope), 0); - - std::vector out{ - -0., -1., -2., -3., -4., -5., -6., -7., -8., -9., -10., -11., -12., -13., -14., - -15., -16., -17., -18., -19., -20., -21., -22., -23., -24., -25., -26., -27., -28., -29., - -30., -31., -32., -33., -34., -35., -36., -37., -38., -39., -40., -41., -42., -43., -44., - -45., -46., -47., -48., -49., -50., -51., -52., -53., -54., -55., -56., -57., -58., -59., - -60., -61., -62., -63., -64., -65., -66., -67., -68., -69., -70., -71., -72., -73., -74., - -75., -76., -77., -78., -79., -80., -81., -82., -83., -84., -85., -86., -87., -88., -89., - -90., -91., -92., -93., -94., -95., -96., -97., -98., -99., -100., -101., -102., -103., -104., - -105., -106., -107., -108., -109., -110., -111., -112., -113., -114., -115., -116., -117., -118., -119.}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_W_slope_param) { - Shape shape_a{2, 3, 4, 5}; - Shape shape_slope{5}; - - std::vector a{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.}; - - std::vector slope{0, 1, 2, 3, 4}; - - std::vector out{-0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., - -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., - -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., - -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., - -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., - -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., - -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4.}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_W_slope_const) { - Shape shape_a{2, 3, 4, 5}; - Shape shape_slope{5}; - - std::vector a{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., 
-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.}; - - std::vector slope{0, 1, 2, 3, 4}; - - std::vector out{-0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., - -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., - -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., - -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., - -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., - -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., - -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4.}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_C_slope_param) { - Shape shape_a{2, 3, 4, 5}; - Shape shape_slope{3}; - - std::vector a{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.}; - - std::vector slope{0, 1, 2}; - - std::vector out{-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., - -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., - -2., -2., -2., -2., -2., -2., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., - -0., -0., -0., -0., -0., -0., -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., - -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_C_slope_const) { - Shape shape_a{2, 3, 4, 5}; - Shape shape_slope{3}; - - std::vector a{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., 
-1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.}; - - std::vector slope{0, 1, 2}; - - std::vector out{-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., - -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., - -2., -2., -2., -2., -2., -2., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., - -0., -0., -0., -0., -0., -0., -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., - -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope, slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_C_1_1_slope_param) { - Shape shape_a{2, 3, 4, 5}; - Shape shape_slope{3, 1, 1}; - - std::vector a{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.}; - - std::vector slope{0, 1, 2}; - - std::vector out{-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., - -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., - -2., -2., -2., -2., -2., -2., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., - -0., -0., -0., -0., -0., -0., -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., - -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.}; - - const auto A = make_shared(element::f32, shape_a); - const auto SLOPE = make_shared(element::f32, shape_slope); - const auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A, SLOPE}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a, slope}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, prelu_C_1_1_slope_const) { - Shape shape_a{2, 3, 4, 5}; - Shape shape_slope{3, 1, 1}; - - std::vector a{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., 
-1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.}; - - std::vector slope{0, 1, 2}; - - std::vector out{-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., - -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., - -2., -2., -2., -2., -2., -2., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., - -0., -0., -0., -0., -0., -0., -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., - -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., - -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.}; - - auto A = make_shared(element::f32, shape_a); - auto SLOPE = make_shared(element::f32, shape_slope, slope); - auto f = make_shared(make_shared(A, SLOPE), ParameterVector{A}); - - auto test_case = test::TestCase(f); - test_case.add_multiple_inputs({a}); - test_case.add_expected_output(shape_a, out); - test_case.run(); -} diff --git a/ngraph/test/backend/relu.in.cpp b/ngraph/test/backend/relu.in.cpp deleted file mode 100644 index 2e93828f46c..00000000000 --- a/ngraph/test/backend/relu.in.cpp +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "engines_util/execute_tools.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "ngraph/runtime/tensor.hpp" -#include "runtime/backend.hpp" -#include "util/all_close.hpp" -#include "util/all_close_f.hpp" -#include "util/ndarray.hpp" -#include "util/test_control.hpp" - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; - -NGRAPH_TEST(${BACKEND_NAME}, relu_2Dfprop) { - auto shape_a = Shape{2, 5}; - auto A = make_shared(element::f32, shape_a); - auto relu = make_shared(A); - auto shape_rt = Shape{2, 5}; - auto f = make_shared(relu, ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5}); - auto result = backend->create_tensor(element::f32, shape_rt); - vector expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0}; - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f(read_vector(result), expected, MIN_FLOAT_TOLERANCE_BITS)); -} - -NGRAPH_TEST(${BACKEND_NAME}, relu_2Dfprop_i32) { - auto shape_a = Shape{2, 5}; - auto A = make_shared(element::i32, shape_a); - auto relu = make_shared(A); - auto shape_rt = Shape{2, 5}; - auto f = make_shared(relu, ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::i32, shape_a); - copy_data(a, vector{1, 8, -8, 17, -2, 1, 8, -8, 17, -1}); - auto result = backend->create_tensor(element::i32, shape_rt); - vector expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0}; - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ(expected, read_vector(result)); -} - -NGRAPH_TEST(${BACKEND_NAME}, relu_4Dfprop) { - auto shape_a = Shape{2, 2, 2, 2}; - auto A = make_shared(element::f32, shape_a); - auto relu = make_shared(A); - auto shape_rt = Shape{2, 2, 2, 2}; - auto f = make_shared(relu, ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{1, 8, -8, 17, -0.5, 
1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1}); - auto result = backend->create_tensor(element::f32, shape_rt); - vector expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0, 1, 8, 0, 17, 0, 1}; - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f(read_vector(result), expected, MIN_FLOAT_TOLERANCE_BITS)); -} - -NGRAPH_TEST(${BACKEND_NAME}, fuse_max_with_constant_zero_input_as_relu) { - auto shape_a = Shape{2, 5}; - auto A = op::Constant::create(element::f32, shape_a, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); - auto B = make_shared(element::f32, shape_a); - auto max = make_shared(A, B); - auto shape_rt = Shape{2, 5}; - auto f = make_shared(max, ParameterVector{B}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto b = backend->create_tensor(element::f32, shape_a); - copy_data(b, vector{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5}); - auto result = backend->create_tensor(element::f32, shape_rt); - vector expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0}; - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {b}); - EXPECT_TRUE(test::all_close_f(read_vector(result), expected, MIN_FLOAT_TOLERANCE_BITS)); -} diff --git a/ngraph/test/backend/selu.in.cpp b/ngraph/test/backend/selu.in.cpp deleted file mode 100644 index 9dc7d0357ae..00000000000 --- a/ngraph/test/backend/selu.in.cpp +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) 2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "engines_util/test_case.hpp" -#include "engines_util/test_engines.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "util/test_control.hpp" - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; -using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); - -NGRAPH_TEST(${BACKEND_NAME}, selu_2Dfprop) { - Shape rt_shape{2}; - Shape c_shape{1}; - element::Type et = element::f32; - - auto input = make_shared(et, rt_shape); - auto alpha = op::Constant::create(et, c_shape, {1.67326324}); - auto lambda = op::Constant::create(et, c_shape, {1.05070098}); - auto selu = make_shared(input, alpha, lambda); - auto f = make_shared(selu, ParameterVector{input}); - - vector input_data{-1, 3}; - vector expected_out{-1.1113307, 3.152103}; - - auto test_case = test::TestCase(f); - test_case.add_input(rt_shape, input_data); - test_case.add_expected_output(rt_shape, expected_out); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, selu_4Dfprop) { - Shape in_shape{4}; - Shape c_shape{1}; - element::Type et = element::f32; - - auto input = make_shared(et, in_shape); - auto alpha = op::Constant::create(et, c_shape, {1.67326324}); - auto lambda = op::Constant::create(et, c_shape, {1.05070098}); - auto selu = make_shared(input, alpha, lambda); - auto f = make_shared(selu, ParameterVector{input}); - - vector in_vec{-1.0, 0.0, 1.0, 2.0}; - vector out_vec{-1.1113307, 0., 1.050701, 2.101402}; - - auto test_case = test::TestCase(f); - test_case.add_input(in_shape, in_vec); - test_case.add_expected_output(in_shape, out_vec); - test_case.run_with_tolerance_as_fp(1e-4f); -} - -NGRAPH_TEST(${BACKEND_NAME}, selu_1Dfprop) { - Shape in_shape{1}; - Shape c_shape{1}; - element::Type et = element::f32; - - auto input = make_shared(et, in_shape); - auto alpha = op::Constant::create(et, c_shape, {1.67326324}); - auto lambda = op::Constant::create(et, c_shape, {1.05070098}); - auto selu = make_shared(input, alpha, lambda); - auto f = make_shared(selu, ParameterVector{input}); - - vector in_vec{112.0}; - 
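// For non-negative inputs SELU reduces to plain scaling, selu(x) = lambda * x,
// so the expected value below is simply 1.05070098 * 112.0 = 117.67851 (to f32
// precision). The alpha term only enters for x < 0, as exercised by
// selu_3Dfprop_negative further down: selu(x) = lambda * alpha * (exp(x) - 1).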
vector out_vec{117.67851}; - - auto test_case = test::TestCase(f); - test_case.add_input(in_shape, in_vec); - test_case.add_expected_output(in_shape, out_vec); - test_case.run_with_tolerance_as_fp(1e-4f); -} - -NGRAPH_TEST(${BACKEND_NAME}, selu_3Dfprop_negative) { - Shape in_shape{3}; - Shape c_shape{1}; - element::Type et = element::f32; - - auto input = make_shared(et, in_shape); - auto alpha = op::Constant::create(et, c_shape, {1.67326324}); - auto lambda = op::Constant::create(et, c_shape, {1.05070098}); - auto selu = make_shared(input, alpha, lambda); - auto f = make_shared(selu, ParameterVector{input}); - - vector in_vec{-3.0, -12.5, -7.0}; - vector out_vec{-1.6705687, -1.7580928, -1.7564961}; - - auto test_case = test::TestCase(f); - test_case.add_input(in_shape, in_vec); - test_case.add_expected_output(in_shape, out_vec); - test_case.run_with_tolerance_as_fp(1e-4f); -} diff --git a/ngraph/test/backend/sigmoid.in.cpp b/ngraph/test/backend/sigmoid.in.cpp deleted file mode 100644 index dc59cfbf725..00000000000 --- a/ngraph/test/backend/sigmoid.in.cpp +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// clang-format off -#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS -#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS -#endif - -#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS -#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS -#endif -// clang-format on - -#include "gtest/gtest.h" -#include "runtime/backend.hpp" -#include "ngraph/runtime/tensor.hpp" -#include "ngraph/ngraph.hpp" -#include "util/all_close.hpp" -#include "util/all_close_f.hpp" -#include "util/ndarray.hpp" -#include "util/test_control.hpp" -#include "engines_util/execute_tools.hpp" - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; - -NGRAPH_TEST(${BACKEND_NAME}, sigmoid_n1c1h2w2) { - auto input = make_shared(element::f32, Shape{1, 1, 2, 2}); - auto sigmoid_node = make_shared(input); - auto func = make_shared(sigmoid_node, ParameterVector{input}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - shared_ptr a = backend->create_tensor(element::f32, input->get_shape()); - shared_ptr result = backend->create_tensor(element::f32, input->get_shape()); - - float x1 = 1.0f; - float x2 = 4.0f; - float sigma1 = 1.0f / (1.0f + std::exp(-x1)); - float sigma2 = 1.0f / (1.0f + std::exp(-x2)); - - vector dataA{x1, x2, x1, x2}; - copy_data(a, dataA); - - auto handle = backend->compile(func); - handle->call_with_validate({result}, {a}); - vector expected{sigma1, sigma2, sigma1, sigma2}; - EXPECT_TRUE(test::all_close_f(read_vector(result), expected)); -} - -NGRAPH_TEST(${BACKEND_NAME}, sigmoid_n1c1h4) { - auto input = make_shared(element::f32, Shape{1, 1, 4}); - auto sigmoid_node = make_shared(input); - auto func = make_shared(sigmoid_node, ParameterVector{input}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - shared_ptr a = backend->create_tensor(element::f32, input->get_shape()); - shared_ptr result = backend->create_tensor(element::f32, input->get_shape()); - - float x1 = 1.0f; - float x2 = 4.0f; - float sigma1 = 1.0f / (1.0f + std::exp(-x1)); - float sigma2 = 1.0f / (1.0f + std::exp(-x2)); - - vector dataA{x1, x2, x1, x2}; - copy_data(a, dataA); - - auto handle = backend->compile(func); - handle->call_with_validate({result}, {a}); - vector expected{sigma1, sigma2, sigma1, sigma2}; - EXPECT_TRUE(test::all_close_f(read_vector(result), 
expected)); -} diff --git a/ngraph/test/backend/softmax.in.cpp b/ngraph/test/backend/softmax.in.cpp deleted file mode 100644 index 07ce69c0add..00000000000 --- a/ngraph/test/backend/softmax.in.cpp +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// clang-format off -#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS -#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS -#endif - -#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS -#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS -#endif -// clang-format on - -#include "gtest/gtest.h" -#include "runtime/backend.hpp" -#include "ngraph/runtime/tensor.hpp" -#include "ngraph/ngraph.hpp" -#include "util/all_close.hpp" -#include "util/all_close_f.hpp" -#include "util/ndarray.hpp" -#include "util/test_control.hpp" -#include "engines_util/execute_tools.hpp" - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; - -NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d) { - Shape shape{2, 2, 3}; - auto A = make_shared(element::f32, shape); - auto f = make_shared(make_shared(A, 0), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{-10, -20, -30, -40, -50, -60, -1, -2, -3, -4, -5, -6}); - auto result = backend->create_tensor(element::f32, shape); - - auto d0 = expf(-10) + expf(-1); - auto d1 = expf(-20) + expf(-2); - auto d2 = expf(-30) + expf(-3); - auto d3 = expf(-40) + expf(-4); - auto d4 = expf(-50) + expf(-5); - auto d5 = expf(-60) + expf(-6); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - vector expected{expf(-10) / d0, - expf(-20) / d1, - expf(-30) / d2, - expf(-40) / d3, - expf(-50) / d4, - expf(-60) / d5, - expf(-1) / d0, - expf(-2) / d1, - expf(-3) / d2, - expf(-4) / d3, - expf(-5) / d4, - expf(-6) / d5}; - - EXPECT_TRUE(test::all_close(expected, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d_double) { - Shape shape{2, 2, 3}; - auto A = make_shared(element::f64, shape); - auto f = make_shared(make_shared(A, 0), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f64, shape); - copy_data(a, vector{-10, -20, -30, -40, -50, -60, -1, -2, -3, -4, -5, -6}); - auto result = backend->create_tensor(element::f64, shape); - - auto d0 = exp(-10) + exp(-1); - auto d1 = exp(-20) + exp(-2); - auto d2 = exp(-30) + exp(-3); - auto d3 = exp(-40) + exp(-4); - auto d4 = exp(-50) + exp(-5); - auto d5 = exp(-60) + exp(-6); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - vector expected{exp(-10) / d0, - exp(-20) / d1, - exp(-30) / d2, - exp(-40) / d3, - exp(-50) / d4, - exp(-60) / d5, - exp(-1) / d0, - exp(-2) / d1, - exp(-3) / d2, - exp(-4) / d3, - exp(-5) / d4, - exp(-6) / d5}; - - EXPECT_TRUE(test::all_close(expected, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, softmax_2d_axis_1) { - Shape shape{2, 3}; - auto A = make_shared(element::f32, shape); - auto f = make_shared(make_shared(A, 1), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{-10, -20, -30, -40, -50, -60}); - auto result = backend->create_tensor(element::f32, shape); - - auto d0 = expf(-10) + expf(-20) + expf(-30); - auto d1 = expf(-40) + 
expf(-50) + expf(-60); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - vector expected{expf(-10) / d0, - expf(-20) / d0, - expf(-30) / d0, - expf(-40) / d1, - expf(-50) / d1, - expf(-60) / d1}; - EXPECT_TRUE(test::all_close_f(expected, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, softmax_2d_axis_0) { - Shape shape{2, 3}; - auto A = make_shared(element::f32, shape); - auto f = make_shared(make_shared(A, 0), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{-10, -20, -30, -40, -50, -60}); - auto result = backend->create_tensor(element::f32, shape); - - auto d0 = expf(-10) + expf(-40); - auto d1 = expf(-20) + expf(-50); - auto d2 = expf(-30) + expf(-60); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - vector expected{expf(-10) / d0, - expf(-20) / d1, - expf(-30) / d2, - expf(-40) / d0, - expf(-50) / d1, - expf(-60) / d2}; - EXPECT_TRUE(test::all_close(expected, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d_trivial) { - Shape shape{1, 2, 3}; - auto A = make_shared(element::f32, shape); - auto f = make_shared(make_shared(A, 0), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{-10, -20, -30, -40, -50, -60}); - auto result = backend->create_tensor(element::f32, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - vector expected{1, 1, 1, 1, 1, 1}; - EXPECT_TRUE(test::all_close(expected, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, softmax_underflow) { - Shape shape{2, 3}; - auto A = make_shared(element::f32, shape); - auto f = make_shared(make_shared(A, 0), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto low = std::numeric_limits::lowest(); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{low, 1, 2, 3, 4, 5}); - auto result = backend->create_tensor(element::f32, shape); - - auto d0 = expf(low) + expf(3); - auto d1 = expf(1) + expf(4); - auto d2 = expf(2) + expf(5); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - vector expected{expf(low) / d0, expf(1) / d1, expf(2) / d2, expf(3) / d0, expf(4) / d1, expf(5) / d2}; - EXPECT_TRUE(test::all_close_f(expected, read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, softmax_overflow) { - Shape shape{2, 3}; - auto A = make_shared(element::f32, shape); - auto f = make_shared(make_shared(A, 0), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - auto high = std::numeric_limits::max(); - - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{high, 1, 2, 3, 4, 5}); - auto result = backend->create_tensor(element::f32, shape); - - auto d0 = expf(high - high) + expf(3 - high); - auto d1 = expf(1) + expf(4); - auto d2 = expf(2) + expf(5); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - vector expected{expf(high - high) / d0, - expf(1) / d1, - expf(2) / d2, - expf(3 - high) / d0, - expf(4) / d1, - expf(5) / d2}; - EXPECT_TRUE(test::all_close_f(expected, read_vector(result))); -} diff --git a/ngraph/test/backend/softplus.in.cpp b/ngraph/test/backend/softplus.in.cpp deleted file mode 100644 index 0aa37db6af0..00000000000 --- 
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-// clang-format off
-#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
-#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
-#endif
-
-#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
-#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
-#endif
-// clang-format on
-
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
-#include "engines_util/test_engines.hpp"
-#include "engines_util/test_case.hpp"
-#include "util/test_control.hpp"
-
-using namespace std;
-using namespace ngraph;
-
-static string s_manifest = "${MANIFEST}";
-using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
-
-NGRAPH_TEST(${BACKEND_NAME}, softplus) {
-    auto A = make_shared<op::Parameter>(element::f32, Shape{4});
-    auto softplus = make_shared<op::v4::SoftPlus>(A);
-    auto function = make_shared<Function>(NodeVector{softplus}, ParameterVector{A});
-
-    auto test_case = test::TestCase<TestEngine>(function);
-    test_case.add_input<float>(vector<float>{-1.0, 0.0, 1.0, 20.0});
-    test_case.add_expected_output<float>(vector<float>{0.31326166, 0.69314718, 1.3132616, 20.0});
-    test_case.run();
-}
diff --git a/ngraph/test/backend/swish.in.cpp b/ngraph/test/backend/swish.in.cpp
deleted file mode 100644
index 1b56cf277f0..00000000000
--- a/ngraph/test/backend/swish.in.cpp
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-#include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
-#include "util/test_control.hpp"
-
-using namespace std;
-using namespace ngraph;
-
-static string s_manifest = "${MANIFEST}";
-using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
-
-NGRAPH_TEST(${BACKEND_NAME}, swish_2D_with_beta0_6) {
-    Shape in_shape{2, 4};
-    element::Type et = element::f32;
-    auto beta = 0.6f;
-
-    auto args0 = make_shared<op::Parameter>(et, in_shape);
-    auto args1 = make_shared<op::Parameter>(et, Shape{});
-    auto swish = make_shared<op::v4::Swish>(args0, args1);
-    auto f = make_shared<Function>(swish, ParameterVector{args0, args1});
-
-    vector<vector<float>> in_vec{vector<float>{0.4, -5.7, -6, 3, -0.9, 23, 5, 3.3}, vector<float>{beta}};
-    vector<float> out_vec{in_vec[0]};
-    std::transform(out_vec.begin(), out_vec.end(), out_vec.begin(), [&beta](float x) -> float {
-        return (x / (1.0f + std::exp(x * beta * -1.0f)));
-    });
-
-    auto test_case = test::TestCase<TestEngine>(f);
-    test_case.add_multiple_inputs<float>(in_vec);
-    test_case.add_expected_output<float>(in_shape, out_vec);
-    test_case.run();
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, swish_2D_without_beta) {
-    Shape in_shape{2, 3};
-    element::Type et = element::f32;
-
-    auto args0 = make_shared<op::Parameter>(et, in_shape);
-    auto swish = make_shared<op::v4::Swish>(args0);
-    auto f = make_shared<Function>(swish, ParameterVector{args0});
-
-    vector<float> in_vec{1, 8, -8, 17, -0.5, -1};
-    vector<float> out_vec{in_vec};
-    std::transform(out_vec.begin(), out_vec.end(), out_vec.begin(), [](float x) -> float {
-        return (x / (1.0f + std::exp(x * -1.0f)));
-    });
-
-    auto test_case = test::TestCase<TestEngine>(f);
-    test_case.add_input<float>(in_vec);
-    test_case.add_expected_output<float>(in_shape, out_vec);
-    test_case.run();
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, swish_4D_with_beta0_33) {
-    Shape in_shape{2, 2, 1, 2};
-    element::Type et = element::f32;
-    auto beta = 0.33f;
-
-    auto args0 = make_shared<op::Parameter>(et, in_shape);
-    auto args1 = make_shared<op::Parameter>(et, Shape{});
-    auto swish = make_shared<op::v4::Swish>(args0, args1);
-    auto f = make_shared<Function>(swish, ParameterVector{args0, args1});
-
-    vector<vector<float>> in_vec{vector<float>{0.1, 0.6, 20, -7, -5.3, 3.5, -9, 11}, vector<float>{beta}};
-    vector<float> out_vec{in_vec[0]};
-    std::transform(out_vec.begin(), out_vec.end(), out_vec.begin(), [&beta](float x) -> float {
-        return (x / (1.0f + std::exp(x * beta * -1.0f)));
-    });
-
-    auto test_case = test::TestCase<TestEngine>(f);
-    test_case.add_multiple_inputs<float>(in_vec);
-    test_case.add_expected_output<float>(in_shape, out_vec);
-    test_case.run();
-}
diff --git a/ngraph/test/type_prop/relu.cpp b/ngraph/test/type_prop/relu.cpp
new file mode 100644
index 00000000000..6d45e0d6846
--- /dev/null
+++ b/ngraph/test/type_prop/relu.cpp
@@ -0,0 +1,26 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gtest/gtest.h"
+#include "ngraph/ngraph.hpp"
+#include "util/type_prop.hpp"
+
+using namespace std;
+using namespace ngraph;
+
+TEST(type_prop, relu_2d) {
+    auto param = make_shared<op::Parameter>(element::f32, Shape{2, 4});
+    Shape relu_shape{2, 4};
+    auto relu = make_shared<op::Relu>(param);
+    ASSERT_EQ(relu->get_element_type(), element::f32);
+    ASSERT_EQ(relu->get_shape(), relu_shape);
+}
+
+TEST(type_prop, relu_4d) {
+    auto param = make_shared<op::Parameter>(element::f32, Shape{2, 2, 2, 2});
+    Shape relu_shape{2, 2, 2, 2};
+    auto relu = make_shared<op::Relu>(param);
+    ASSERT_EQ(relu->get_element_type(), element::f32);
+    ASSERT_EQ(relu->get_shape(), relu_shape);
+}
\ No newline at end of file
diff --git a/ngraph/test/type_prop/sigmoid.cpp b/ngraph/test/type_prop/sigmoid.cpp
new file mode 100644
index 00000000000..ed9d8b65c18
--- /dev/null
+++ b/ngraph/test/type_prop/sigmoid.cpp
@@ -0,0 +1,36 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gtest/gtest.h"
+#include "ngraph/ngraph.hpp"
+#include "util/type_prop.hpp"
+
+using namespace std;
+using namespace ngraph;
+
+TEST(type_prop, sigmoid) {
+    auto data = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6});
+    auto sigmoid_func = make_shared<op::Sigmoid>(data);
+    EXPECT_EQ(sigmoid_func->get_element_type(), element::f32);
+    EXPECT_EQ(sigmoid_func->get_shape(), data->get_output_shape(0));
+}
+
+TEST(type_prop, sigmoid_partial) {
+    auto data = make_shared<op::Parameter>(element::f32, PartialShape{1, Dimension::dynamic(), 6});
+    auto sigmoid_func = make_shared<op::Sigmoid>(data);
+    EXPECT_EQ(sigmoid_func->get_element_type(), element::f32);
+    ASSERT_TRUE(sigmoid_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0)));
+
+    // rank unknown
+    auto sigmoid_partial = make_shared<op::Sigmoid>(make_shared<op::Parameter>(element::f32, PartialShape::dynamic()));
+    ASSERT_TRUE(sigmoid_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
+}
+
+TEST(type_prop, sigmoid_partial_static_rank) {
+    auto data = make_shared<op::Parameter>(element::f32, PartialShape{1, Dimension::dynamic(), 6});
+    auto sigmoid_func = make_shared<op::Sigmoid>(data);
+    EXPECT_EQ(sigmoid_func->get_element_type(), element::f32);
+    ASSERT_TRUE(sigmoid_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0)));
+    ASSERT_TRUE(sigmoid_func->get_output_partial_shape(0).rank().is_static());
+}
diff --git a/ngraph/test/visitors/op/clamp.cpp b/ngraph/test/visitors/op/clamp.cpp
new file mode 100644
index 00000000000..f4b4f11ce12
--- /dev/null
+++ b/ngraph/test/visitors/op/clamp.cpp
@@ -0,0 +1,32 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gtest/gtest.h"
+#include "ngraph/ngraph.hpp"
+#include "ngraph/op/util/attr_types.hpp"
+#include "ngraph/opsets/opset1.hpp"
+#include "util/visitor.hpp"
+
+using namespace std;
+using namespace ngraph;
+using ngraph::test::NodeBuilder;
+using ngraph::test::ValueMap;
+
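+// NodeBuilder round-trip: serialize the op's attributes, rebuild the op from
+// them with create(), and check that the rebuilt op preserves min and max.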
+TEST(attributes, clamp_op) {
+    NodeBuilder::get_ops().register_factory<opset1::Clamp>();
+    auto data = make_shared<op::Parameter>(element::f32, Shape{2, 4});
+
+    double min = 0.4;
+    double max = 5.6;
+
+    const auto clamp = make_shared<opset1::Clamp>(data, min, max);
+    NodeBuilder builder(clamp);
+    auto g_clamp = ov::as_type_ptr<opset1::Clamp>(builder.create());
+
+    const auto expected_attr_count = 2;
+    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
+
+    EXPECT_EQ(g_clamp->get_min(), clamp->get_min());
+    EXPECT_EQ(g_clamp->get_max(), clamp->get_max());
+}
diff --git a/ngraph/test/visitors/op/exp.cpp b/ngraph/test/visitors/op/exp.cpp
new file mode 100644
index 00000000000..3ea036eefb6
--- /dev/null
+++ b/ngraph/test/visitors/op/exp.cpp
@@ -0,0 +1,9 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "unary_ops.hpp"
+
+using Type = ::testing::Types<UnaryOperatorType<ngraph::op::v0::Exp, ngraph::element::f32>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_attribute, UnaryOperatorVisitor, Type, UnaryOperatorTypeName);
diff --git a/ngraph/test/visitors/op/hard_sigmoid.cpp b/ngraph/test/visitors/op/hard_sigmoid.cpp
new file mode 100644
index 00000000000..6345d608e80
--- /dev/null
+++ b/ngraph/test/visitors/op/hard_sigmoid.cpp
@@ -0,0 +1,26 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gtest/gtest.h"
+#include "ngraph/ngraph.hpp"
+#include "ngraph/op/util/attr_types.hpp"
+#include "ngraph/opsets/opset1.hpp"
+#include "util/visitor.hpp"
+
+using namespace std;
+using namespace ngraph;
+using ngraph::test::NodeBuilder;
+
+TEST(attributes, hardsigmoid_op) {
+    NodeBuilder::get_ops().register_factory<opset1::HardSigmoid>();
+    const auto data = make_shared<op::Parameter>(element::f32, Shape{2, 5});
+    const auto alpha = make_shared<op::Parameter>(element::f32, Shape{});
+    const auto beta = make_shared<op::Parameter>(element::f32, Shape{});
+
+    const auto hardsigmoid = make_shared<opset1::HardSigmoid>(data, alpha, beta);
+    NodeBuilder builder(hardsigmoid);
+
+    const auto expected_attr_count = 0;
+    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
+}
diff --git a/ngraph/test/visitors/op/hsigmoid.cpp b/ngraph/test/visitors/op/hsigmoid.cpp
new file mode 100644
index 00000000000..48665318839
--- /dev/null
+++ b/ngraph/test/visitors/op/hsigmoid.cpp
@@ -0,0 +1,9 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "unary_ops.hpp"
+
+using Type = ::testing::Types<UnaryOperatorType<ngraph::op::v5::HSigmoid, ngraph::element::f32>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_attribute, UnaryOperatorVisitor, Type, UnaryOperatorTypeName);
diff --git a/ngraph/test/visitors/op/hswish.cpp b/ngraph/test/visitors/op/hswish.cpp
new file mode 100644
index 00000000000..1b04c293eed
--- /dev/null
+++ b/ngraph/test/visitors/op/hswish.cpp
@@ -0,0 +1,9 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "unary_ops.hpp"
+
+using Type = ::testing::Types<UnaryOperatorType<ngraph::op::v4::HSwish, ngraph::element::f32>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_attribute, UnaryOperatorVisitor, Type, UnaryOperatorTypeName);
diff --git a/ngraph/test/visitors/op/log_softmax.cpp b/ngraph/test/visitors/op/log_softmax.cpp
new file mode 100644
index 00000000000..7732bb2f9c7
--- /dev/null
+++ b/ngraph/test/visitors/op/log_softmax.cpp
@@ -0,0 +1,33 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gtest/gtest.h"
+#include "ngraph/ngraph.hpp"
+#include "ngraph/op/util/attr_types.hpp"
+#include "ngraph/opsets/opset1.hpp"
+#include "ngraph/opsets/opset3.hpp"
+#include "ngraph/opsets/opset4.hpp"
+#include "ngraph/opsets/opset5.hpp"
+#include "util/visitor.hpp"
+
+using namespace std;
+using namespace ngraph;
+using ngraph::test::NodeBuilder;
+using ngraph::test::ValueMap;
+
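+// LogSoftmax carries a single serialized attribute (axis), so the builder
+// should record exactly one value and preserve it through create().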
"ngraph/opsets/opset5.hpp" +#include "util/visitor.hpp" + +using namespace std; +using namespace ngraph; +using ngraph::test::NodeBuilder; +using ngraph::test::ValueMap; + +TEST(attributes, logsoftmax_op) { + NodeBuilder::get_ops().register_factory(); + auto data = make_shared(element::f32, Shape{3, 2, 3}); + + int64_t axis = 2; + + const auto logsoftmax = make_shared(data, axis); + NodeBuilder builder(logsoftmax); + auto g_logsoftmax = ov::as_type_ptr(builder.create()); + + const auto expected_attr_count = 1; + EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); + + EXPECT_EQ(g_logsoftmax->get_axis(), logsoftmax->get_axis()); +} diff --git a/ngraph/test/visitors/op/prelu.cpp b/ngraph/test/visitors/op/prelu.cpp new file mode 100644 index 00000000000..70434d56c64 --- /dev/null +++ b/ngraph/test/visitors/op/prelu.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "ngraph/op/util/attr_types.hpp" +#include "ngraph/opsets/opset1.hpp" +#include "util/visitor.hpp" + +using namespace std; +using namespace ngraph; +using ngraph::test::NodeBuilder; + +TEST(attributes, prelu_op) { + NodeBuilder::get_ops().register_factory(); + const auto data = make_shared(element::f32, Shape{1, 2, 1, 2}); + const auto slope = make_shared(element::f32, Shape{5}); + + const auto prelu = make_shared(data, slope); + NodeBuilder builder(prelu); + + const auto expected_attr_count = 0; + EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); +} diff --git a/ngraph/test/visitors/op/relu.cpp b/ngraph/test/visitors/op/relu.cpp new file mode 100644 index 00000000000..f547b42e266 --- /dev/null +++ b/ngraph/test/visitors/op/relu.cpp @@ -0,0 +1,9 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "unary_ops.hpp" + +using Type = ::testing::Types>; + +INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_atrribute, UnaryOperatorVisitor, Type, UnaryOperatorTypeName); diff --git a/ngraph/test/visitors/op/sigmoid.cpp b/ngraph/test/visitors/op/sigmoid.cpp new file mode 100644 index 00000000000..6daaad7511a --- /dev/null +++ b/ngraph/test/visitors/op/sigmoid.cpp @@ -0,0 +1,9 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "unary_ops.hpp" + +using Type = ::testing::Types>; + +INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_atrribute, UnaryOperatorVisitor, Type, UnaryOperatorTypeName);