Migrate ngraph backend test/activations (#7973)
* Remove fp16 of Convert layer test from skip_tests.config.cpp as it works now
* update repo
* create new PR from PR7849
* remove backend tests of activation operations
Parent: 595222e9c6
Commit: a8f0109584
@@ -21,6 +21,7 @@ CommonReferenceTest::CommonReferenceTest(): targetDevice("TEMPLATE") {
}

void CommonReferenceTest::Exec() {
    SKIP_IF_CURRENT_TEST_IS_DISABLED();
    LoadNetwork();
    FillInputs();
    Infer();
@@ -97,6 +98,11 @@ void CommonReferenceTest::ValidateBlobs(const ov::runtime::Tensor& refBlob, const ov::runtime::Tensor& outBlob) {
            refBlob.data<const float>(), outBlob.data<const float>(),
            refBlob.get_size(), threshold, abs_threshold);
        break;
    case ov::element::f64:
        LayerTestsUtils::LayerTestsCommon::Compare<double, double>(
            refBlob.data<const double>(), outBlob.data<const double>(),
            refBlob.get_size(), threshold, abs_threshold);
        break;
    case ov::element::i8:
        LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(
            refBlob.data<const int8_t>(), outBlob.data<const int8_t>(),

docs/template_plugin/tests/functional/op_reference/clamp.cpp (new file, 201 lines)
@@ -0,0 +1,201 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include "openvino/op/clamp.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;

namespace {
struct ClampParams {
    template <class IT>
    ClampParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues,
                const double min, const double max)
        : min(min),
          max(max),
          pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)) {}

    double min = 0;
    double max = 0;

    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
};

class ReferenceClampLayerTest : public testing::TestWithParam<ClampParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType, params.min, params.max);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<ClampParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType << "_";
        result << "min=" << param.min << "_";
        result << "max=" << param.max;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const ov::PartialShape& input_shape, const ov::element::Type& input_type,
                                                    const ov::element::Type& expected_output_type, const double min, const double max) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto Clamp = std::make_shared<op::v0::Clamp>(in, min, max);
        return std::make_shared<ov::Function>(NodeVector {Clamp}, ParameterVector {in});
    }
};

TEST_P(ReferenceClampLayerTest, CompareWithRefs) {
    Exec();
}
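
// Clamp computes y = min(max(x, min_value), max_value) elementwise; the generators below
// probe the bounds with each element type's numeric limits and +/- infinity.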

template <element::Type_t IN_ET>
std::vector<ClampParams> generateClampFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;
    auto min = std::numeric_limits<T>::min();
    auto max = std::numeric_limits<T>::max();
    auto pinf = std::numeric_limits<float>::infinity();
    auto ninf = -std::numeric_limits<float>::infinity();
    std::vector<ClampParams> clampParams {
        ClampParams(ov::PartialShape {5, 2},
                    IN_ET,
                    std::vector<T>{-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8},
                    std::vector<T>{0.2, 0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.6, 0.6},
                    0.2,
                    0.6),
        ClampParams(ov::PartialShape {5, 2},
                    IN_ET,
                    std::vector<T>{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001},
                    std::vector<T>{10.0, 20.0, 10.0, 20.0, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.0},
                    10.0,
                    20.0),
        ClampParams(ov::PartialShape {5, 2},
                    IN_ET,
                    std::vector<T>{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001},
                    std::vector<T>{10.0, max, 10.0, pinf, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.000001},
                    10.0,
                    pinf),
        ClampParams(ov::PartialShape {5, 2},
                    IN_ET,
                    std::vector<T>{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001},
                    std::vector<T>{min, 20.0, ninf, 20.0, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.0},
                    ninf,
                    20.0)
    };
    return clampParams;
}

template <element::Type_t IN_ET>
std::vector<ClampParams> generateClampIntParams() {
    using T = typename element_type_traits<IN_ET>::value_type;
    auto min = std::numeric_limits<T>::min();
    auto max = std::numeric_limits<T>::max();
    auto pinf = std::numeric_limits<float>::infinity();
    auto ninf = -std::numeric_limits<float>::infinity();
    std::vector<ClampParams> clampParams {
        ClampParams(ov::PartialShape {6},
                    IN_ET,
                    std::vector<T>{-1, 3, -10, 20, 6, 2},
                    std::vector<T>{1, 3, 1, 5, 5, 2},
                    0.4,
                    5.6),
        ClampParams(ov::PartialShape {6},
                    IN_ET,
                    std::vector<T>{-6, 1, -2, 0, -1, 2},
                    std::vector<T>{-5, -1, -2, -1, -1, -1},
                    -5.6,
                    -0.4),
        ClampParams(ov::PartialShape {4, 2},
                    IN_ET,
                    std::vector<T>{min, max, 9, 10, 11, 19, 20, 21},
                    std::vector<T>{10, 20, 10, 10, 11, 19, 20, 20},
                    10.0,
                    20.0),
        ClampParams(ov::PartialShape {4, 2},
                    IN_ET,
                    std::vector<T>{min, max, 9, 10, 11, 19, 20, 21},
                    std::vector<T>{10, max, 10, 10, 11, 19, 20, 21},
                    10.0,
                    pinf),
        ClampParams(ov::PartialShape {4, 2},
                    IN_ET,
                    std::vector<T>{min, max, 9, 10, 11, 19, 20, 21},
                    std::vector<T>{min, 20, 9, 10, 11, 19, 20, 20},
                    ninf,
                    20.0)
    };
    return clampParams;
}

template <element::Type_t IN_ET>
std::vector<ClampParams> generateClampUintParams() {
    using T = typename element_type_traits<IN_ET>::value_type;
    auto min = std::numeric_limits<T>::min();
    T max = (static_cast<T>(1) << (std::numeric_limits<T>::digits - 1)) - 1;
    auto pinf = static_cast<double>(max);
    auto ninf = -std::numeric_limits<float>::infinity();
    std::vector<ClampParams> clampParams {
        ClampParams(ov::PartialShape {4, 2},
                    IN_ET,
                    std::vector<T>{min, max, 9, 10, 11, 19, 20, 21},
                    std::vector<T>{10, 20, 10, 10, 11, 19, 20, 20},
                    10.0,
                    20.0),
        ClampParams(ov::PartialShape {4, 2},
                    IN_ET,
                    std::vector<T>{min, max, 9, 10, 11, 19, 20, 21},
                    std::vector<T>{10, max, 10, 10, 11, 19, 20, 21},
                    10.0,
                    pinf),
        ClampParams(ov::PartialShape {4, 2},
                    IN_ET,
                    std::vector<T>{min, max, 9, 10, 11, 19, 20, 21},
                    std::vector<T>{min, 20, 9, 10, 11, 19, 20, 20},
                    ninf,
                    20.0)
    };
    return clampParams;
}

std::vector<ClampParams> generateClampCombinedParams() {
    const std::vector<std::vector<ClampParams>> clampTypeParams {
        generateClampFloatParams<element::Type_t::f32>(),
        generateClampFloatParams<element::Type_t::f16>(),
        generateClampFloatParams<element::Type_t::bf16>(),
        generateClampIntParams<element::Type_t::i8>(),
        generateClampIntParams<element::Type_t::i16>(),
        generateClampIntParams<element::Type_t::i32>(),
        generateClampIntParams<element::Type_t::i64>(),
        generateClampUintParams<element::Type_t::u8>(),
        generateClampUintParams<element::Type_t::u16>(),
        generateClampUintParams<element::Type_t::u32>(),
        generateClampUintParams<element::Type_t::u64>()
    };
    std::vector<ClampParams> combinedParams;

    for (const auto& params : clampTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_Clamp_With_Hardcoded_Refs, ReferenceClampLayerTest,
                         testing::ValuesIn(generateClampCombinedParams()), ReferenceClampLayerTest::getTestCaseName);

} // namespace

docs/template_plugin/tests/functional/op_reference/elu.cpp (new file, 146 lines)
@@ -0,0 +1,146 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include "openvino/op/elu.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;

namespace {
struct EluParams {
    template <class IT>
    EluParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues,
              const double alpha)
        : alpha(alpha),
          pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)) {}

    double alpha = 0;

    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
};

class ReferenceEluLayerTest : public testing::TestWithParam<EluParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType, params.alpha);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<EluParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType << "_";
        result << "alpha=" << param.alpha;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type, const double alpha) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto Elu = std::make_shared<op::v0::Elu>(in, alpha);
        return std::make_shared<ov::Function>(NodeVector {Elu}, ParameterVector {in});
    }
};

TEST_P(ReferenceEluLayerTest, CompareWithRefs) {
    Exec();
}
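
// Elu computes y = x for x > 0 and y = alpha * (exp(x) - 1) otherwise; in the integer and
// unsigned cases below the fractional negative branch truncates to 0 in the reference values.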

template <element::Type_t IN_ET>
std::vector<EluParams> generateEluFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<EluParams> eluParams {
        EluParams(ov::PartialShape {3, 2},
                  IN_ET,
                  std::vector<T>{-2.f, 3.f, -2.f, 1.f, -1.f, 0.f},
                  std::vector<T>{-0.432332358f, 3.f, -0.432332358f, 1.f, -0.316060279f, 0.f},
                  0.5f),
        EluParams(ov::PartialShape {3, 2},
                  IN_ET,
                  std::vector<T>{-2.f, 3.f, -2.f, 1.f, -1.f, 0.f},
                  std::vector<T>{0.864664717f, 3.f, 0.864664717f, 1.f, 0.632120559f, 0.f},
                  -1.f)
    };
    return eluParams;
}

template <element::Type_t IN_ET>
std::vector<EluParams> generateEluIntParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<EluParams> eluParams {
        EluParams(ov::PartialShape {3, 2},
                  IN_ET,
                  std::vector<T>{-2, 3, -2, 1, -1, 0},
                  std::vector<T>{0, 3, 0, 1, 0, 0},
                  0.5f),
        EluParams(ov::PartialShape {3, 2},
                  IN_ET,
                  std::vector<T>{-2, 3, -2, 1, -1, 0},
                  std::vector<T>{0, 3, 0, 1, 0, 0},
                  -1.f)
    };
    return eluParams;
}

template <element::Type_t IN_ET>
std::vector<EluParams> generateEluUintParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<EluParams> eluParams {
        EluParams(ov::PartialShape {3, 2},
                  IN_ET,
                  std::vector<T>{5, 4, 3, 2, 1, 0},
                  std::vector<T>{5, 4, 3, 2, 1, 0},
                  0.5f),
        EluParams(ov::PartialShape {3, 2},
                  IN_ET,
                  std::vector<T>{5, 4, 3, 2, 1, 0},
                  std::vector<T>{5, 4, 3, 2, 1, 0},
                  -1.f)
    };
    return eluParams;
}

std::vector<EluParams> generateEluCombinedParams() {
    const std::vector<std::vector<EluParams>> eluTypeParams {
        generateEluFloatParams<element::Type_t::f32>(),
        generateEluFloatParams<element::Type_t::f16>(),
        generateEluFloatParams<element::Type_t::bf16>(),
        generateEluIntParams<element::Type_t::i8>(),
        generateEluIntParams<element::Type_t::i16>(),
        generateEluIntParams<element::Type_t::i32>(),
        generateEluIntParams<element::Type_t::i64>(),
        generateEluUintParams<element::Type_t::u8>(),
        generateEluUintParams<element::Type_t::u16>(),
        generateEluUintParams<element::Type_t::u32>(),
        generateEluUintParams<element::Type_t::u64>()
    };
    std::vector<EluParams> combinedParams;

    for (const auto& params : eluTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_Elu_With_Hardcoded_Refs, ReferenceEluLayerTest,
                         testing::ValuesIn(generateEluCombinedParams()), ReferenceEluLayerTest::getTestCaseName);

} // namespace

docs/template_plugin/tests/functional/op_reference/exp.cpp (new file, 194 lines)
@@ -0,0 +1,194 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include "openvino/op/exp.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;
using namespace InferenceEngine;

namespace {
struct ExpParams {
    template <class IT>
    ExpParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues)
        : pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)) {}

    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
};

class ReferenceExpLayerTest : public testing::TestWithParam<ExpParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<ExpParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto Exp = std::make_shared<op::v0::Exp>(in);
        return std::make_shared<ov::Function>(NodeVector {Exp}, ParameterVector {in});
    }
};

class ReferenceExpInPlaceLayerTest : public testing::TestWithParam<ExpParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<ExpParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto Exp = std::make_shared<op::v0::Exp>(in);
        const auto ExpInPlace = std::make_shared<op::v0::Exp>(Exp);
        return std::make_shared<ov::Function>(NodeVector {ExpInPlace}, ParameterVector {in});
    }
};

TEST_P(ReferenceExpLayerTest, CompareWithRefs) {
    Exec();
}

TEST_P(ReferenceExpInPlaceLayerTest, CompareWithRefs) {
    Exec();
}
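
// The plain test checks y = e^x elementwise; the in-place variant chains two Exp nodes,
// so its reference values are expf(expf(x)).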

template <element::Type_t IN_ET>
std::vector<ExpParams> generateExpFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<ExpParams> expParams {
        ExpParams(ov::PartialShape {8},
                  IN_ET,
                  std::vector<T>{-4, -3, -2, -1, 0, 1, 2, 3},
                  std::vector<T>{expf(-4), expf(-3), expf(-2), expf(-1), expf(0), expf(1), expf(2), expf(3)}),
        ExpParams(ov::PartialShape {1},
                  IN_ET,
                  std::vector<T>{13},
                  std::vector<T>{expf(13)})
    };
    return expParams;
}

template <element::Type_t IN_ET>
std::vector<ExpParams> generateExpIntParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<ExpParams> expParams {
        ExpParams(ov::PartialShape {8},
                  IN_ET,
                  std::vector<T>{-4, -3, -2, -1, 0, 1, 2, 3},
                  std::vector<T>{static_cast<T>(expf(-4)), static_cast<T>(expf(-3)), static_cast<T>(expf(-2)), static_cast<T>(expf(-1)),
                                 static_cast<T>(expf(0)), static_cast<T>(expf(1)), static_cast<T>(expf(2)), static_cast<T>(expf(3))}),
        ExpParams(ov::PartialShape {1},
                  IN_ET,
                  std::vector<T>{13},
                  std::vector<T>{static_cast<T>(expf(13))})
    };
    return expParams;
}

template <element::Type_t IN_ET>
std::vector<ExpParams> generateExpUintParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<ExpParams> expParams {
        ExpParams(ov::PartialShape {8},
                  IN_ET,
                  std::vector<T>{0, 1, 2, 3, 4, 5, 10, 100},
                  std::vector<T>{static_cast<T>(expf(0)), static_cast<T>(expf(1)), static_cast<T>(expf(2)), static_cast<T>(expf(3)),
                                 static_cast<T>(expf(4)), static_cast<T>(expf(5)), static_cast<T>(expf(10)), static_cast<T>(expf(100))}),
        ExpParams(ov::PartialShape {1},
                  IN_ET,
                  std::vector<T>{13},
                  std::vector<T>{static_cast<T>(expf(13))})
    };
    return expParams;
}

template <element::Type_t IN_ET>
std::vector<ExpParams> generateExpInPlaceFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<ExpParams> expParams {
        ExpParams(ov::PartialShape {2},
                  IN_ET,
                  std::vector<T>{1, 3},
                  std::vector<T>{expf(expf(1)), expf(expf(3))})
    };
    return expParams;
}

std::vector<ExpParams> generateExpCombinedParams() {
    const std::vector<std::vector<ExpParams>> expTypeParams {
        generateExpFloatParams<element::Type_t::f32>(),
        generateExpFloatParams<element::Type_t::f16>(),
        generateExpIntParams<element::Type_t::i32>(),
        generateExpIntParams<element::Type_t::i64>(),
        generateExpUintParams<element::Type_t::u32>(),
        generateExpUintParams<element::Type_t::u64>()
    };
    std::vector<ExpParams> combinedParams;

    for (const auto& params : expTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

std::vector<ExpParams> generateExpInPlaceCombinedParams() {
    const std::vector<std::vector<ExpParams>> expTypeParams {
        generateExpInPlaceFloatParams<element::Type_t::f16>(),
        generateExpInPlaceFloatParams<element::Type_t::f32>()
    };
    std::vector<ExpParams> combinedParams;

    for (const auto& params : expTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_Exp_With_Hardcoded_Refs, ReferenceExpLayerTest,
                         testing::ValuesIn(generateExpCombinedParams()), ReferenceExpLayerTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Exp_In_Place_With_Hardcoded_Refs, ReferenceExpInPlaceLayerTest,
                         testing::ValuesIn(generateExpInPlaceCombinedParams()), ReferenceExpInPlaceLayerTest::getTestCaseName);

} // namespace

docs/template_plugin/tests/functional/op_reference/gelu.cpp (new file, 174 lines)
@@ -0,0 +1,174 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include "openvino/op/gelu.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;
using namespace InferenceEngine;

namespace {
struct GeluParams {
    template <class IT>
    GeluParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues,
               const ov::op::GeluApproximationMode mode)
        : mode(mode),
          pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)) {}

    ov::op::GeluApproximationMode mode = ov::op::GeluApproximationMode::ERF;
    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
};

class ReferenceGeluV0LayerTest : public testing::TestWithParam<GeluParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType, params.mode);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<GeluParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type, const op::GeluApproximationMode mode) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto Gelu = std::make_shared<op::v0::Gelu>(in);
        return std::make_shared<ov::Function>(NodeVector {Gelu}, ParameterVector {in});
    }
};

class ReferenceGeluV7LayerTest : public testing::TestWithParam<GeluParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType, params.mode);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<GeluParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType << "_";
        result << "ApproxMode=" << param.mode;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type, const op::GeluApproximationMode mode) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto Gelu = std::make_shared<op::v7::Gelu>(in, mode);
        return std::make_shared<ov::Function>(NodeVector {Gelu}, ParameterVector {in});
    }
};

TEST_P(ReferenceGeluV0LayerTest, CompareWithRefs) {
    Exec();
}
TEST_P(ReferenceGeluV7LayerTest, CompareWithRefs) {
    Exec();
}
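
// Gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))); TANH mode uses the tanh-based approximation of
// the same curve. v0 exposes only the erf form (the mode argument above is unused there),
// while v7 takes the approximation mode explicitly.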

template <element::Type_t IN_ET>
std::vector<GeluParams> generateGeluV0FloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<GeluParams> geluParams {
        GeluParams(ov::PartialShape {8},
                   IN_ET,
                   std::vector<T>{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0},
                   std::vector<T>{-0.00012636185, -0.0040495098, -0.04550028, -0.15865529, 0.0, 0.8413447, 1.9544997, 2.9959507},
                   op::GeluApproximationMode::ERF),
        GeluParams(ov::PartialShape {3},
                   IN_ET,
                   std::vector<T>{-0.5, 0.1, 0.4},
                   std::vector<T>{-0.15426877, 0.05398279, 0.2621686},
                   op::GeluApproximationMode::ERF)
    };
    return geluParams;
}

template <element::Type_t IN_ET>
std::vector<GeluParams> generateGeluV7FloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<GeluParams> geluParams {
        GeluParams(ov::PartialShape {8},
                   IN_ET,
                   std::vector<T>{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0},
                   std::vector<T>{-0.00012636185, -0.0040495098, -0.04550028, -0.15865529, 0.0, 0.8413447, 1.9544997, 2.9959507},
                   op::GeluApproximationMode::ERF),
        GeluParams(ov::PartialShape {8},
                   IN_ET,
                   std::vector<T>{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0},
                   std::vector<T>{-0.00012636185, -0.0040495098, -0.04550028, -0.15865529, 0.0, 0.8413447, 1.9544997, 2.9959507},
                   op::GeluApproximationMode::TANH),
        GeluParams(ov::PartialShape {3},
                   IN_ET,
                   std::vector<T>{-0.5, 0.1, 0.4},
                   std::vector<T>{-0.15426877, 0.05398279, 0.2621686},
                   op::GeluApproximationMode::ERF),
        GeluParams(ov::PartialShape {3},
                   IN_ET,
                   std::vector<T>{-0.5, 0.1, 0.4},
                   std::vector<T>{-0.15428599, 0.053982753, 0.262161165},
                   op::GeluApproximationMode::TANH)
    };
    return geluParams;
}

std::vector<GeluParams> generateGeluV0CombinedParams() {
    const std::vector<std::vector<GeluParams>> geluTypeParams {
        generateGeluV0FloatParams<element::Type_t::f32>(),
        generateGeluV0FloatParams<element::Type_t::f16>()
    };
    std::vector<GeluParams> combinedParams;

    for (const auto& params : geluTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

std::vector<GeluParams> generateGeluV7CombinedParams() {
    const std::vector<std::vector<GeluParams>> geluTypeParams {
        generateGeluV7FloatParams<element::Type_t::f32>(),
        generateGeluV7FloatParams<element::Type_t::f16>()
    };
    std::vector<GeluParams> combinedParams;

    for (const auto& params : geluTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_Gelu_v2_With_Hardcoded_Refs, ReferenceGeluV0LayerTest,
                         testing::ValuesIn(generateGeluV0CombinedParams()), ReferenceGeluV0LayerTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Gelu_v7_With_Hardcoded_Refs, ReferenceGeluV7LayerTest,
                         testing::ValuesIn(generateGeluV7CombinedParams()), ReferenceGeluV7LayerTest::getTestCaseName);

} // namespace

@@ -0,0 +1,111 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>
#include <ngraph/ngraph.hpp>

#include "openvino/op/hard_sigmoid.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;

namespace {
struct HardSigmoidParams {
    template <class IT>
    HardSigmoidParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues,
                      const float alpha, const float beta)
        : pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)),
          alpha(alpha),
          beta(beta) {}

    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
    float alpha;
    float beta;
};

class ReferenceHardSigmoidLayerTest : public testing::TestWithParam<HardSigmoidParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType, params.alpha, params.beta);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<HardSigmoidParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType << "_";
        result << "alpha=" << param.alpha << "_";
        result << "beta=" << param.beta;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type, const float alphaData, const float betaData) {
        std::vector<float> alphaArray;
        std::vector<float> betaArray;
        alphaArray.push_back(alphaData);
        betaArray.push_back(betaData);
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto alpha = ngraph::op::Constant::create(input_type, Shape{}, {alphaData});
        const auto beta = ngraph::op::Constant::create(input_type, Shape{}, {betaData});
        const auto HardSigmoid = std::make_shared<op::v0::HardSigmoid>(in, alpha, beta);
        return std::make_shared<ov::Function>(NodeVector {HardSigmoid}, ParameterVector {in});
    }
};

TEST_P(ReferenceHardSigmoidLayerTest, CompareWithRefs) {
    Exec();
}
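
// HardSigmoid computes y = max(0, min(1, alpha * x + beta)) elementwise; alpha and beta
// enter the graph as scalar constants above.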

template <element::Type_t IN_ET>
std::vector<HardSigmoidParams> generateHardSigmoidFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<HardSigmoidParams> hardSigmoidParams {
        HardSigmoidParams(ov::PartialShape {3},
                          IN_ET,
                          std::vector<T>{-1.0f, 0.0f, 1.0f},
                          std::vector<T>{0.1f, 0.6f, 1.f},
                          0.5,
                          0.6),
        HardSigmoidParams(ov::PartialShape {2, 5},
                          IN_ET,
                          std::vector<T>{-3.0f, -1.0f, 0.0f, 1.0f, 3.0f, 0.5f, -0.2f, 6.0f, 8.0f, 0.1f},
                          std::vector<T>{0.0f, 0.3f, 0.5f, 0.7f, 1.0f, 0.6f, 0.46f, 1.0f, 1.0f, 0.52f},
                          0.2,
                          0.5)
    };
    return hardSigmoidParams;
}

std::vector<HardSigmoidParams> generateHardSigmoidCombinedParams() {
    const std::vector<std::vector<HardSigmoidParams>> hardSigmoidTypeParams {
        generateHardSigmoidFloatParams<element::Type_t::f32>(),
        generateHardSigmoidFloatParams<element::Type_t::f16>()
    };
    std::vector<HardSigmoidParams> combinedParams;

    for (const auto& params : hardSigmoidTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_HardSigmoid_With_Hardcoded_Refs, ReferenceHardSigmoidLayerTest,
                         testing::ValuesIn(generateHardSigmoidCombinedParams()), ReferenceHardSigmoidLayerTest::getTestCaseName);

} // namespace

@@ -0,0 +1,90 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include "openvino/op/hsigmoid.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;

namespace {
struct HSigmoidParams {
    template <class IT>
    HSigmoidParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues)
        : pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)) {}

    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
};

class ReferenceHSigmoidLayerTest : public testing::TestWithParam<HSigmoidParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<HSigmoidParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto HSigmoid = std::make_shared<op::v5::HSigmoid>(in);
        return std::make_shared<ov::Function>(NodeVector {HSigmoid}, ParameterVector {in});
    }
};

TEST_P(ReferenceHSigmoidLayerTest, CompareWithRefs) {
    Exec();
}
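
// HSigmoid computes y = min(max(x + 3, 0), 6) / 6, a piecewise-linear approximation of sigmoid.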

template <element::Type_t IN_ET>
std::vector<HSigmoidParams> generateHSigmoidFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<HSigmoidParams> hSigmoidParams {
        HSigmoidParams(ov::PartialShape {13},
                       IN_ET,
                       std::vector<T>{-10.f, -5.f, -4.f, -3.f, -2.f, -1.f, 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 10.f},
                       std::vector<T>{0.f, 0.f, 0.f, 0.f, 0.16666667f, 0.33333333f, 0.5f, 0.66666667f, 0.83333333f, 1.f, 1.f, 1.f, 1.f})
    };
    return hSigmoidParams;
}

std::vector<HSigmoidParams> generateHSigmoidCombinedParams() {
    const std::vector<std::vector<HSigmoidParams>> hSigmoidTypeParams {
        generateHSigmoidFloatParams<element::Type_t::f32>(),
        generateHSigmoidFloatParams<element::Type_t::f16>()
    };
    std::vector<HSigmoidParams> combinedParams;

    for (const auto& params : hSigmoidTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_HSigmoid_With_Hardcoded_Refs, ReferenceHSigmoidLayerTest,
                         testing::ValuesIn(generateHSigmoidCombinedParams()), ReferenceHSigmoidLayerTest::getTestCaseName);

} // namespace

@@ -0,0 +1,95 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include "openvino/op/hswish.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;
using namespace InferenceEngine;

namespace {
struct HSwishParams {
    template <class IT>
    HSwishParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues)
        : pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)) {}

    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
};

class ReferenceHSwishLayerTest : public testing::TestWithParam<HSwishParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<HSwishParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto HSwish = std::make_shared<op::v4::HSwish>(in);
        return std::make_shared<ov::Function>(NodeVector {HSwish}, ParameterVector {in});
    }
};

TEST_P(ReferenceHSwishLayerTest, CompareWithRefs) {
    Exec();
}
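
// HSwish computes y = x * min(max(x + 3, 0), 6) / 6, i.e. x * HSigmoid(x).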

template <element::Type_t IN_ET>
std::vector<HSwishParams> generateHSwishFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<HSwishParams> hSwishParams {
        HSwishParams(ov::PartialShape {2, 3},
                     IN_ET,
                     std::vector<T>{1.f, 8.f, -8.f, 17.f, -0.5f, -1.f},
                     std::vector<T>{0.66666667f, 8.f, 0.f, 17.f, -0.20833333f, -0.33333333f}),
        HSwishParams(ov::PartialShape {2, 2, 1, 2},
                     IN_ET,
                     std::vector<T>{0.1f, 0.6f, 20.f, -7.f, -5.3f, 3.5f, -9.f, 11.f},
                     std::vector<T>{0.05166667f, 0.36f, 20.f, 0.f, 0.f, 3.5f, 0.f, 11.f})
    };
    return hSwishParams;
}

std::vector<HSwishParams> generateHSwishCombinedParams() {
    const std::vector<std::vector<HSwishParams>> hSwishTypeParams {
        generateHSwishFloatParams<element::Type_t::f32>(),
        generateHSwishFloatParams<element::Type_t::f16>()
    };
    std::vector<HSwishParams> combinedParams;

    for (const auto& params : hSwishTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_HSwish_With_Hardcoded_Refs, ReferenceHSwishLayerTest,
                         testing::ValuesIn(generateHSwishCombinedParams()), ReferenceHSwishLayerTest::getTestCaseName);

} // namespace

@@ -0,0 +1,248 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include "openvino/op/log_softmax.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;
using namespace InferenceEngine;

namespace {
struct LogSoftmaxParams {
    template <class IT>
    LogSoftmaxParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues,
                     const int64_t axis)
        : axis(axis),
          pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)) {}

    int64_t axis = 0;

    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
};

class ReferenceLogSoftmaxLayerTest : public testing::TestWithParam<LogSoftmaxParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType, params.axis);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<LogSoftmaxParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType << "_";
        result << "axis=" << param.axis;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type, const int64_t axis) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto LogSoftmax = std::make_shared<op::v5::LogSoftmax>(in, axis);
        return std::make_shared<ov::Function>(NodeVector {LogSoftmax}, ParameterVector {in});
    }
};

TEST_P(ReferenceLogSoftmaxLayerTest, CompareWithRefs) {
    Exec();
}
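
// LogSoftmax computes y = x - log(sum(exp(x))) reduced along the given axis; negative axes
// count back from the last dimension. The {2, 4} cases with values near 10000 exercise the
// numerically stable (max-subtracting) formulation.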

template <element::Type_t IN_ET>
std::vector<LogSoftmaxParams> generateLogSoftmaxFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<LogSoftmaxParams> logSoftmaxParams {
        LogSoftmaxParams(ov::PartialShape {1},
                         IN_ET,
                         std::vector<T>{1},
                         std::vector<T>{0},
                         0),
        LogSoftmaxParams(ov::PartialShape {2, 4},
                         IN_ET,
                         std::vector<T>{0, 1, 2, 3, 10000, 10001, 10002, 10003},
                         std::vector<T>{-10000., -10000., -10000., -10000., 0., 0., 0., 0.},
                         0),
        LogSoftmaxParams(ov::PartialShape {2, 4},
                         IN_ET,
                         std::vector<T>{0, 1, 2, 3, 10000, 10001, 10002, 10003},
                         std::vector<T>{-3.4401896, -2.4401896, -1.4401897, -0.4401897, -3.4401896, -2.4401896, -1.4401897, -0.4401897},
                         1),
        LogSoftmaxParams(ov::PartialShape {2, 4},
                         IN_ET,
                         std::vector<T>{0, 1, 2, 3, 10000, 10001, 10002, 10003},
                         std::vector<T>{-3.4401896, -2.4401896, -1.4401897, -0.4401897, -3.4401896, -2.4401896, -1.4401897, -0.4401897},
                         -1),
        LogSoftmaxParams(ov::PartialShape {2, 4},
                         IN_ET,
                         std::vector<T>{0, 1, 2, 3, 10000, 10001, 10002, 10003},
                         std::vector<T>{-10000., -10000., -10000., -10000., 0., 0., 0., 0.},
                         -2),
        LogSoftmaxParams(ov::PartialShape {3, 2, 3},
                         IN_ET,
                         std::vector<T>{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8},
                         std::vector<T>{-12.0024818, -12.0024818, -12.0024818, -12.0024818, -12.0024818, -12.0024818,
                                        -6.00248181, -6.00248181, -6.00248181, -6.00248181, -6.00248181, -6.00248181,
                                        -2.48181414e-03, -2.48181414e-03, -2.48181414e-03, -2.48181414e-03, -2.48181414e-03, -2.48181414e-03},
                         0),
        LogSoftmaxParams(ov::PartialShape {3, 2, 3},
                         IN_ET,
                         std::vector<T>{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8},
                         std::vector<T>{-3.04858735, -3.04858735, -3.04858735, -0.04858735, -0.04858735, -0.04858735,
                                        -3.04858735, -3.04858735, -3.04858735, -0.04858735, -0.04858735, -0.04858735,
                                        -3.04858735, -3.04858735, -3.04858735, -0.04858735, -0.04858735, -0.04858735},
                         1),
        LogSoftmaxParams(ov::PartialShape {3, 2, 3},
                         IN_ET,
                         std::vector<T>{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8},
                         std::vector<T>{-2.40760596, -1.40760596, -0.40760596, -2.40760596, -1.40760596, -0.40760596,
                                        -2.40760596, -1.40760596, -0.40760596, -2.40760596, -1.40760596, -0.40760596,
                                        -2.40760596, -1.40760596, -0.40760596, -2.40760596, -1.40760596, -0.40760596},
                         2),
        LogSoftmaxParams(ov::PartialShape {3, 2, 3},
                         IN_ET,
                         std::vector<T>{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8},
                         std::vector<T>{-2.40760596, -1.40760596, -0.40760596, -2.40760596, -1.40760596, -0.40760596,
                                        -2.40760596, -1.40760596, -0.40760596, -2.40760596, -1.40760596, -0.40760596,
                                        -2.40760596, -1.40760596, -0.40760596, -2.40760596, -1.40760596, -0.40760596},
                         -1),
        LogSoftmaxParams(ov::PartialShape {3, 2, 3},
                         IN_ET,
                         std::vector<T>{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8},
                         std::vector<T>{-3.04858735, -3.04858735, -3.04858735, -0.04858735, -0.04858735, -0.04858735,
                                        -3.04858735, -3.04858735, -3.04858735, -0.04858735, -0.04858735, -0.04858735,
                                        -3.04858735, -3.04858735, -3.04858735, -0.04858735, -0.04858735, -0.04858735},
                         -2),
        LogSoftmaxParams(ov::PartialShape {3, 2, 3},
                         IN_ET,
                         std::vector<T>{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8},
                         std::vector<T>{-12.0024818, -12.0024818, -12.0024818, -12.0024818, -12.0024818, -12.0024818,
                                        -6.00248181, -6.00248181, -6.00248181, -6.00248181, -6.00248181, -6.00248181,
                                        -2.48181414e-03, -2.48181414e-03, -2.48181414e-03, -2.48181414e-03, -2.48181414e-03, -2.48181414e-03},
                         -3)
    };
    return logSoftmaxParams;
}

std::vector<LogSoftmaxParams> generateLogSoftmaxCombinedParams() {
    const std::vector<std::vector<LogSoftmaxParams>> logSoftmaxTypeParams {
        generateLogSoftmaxFloatParams<element::Type_t::f32>(),
        generateLogSoftmaxFloatParams<element::Type_t::f16>()
    };
    std::vector<LogSoftmaxParams> combinedParams;

    for (const auto& params : logSoftmaxTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_LogSoftmax_With_Hardcoded_Refs, ReferenceLogSoftmaxLayerTest,
                         testing::ValuesIn(generateLogSoftmaxCombinedParams()), ReferenceLogSoftmaxLayerTest::getTestCaseName);

} // namespace

docs/template_plugin/tests/functional/op_reference/mish.cpp (new file, 128 lines)
@@ -0,0 +1,128 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <random>
#include "openvino/op/mish.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;

namespace {
struct MishParams {
    template <class IT>
    MishParams(const ov::PartialShape& dynamicShape, const ov::Shape& inputShape,
               const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues,
               const std::string& test_name = "")
        : dynamicShape(dynamicShape),
          inputShape(inputShape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)),
          testcaseName(test_name) {}

    ov::PartialShape dynamicShape;
    ov::PartialShape inputShape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
    std::string testcaseName;
};

class ReferenceMishLayerTest : public testing::TestWithParam<MishParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.dynamicShape, params.inType);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<MishParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "dShape=" << param.dynamicShape << "_";
        result << "iShape=" << param.inputShape << "_";
        result << "iType=" << param.inType << "_";
        if (param.testcaseName != "") {
            result << "oType=" << param.outType << "_";
            result << param.testcaseName;
        } else {
            result << "oType=" << param.outType;
        }
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto Mish = std::make_shared<op::v4::Mish>(in);
        return std::make_shared<ov::Function>(NodeVector {Mish}, ParameterVector {in});
    }
};

TEST_P(ReferenceMishLayerTest, CompareWithRefs) {
    Exec();
}
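
// Mish computes y = x * tanh(ln(1 + exp(x))); the generator below derives the expected
// values from seeded random inputs using exactly this formula.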

template <element::Type_t IN_ET>
std::vector<MishParams> generateMishFloatParams(const PartialShape& dynamicShape, const Shape& staticShape, const std::string& test_name = "") {
    using T = typename element_type_traits<IN_ET>::value_type;

    // generate input tensor (with possible type conversion)
    auto staticSize = shape_size(staticShape);
    std::vector<T> expected;
    std::vector<T> input;
    {
        std::mt19937 gen{0}; // use fixed seed for reproducibility of the test
        std::normal_distribution<> d{0.0, 20.0};

        for (auto i = staticSize; i > 0; i--) {
            auto x = static_cast<T>(d(gen));
            auto y = static_cast<T>(static_cast<double>(x) * std::tanh(std::log(1.0 + std::exp(x))));
            input.push_back(x);
            expected.push_back(y);
        }
    }

    std::vector<MishParams> mishParams;

    if (test_name != "") {
        mishParams = {
            MishParams(dynamicShape, staticShape, IN_ET, input, expected, test_name)
        };
    } else {
        mishParams = {
            MishParams(dynamicShape, staticShape, IN_ET, input, expected)
        };
    }
    return mishParams;
}

std::vector<MishParams> generateMishCombinedParams() {
    const std::vector<std::vector<MishParams>> mishTypeParams {
        generateMishFloatParams<element::Type_t::f32>({2, 5}, {2, 5}),
        generateMishFloatParams<element::Type_t::f32>({2, 3, 4, 5}, {2, 3, 4, 5}),
        generateMishFloatParams<element::Type_t::f32>(PartialShape::dynamic(), {2, 3, 4, 5}),
        generateMishFloatParams<element::Type_t::f32>({2, Dimension::dynamic(), 4, 5}, {2, 3, 4, 5}, "dimensionDynamic"),
        generateMishFloatParams<element::Type_t::f16>({2, 5}, {2, 5}),
        generateMishFloatParams<element::Type_t::f16>({2, 3, 4, 5}, {2, 3, 4, 5}),
        generateMishFloatParams<element::Type_t::f16>(PartialShape::dynamic(), {2, 3, 4, 5}),
        generateMishFloatParams<element::Type_t::f16>({2, Dimension::dynamic(), 4, 5}, {2, 3, 4, 5}, "dimensionDynamic")
    };
    std::vector<MishParams> combinedParams;

    for (const auto& params : mishTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_Mish_With_Hardcoded_Refs, ReferenceMishLayerTest,
                         testing::ValuesIn(generateMishCombinedParams()), ReferenceMishLayerTest::getTestCaseName);

} // namespace

docs/template_plugin/tests/functional/op_reference/prelu.cpp (new file, 421 lines)
@@ -0,0 +1,421 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <ngraph_functions/builders.hpp>
#include "openvino/op/prelu.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;

namespace {
struct PreluParams {
    template <class IT>
    PreluParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues,
                const ov::Shape& slopeShape, const std::vector<IT>& negativeSlopeValues, const std::string& test_name = "")
        : pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)),
          negativeSlopeShape(slopeShape),
          negativeSlope(CreateTensor(iType, negativeSlopeValues)),
          testcaseName(test_name) {}

    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
    ov::Shape negativeSlopeShape;
    ov::runtime::Tensor negativeSlope;
    std::string testcaseName;
};

class ReferencePreluLayerTest : public testing::TestWithParam<PreluParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.negativeSlopeShape, params.inType);
        inputData = {params.inputData, params.negativeSlope};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<PreluParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType << "_";
        if (param.testcaseName != "") {
            result << "slopeShape=" << param.negativeSlopeShape << "_";
            result << param.testcaseName;
        } else {
            result << "slopeShape=" << param.negativeSlopeShape;
        }

        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const Shape& slope_shape, const element::Type& input_type) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto SLOPE = std::make_shared<op::v0::Parameter>(input_type, slope_shape);
        const auto Prelu = std::make_shared<op::v0::PRelu>(in, SLOPE);
        return std::make_shared<ov::Function>(NodeVector {Prelu}, ParameterVector {in, SLOPE});
    }
};

TEST_P(ReferencePreluLayerTest, CompareWithRefs) {
    Exec();
}
|
||||
|
||||
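// Reference semantics: PRelu(x) = x for x >= 0 and slope * x for x < 0, with the slope
// tensor broadcast against the input. The cases below exercise scalar, per-channel and
// full-shape slopes.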
template <element::Type_t IN_ET>
std::vector<PreluParams> generatePreluFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<PreluParams> preluParams {
        PreluParams(ov::PartialShape {6},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, 5, 6},
                    std::vector<T>{1, 2, -6, -8, 5, 6},
                    ov::Shape {1},
                    std::vector<T>{2}),
        PreluParams(ov::PartialShape {6},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, 5, 6},
                    std::vector<T>{1, 2, -12, -20, 5, 6},
                    ov::Shape {6},
                    std::vector<T>{2, 3, 4, 5, 6, 7}),
        PreluParams(ov::PartialShape {3, 2},
                    IN_ET,
                    std::vector<T>{-2, 3, -2, 1, -1, 0},
                    std::vector<T>{0, 3, 0, 1, 0, 0},
                    ov::Shape {2},
                    std::vector<T>{0, 1}),
        PreluParams(ov::PartialShape {2, 6},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, 5, 6, 7, 8, -9, -10, 11, 12},
                    std::vector<T>{1, 2, -9, -16, 5, 6, 7, 8, -27, -40, 11, 12},
                    ov::Shape {6},
                    std::vector<T>{1, 2, 3, 4, 5, 6}),
        PreluParams(ov::PartialShape {3, 2},
                    IN_ET,
                    std::vector<T>{-1, -1, -1, -1, -1, -1},
                    std::vector<T>{-2, -0.5, -2, -0.5, -2, -0.5},
                    ov::Shape {2},
                    std::vector<T>{2, 0.5},
                    "C_2_const"),
        PreluParams(ov::PartialShape {2, 2, 2},
                    IN_ET,
                    std::vector<T>{-0.5, -2, -3, -4, -5, -6, -7, -8},
                    std::vector<T>{0.25, 1, 6, 8, 2.5, 3, 14, 16},
                    ov::Shape {2},
                    std::vector<T>{-0.5, -2}),
        PreluParams(ov::PartialShape {3, 2},
                    IN_ET,
                    std::vector<T>{-2, 3, -2, 1, -1, 0},
                    std::vector<T>{1, 3, 1, 1, 0.5, 0},
                    ov::Shape {2},
                    std::vector<T>{-0.5, -1},
                    "negative_slope"),
        PreluParams(ov::PartialShape {2, 6},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, 5, 6, 1, 2, -3, -4, 5, 6},
                    std::vector<T>{1, 2, -6, -8, 5, 6, 1, 2, -12, -8, 5, 6},
                    ov::Shape {2, 6},
                    std::vector<T>{2, 2, 2, 2, 2, 2, 1, 1, 4, 2, 1, 1}),
        PreluParams(ov::PartialShape {2, 2, 2, 2},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4},
                    std::vector<T>{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16},
                    ov::Shape {2, 1, 2},
                    std::vector<T>{1, 2, 3, 4}),
        PreluParams(ov::PartialShape {2, 2, 2, 2},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4},
                    std::vector<T>{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16},
                    ov::Shape {1, 2, 1, 2},
                    std::vector<T>{1, 2, 3, 4}),
        PreluParams(ov::PartialShape {2, 2, 6},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, -5, 6, -1, -2, -3, -4, -5, -6, 1, 2, -3, -4, 5, 6, -2, 4, -6, -8, 10, 12},
                    std::vector<T>{1, 2, -9, -16, -5, 6, -2, -2, -9, -16, -5, -42, 1, 2, -9, -16, 5, 6, -2, 4, -18, -32, 10, 12},
                    ov::Shape {2, 1, 6},
                    std::vector<T>{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6}),
        PreluParams(ov::PartialShape {2, 3, 2},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, -5, 6, -1, -2, -3, -4, -5, -6},
                    std::vector<T>{1, 2, -9, -16, -5, 6, -1, -4, -9, -16, -25, -36},
                    ov::Shape {2, 3, 2},
                    std::vector<T>{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6}),
        PreluParams(ov::PartialShape {2, 1, 2},
                    IN_ET,
                    std::vector<T>{-10, -10, -10, -10},
                    std::vector<T>{-1, -100, -1, -100},
                    ov::Shape {2},
                    std::vector<T>{0.1, 10}),
        PreluParams(ov::PartialShape {1, 2, 1, 2},
                    IN_ET,
                    std::vector<T>{-10, -10, -10, -10},
                    std::vector<T>{-1, -1, -100, -100},
                    ov::Shape {2},
                    std::vector<T>{0.1, 10}),
        PreluParams(ov::PartialShape {1, 5, 1, 1},
                    IN_ET,
                    std::vector<T>{-1, 0, -1, -1, -1},
                    std::vector<T>{-1, 0, -3, -4, -5},
                    ov::Shape {5},
                    std::vector<T>{1, 2, 3, 4, 5}),
        PreluParams(ov::PartialShape {2, 3, 4, 5},
                    IN_ET,
                    std::vector<T>{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.},
                    std::vector<T>{-0., -1., -2., -3., -4., -5., -6., -7., -8., -9., -10., -11., -12., -13., -14.,
                                   -15., -16., -17., -18., -19., -20., -21., -22., -23., -24., -25., -26., -27., -28., -29.,
                                   -30., -31., -32., -33., -34., -35., -36., -37., -38., -39., -40., -41., -42., -43., -44.,
                                   -45., -46., -47., -48., -49., -50., -51., -52., -53., -54., -55., -56., -57., -58., -59.,
                                   -60., -61., -62., -63., -64., -65., -66., -67., -68., -69., -70., -71., -72., -73., -74.,
                                   -75., -76., -77., -78., -79., -80., -81., -82., -83., -84., -85., -86., -87., -88., -89.,
                                   -90., -91., -92., -93., -94., -95., -96., -97., -98., -99., -100., -101., -102., -103., -104.,
                                   -105., -106., -107., -108., -109., -110., -111., -112., -113., -114., -115., -116., -117., -118., -119.},
                    ov::Shape {2, 3, 4, 5},
                    std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                                   18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
                                   36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
                                   54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
                                   72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
                                   90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
                                   108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}),
        PreluParams(ov::PartialShape {2, 3, 4, 5},
                    IN_ET,
                    std::vector<T>{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.},
                    std::vector<T>{-0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2.,
                                   -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0.,
                                   -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3.,
                                   -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1.,
                                   -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4.,
                                   -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2.,
                                   -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4.},
                    ov::Shape {5},
                    std::vector<T>{0, 1, 2, 3, 4}),
        PreluParams(ov::PartialShape {2, 3, 4, 5},
                    IN_ET,
                    std::vector<T>{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.},
                    std::vector<T>{-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.,
                                   -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.,
                                   -2., -2., -2., -2., -2., -2., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.,
                                   -0., -0., -0., -0., -0., -0., -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2.,
                                   -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.},
                    ov::Shape {3},
                    std::vector<T>{0, 1, 2}),
        PreluParams(ov::PartialShape {2, 3, 4, 5},
                    IN_ET,
                    std::vector<T>{-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.},
                    std::vector<T>{-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.,
                                   -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.,
                                   -2., -2., -2., -2., -2., -2., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.,
                                   -0., -0., -0., -0., -0., -0., -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                                   -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2.,
                                   -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.},
                    ov::Shape {3, 1, 1},
                    std::vector<T>{0, 1, 2})
    };
    return preluParams;
}

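// Integral (i8) variant of the same broadcasting cases; inputs and slopes are chosen so
// every expected product stays exactly representable in int8_t.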
template <element::Type_t IN_ET>
std::vector<PreluParams> generatePreluI8Params() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<PreluParams> preluParams {
        PreluParams(ov::PartialShape {6},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, 5, 6},
                    std::vector<T>{1, 2, -6, -8, 5, 6},
                    ov::Shape {1},
                    std::vector<T>{2}),
        PreluParams(ov::PartialShape {6},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, 5, 6},
                    std::vector<T>{1, 2, -12, -20, 5, 6},
                    ov::Shape {6},
                    std::vector<T>{2, 3, 4, 5, 6, 7}),
        PreluParams(ov::PartialShape {3, 2},
                    IN_ET,
                    std::vector<T>{-2, 3, -2, 1, -1, 0},
                    std::vector<T>{0, 3, 0, 1, 0, 0},
                    ov::Shape {2},
                    std::vector<T>{0, 1}),
        PreluParams(ov::PartialShape {2, 6},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, 5, 6, 7, 8, -9, -10, 11, 12},
                    std::vector<T>{1, 2, -9, -16, 5, 6, 7, 8, -27, -40, 11, 12},
                    ov::Shape {6},
                    std::vector<T>{1, 2, 3, 4, 5, 6}),
        PreluParams(ov::PartialShape {2, 6},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, 5, 6, 1, 2, -3, -4, 5, 6},
                    std::vector<T>{1, 2, -6, -8, 5, 6, 1, 2, -12, -8, 5, 6},
                    ov::Shape {2, 6},
                    std::vector<T>{2, 2, 2, 2, 2, 2, 1, 1, 4, 2, 1, 1}),
        PreluParams(ov::PartialShape {2, 2, 2, 2},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4},
                    std::vector<T>{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16},
                    ov::Shape {2, 1, 2},
                    std::vector<T>{1, 2, 3, 4}),
        PreluParams(ov::PartialShape {2, 2, 2, 2},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4},
                    std::vector<T>{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16},
                    ov::Shape {1, 2, 1, 2},
                    std::vector<T>{1, 2, 3, 4}),
        PreluParams(ov::PartialShape {2, 2, 6},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, -5, 6, -1, -2, -3, -4, -5, -6, 1, 2, -3, -4, 5, 6, -2, 4, -6, -8, 10, 12},
                    std::vector<T>{1, 2, -9, -16, -5, 6, -2, -2, -9, -16, -5, -42, 1, 2, -9, -16, 5, 6, -2, 4, -18, -32, 10, 12},
                    ov::Shape {2, 1, 6},
                    std::vector<T>{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6}),
        PreluParams(ov::PartialShape {2, 3, 2},
                    IN_ET,
                    std::vector<T>{1, 2, -3, -4, -5, 6, -1, -2, -3, -4, -5, -6},
                    std::vector<T>{1, 2, -9, -16, -5, 6, -1, -4, -9, -16, -25, -36},
                    ov::Shape {2, 3, 2},
                    std::vector<T>{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6}),
        PreluParams(ov::PartialShape {1, 5, 1, 1},
                    IN_ET,
                    std::vector<T>{-1, 0, -1, -1, -1},
                    std::vector<T>{-1, 0, -3, -4, -5},
                    ov::Shape {5},
                    std::vector<T>{1, 2, 3, 4, 5}),
        PreluParams(ov::PartialShape {2, 3, 4, 5},
                    IN_ET,
                    std::vector<T>{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
                    std::vector<T>{-0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14,
                                   -15, -16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27, -28, -29,
                                   -30, -31, -32, -33, -34, -35, -36, -37, -38, -39, -40, -41, -42, -43, -44,
                                   -45, -46, -47, -48, -49, -50, -51, -52, -53, -54, -55, -56, -57, -58, -59,
                                   -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -74,
                                   -75, -76, -77, -78, -79, -80, -81, -82, -83, -84, -85, -86, -87, -88, -89,
                                   -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104,
                                   -105, -106, -107, -108, -109, -110, -111, -112, -113, -114, -115, -116, -117, -118, -119},
                    ov::Shape {2, 3, 4, 5},
                    std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                                   18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
                                   36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
                                   54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
                                   72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
                                   90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
                                   108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}),
        PreluParams(ov::PartialShape {2, 3, 4, 5},
                    IN_ET,
                    std::vector<T>{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
                    std::vector<T>{-0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2,
                                   -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0,
                                   -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3,
                                   -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1,
                                   -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4,
                                   -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4, -0, -1, -2,
                                   -3, -4, -0, -1, -2, -3, -4, -0, -1, -2, -3, -4},
                    ov::Shape {5},
                    std::vector<T>{0, 1, 2, 3, 4}),
        PreluParams(ov::PartialShape {2, 3, 4, 5},
                    IN_ET,
                    std::vector<T>{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
                    std::vector<T>{-0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0,
                                   -0, -0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
                                   -2, -2, -2, -2, -2, -2, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0,
                                   -0, -0, -0, -0, -0, -0, -0, -0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2, -2,
                                   -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2},
                    ov::Shape {3},
                    std::vector<T>{0, 1, 2}),
        PreluParams(ov::PartialShape {2, 3, 4, 5},
                    IN_ET,
                    std::vector<T>{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
                    std::vector<T>{-0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0,
                                   -0, -0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
                                   -2, -2, -2, -2, -2, -2, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0,
                                   -0, -0, -0, -0, -0, -0, -0, -0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2, -2,
                                   -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2},
                    ov::Shape {3, 1, 1},
                    std::vector<T>{0, 1, 2})
    };
    return preluParams;
}

std::vector<PreluParams> generatePreluCombinedParams() {
    const std::vector<std::vector<PreluParams>> preluTypeParams {
        generatePreluFloatParams<element::Type_t::f32>(),
        generatePreluFloatParams<element::Type_t::f16>(),
        generatePreluFloatParams<element::Type_t::bf16>(),
        generatePreluI8Params<element::Type_t::i8>()
    };
    std::vector<PreluParams> combinedParams;

    for (const auto& params : preluTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_Prelu_With_Hardcoded_Refs, ReferencePreluLayerTest,
    testing::ValuesIn(generatePreluCombinedParams()), ReferencePreluLayerTest::getTestCaseName);

} // namespace
124
docs/template_plugin/tests/functional/op_reference/relu.cpp
Normal file
@ -0,0 +1,124 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include "openvino/op/relu.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;

namespace {
struct ReluParams {
    template <class IT>
    ReluParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues)
        : pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)) {}

    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
};

class ReferenceReluLayerTest : public testing::TestWithParam<ReluParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<ReluParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto Relu = std::make_shared<op::v0::Relu>(in);
        return std::make_shared<ov::Function>(NodeVector {Relu}, ParameterVector {in});
    }
};

TEST_P(ReferenceReluLayerTest, CompareWithRefs) {
    Exec();
}

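// Relu(x) = max(x, 0): negative inputs clamp to zero, non-negative inputs pass through
// unchanged, which is why the expected vectors simply zero out the negative entries.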
template <element::Type_t IN_ET>
std::vector<ReluParams> generateReluFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<ReluParams> reluParams {
        ReluParams(ov::PartialShape {2, 5},
                   IN_ET,
                   std::vector<T>{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5},
                   std::vector<T>{1, 8, 0, 17, 0, 1, 8, 0, 17, 0}),
        ReluParams(ov::PartialShape {2, 2, 2, 2},
                   IN_ET,
                   std::vector<T>{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1},
                   std::vector<T>{1, 8, 0, 17, 0, 1, 8, 0, 17, 0, 1, 8, 0, 17, 0, 1})
    };
    return reluParams;
}

template <element::Type_t IN_ET>
std::vector<ReluParams> generateReluIntParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<ReluParams> reluParams {
        ReluParams(ov::PartialShape {2, 5},
                   IN_ET,
                   std::vector<T>{1, 8, -8, 17, -2, 1, 8, -8, 17, -1},
                   std::vector<T>{1, 8, 0, 17, 0, 1, 8, 0, 17, 0})
    };
    return reluParams;
}

template <element::Type_t IN_ET>
std::vector<ReluParams> generateReluUintParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<ReluParams> reluParams {
        ReluParams(ov::PartialShape {2, 5},
                   IN_ET,
                   std::vector<T>{1, 8, 17, 1, 8, 17, 1, 8, 17, 0},
                   std::vector<T>{1, 8, 17, 1, 8, 17, 1, 8, 17, 0})
    };
    return reluParams;
}

std::vector<ReluParams> generateReluCombinedParams() {
    const std::vector<std::vector<ReluParams>> reluTypeParams {
        generateReluFloatParams<element::Type_t::f32>(),
        generateReluFloatParams<element::Type_t::f16>(),
        generateReluIntParams<element::Type_t::i64>(),
        generateReluIntParams<element::Type_t::i32>(),
        generateReluUintParams<element::Type_t::u64>(),
        generateReluUintParams<element::Type_t::u32>()
    };
    std::vector<ReluParams> combinedParams;

    for (const auto& params : reluTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_Relu_With_Hardcoded_Refs, ReferenceReluLayerTest,
    testing::ValuesIn(generateReluCombinedParams()), ReferenceReluLayerTest::getTestCaseName);

} // namespace
140
docs/template_plugin/tests/functional/op_reference/selu.cpp
Normal file
@ -0,0 +1,140 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include "openvino/op/selu.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;

namespace {
struct SeluParams {
    template <class IT>
    SeluParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues,
               const ov::Shape& alphaShape, const ov::Shape& lambdaShape,
               const std::vector<IT>& alphaValues, const std::vector<IT>& lambdaValues,
               const std::string& test_name = "")
        : pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)),
          alphaShape(alphaShape),
          lambdaShape(lambdaShape),
          alpha(CreateTensor(iType, alphaValues)),
          lambda(CreateTensor(iType, lambdaValues)),
          testcaseName(test_name) {}

    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
    ov::Shape alphaShape;
    ov::Shape lambdaShape;
    ov::runtime::Tensor alpha;
    ov::runtime::Tensor lambda;
    std::string testcaseName;
};

class ReferenceSeluLayerTest : public testing::TestWithParam<SeluParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params);
        inputData = {params.inputData, params.alpha, params.lambda};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<SeluParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType << "_";
        result << "alpha=" << param.alpha.data() << "_";
        if (param.testcaseName != "") {
            result << "lambda=" << param.lambda.data() << "_";
            result << param.testcaseName;
        } else {
            result << "lambda=" << param.lambda.data();
        }

        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const SeluParams& params) {
        const auto in = std::make_shared<op::v0::Parameter>(params.inType, params.pshape);
        const auto alpha = std::make_shared<op::v0::Parameter>(params.inType, params.alphaShape);
        const auto lambda = std::make_shared<op::v0::Parameter>(params.inType, params.lambdaShape);
        const auto Selu = std::make_shared<op::v0::Selu>(in, alpha, lambda);
        return std::make_shared<ov::Function>(NodeVector {Selu}, ParameterVector {in, alpha, lambda});
    }
};

TEST_P(ReferenceSeluLayerTest, CompareWithRefs) {
    Exec();
}

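// Selu(x) = lambda * x for x > 0 and lambda * alpha * (exp(x) - 1) otherwise. The
// hardcoded alpha ~= 1.67326 and lambda ~= 1.05070 are the standard SELU constants,
// e.g. Selu(-1) = 1.0507 * 1.6733 * (exp(-1) - 1) ~= -1.1113 as in the first case.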
template <element::Type_t IN_ET>
std::vector<SeluParams> generateSeluFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<SeluParams> seluParams {
        SeluParams(ov::PartialShape {2},
                   IN_ET,
                   std::vector<T>{-1, 3},
                   std::vector<T>{-1.1113307, 3.152103},
                   ov::Shape {1},
                   ov::Shape {1},
                   std::vector<T>{1.67326324},
                   std::vector<T>{1.05070098}),
        SeluParams(ov::PartialShape {4},
                   IN_ET,
                   std::vector<T>{-1.0, 0.0, 1.0, 2.0},
                   std::vector<T>{-1.1113307, 0., 1.050701, 2.101402},
                   ov::Shape {1},
                   ov::Shape {1},
                   std::vector<T>{1.67326324},
                   std::vector<T>{1.05070098}),
        SeluParams(ov::PartialShape {1},
                   IN_ET,
                   std::vector<T>{112.0},
                   std::vector<T>{117.67851},
                   ov::Shape {1},
                   ov::Shape {1},
                   std::vector<T>{1.67326324},
                   std::vector<T>{1.05070098}),
        SeluParams(ov::PartialShape {3},
                   IN_ET,
                   std::vector<T>{-3.0, -12.5, -7.0},
                   std::vector<T>{-1.6705687, -1.7580928, -1.7564961},
                   ov::Shape {1},
                   ov::Shape {1},
                   std::vector<T>{1.67326324},
                   std::vector<T>{1.05070098})
    };
    return seluParams;
}

std::vector<SeluParams> generateSeluCombinedParams() {
    const std::vector<std::vector<SeluParams>> seluTypeParams {
        generateSeluFloatParams<element::Type_t::f32>(),
        generateSeluFloatParams<element::Type_t::f16>(),
        generateSeluFloatParams<element::Type_t::bf16>()
    };
    std::vector<SeluParams> combinedParams;

    for (const auto& params : seluTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_Selu_With_Hardcoded_Refs, ReferenceSeluLayerTest,
    testing::ValuesIn(generateSeluCombinedParams()), ReferenceSeluLayerTest::getTestCaseName);

} // namespace
137
docs/template_plugin/tests/functional/op_reference/sigmoid.cpp
Normal file
@ -0,0 +1,137 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include "openvino/op/sigmoid.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;

namespace {
struct SigmoidParams {
    template <class IT>
    SigmoidParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues)
        : pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)) {}

    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
};

class ReferenceSigmoidLayerTest : public testing::TestWithParam<SigmoidParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<SigmoidParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto Sigmoid = std::make_shared<op::v0::Sigmoid>(in);
        return std::make_shared<ov::Function>(NodeVector {Sigmoid}, ParameterVector {in});
    }
};

TEST_P(ReferenceSigmoidLayerTest, CompareWithRefs) {
    Exec();
}

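// Sigmoid(x) = 1 / (1 + exp(-x)). The float cases compare against values computed with
// the same formula here; the integral cases below expect the result rounded to 0 or 1.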
template <element::Type_t IN_ET>
std::vector<SigmoidParams> generateSigmoidFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    float x1 = 1.0f;
    float x2 = 4.0f;
    float sigma1 = 1.0f / (1.0f + std::exp(-x1));
    float sigma2 = 1.0f / (1.0f + std::exp(-x2));

    std::vector<SigmoidParams> sigmoidParams {
        SigmoidParams(ov::PartialShape {1, 1, 2, 2},
                      IN_ET,
                      std::vector<T>{x1, x2, x1, x2},
                      std::vector<T>{sigma1, sigma2, sigma1, sigma2}),
        SigmoidParams(ov::PartialShape {1, 1, 4},
                      IN_ET,
                      std::vector<T>{x1, x2, x1, x2},
                      std::vector<T>{sigma1, sigma2, sigma1, sigma2})
    };
    return sigmoidParams;
}

template <element::Type_t IN_ET>
std::vector<SigmoidParams> generateSigmoidIntParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<SigmoidParams> sigmoidParams {
        SigmoidParams(ov::PartialShape {1, 1, 2, 2},
                      IN_ET,
                      std::vector<T>{1, 4, -1, -4},
                      std::vector<T>{1, 1, 0, 0}),
        SigmoidParams(ov::PartialShape {1, 1, 4},
                      IN_ET,
                      std::vector<T>{1, 4, -1, -4},
                      std::vector<T>{1, 1, 0, 0})
    };
    return sigmoidParams;
}

template <element::Type_t IN_ET>
std::vector<SigmoidParams> generateSigmoidUintParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<SigmoidParams> sigmoidParams {
        SigmoidParams(ov::PartialShape {1, 1, 2, 2},
                      IN_ET,
                      std::vector<T>{1, 4, 1, 4},
                      std::vector<T>{1, 1, 1, 1}),
        SigmoidParams(ov::PartialShape {1, 1, 4},
                      IN_ET,
                      std::vector<T>{1, 4, 1, 4},
                      std::vector<T>{1, 1, 1, 1})
    };
    return sigmoidParams;
}

std::vector<SigmoidParams> generateSigmoidCombinedParams() {
    const std::vector<std::vector<SigmoidParams>> sigmoidTypeParams {
        generateSigmoidFloatParams<element::Type_t::f32>(),
        generateSigmoidFloatParams<element::Type_t::f16>(),
        generateSigmoidIntParams<element::Type_t::i64>(),
        generateSigmoidIntParams<element::Type_t::i32>(),
        generateSigmoidUintParams<element::Type_t::u64>(),
        generateSigmoidUintParams<element::Type_t::u32>()
    };
    std::vector<SigmoidParams> combinedParams;

    for (const auto& params : sigmoidTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_Sigmoid_With_Hardcoded_Refs, ReferenceSigmoidLayerTest,
    testing::ValuesIn(generateSigmoidCombinedParams()), ReferenceSigmoidLayerTest::getTestCaseName);

} // namespace
191
docs/template_plugin/tests/functional/op_reference/softmax.cpp
Normal file
@ -0,0 +1,191 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include "openvino/op/softmax.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;

namespace {
struct SoftmaxParams {
    template <class IT>
    SoftmaxParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues,
                  const int64_t axis, const std::string& test_name)
        : axis(axis),
          pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)),
          test_case_name(test_name) {}

    int64_t axis = 0;

    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
    std::string test_case_name;
};

class ReferenceSoftmaxLayerTest : public testing::TestWithParam<SoftmaxParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType, params.axis);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<SoftmaxParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType << "_";
        if (param.test_case_name != "") {
            result << "axis=" << param.axis << "_";
            result << param.test_case_name;
        } else {
            result << "axis=" << param.axis;
        }
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type, const int64_t axis) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto Softmax = std::make_shared<op::v1::Softmax>(in, axis);
        return std::make_shared<ov::Function>(NodeVector {Softmax}, ParameterVector {in});
    }
};

TEST_P(ReferenceSoftmaxLayerTest, CompareWithRefs) {
    Exec();
}

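// Softmax normalizes exp(x) along the chosen axis: out_i = exp(x_i) / sum_j exp(x_j).
// The d* values precompute the per-slice denominators, and the "underflow"/"overflow"
// cases probe behaviour at the numeric limits of the element type.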
template <element::Type_t IN_ET>
std::vector<SoftmaxParams> generateSoftmaxFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    auto d0 = expf(-10) + expf(-1);
    auto d1 = expf(-20) + expf(-2);
    auto d2 = expf(-30) + expf(-3);
    auto d3 = expf(-40) + expf(-4);
    auto d4 = expf(-50) + expf(-5);
    auto d5 = expf(-60) + expf(-6);

    auto d0_a1 = expf(-10) + expf(-20) + expf(-30);
    auto d1_a1 = expf(-40) + expf(-50) + expf(-60);

    auto d0_a0 = expf(-10) + expf(-40);
    auto d1_a0 = expf(-20) + expf(-50);
    auto d2_a0 = expf(-30) + expf(-60);

    auto low = std::numeric_limits<T>::lowest();
    auto high = std::numeric_limits<T>::max();

    auto d0_uf = expf(low) + expf(3);
    auto d1_uf = expf(1) + expf(4);
    auto d2_uf = expf(2) + expf(5);

    auto d0_of = expf(high - high) + expf(3 - high);
    auto d1_of = expf(1) + expf(4);
    auto d2_of = expf(2) + expf(5);

    std::vector<SoftmaxParams> softmaxParams {
        SoftmaxParams(ov::PartialShape {2, 2, 3},
                      IN_ET,
                      std::vector<T>{-10, -20, -30, -40, -50, -60, -1, -2, -3, -4, -5, -6},
                      std::vector<T>{expf(-10) / d0,
                                     expf(-20) / d1,
                                     expf(-30) / d2,
                                     expf(-40) / d3,
                                     expf(-50) / d4,
                                     expf(-60) / d5,
                                     expf(-1) / d0,
                                     expf(-2) / d1,
                                     expf(-3) / d2,
                                     expf(-4) / d3,
                                     expf(-5) / d4,
                                     expf(-6) / d5},
                      0,
                      ""),
        SoftmaxParams(ov::PartialShape {2, 3},
                      IN_ET,
                      std::vector<T>{-10, -20, -30, -40, -50, -60},
                      std::vector<T>{expf(-10) / d0_a1,
                                     expf(-20) / d0_a1,
                                     expf(-30) / d0_a1,
                                     expf(-40) / d1_a1,
                                     expf(-50) / d1_a1,
                                     expf(-60) / d1_a1},
                      1,
                      ""),
        SoftmaxParams(ov::PartialShape {2, 3},
                      IN_ET,
                      std::vector<T>{-10, -20, -30, -40, -50, -60},
                      std::vector<T>{expf(-10) / d0_a0,
                                     expf(-20) / d1_a0,
                                     expf(-30) / d2_a0,
                                     expf(-40) / d0_a0,
                                     expf(-50) / d1_a0,
                                     expf(-60) / d2_a0},
                      0,
                      "test"),
        SoftmaxParams(ov::PartialShape {1, 2, 3},
                      IN_ET,
                      std::vector<T>{-10, -20, -30, -40, -50, -60},
                      std::vector<T>{1, 1, 1, 1, 1, 1},
                      0,
                      "trivial"),
        SoftmaxParams(ov::PartialShape {2, 3},
                      IN_ET,
                      std::vector<T>{low, 1, 2, 3, 4, 5},
                      std::vector<T>{expf(low) / d0_uf,
                                     expf(1) / d1_uf,
                                     expf(2) / d2_uf,
                                     expf(3) / d0_uf,
                                     expf(4) / d1_uf,
                                     expf(5) / d2_uf},
                      0,
                      "underflow"),
        SoftmaxParams(ov::PartialShape {2, 3},
                      IN_ET,
                      std::vector<T>{high, 1, 2, 3, 4, 5},
                      std::vector<T>{expf(high - high) / d0_of,
                                     expf(1) / d1_of,
                                     expf(2) / d2_of,
                                     expf(3 - high) / d0_of,
                                     expf(4) / d1_of,
                                     expf(5) / d2_of},
                      0,
                      "overflow")
    };
    return softmaxParams;
}

std::vector<SoftmaxParams> generateSoftmaxCombinedParams() {
    const std::vector<std::vector<SoftmaxParams>> softmaxTypeParams {
        generateSoftmaxFloatParams<element::Type_t::f64>(),
        generateSoftmaxFloatParams<element::Type_t::f32>(),
        generateSoftmaxFloatParams<element::Type_t::f16>(),
        generateSoftmaxFloatParams<element::Type_t::bf16>()
    };
    std::vector<SoftmaxParams> combinedParams;

    for (const auto& params : softmaxTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_Softmax_With_Hardcoded_Refs, ReferenceSoftmaxLayerTest,
    testing::ValuesIn(generateSoftmaxCombinedParams()), ReferenceSoftmaxLayerTest::getTestCaseName);

} // namespace
90
docs/template_plugin/tests/functional/op_reference/softplus.cpp
Normal file
@ -0,0 +1,90 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include "openvino/op/softplus.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;

namespace {
struct SoftPlusParams {
    template <class IT>
    SoftPlusParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues)
        : pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          refData(CreateTensor(iType, oValues)) {}

    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
};

class ReferenceSoftPlusLayerTest : public testing::TestWithParam<SoftPlusParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<SoftPlusParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        const auto SoftPlus = std::make_shared<op::v4::SoftPlus>(in);
        return std::make_shared<ov::Function>(NodeVector {SoftPlus}, ParameterVector {in});
    }
};

TEST_P(ReferenceSoftPlusLayerTest, CompareWithRefs) {
    Exec();
}

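// SoftPlus(x) = ln(1 + exp(x)); for large inputs it approaches x itself, which is why
// the expected value for 20.0 is 20.0 to within float precision.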
template <element::Type_t IN_ET>
std::vector<SoftPlusParams> generateSoftPlusFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<SoftPlusParams> softPlusParams {
        SoftPlusParams(ov::PartialShape {4},
                       IN_ET,
                       std::vector<T>{-1.0, 0.0, 1.0, 20.0},
                       std::vector<T>{0.31326166, 0.69314718, 1.3132616, 20.0})
    };
    return softPlusParams;
}

std::vector<SoftPlusParams> generateSoftPlusCombinedParams() {
    const std::vector<std::vector<SoftPlusParams>> softPlusTypeParams {
        generateSoftPlusFloatParams<element::Type_t::f32>(),
        generateSoftPlusFloatParams<element::Type_t::f16>(),
        generateSoftPlusFloatParams<element::Type_t::bf16>()
    };
    std::vector<SoftPlusParams> combinedParams;

    for (const auto& params : softPlusTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_SoftPlus_With_Hardcoded_Refs, ReferenceSoftPlusLayerTest,
    testing::ValuesIn(generateSoftPlusCombinedParams()), ReferenceSoftPlusLayerTest::getTestCaseName);

} // namespace
133
docs/template_plugin/tests/functional/op_reference/swish.cpp
Normal file
@ -0,0 +1,133 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include "openvino/op/swish.hpp"
#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ov;

namespace {
struct SwishParams {
    template <class IT>
    SwishParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues,
                const float beta = 1)
        : pshape(shape),
          inType(iType),
          outType(iType),
          inputData(CreateTensor(iType, iValues)),
          beta(beta) {
        std::vector<IT> oValues;
        std::vector<float> output;
        std::vector<IT> betaVector;

        for (auto element : iValues)
            output.push_back(static_cast<float>(element));

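        // Swish(x, beta) = x * sigmoid(beta * x); the reference outputs are derived from
        // the inputs with this formula rather than being hardcoded.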
        std::transform(output.begin(), output.end(), output.begin(), [&beta](float x) -> float {
            return (x / (1.0f + std::exp(x * beta * -1.0f)));
        });

        for (auto element : output)
            oValues.push_back(static_cast<IT>(element));
        refData = CreateTensor(outType, oValues);

        betaVector.push_back(static_cast<IT>(beta));
        betaBlob = CreateTensor(inType, betaVector);
    }

    ov::PartialShape pshape;
    ov::element::Type inType;
    ov::element::Type outType;
    ov::runtime::Tensor inputData;
    ov::runtime::Tensor refData;
    ov::runtime::Tensor betaBlob;

    float beta;
};

class ReferenceSwishLayerTest : public testing::TestWithParam<SwishParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        threshold = 0.06; // 0.01 failed in fp32 test

        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType, params.beta);
        if (params.beta != 1) {
            inputData = {params.inputData, params.betaBlob};
            refOutData = {params.refData};
        } else {
            inputData = {params.inputData};
            refOutData = {params.refData};
        }
    }

    static std::string getTestCaseName(const testing::TestParamInfo<SwishParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType << "_";
        result << "beta=" << param.beta;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type, const float beta) {
        const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
        if (beta != 1) {
            const auto BETA = std::make_shared<op::v0::Parameter>(input_type, Shape {});
            const auto Swish = std::make_shared<op::v4::Swish>(in, BETA);
            return std::make_shared<Function>(NodeVector {Swish}, ParameterVector {in, BETA});
        } else {
            const auto Swish = std::make_shared<op::v4::Swish>(in);
            return std::make_shared<ov::Function>(NodeVector {Swish}, ParameterVector {in});
        }
    }
};

TEST_P(ReferenceSwishLayerTest, CompareWithRefs) {
    Exec();
}

template <element::Type_t IN_ET>
std::vector<SwishParams> generateSwishFloatParams() {
    using T = typename element_type_traits<IN_ET>::value_type;

    std::vector<SwishParams> swishParams {
        SwishParams(ov::PartialShape {2, 4},
                    IN_ET,
                    std::vector<T>{0.4, -5.7, -6, 3, -0.9, 23, 5, 3.3},
                    0.6f),
        SwishParams(ov::PartialShape {2, 3},
                    IN_ET,
                    std::vector<T>{1, 8, -8, 17, -0.5, -1}),
        SwishParams(ov::PartialShape {2, 2, 1, 2},
                    IN_ET,
                    std::vector<T>{0.1, 0.6, 20, -7, -5.3, 3.5, -9, 11},
                    0.33f)
    };
    return swishParams;
}

std::vector<SwishParams> generateSwishCombinedParams() {
    const std::vector<std::vector<SwishParams>> swishTypeParams {
        generateSwishFloatParams<element::Type_t::f32>(),
        generateSwishFloatParams<element::Type_t::f16>()
    };
    std::vector<SwishParams> combinedParams;

    for (const auto& params : swishTypeParams) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    }
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_Swish_With_Hardcoded_Refs, ReferenceSwishLayerTest,
    testing::ValuesIn(generateSwishCombinedParams()), ReferenceSwishLayerTest::getTestCaseName);

} // namespace
@ -8,7 +8,7 @@
#include "functional_test_utils/skip_tests_config.hpp"

std::vector<std::string> disabledTestPatterns() {
    return {
    std::vector<std::string> retVector{
        // CVS-66280
        R"(.*canLoadCorrectNetworkAndCheckConfig.*)",
        R"(.*canSetCorrectConfigLoadNetworkAndCheckConfig.*)",
@ -29,5 +29,16 @@ std::vector<std::string> disabledTestPatterns() {

        // TODO: Round with f16 is not supported
        R"(.*smoke_Hetero_BehaviorTests.*OVExecNetwork.*readFromV10IR.*)",

        // CVS-64094
        R"(.*ReferenceLogSoftmaxLayerTest.*4.*iType=f16.*axis=.*1.*)",
        // CVS-64080
        R"(.*ReferenceMishLayerTest.*dimensionDynamic.*)",
    };

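    // Building the list in a named vector (instead of returning a braced initializer
    // directly) lets the platform-specific patterns below be appended conditionally.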
#ifdef _WIN32
    // CVS-63989
    retVector.emplace_back(R"(.*ReferenceSigmoidLayerTest.*u64.*)");
#endif
    return retVector;
}
@ -44,7 +44,6 @@ bool evaluate_exp(const HostTensorPtr& arg0, const HostTensorPtr& out) {
    out->set_unary(arg0);

    switch (arg0->get_element_type()) {
        NGRAPH_TYPE_CASE(evaluate_exp, boolean, arg0, out, count);
        NGRAPH_TYPE_CASE(evaluate_exp, i32, arg0, out, count);
        NGRAPH_TYPE_CASE(evaluate_exp, i64, arg0, out, count);
        NGRAPH_TYPE_CASE(evaluate_exp, u32, arg0, out, count);
@ -68,7 +67,6 @@ bool op::Exp::evaluate(const HostTensorVector& outputs, const HostTensorVector&
bool op::Exp::has_evaluate() const {
    NGRAPH_OP_SCOPE(v0_Exp_has_evaluate);
    switch (get_input_element_type(0)) {
    case ngraph::element::boolean:
    case ngraph::element::i32:
    case ngraph::element::i64:
    case ngraph::element::u32:
@ -41,7 +41,6 @@ bool evaluate_sigmoid(const HostTensorPtr& arg0, const HostTensorPtr& out) {
    out->set_unary(arg0);

    switch (arg0->get_element_type()) {
        NGRAPH_TYPE_CASE(evaluate_sigmoid, boolean, arg0, out, count);
        NGRAPH_TYPE_CASE(evaluate_sigmoid, i32, arg0, out, count);
        NGRAPH_TYPE_CASE(evaluate_sigmoid, i64, arg0, out, count);
        NGRAPH_TYPE_CASE(evaluate_sigmoid, u32, arg0, out, count);
@ -65,7 +64,6 @@ bool ov::op::v0::Sigmoid::evaluate(const HostTensorVector& outputs, const HostTe
bool ov::op::v0::Sigmoid::has_evaluate() const {
    NGRAPH_OP_SCOPE(v0_Sigmoid_has_evaluate);
    switch (get_input_element_type(0)) {
    case ngraph::element::boolean:
    case ngraph::element::i32:
    case ngraph::element::i64:
    case ngraph::element::u32:
@ -210,6 +210,7 @@ set(SRC
    type_prop/reduce_min.cpp
    type_prop/reduce_prod.cpp
    type_prop/reduce_sum.cpp
    type_prop/relu.cpp
    type_prop/reorg_yolo.cpp
    type_prop/reshape.cpp
    type_prop/result.cpp
@ -229,6 +230,7 @@ set(SRC
    type_prop/selu.cpp
    type_prop/shape_of.cpp
    type_prop/shuffle_channels.cpp
    type_prop/sigmoid.cpp
    type_prop/sign.cpp
    type_prop/sin.cpp
    type_prop/sinh.cpp
@ -269,6 +271,7 @@ set(SRC
    visitors/op/broadcast.cpp
    visitors/op/bucketize.cpp
    visitors/op/ceiling.cpp
    visitors/op/clamp.cpp
    visitors/op/constant.cpp
    visitors/op/convert.cpp
    visitors/op/convert_color_nv12.cpp
@ -286,6 +289,7 @@ set(SRC
    visitors/op/elu.cpp
    visitors/op/equal.cpp
    visitors/op/erf.cpp
    visitors/op/exp.cpp
    visitors/op/extractimagepatches.cpp
    visitors/op/fake_quantize.cpp
    visitors/op/floor_mod.cpp
@ -297,11 +301,15 @@ set(SRC
    visitors/op/greater.cpp
    visitors/op/grn.cpp
    visitors/op/group_conv.cpp
    visitors/op/hard_sigmoid.cpp
    visitors/op/hsigmoid.cpp
    visitors/op/hswish.cpp
    visitors/op/interpolate.cpp
    visitors/op/if.cpp
    visitors/op/less_equal.cpp
    visitors/op/less.cpp
    visitors/op/log.cpp
    visitors/op/log_softmax.cpp
    visitors/op/logical_and.cpp
    visitors/op/logical_or.cpp
    visitors/op/logical_not.cpp
@ -328,6 +336,7 @@ set(SRC
    visitors/op/pad.cpp
    visitors/op/parameter.cpp
    visitors/op/power.cpp
    visitors/op/prelu.cpp
    visitors/op/prior_box.cpp
    visitors/op/prior_box_clustered.cpp
    visitors/op/proposal.cpp
@ -343,6 +352,7 @@ set(SRC
    visitors/op/reduce_prod.cpp
    visitors/op/reduce_sum.cpp
    visitors/op/region_yolo.cpp
    visitors/op/relu.cpp
    visitors/op/reorg_yolo.cpp
    visitors/op/reshape.cpp
    visitors/op/result.cpp
@ -358,6 +368,7 @@ set(SRC
    visitors/op/space_to_depth.cpp
    visitors/op/selu.cpp
    visitors/op/shuffle_channels.cpp
    visitors/op/sigmoid.cpp
    visitors/op/sign.cpp
    visitors/op/sin.cpp
    visitors/op/sinh.cpp
@ -455,7 +466,6 @@ set(MULTI_TEST_SRC
    backend/constant.in.cpp
    backend/convolution_backprop.in.cpp
    backend/binary_convolution.in.cpp
    backend/clamp.in.cpp
    backend/ctc_greedy_decoder.in.cpp
    backend/ctc_greedy_decoder_seq_len.in.cpp
    backend/deformable_psroi_pooling.in.cpp
@ -470,8 +480,6 @@ set(MULTI_TEST_SRC
    backend/experimental_detectron_topk_rois.in.cpp
    backend/strided_slice.in.cpp
    backend/dynamic.in.cpp
    backend/elu.in.cpp
    backend/exp.in.cpp
    backend/experimental_detectron_detection_output.in.cpp
    backend/experimental_detectron_prior_grid.in.cpp
    backend/fake_quantize.in.cpp
@ -481,20 +489,16 @@ set(MULTI_TEST_SRC
    backend/gather.in.cpp
    backend/gather_elements.in.cpp
    backend/gather_nd.in.cpp
    backend/gelu.in.cpp
    backend/group_convolution.in.cpp
    backend/group_convolution_backprop_data.in.cpp
    backend/hard_sigmoid.in.cpp
    backend/idft.in.cpp
    backend/interpolate.in.cpp
    backend/log.in.cpp
    backend/log_softmax.in.cpp
    backend/lrn.in.cpp
    backend/matmul.in.cpp
    backend/matrix_nms.in.cpp
    backend/maximum.in.cpp
    backend/max_pool.in.cpp
    backend/mish.in.cpp
    backend/mod.in.cpp
    backend/multiclass_nms.in.cpp
    backend/multiple_backends.in.cpp
@ -509,7 +513,6 @@ set(MULTI_TEST_SRC
    backend/pad.in.cpp
    backend/parameter_as_output.in.cpp
    backend/power.in.cpp
    backend/prelu.in.cpp
    backend/prior_box_clustered.in.cpp
    backend/prior_box.in.cpp
    backend/proposal.in.cpp
@ -517,7 +520,6 @@ set(MULTI_TEST_SRC
    backend/range.in.cpp
    backend/recurrent_cells.in.cpp
    backend/region_yolo.in.cpp
    backend/relu.in.cpp
    backend/reorg_yolo.in.cpp
    backend/reshape.in.cpp
    backend/result.in.cpp
@ -526,19 +528,14 @@ set(MULTI_TEST_SRC
    backend/round.in.cpp
    backend/scatter_nd_update.in.cpp
    backend/space_to_depth.in.cpp
    backend/selu.in.cpp
    backend/shape_of.in.cpp
    backend/shuffle_channels.in.cpp
    backend/sigmoid.in.cpp
    backend/softmax.in.cpp
    backend/softplus.in.cpp
    backend/space_to_batch.in.cpp
    backend/split.in.cpp
    backend/sqrt.in.cpp
    backend/squared_difference.in.cpp
    backend/squeeze.in.cpp
    backend/subtract.in.cpp
    backend/swish.in.cpp
    backend/tile.in.cpp
    backend/topk.in.cpp
    backend/transpose.in.cpp
@ -1,403 +0,0 @@
//*****************************************************************************
// Copyright 2021 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/test_control.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});

namespace {
template <typename T, test::TestCaseType tct = test::TestCaseType::STATIC>
void clamp_test(const element::Type& type,
                const PartialShape& dynamic_shape,
                const Shape& static_shape,
                const std::vector<T>& input,
                double min,
                double max,
                const std::vector<T>& output) {
    auto data = make_shared<op::Parameter>(type, dynamic_shape);
    auto clamp = make_shared<op::Clamp>(data, min, max);
    auto function = make_shared<Function>(clamp, ParameterVector{data});

    auto test_case = test::TestCase<TestEngine, tct>(function);
    test_case.template add_input<T>(static_shape, input);
    test_case.template add_expected_output<T>(static_shape, output);
    test_case.run();
}
}  // namespace

NGRAPH_TEST(${BACKEND_NAME}, clamp_integral) {
    Shape in_shape{6};
    element::Type et = element::i32;

    float min = 0.4;  // ceiled to 1
    float max = 5.6;  // floored to 5

    auto input = make_shared<op::Parameter>(et, in_shape);
    auto clamp = make_shared<op::Clamp>(input, min, max);
    auto f = make_shared<Function>(clamp, ParameterVector{input});

    vector<int32_t> in_vec{-1, 3, -10, 20, 6, 2};
    vector<int32_t> out_vec{1, 3, 1, 5, 5, 2};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input(in_shape, in_vec);
    test_case.add_expected_output(in_shape, out_vec);
    test_case.run();
}
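
// For integral element types, Clamp pulls non-integer bounds inside the interval before
// comparing, as the comments above note: min is rounded up (0.4 -> 1) and max is rounded
// down (5.6 -> 5). A minimal reference sketch of that normalization (hypothetical helper,
// not part of the ngraph API; std::ceil/std::floor come in via ngraph.hpp):
template <typename T>
static T clamp_integral_ref(T x, double min, double max) {
    const double lo = std::ceil(min);   // lower bound rounded up into the interval
    const double hi = std::floor(max);  // upper bound rounded down into the interval
    return static_cast<T>(std::min(std::max(static_cast<double>(x), lo), hi));
}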

NGRAPH_TEST(${BACKEND_NAME}, clamp_integral_negative) {
    Shape in_shape{6};
    element::Type et = element::i32;

    float min = -5.6;  // ceiled to -5
    float max = -0.4;  // floored to -1

    auto input = make_shared<op::Parameter>(et, in_shape);
    auto clamp = make_shared<op::Clamp>(input, min, max);
    auto f = make_shared<Function>(clamp, ParameterVector{input});

    vector<int32_t> in_vec{-6, 1, -2, 0, -1, 2};
    vector<int32_t> out_vec{-5, -1, -2, -1, -1, -1};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input(in_shape, in_vec);
    test_case.add_expected_output(in_shape, out_vec);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, clamp_float) {
    auto type = element::f32;
    typedef float ctype;

    auto sshape = Shape{5, 2};
    auto dshape = PartialShape::dynamic();

    auto min = numeric_limits<ctype>::min();
    auto max = numeric_limits<ctype>::max();
    auto pinf = numeric_limits<float>::infinity();
    auto ninf = -numeric_limits<float>::infinity();

    vector<ctype> input{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001};

    // static shape
    clamp_test<ctype>(type,
                      sshape,
                      sshape,
                      {-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8},
                      0.2,
                      0.6,
                      {0.2, 0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.6, 0.6});

    clamp_test<ctype>(type,
                      sshape,
                      sshape,
                      input,
                      10.0,
                      20.0,
                      {10.0, 20.0, 10.0, 20.0, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.0});

    clamp_test<ctype>(type,
                      sshape,
                      sshape,
                      input,
                      10.0,
                      pinf,
                      {10.0, max, 10.0, pinf, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.000001});

    clamp_test<ctype>(type,
                      sshape,
                      sshape,
                      input,
                      ninf,
                      20.0,
                      {min, 20.0, ninf, 20.0, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.0});
}
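
// Note that numeric_limits<float>::min() is the smallest positive normal float (~1.18e-38),
// not the most negative value, so in the [10, 20] case above it clamps up to 10.0 and only
// passes through unchanged when the lower bound is -inf.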

NGRAPH_TEST(${BACKEND_NAME}, clamp_int8) {
    auto type = element::i8;
    typedef int8_t ctype;

    auto sshape = Shape{4, 2};
    auto dshape = PartialShape::dynamic();

    auto min = numeric_limits<ctype>::min();
    auto max = numeric_limits<ctype>::max();
    auto pinf = numeric_limits<double>::infinity();
    auto ninf = -numeric_limits<double>::infinity();

    vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};

    // static shape
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
    clamp_test<ctype>(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}

NGRAPH_TEST(${BACKEND_NAME}, clamp_int16) {
    auto type = element::i16;
    typedef int16_t ctype;

    auto sshape = Shape{4, 2};
    auto dshape = PartialShape::dynamic();

    auto min = numeric_limits<ctype>::min();
    auto max = numeric_limits<ctype>::max();
    auto pinf = numeric_limits<double>::infinity();
    auto ninf = -numeric_limits<double>::infinity();

    vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};

    // static shape
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
    clamp_test<ctype>(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}

NGRAPH_TEST(${BACKEND_NAME}, clamp_int32) {
    auto type = element::i32;
    typedef int32_t ctype;

    auto sshape = Shape{4, 2};
    auto dshape = PartialShape::dynamic();

    auto min = numeric_limits<ctype>::min();
    auto max = numeric_limits<ctype>::max();
    auto pinf = numeric_limits<double>::infinity();
    auto ninf = -numeric_limits<double>::infinity();

    vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};

    // static shape
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
    clamp_test<ctype>(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}

NGRAPH_TEST(${BACKEND_NAME}, clamp_int64) {
    auto type = element::i64;
    typedef int64_t ctype;

    auto sshape = Shape{4, 2};
    auto dshape = PartialShape::dynamic();

    auto min = numeric_limits<ctype>::min();
    auto max = numeric_limits<ctype>::max();
    auto pinf = numeric_limits<double>::infinity();
    auto ninf = -numeric_limits<double>::infinity();

    vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};

    // static shape
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
    clamp_test<ctype>(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}

NGRAPH_TEST(${BACKEND_NAME}, clamp_uint8) {
    auto type = element::u8;
    typedef uint8_t ctype;

    auto sshape = Shape{4, 2};
    auto dshape = PartialShape::dynamic();

    auto min = numeric_limits<ctype>::min();
    // TODO: Fix CPU DEX / MLIR correctness bug: using signed comparison for unsigned ints
    // auto max = numeric_limits<ctype>::max();
    // auto pinf = numeric_limits<double>::infinity();
    ctype max = (static_cast<ctype>(1) << (numeric_limits<ctype>::digits - 1)) - 1;
    auto pinf = static_cast<double>(max);
    auto ninf = -numeric_limits<double>::infinity();

    vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};

    // static shape
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
    clamp_test<ctype>(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}
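
// The unsigned variants cap `max` at (1 << (digits - 1)) - 1, i.e. the maximum of the signed
// type of the same width (127 for uint8), and substitute that cap for +inf, so the values stay
// valid even if a backend compares them as signed integers (see the TODO above). The uint64
// variant hard-codes 32 bits, presumably because 2^63 - 1 is not exactly representable as a
// double.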

NGRAPH_TEST(${BACKEND_NAME}, clamp_uint16) {
    auto type = element::u16;
    typedef uint16_t ctype;

    auto sshape = Shape{4, 2};
    auto dshape = PartialShape::dynamic();

    auto min = numeric_limits<ctype>::min();
    // TODO: Fix CPU DEX / MLIR correctness bug: using signed comparison for unsigned ints
    // auto max = numeric_limits<ctype>::max();
    // auto pinf = numeric_limits<double>::infinity();
    ctype max = (static_cast<ctype>(1) << (numeric_limits<ctype>::digits - 1)) - 1;
    auto pinf = static_cast<double>(max);
    auto ninf = -numeric_limits<double>::infinity();

    vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};

    // static shape
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
    clamp_test<ctype>(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}

NGRAPH_TEST(${BACKEND_NAME}, clamp_uint32) {
    auto type = element::u32;
    typedef uint32_t ctype;

    auto sshape = Shape{4, 2};
    auto dshape = PartialShape::dynamic();

    auto min = numeric_limits<ctype>::min();
    // TODO: Fix CPU DEX / MLIR correctness bug: using signed comparison for unsigned ints
    // auto max = numeric_limits<ctype>::max();
    // auto pinf = numeric_limits<double>::infinity();
    ctype max = (static_cast<ctype>(1) << (numeric_limits<ctype>::digits - 1)) - 1;
    auto pinf = static_cast<double>(max);
    auto ninf = -numeric_limits<double>::infinity();

    vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};

    // static shape
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
    clamp_test<ctype>(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}

NGRAPH_TEST(${BACKEND_NAME}, clamp_uint64) {
    auto type = element::u64;
    typedef uint64_t ctype;

    auto sshape = Shape{4, 2};
    auto dshape = PartialShape::dynamic();

    auto min = numeric_limits<ctype>::min();
    // TODO: Fix CPU DEX / MLIR correctness bug: using signed comparison for unsigned ints
    // auto max = numeric_limits<ctype>::max();
    // auto pinf = numeric_limits<double>::infinity();
    ctype max = (static_cast<ctype>(1) << (32 - 1)) - 1;
    auto pinf = static_cast<double>(max);
    auto ninf = -numeric_limits<double>::infinity();

    vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};

    // static shape
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
    clamp_test<ctype>(type, sshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
    clamp_test<ctype>(type, sshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}

NGRAPH_TEST(${BACKEND_NAME}, clamp_float16) {
    auto type = element::f16;
    typedef float16 ctype;

    auto sshape = Shape{5, 2};
    auto dshape = PartialShape::dynamic();

    auto min = numeric_limits<ctype>::min();
    auto max = numeric_limits<ctype>::max();
    auto pinf = numeric_limits<float>::infinity();
    auto ninf = -numeric_limits<float>::infinity();

    vector<ctype> input{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001};

    // static shape
    clamp_test<ctype>(type,
                      sshape,
                      sshape,
                      {-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8},
                      0.2,
                      0.6,
                      {0.2, 0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.6, 0.6});

    clamp_test<ctype>(type,
                      sshape,
                      sshape,
                      input,
                      10.0,
                      20.0,
                      {10.0, 20.0, 10.0, 20.0, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.0});

    clamp_test<ctype>(type,
                      sshape,
                      sshape,
                      input,
                      10.0,
                      pinf,
                      {10.0, max, 10.0, pinf, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.000001});

    clamp_test<ctype>(type,
                      sshape,
                      sshape,
                      input,
                      ninf,
                      20.0,
                      {min, 20.0, ninf, 20.0, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.0});
}

NGRAPH_TEST(${BACKEND_NAME}, clamp_bfloat16) {
    auto type = element::bf16;
    typedef bfloat16 ctype;

    auto sshape = Shape{5, 2};
    auto dshape = PartialShape::dynamic();

    auto min = numeric_limits<ctype>::min();
    auto max = numeric_limits<ctype>::max();
    auto pinf = numeric_limits<float>::infinity();
    auto ninf = -numeric_limits<float>::infinity();

    vector<ctype> input{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001};

    // static shape
    clamp_test<ctype>(type,
                      sshape,
                      sshape,
                      {-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8},
                      0.2,
                      0.6,
                      {0.2, 0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.6, 0.6});

    clamp_test<ctype>(type,
                      sshape,
                      sshape,
                      input,
                      10.0,
                      20.0,
                      {10.0, 20.0, 10.0, 20.0, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.0});

    clamp_test<ctype>(type,
                      sshape,
                      sshape,
                      input,
                      10.0,
                      pinf,
                      {10.0, max, 10.0, pinf, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.000001});

    clamp_test<ctype>(type,
                      sshape,
                      sshape,
                      input,
                      ninf,
                      20.0,
                      {min, 20.0, ninf, 20.0, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.0});
}
@ -1,54 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>

// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif

#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "engines_util/test_engines.hpp"
#include "engines_util/test_case.hpp"
#include "util/test_control.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});

NGRAPH_TEST(${BACKEND_NAME}, elu) {
    auto A = make_shared<op::Parameter>(element::f32, Shape{3, 2});
    auto elu = make_shared<op::Elu>(A, 0.5f);
    auto function = make_shared<Function>(NodeVector{elu}, ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(function);
    test_case.add_input(vector<float>{-2.f, 3.f, -2.f, 1.f, -1.f, 0.f});
    test_case.add_expected_output(vector<float>{-0.432332358f, 3.f, -0.432332358f, 1.f, -0.316060279f, 0.f});
    test_case.run();
}
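
// Reference formula: Elu(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise. Worked check
// for alpha = 0.5: Elu(-2) = 0.5 * (e^-2 - 1) ~= -0.432332358 and
// Elu(-1) = 0.5 * (e^-1 - 1) ~= -0.316060279, matching the expected output above.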

NGRAPH_TEST(${BACKEND_NAME}, elu_negative_alpha) {
    auto A = make_shared<op::Parameter>(element::f32, Shape{3, 2});
    auto elu = make_shared<op::Elu>(A, -1.f);
    auto function = make_shared<Function>(NodeVector{elu}, ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(function);
    test_case.add_input(vector<float>{-2.f, 3.f, -2.f, 1.f, -1.f, 0.f});
    test_case.add_expected_output(vector<float>{0.864664717f, 3.f, 0.864664717f, 1.f, 0.632120559f, 0.f});
    test_case.run();
}
@ -1,85 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>

// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif

#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "engines_util/test_engines.hpp"
#include "engines_util/test_case.hpp"
#include "util/test_control.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});

NGRAPH_TEST(${BACKEND_NAME}, exp) {
    Shape shape{8};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Exp>(A), ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>({-4, -3, -2, -1, 0, 1, 2, 3});
    test_case.add_expected_output<float>(shape,
                                         {expf(-4), expf(-3), expf(-2), expf(-1), expf(0), expf(1), expf(2), expf(3)});
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, exp_negative) {
    Shape shape{5};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Exp>(A), ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>({-4, -3, -2, -1, -5});
    test_case.add_expected_output<float>(shape, {expf(-4), expf(-3), expf(-2), expf(-1), expf(-5)});
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, exp_scalar) {
    Shape shape{};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Exp>(A), ParameterVector{A});

    vector<float> a{13};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>({a});
    test_case.add_expected_output<float>(shape, {expf(13)});
    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 2);
}
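
// exp_scalar and exp_in_place run with a widened tolerance (DEFAULT_FLOAT_TOLERANCE_BITS + 2),
// presumably because exp amplifies rounding error in its argument, and exp_in_place composes
// exp twice.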

NGRAPH_TEST(${BACKEND_NAME}, exp_in_place) {
    Shape shape{2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto T = make_shared<op::Exp>(A);
    auto T2 = make_shared<op::Exp>(T);

    auto f = make_shared<Function>(T2, ParameterVector{A});

    vector<float> a{1, 3};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>({a});
    test_case.add_expected_output<float>(shape, {expf(expf(1)), expf(expf(3))});
    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 2);
}
@ -1,85 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/test_control.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});

NGRAPH_TEST(${BACKEND_NAME}, gelu_erf_mode_inference_f32_8D) {
    Shape in_shape{8};
    element::Type et = element::f32;

    auto param = make_shared<op::Parameter>(et, in_shape);
    auto gelu = make_shared<op::v7::Gelu>(param);
    auto f = make_shared<Function>(gelu, ParameterVector{param});

    vector<float> in_vec{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0};
    vector<float> out_vec{-0.00012636185, -0.0040495098, -0.04550028, -0.15865529, 0.0, 0.8413447, 1.9544997, 2.9959507};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>(in_shape, in_vec);
    test_case.add_expected_output<float>(in_shape, out_vec);
    test_case.run_with_tolerance_as_fp(1e-4f);
}
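
// Gelu v7 reference formulas:
//   ERF mode (default):  Gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
//   TANH mode (approx.): Gelu(x) = 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))
// The tanh-mode 8D test below reuses the erf-mode expected values, which is why it runs with
// the looser 1e-3 tolerance.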

NGRAPH_TEST(${BACKEND_NAME}, gelu_tanh_mode_inference_f32_8D) {
    Shape in_shape{8};
    element::Type et = element::f32;

    auto param = make_shared<op::Parameter>(et, in_shape);
    auto gelu = make_shared<op::v7::Gelu>(param, op::GeluApproximationMode::TANH);
    auto f = make_shared<Function>(gelu, ParameterVector{param});

    vector<float> in_vec{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0};
    vector<float> out_vec{-0.00012636185, -0.0040495098, -0.04550028, -0.15865529, 0.0, 0.8413447, 1.9544997, 2.9959507};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>(in_shape, in_vec);
    test_case.add_expected_output<float>(in_shape, out_vec);
    test_case.run_with_tolerance_as_fp(1e-3f);
}

NGRAPH_TEST(${BACKEND_NAME}, gelu_erf_mode_inference_f32_3D) {
    Shape in_shape{3};
    element::Type et = element::f32;

    auto param = make_shared<op::Parameter>(et, in_shape);
    auto gelu = make_shared<op::v7::Gelu>(param);
    auto f = make_shared<Function>(gelu, ParameterVector{param});

    vector<float> in_vec{-0.5, 0.1, 0.4};
    vector<float> out_vec{-0.15426877, 0.05398279, 0.2621686};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input(in_shape, in_vec);
    test_case.add_expected_output(in_shape, out_vec);
    test_case.run_with_tolerance_as_fp(1e-4f);
}

NGRAPH_TEST(${BACKEND_NAME}, gelu_tanh_mode_inference_f32_3D) {
    Shape in_shape{3};
    element::Type et = element::f32;

    auto param = make_shared<op::Parameter>(et, in_shape);
    auto gelu = make_shared<op::v7::Gelu>(param, op::GeluApproximationMode::TANH);
    auto f = make_shared<Function>(gelu, ParameterVector{param});

    vector<float> in_vec{-0.5, 0.1, 0.4};
    vector<float> out_vec{-0.15428599, 0.053982753, 0.262161165};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input(in_shape, in_vec);
    test_case.add_expected_output(in_shape, out_vec);
    test_case.run_with_tolerance_as_fp(1e-4f);
}
@ -1,61 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <fstream>

#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/test_control.hpp"

NGRAPH_SUPPRESS_DEPRECATED_START

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});

NGRAPH_TEST(${BACKEND_NAME}, hard_sigmoid_1d) {
    const Shape a_shape{3};
    const auto A = make_shared<op::Parameter>(element::f32, a_shape);

    const auto alpha = op::Constant::create(element::f32, Shape{}, {0.5f});
    const auto beta = op::Constant::create(element::f32, Shape{}, {0.6f});

    const auto R = make_shared<op::v0::HardSigmoid>(A, alpha, beta);
    const auto f = make_shared<Function>(R, ParameterVector{A});

    std::vector<float> a{-1.0f, 0.0f, 1.0f};

    EXPECT_EQ(R->get_output_shape(0), a_shape);

    auto test_case = test::TestCase<TestEngine>(f);

    test_case.add_input<float>({a});
    test_case.add_expected_output<float>({0.1f, 0.6f, 1.f});
    test_case.run();
}
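
// HardSigmoid(x) = max(0, min(1, alpha * x + beta)). With alpha = 0.5 and beta = 0.6 above:
// x = -1 -> 0.1, x = 0 -> 0.6, x = 1 -> min(1, 1.1) = 1.0, matching the expected output.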

NGRAPH_TEST(${BACKEND_NAME}, hard_sigmoid_2d) {
    const Shape a_shape{2, 5};
    const auto A = make_shared<op::Parameter>(element::f32, a_shape);

    const auto alpha = op::Constant::create(element::f32, Shape{}, {0.2f});
    const auto beta = op::Constant::create(element::f32, Shape{}, {0.5f});

    const auto R = make_shared<op::v0::HardSigmoid>(A, alpha, beta);
    const auto f = make_shared<Function>(R, ParameterVector{A});

    std::vector<float> a{-3.0f, -1.0f, 0.0f, 1.0f, 3.0f, 0.5f, -0.2f, 6.0f, 8.0f, 0.1f};

    EXPECT_EQ(R->get_output_shape(0), a_shape);

    auto test_case = test::TestCase<TestEngine>(f);

    test_case.add_input<float>({a});
    test_case.add_expected_output<float>(a_shape, {0.0f, 0.3f, 0.5f, 0.7f, 1.0f, 0.6f, 0.46f, 1.0f, 1.0f, 0.52f});
    test_case.run();
}
@ -1,332 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif

#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on

#include "gtest/gtest.h"
#include "runtime/backend.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "engines_util/execute_tools.hpp"

NGRAPH_SUPPRESS_DEPRECATED_START

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";

NGRAPH_TEST(${BACKEND_NAME}, log_softmax_1d_single_value) {
    Shape shape{1};
    auto A = make_shared<op::Parameter>(element::f32, shape);

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1});
    auto result = backend->create_tensor(element::f32, shape);

    std::vector<float> expected_result{0};

    auto f = make_shared<Function>(make_shared<op::v5::LogSoftmax>(A, 0), ParameterVector{A});
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(expected_result, read_vector<float>(result)));
}
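
// LogSoftmax(x)_i = x_i - max(x) - log(sum_j exp(x_j - max(x))) along the reduction axis, so a
// single-element input always maps to 0. In the axis-0 tests below each column is {v, v + 10000};
// exp(-10000) underflows to 0 in f32, giving the {-10000, 0} pattern.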

NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis0) {
    Shape shape{2, 4};
    auto A = make_shared<op::Parameter>(element::f32, shape);

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{0, 1, 2, 3, 10000, 10001, 10002, 10003});
    auto result = backend->create_tensor(element::f32, shape);

    std::vector<float> expected_result{-10000., -10000., -10000., -10000., 0., 0., 0., 0.};

    auto f = make_shared<Function>(make_shared<op::v5::LogSoftmax>(A, 0), ParameterVector{A});
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(expected_result, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis1) {
    Shape shape{2, 4};
    auto A = make_shared<op::Parameter>(element::f32, shape);

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{0, 1, 2, 3, 10000, 10001, 10002, 10003});
    auto result = backend->create_tensor(element::f32, shape);

    std::vector<float> expected_result{-3.4401896, -2.4401896, -1.4401897, -0.4401897,
                                       -3.4401896, -2.4401896, -1.4401897, -0.4401897};

    auto f = make_shared<Function>(make_shared<op::v5::LogSoftmax>(A, 1), ParameterVector{A});
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(expected_result, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis_neg1) {
    Shape shape{2, 4};
    auto A = make_shared<op::Parameter>(element::f32, shape);

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{0, 1, 2, 3, 10000, 10001, 10002, 10003});
    auto result = backend->create_tensor(element::f32, shape);

    std::vector<float> expected_result{-3.4401896, -2.4401896, -1.4401897, -0.4401897,
                                       -3.4401896, -2.4401896, -1.4401897, -0.4401897};

    auto f = make_shared<Function>(make_shared<op::v5::LogSoftmax>(A, -1), ParameterVector{A});
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(expected_result, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis_neg2) {
    Shape shape{2, 4};
    auto A = make_shared<op::Parameter>(element::f32, shape);

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{0, 1, 2, 3, 10000, 10001, 10002, 10003});
    auto result = backend->create_tensor(element::f32, shape);

    std::vector<float> expected_result{-10000., -10000., -10000., -10000., 0., 0., 0., 0.};

    auto f = make_shared<Function>(make_shared<op::v5::LogSoftmax>(A, -2), ParameterVector{A});
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(expected_result, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_0) {
    Shape shape{3, 2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape);

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8});
    auto result = backend->create_tensor(element::f32, shape);

    std::vector<float> expected_result{-12.0024818,     -12.0024818,     -12.0024818,
                                       -12.0024818,     -12.0024818,     -12.0024818,
                                       -6.00248181,     -6.00248181,     -6.00248181,
                                       -6.00248181,     -6.00248181,     -6.00248181,
                                       -2.48181414e-03, -2.48181414e-03, -2.48181414e-03,
                                       -2.48181414e-03, -2.48181414e-03, -2.48181414e-03};

    auto f = make_shared<Function>(make_shared<op::v5::LogSoftmax>(A, 0), ParameterVector{A});
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(expected_result, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_1) {
    Shape shape{3, 2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape);

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8});
    auto result = backend->create_tensor(element::f32, shape);

    std::vector<float> expected_result{-3.04858735, -3.04858735, -3.04858735,
                                       -0.04858735, -0.04858735, -0.04858735,
                                       -3.04858735, -3.04858735, -3.04858735,
                                       -0.04858735, -0.04858735, -0.04858735,
                                       -3.04858735, -3.04858735, -3.04858735,
                                       -0.04858735, -0.04858735, -0.04858735};

    auto f = make_shared<Function>(make_shared<op::v5::LogSoftmax>(A, 1), ParameterVector{A});
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(expected_result, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_2) {
    Shape shape{3, 2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape);

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8});
    auto result = backend->create_tensor(element::f32, shape);

    std::vector<float> expected_result{-2.40760596, -1.40760596, -0.40760596,
                                       -2.40760596, -1.40760596, -0.40760596,
                                       -2.40760596, -1.40760596, -0.40760596,
                                       -2.40760596, -1.40760596, -0.40760596,
                                       -2.40760596, -1.40760596, -0.40760596,
                                       -2.40760596, -1.40760596, -0.40760596};

    auto f = make_shared<Function>(make_shared<op::v5::LogSoftmax>(A, 2), ParameterVector{A});
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(expected_result, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg1) {
    Shape shape{3, 2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape);

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8});
    auto result = backend->create_tensor(element::f32, shape);

    std::vector<float> expected_result{-2.40760596, -1.40760596, -0.40760596,
                                       -2.40760596, -1.40760596, -0.40760596,
                                       -2.40760596, -1.40760596, -0.40760596,
                                       -2.40760596, -1.40760596, -0.40760596,
                                       -2.40760596, -1.40760596, -0.40760596,
                                       -2.40760596, -1.40760596, -0.40760596};

    auto f = make_shared<Function>(make_shared<op::v5::LogSoftmax>(A, -1), ParameterVector{A});
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(expected_result, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg2) {
    Shape shape{3, 2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape);

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8});
    auto result = backend->create_tensor(element::f32, shape);

    std::vector<float> expected_result{-3.04858735, -3.04858735, -3.04858735,
                                       -0.04858735, -0.04858735, -0.04858735,
                                       -3.04858735, -3.04858735, -3.04858735,
                                       -0.04858735, -0.04858735, -0.04858735,
                                       -3.04858735, -3.04858735, -3.04858735,
                                       -0.04858735, -0.04858735, -0.04858735};

    auto f = make_shared<Function>(make_shared<op::v5::LogSoftmax>(A, -2), ParameterVector{A});
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(expected_result, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg3) {
    Shape shape{3, 2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape);

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8});
    auto result = backend->create_tensor(element::f32, shape);

    std::vector<float> expected_result{-12.0024818,     -12.0024818,     -12.0024818,
                                       -12.0024818,     -12.0024818,     -12.0024818,
                                       -6.00248181,     -6.00248181,     -6.00248181,
                                       -6.00248181,     -6.00248181,     -6.00248181,
                                       -2.48181414e-03, -2.48181414e-03, -2.48181414e-03,
                                       -2.48181414e-03, -2.48181414e-03, -2.48181414e-03};

    auto f = make_shared<Function>(make_shared<op::v5::LogSoftmax>(A, -3), ParameterVector{A});
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(expected_result, read_vector<float>(result)));
}
@ -1,89 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>

#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "runtime/backend.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";

template <element::Type_t Type, typename T = fundamental_type_for<Type>>
static void mish_test(const PartialShape& dynamic_shape, const Shape& static_shape, const double fp_tolerance = 1e-5) {
    bool must_support_dynamic = dynamic_shape.is_dynamic();
    auto data = make_shared<op::Parameter>(Type, dynamic_shape);
    auto f = make_shared<Function>(make_shared<op::v4::Mish>(data), ParameterVector{data});

    auto backend = runtime::Backend::create("${BACKEND_NAME}", must_support_dynamic);

    auto create_output_tensor = [&]() {
        if (must_support_dynamic)
            return backend->create_dynamic_tensor(Type, dynamic_shape);
        return backend->create_tensor(Type, dynamic_shape.get_shape());
    };

    auto a = backend->create_tensor(Type, static_shape);
    auto result = create_output_tensor();

    // generate input tensor (with possible type conversion)
    auto static_size = shape_size(static_shape);
    std::vector<T> expected;
    std::vector<T> input;
    {
        std::mt19937 gen{0};  // use fixed seed for reproducibility of the test
        std::normal_distribution<> d{0.0, 20.0};

        for (auto i = static_size; i > 0; i--) {
            auto x = static_cast<T>(d(gen));
            auto y = static_cast<T>(static_cast<double>(x) * std::tanh(std::log(1.0 + std::exp(x))));
            input.push_back(x);
            expected.push_back(y);
        }

        copy_data(a, input);
    }

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});

    auto actual = read_vector<T>(result);

    // size equality test
    EXPECT_EQ(actual.size(), static_size);
    EXPECT_EQ(result->get_shape(), static_shape);

    // backend is allowed to trade off accuracy for performance
    for (size_t i = 0; i < static_size; i++)
        EXPECT_NEAR(actual[i], expected[i], fp_tolerance) << "input[i] is " << input[i];
}
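
// Mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^x)), which is exactly the expected-value
// formula computed inside mish_test above; the per-element fp_tolerance leaves backends room to
// trade accuracy for performance.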

NGRAPH_TEST(${BACKEND_NAME}, mish_f32) {
    mish_test<element::f32>({2, 5}, {2, 5});
    mish_test<element::f32>({2, 3, 4, 5}, {2, 3, 4, 5});
}

NGRAPH_TEST(${BACKEND_NAME}, mish_f16) {
    mish_test<element::f16>({2, 5}, {2, 5});
    mish_test<element::f16>({2, 3, 4, 5}, {2, 3, 4, 5});
}

NGRAPH_TEST(${BACKEND_NAME}, mish_dynamic) {
    mish_test<element::f32>(PartialShape::dynamic(), {2, 3, 4, 5});
    mish_test<element::f32>({2, Dimension::dynamic(), 4, 5}, {2, 3, 4, 5});
}
@ -1,850 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/test_control.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});

NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_1_param) {
    Shape shape_a{6};
    Shape shape_slope{1};

    std::vector<float> a{1, 2, -3, -4, 5, 6};
    std::vector<float> slope{2};
    std::vector<float> out{1, 2, -6, -8, 5, 6};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, slope});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}
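
// PRelu(x) = x for x >= 0 and slope * x otherwise, with the slope tensor broadcast against the
// data shape. In the test above, a = {..., -3, -4, ...} with slope = {2} yields {..., -6, -8, ...}.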
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_1_const) {
|
||||
Shape shape_a{6};
|
||||
Shape shape_slope{1};
|
||||
|
||||
std::vector<float> a{1, 2, -3, -4, 5, 6};
|
||||
std::vector<float> slope{2};
|
||||
std::vector<float> out{1, 2, -6, -8, 5, 6};
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_param) {
|
||||
Shape shape_a{6};
|
||||
Shape shape_slope{6};
|
||||
|
||||
std::vector<float> a{1, 2, -3, -4, 5, 6};
|
||||
std::vector<float> slope{2, 3, 4, 5, 6, 7};
|
||||
std::vector<float> out{1, 2, -12, -20, 5, 6};
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a, slope});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_6_const) {
|
||||
Shape shape_a{6};
|
||||
Shape shape_slope{6};
|
||||
|
||||
std::vector<float> a{1, 2, -3, -4, 5, 6};
|
||||
std::vector<float> slope{2, 3, 4, 5, 6, 7};
|
||||
std::vector<float> out{1, 2, -12, -20, 5, 6};
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_2_W_param) {
|
||||
Shape shape_a{3, 2};
|
||||
Shape shape_slope{2};
|
||||
|
||||
std::vector<float> a{-2, 3, -2, 1, -1, 0};
|
||||
std::vector<float> slope{0, 1};
|
||||
std::vector<float> out{0, 3, 0, 1, 0, 0};
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a, slope});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_2_W_const) {
|
||||
Shape shape_a{3, 2};
|
||||
Shape shape_slope{2};
|
||||
|
||||
std::vector<float> a{-2, 3, -2, 1, -1, 0};
|
||||
std::vector<float> slope{0, 1};
|
||||
std::vector<float> out{0, 3, 0, 1, 0, 0};
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_6_W_param) {
|
||||
Shape shape_a{2, 6};
|
||||
Shape shape_slope{6};
|
||||
|
||||
std::vector<float> a{1, 2, -3, -4, 5, 6, 7, 8, -9, -10, 11, 12};
|
||||
std::vector<float> slope{1, 2, 3, 4, 5, 6};
|
||||
std::vector<float> out{1, 2, -9, -16, 5, 6, 7, 8, -27, -40, 11, 12};
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a, slope});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_6_W_const) {
|
||||
Shape shape_a{2, 6};
|
||||
Shape shape_slope{6};
|
||||
|
||||
std::vector<float> a{1, 2, -3, -4, 5, 6, 7, 8, -9, -10, 11, 12};
|
||||
std::vector<float> slope{1, 2, 3, 4, 5, 6};
|
||||
std::vector<float> out{1, 2, -9, -16, 5, 6, 7, 8, -27, -40, 11, 12};
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_C_2_param) {
|
||||
Shape shape_a{3, 2};
|
||||
Shape shape_slope{2};
|
||||
|
||||
std::vector<float> a{-1, -1, -1, -1, -1, -1};
|
||||
std::vector<float> slope{2, 0.5};
|
||||
std::vector<float> out{-2, -0.5, -2, -0.5, -2, -0.5};
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a, slope});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_C_2_const) {
|
||||
Shape shape_a{3, 2};
|
||||
Shape shape_slope{2};
|
||||
|
||||
std::vector<float> a{-1, -1, -1, -1, -1, -1};
|
||||
std::vector<float> slope{2, 0.5};
|
||||
std::vector<float> out{-2, -0.5, -2, -0.5, -2, -0.5};
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_negative_equal_dims_slope_param) {
|
||||
Shape shape_a{2, 2, 2};
|
||||
Shape shape_slope{2};
|
||||
|
||||
std::vector<float> a{-0.5, -2, -3, -4, -5, -6, -7, -8};
|
||||
std::vector<float> slope{-0.5, -2};
|
||||
// std::vector<float> out{0.25, 4, 1.5, 8, 2.5, 12, 3.5, 16}; // broadcast (1, 1, 2)
|
||||
std::vector<float> out{0.25, 1, 6, 8, 2.5, 3, 14, 16}; // broadcast (1, 2, 1)
|
||||
// std::vector<float> out{0.25, 1, 1.5, 2, 10, 12, 14, 16}; // broadcast (2, 1, 1)
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a, slope});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_negative_equal_dims_slope_const) {
|
||||
Shape shape_a{2, 2, 2};
|
||||
Shape shape_slope{2};
|
||||
|
||||
std::vector<float> a{-0.5, -2, -3, -4, -5, -6, -7, -8};
|
||||
std::vector<float> slope{-0.5, -2};
|
||||
// std::vector<float> out{0.25, 4, 1.5, 8, 2.5, 12, 3.5, 16}; // broadcast (1, 1, 2)
|
||||
std::vector<float> out{0.25, 1, 6, 8, 2.5, 3, 14, 16}; // broadcast (1, 2, 1)
|
||||
// std::vector<float> out{0.25, 1, 1.5, 2, 10, 12, 14, 16}; // broadcast (2, 1, 1)
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_negative_slope_param) {
|
||||
Shape shape_a{3, 2};
|
||||
Shape shape_slope{2};
|
||||
|
||||
std::vector<float> a{-2, 3, -2, 1, -1, 0};
|
||||
std::vector<float> slope{-0.5, -1};
|
||||
std::vector<float> out{1, 3, 1, 1, 0.5, 0};
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a, slope});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_negative_slope_const) {
|
||||
Shape shape_a{3, 2};
|
||||
Shape shape_slope{2};
|
||||
|
||||
std::vector<float> a{-2, 3, -2, 1, -1, 0};
|
||||
std::vector<float> slope{-0.5, -1};
|
||||
std::vector<float> out{1, 3, 1, 1, 0.5, 0};
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_same_shape_param) {
|
||||
Shape shape_a{2, 6};
|
||||
Shape shape_slope{2, 6};
|
||||
|
||||
std::vector<float> a{1, 2, -3, -4, 5, 6, 1, 2, -3, -4, 5, 6};
|
||||
std::vector<float> slope{2, 2, 2, 2, 2, 2, 1, 1, 4, 2, 1, 1};
|
||||
std::vector<float> out{1, 2, -6, -8, 5, 6, 1, 2, -12, -8, 5, 6};
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a, slope});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_2d_same_shape_const) {
|
||||
Shape shape_a{2, 6};
|
||||
Shape shape_slope{2, 6};
|
||||
|
||||
std::vector<float> a{1, 2, -3, -4, 5, 6, 1, 2, -3, -4, 5, 6};
|
||||
std::vector<float> slope{2, 2, 2, 2, 2, 2, 1, 1, 4, 2, 1, 1};
|
||||
std::vector<float> out{1, 2, -6, -8, 5, 6, 1, 2, -12, -8, 5, 6};
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_C_1_W_slope_param) {
|
||||
Shape shape_a{2, 2, 2, 2};
|
||||
Shape shape_slope{2, 1, 2};
|
||||
|
||||
std::vector<float> a{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4};
|
||||
std::vector<float> slope{1, 2, 3, 4};
|
||||
std::vector<float> out{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16};
|
||||
|
||||
const auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
|
||||
const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});
|
||||
|
||||
auto test_case = test::TestCase<TestEngine>(f);
|
||||
test_case.add_multiple_inputs<float>({a, slope});
|
||||
test_case.add_expected_output<float>(shape_a, out);
|
||||
test_case.run();
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, prelu_C_1_W_slope_const) {
    Shape shape_a{2, 2, 2, 2};
    Shape shape_slope{2, 1, 2};

    std::vector<float> a{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4};
    std::vector<float> slope{1, 2, 3, 4};
    std::vector<float> out{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_4d_slope_1_C_1_W_param) {
    Shape shape_a{2, 2, 2, 2};
    Shape shape_slope{1, 2, 1, 2};

    std::vector<float> a{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4};
    std::vector<float> slope{1, 2, 3, 4};
    std::vector<float> out{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, slope});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_4d_slope_1_C_1_W_const) {
    Shape shape_a{2, 2, 2, 2};
    Shape shape_slope{1, 2, 1, 2};

    std::vector<float> a{1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4};
    std::vector<float> slope{1, 2, 3, 4};
    std::vector<float> out{1, 2, -3, -8, 1, 2, -9, -16, 1, 2, -3, -8, 1, 2, -9, -16};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_3d_W_param) {
    Shape shape_a{2, 2, 6};
    Shape shape_slope{2, 1, 6};

    std::vector<float> a{1, 2, -3, -4, -5, 6, -1, -2, -3, -4, -5, -6, 1, 2, -3, -4, 5, 6, -2, 4, -6, -8, 10, 12};
    std::vector<float> slope{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6};
    std::vector<float> out{1, 2, -9, -16, -5, 6, -2, -2, -9, -16, -5, -42,
                           1, 2, -9, -16, 5, 6, -2, 4, -18, -32, 10, 12};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, slope});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_3d_W_const) {
    Shape shape_a{2, 2, 6};
    Shape shape_slope{2, 1, 6};

    std::vector<float> a{1, 2, -3, -4, -5, 6, -1, -2, -3, -4, -5, -6, 1, 2, -3, -4, 5, 6, -2, 4, -6, -8, 10, 12};
    std::vector<float> slope{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6};
    std::vector<float> out{1, 2, -9, -16, -5, 6, -2, -2, -9, -16, -5, -42,
                           1, 2, -9, -16, 5, 6, -2, 4, -18, -32, 10, 12};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_3d_same_shape_param) {
    Shape shape_a{2, 3, 2};
    Shape shape_slope{2, 3, 2};

    std::vector<float> a{1, 2, -3, -4, -5, 6, -1, -2, -3, -4, -5, -6};
    std::vector<float> slope{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6};
    std::vector<float> out{1, 2, -9, -16, -5, 6, -1, -4, -9, -16, -25, -36};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, slope});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_3d_same_shape_const) {
    Shape shape_a{2, 3, 2};
    Shape shape_slope{2, 3, 2};

    std::vector<float> a{1, 2, -3, -4, -5, 6, -1, -2, -3, -4, -5, -6};
    std::vector<float> slope{2, 1, 3, 4, 1, 7, 1, 2, 3, 4, 5, 6};
    std::vector<float> out{1, 2, -9, -16, -5, 6, -1, -4, -9, -16, -25, -36};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

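// The broadcast tests below pin down how a rank-1 slope of shape {2} is
// aligned against higher-rank data. Each test keeps the expected output of
// the alternative alignment as a commented-out vector for reference.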
NGRAPH_TEST(${BACKEND_NAME}, prelu_3d_broadcast_C_W_slope_param) {
    Shape shape_a{2, 1, 2};
    Shape shape_slope{2};

    std::vector<float> a{-10, -10, -10, -10};
    std::vector<float> slope{0.1, 10};
    std::vector<float> out{-1, -100, -1, -100};  // broadcast (1, 2, 1)
    // std::vector<float> out{-1, -1, -100, -100}; // broadcast (2, 1, 1)

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, slope});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_3d_broadcast_C_W_slope_const) {
    Shape shape_a{2, 1, 2};
    Shape shape_slope{2};

    std::vector<float> a{-10, -10, -10, -10};
    std::vector<float> slope{0.1, 10};
    std::vector<float> out{-1, -100, -1, -100};  // broadcast (1, 2, 1)
    // std::vector<float> out{-1, -1, -100, -100}; // broadcast (2, 1, 1)

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_4d_broadcast_C_W_slope_param) {
    Shape shape_a{1, 2, 1, 2};
    Shape shape_slope{2};

    std::vector<float> a{-10, -10, -10, -10};
    std::vector<float> slope{0.1, 10};
    // std::vector<float> out{-1, -100, -1, -100}; // broadcast (1, 1, 1, 2)
    std::vector<float> out{-1, -1, -100, -100};  // broadcast (1, 2, 1, 1)

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, slope});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_4d_broadcast_C_W_slope_const) {
    Shape shape_a{1, 2, 1, 2};
    Shape shape_slope{2};

    std::vector<float> a{-10, -10, -10, -10};
    std::vector<float> slope{0.1, 10};
    // std::vector<float> out{-1, -100, -1, -100}; // broadcast (1, 1, 1, 2)
    std::vector<float> out{-1, -1, -100, -100};  // broadcast (1, 2, 1, 1)

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_4d_broadcast_C_slope_param) {
    Shape shape_a{1, 5, 1, 1};
    Shape shape_slope{5};

    std::vector<float> a{-1, 0, -1, -1, -1};
    std::vector<float> slope{1, 2, 3, 4, 5};
    std::vector<float> out{-1, 0, -3, -4, -5};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, slope});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_4d_broadcast_C_slope_const) {
    Shape shape_a{1, 5, 1, 1};
    Shape shape_slope{5};

    std::vector<float> a{-1, 0, -1, -1, -1};
    std::vector<float> slope{1, 2, 3, 4, 5};
    std::vector<float> out{-1, 0, -3, -4, -5};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_batch_nd_elementwise_param) {
    Shape shape_a{2, 3, 4, 5};
    Shape shape_slope{2, 3, 4, 5};

    std::vector<float> a(shape_size(shape_a), -1.f);  // 120 elements, all -1
    std::vector<float> slope(shape_size(shape_slope));
    std::iota(std::begin(slope), std::end(slope), 0);

    // With every input equal to -1, the expected output is simply -slope.
    std::vector<float> out{
        -0.,   -1.,   -2.,   -3.,   -4.,   -5.,   -6.,   -7.,   -8.,   -9.,   -10.,  -11.,  -12.,  -13.,  -14.,
        -15.,  -16.,  -17.,  -18.,  -19.,  -20.,  -21.,  -22.,  -23.,  -24.,  -25.,  -26.,  -27.,  -28.,  -29.,
        -30.,  -31.,  -32.,  -33.,  -34.,  -35.,  -36.,  -37.,  -38.,  -39.,  -40.,  -41.,  -42.,  -43.,  -44.,
        -45.,  -46.,  -47.,  -48.,  -49.,  -50.,  -51.,  -52.,  -53.,  -54.,  -55.,  -56.,  -57.,  -58.,  -59.,
        -60.,  -61.,  -62.,  -63.,  -64.,  -65.,  -66.,  -67.,  -68.,  -69.,  -70.,  -71.,  -72.,  -73.,  -74.,
        -75.,  -76.,  -77.,  -78.,  -79.,  -80.,  -81.,  -82.,  -83.,  -84.,  -85.,  -86.,  -87.,  -88.,  -89.,
        -90.,  -91.,  -92.,  -93.,  -94.,  -95.,  -96.,  -97.,  -98.,  -99.,  -100., -101., -102., -103., -104.,
        -105., -106., -107., -108., -109., -110., -111., -112., -113., -114., -115., -116., -117., -118., -119.};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, slope});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_batch_nd_elementwise_const) {
    Shape shape_a{2, 3, 4, 5};
    Shape shape_slope{2, 3, 4, 5};

    std::vector<float> a(shape_size(shape_a), -1.f);  // 120 elements, all -1
    std::vector<float> slope(shape_size(shape_slope));
    std::iota(std::begin(slope), std::end(slope), 0);

    // With every input equal to -1, the expected output is simply -slope.
    std::vector<float> out{
        -0.,   -1.,   -2.,   -3.,   -4.,   -5.,   -6.,   -7.,   -8.,   -9.,   -10.,  -11.,  -12.,  -13.,  -14.,
        -15.,  -16.,  -17.,  -18.,  -19.,  -20.,  -21.,  -22.,  -23.,  -24.,  -25.,  -26.,  -27.,  -28.,  -29.,
        -30.,  -31.,  -32.,  -33.,  -34.,  -35.,  -36.,  -37.,  -38.,  -39.,  -40.,  -41.,  -42.,  -43.,  -44.,
        -45.,  -46.,  -47.,  -48.,  -49.,  -50.,  -51.,  -52.,  -53.,  -54.,  -55.,  -56.,  -57.,  -58.,  -59.,
        -60.,  -61.,  -62.,  -63.,  -64.,  -65.,  -66.,  -67.,  -68.,  -69.,  -70.,  -71.,  -72.,  -73.,  -74.,
        -75.,  -76.,  -77.,  -78.,  -79.,  -80.,  -81.,  -82.,  -83.,  -84.,  -85.,  -86.,  -87.,  -88.,  -89.,
        -90.,  -91.,  -92.,  -93.,  -94.,  -95.,  -96.,  -97.,  -98.,  -99.,  -100., -101., -102., -103., -104.,
        -105., -106., -107., -108., -109., -110., -111., -112., -113., -114., -115., -116., -117., -118., -119.};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_W_slope_param) {
    Shape shape_a{2, 3, 4, 5};
    Shape shape_slope{5};

    std::vector<float> a(shape_size(shape_a), -1.f);  // 120 elements, all -1

    std::vector<float> slope{0, 1, 2, 3, 4};

    // The slope broadcasts along the innermost (W) axis, so the pattern
    // -0, -1, -2, -3, -4 repeats for every row of five elements.
    std::vector<float> out{-0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2.,
                           -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0.,
                           -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3.,
                           -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1.,
                           -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4.,
                           -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2.,
                           -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4.};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, slope});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_W_slope_const) {
    Shape shape_a{2, 3, 4, 5};
    Shape shape_slope{5};

    std::vector<float> a(shape_size(shape_a), -1.f);  // 120 elements, all -1

    std::vector<float> slope{0, 1, 2, 3, 4};

    // Same broadcast pattern as the param variant above.
    std::vector<float> out{-0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2.,
                           -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0.,
                           -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3.,
                           -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1.,
                           -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4.,
                           -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4., -0., -1., -2.,
                           -3., -4., -0., -1., -2., -3., -4., -0., -1., -2., -3., -4.};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_C_slope_param) {
    Shape shape_a{2, 3, 4, 5};
    Shape shape_slope{3};

    std::vector<float> a(shape_size(shape_a), -1.f);  // 120 elements, all -1

    std::vector<float> slope{0, 1, 2};

    // The slope broadcasts along the channel (C) axis: each channel covers
    // 4 * 5 = 20 elements, so the output repeats 20 of -0, 20 of -1, 20 of -2
    // for each of the two batches.
    std::vector<float> out{-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.,
                           -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                           -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.,
                           -2., -2., -2., -2., -2., -2., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.,
                           -0., -0., -0., -0., -0., -0., -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                           -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2.,
                           -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, slope});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_1d_C_slope_const) {
    Shape shape_a{2, 3, 4, 5};
    Shape shape_slope{3};

    std::vector<float> a(shape_size(shape_a), -1.f);  // 120 elements, all -1

    std::vector<float> slope{0, 1, 2};

    // Same channel-broadcast pattern as the param variant above.
    std::vector<float> out{-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.,
                           -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                           -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.,
                           -2., -2., -2., -2., -2., -2., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.,
                           -0., -0., -0., -0., -0., -0., -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                           -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2.,
                           -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_C_1_1_slope_param) {
    Shape shape_a{2, 3, 4, 5};
    Shape shape_slope{3, 1, 1};

    std::vector<float> a(shape_size(shape_a), -1.f);  // 120 elements, all -1

    std::vector<float> slope{0, 1, 2};

    // A {3, 1, 1} slope aligns with the C axis of {2, 3, 4, 5} data and
    // yields the same channel-wise pattern as the 1d_C tests above.
    std::vector<float> out{-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.,
                           -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                           -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.,
                           -2., -2., -2., -2., -2., -2., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.,
                           -0., -0., -0., -0., -0., -0., -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                           -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2.,
                           -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Parameter>(element::f32, shape_slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A, SLOPE});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, slope});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, prelu_C_1_1_slope_const) {
    Shape shape_a{2, 3, 4, 5};
    Shape shape_slope{3, 1, 1};

    std::vector<float> a(shape_size(shape_a), -1.f);  // 120 elements, all -1

    std::vector<float> slope{0, 1, 2};

    // Same channel-wise pattern as the param variant above.
    std::vector<float> out{-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.,
                           -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                           -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.,
                           -2., -2., -2., -2., -2., -2., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.,
                           -0., -0., -0., -0., -0., -0., -0., -0., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,
                           -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -2., -2., -2., -2., -2., -2., -2., -2.,
                           -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2., -2.};

    const auto A = make_shared<op::Parameter>(element::f32, shape_a);
    const auto SLOPE = make_shared<op::Constant>(element::f32, shape_slope, slope);
    const auto f = make_shared<Function>(make_shared<op::v0::PRelu>(A, SLOPE), ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a});
    test_case.add_expected_output<float>(shape_a, out);
    test_case.run();
}

@ -1,95 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "runtime/backend.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";

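// Reference semantics: Relu(x) = max(x, 0) elementwise, so the expected
// vectors below zero out every negative input and pass the rest through.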
NGRAPH_TEST(${BACKEND_NAME}, relu_2Dfprop) {
    auto shape_a = Shape{2, 5};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    auto relu = make_shared<op::Relu>(A);
    auto shape_rt = Shape{2, 5};
    auto f = make_shared<Function>(relu, ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5});
    auto result = backend->create_tensor(element::f32, shape_rt);
    vector<float> expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0};

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected, MIN_FLOAT_TOLERANCE_BITS));
}

NGRAPH_TEST(${BACKEND_NAME}, relu_2Dfprop_i32) {
    auto shape_a = Shape{2, 5};
    auto A = make_shared<op::Parameter>(element::i32, shape_a);
    auto relu = make_shared<op::Relu>(A);
    auto shape_rt = Shape{2, 5};
    auto f = make_shared<Function>(relu, ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::i32, shape_a);
    copy_data(a, vector<int32_t>{1, 8, -8, 17, -2, 1, 8, -8, 17, -1});
    auto result = backend->create_tensor(element::i32, shape_rt);
    vector<int32_t> expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0};

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_EQ(expected, read_vector<int32_t>(result));
}

NGRAPH_TEST(${BACKEND_NAME}, relu_4Dfprop) {
    auto shape_a = Shape{2, 2, 2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    auto relu = make_shared<op::Relu>(A);
    auto shape_rt = Shape{2, 2, 2, 2};
    auto f = make_shared<Function>(relu, ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1});
    auto result = backend->create_tensor(element::f32, shape_rt);
    vector<float> expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0, 1, 8, 0, 17, 0, 1};

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected, MIN_FLOAT_TOLERANCE_BITS));
}

NGRAPH_TEST(${BACKEND_NAME}, fuse_max_with_constant_zero_input_as_relu) {
    auto shape_a = Shape{2, 5};
    auto A = op::Constant::create(element::f32, shape_a, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
    auto B = make_shared<op::Parameter>(element::f32, shape_a);
    auto max = make_shared<op::v1::Maximum>(A, B);
    auto shape_rt = Shape{2, 5};
    auto f = make_shared<Function>(max, ParameterVector{B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto b = backend->create_tensor(element::f32, shape_a);
    copy_data(b, vector<float>{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5});
    auto result = backend->create_tensor(element::f32, shape_rt);
    vector<float> expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0};

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {b});
    EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected, MIN_FLOAT_TOLERANCE_BITS));
}
@ -1,95 +0,0 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/test_control.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});

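// Reference semantics: Selu(x) = lambda * x for x > 0 and
// lambda * alpha * (exp(x) - 1) for x <= 0. With the standard constants
// alpha ~= 1.67326324 and lambda ~= 1.05070098 used below, e.g.
// Selu(-1) = 1.05070098 * 1.67326324 * (exp(-1) - 1) ~= -1.1113307.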
NGRAPH_TEST(${BACKEND_NAME}, selu_2Dfprop) {
    Shape rt_shape{2};
    Shape c_shape{1};
    element::Type et = element::f32;

    auto input = make_shared<op::Parameter>(et, rt_shape);
    auto alpha = op::Constant::create(et, c_shape, {1.67326324});
    auto lambda = op::Constant::create(et, c_shape, {1.05070098});
    auto selu = make_shared<op::v0::Selu>(input, alpha, lambda);
    auto f = make_shared<Function>(selu, ParameterVector{input});

    vector<float> input_data{-1, 3};
    vector<float> expected_out{-1.1113307, 3.152103};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>(rt_shape, input_data);
    test_case.add_expected_output(rt_shape, expected_out);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, selu_4Dfprop) {
    Shape in_shape{4};
    Shape c_shape{1};
    element::Type et = element::f32;

    auto input = make_shared<op::Parameter>(et, in_shape);
    auto alpha = op::Constant::create(et, c_shape, {1.67326324});
    auto lambda = op::Constant::create(et, c_shape, {1.05070098});
    auto selu = make_shared<op::v0::Selu>(input, alpha, lambda);
    auto f = make_shared<Function>(selu, ParameterVector{input});

    vector<float> in_vec{-1.0, 0.0, 1.0, 2.0};
    vector<float> out_vec{-1.1113307, 0., 1.050701, 2.101402};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>(in_shape, in_vec);
    test_case.add_expected_output<float>(in_shape, out_vec);
    test_case.run_with_tolerance_as_fp(1e-4f);
}

NGRAPH_TEST(${BACKEND_NAME}, selu_1Dfprop) {
    Shape in_shape{1};
    Shape c_shape{1};
    element::Type et = element::f32;

    auto input = make_shared<op::Parameter>(et, in_shape);
    auto alpha = op::Constant::create(et, c_shape, {1.67326324});
    auto lambda = op::Constant::create(et, c_shape, {1.05070098});
    auto selu = make_shared<op::v0::Selu>(input, alpha, lambda);
    auto f = make_shared<Function>(selu, ParameterVector{input});

    vector<float> in_vec{112.0};
    vector<float> out_vec{117.67851};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>(in_shape, in_vec);
    test_case.add_expected_output<float>(in_shape, out_vec);
    test_case.run_with_tolerance_as_fp(1e-4f);
}

NGRAPH_TEST(${BACKEND_NAME}, selu_3Dfprop_negative) {
    Shape in_shape{3};
    Shape c_shape{1};
    element::Type et = element::f32;

    auto input = make_shared<op::Parameter>(et, in_shape);
    auto alpha = op::Constant::create(et, c_shape, {1.67326324});
    auto lambda = op::Constant::create(et, c_shape, {1.05070098});
    auto selu = make_shared<op::v0::Selu>(input, alpha, lambda);
    auto f = make_shared<Function>(selu, ParameterVector{input});

    vector<float> in_vec{-3.0, -12.5, -7.0};
    vector<float> out_vec{-1.6705687, -1.7580928, -1.7564961};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>(in_shape, in_vec);
    test_case.add_expected_output<float>(in_shape, out_vec);
    test_case.run_with_tolerance_as_fp(1e-4f);
}
@ -1,76 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif

#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on

#include "gtest/gtest.h"
#include "runtime/backend.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "engines_util/execute_tools.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";

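// Reference semantics: Sigmoid(x) = 1 / (1 + exp(-x)) elementwise. The
// tests below derive the expected values with std::exp instead of
// hard-coding them, so they match the float math exactly.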
NGRAPH_TEST(${BACKEND_NAME}, sigmoid_n1c1h2w2) {
    auto input = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
    auto sigmoid_node = make_shared<op::Sigmoid>(input);
    auto func = make_shared<Function>(sigmoid_node, ParameterVector{input});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, input->get_shape());
    shared_ptr<runtime::Tensor> result = backend->create_tensor(element::f32, input->get_shape());

    float x1 = 1.0f;
    float x2 = 4.0f;
    float sigma1 = 1.0f / (1.0f + std::exp(-x1));
    float sigma2 = 1.0f / (1.0f + std::exp(-x2));

    vector<float> dataA{x1, x2, x1, x2};
    copy_data(a, dataA);

    auto handle = backend->compile(func);
    handle->call_with_validate({result}, {a});
    vector<float> expected{sigma1, sigma2, sigma1, sigma2};
    EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected));
}

NGRAPH_TEST(${BACKEND_NAME}, sigmoid_n1c1h4) {
    auto input = make_shared<op::Parameter>(element::f32, Shape{1, 1, 4});
    auto sigmoid_node = make_shared<op::Sigmoid>(input);
    auto func = make_shared<Function>(sigmoid_node, ParameterVector{input});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, input->get_shape());
    shared_ptr<runtime::Tensor> result = backend->create_tensor(element::f32, input->get_shape());

    float x1 = 1.0f;
    float x2 = 4.0f;
    float sigma1 = 1.0f / (1.0f + std::exp(-x1));
    float sigma2 = 1.0f / (1.0f + std::exp(-x2));

    vector<float> dataA{x1, x2, x1, x2};
    copy_data(a, dataA);

    auto handle = backend->compile(func);
    handle->call_with_validate({result}, {a});
    vector<float> expected{sigma1, sigma2, sigma1, sigma2};
    EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected));
}
@ -1,219 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif

#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on

#include "gtest/gtest.h"
#include "runtime/backend.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "engines_util/execute_tools.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";

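// Reference semantics: v1::Softmax normalizes along a single axis,
// out[i] = exp(x[i]) / sum_j exp(x[j]) with the sum taken over that axis,
// so the outputs along the reduction axis sum to 1.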
NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d) {
    Shape shape{2, 2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::v1::Softmax>(A, 0), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{-10, -20, -30, -40, -50, -60, -1, -2, -3, -4, -5, -6});
    auto result = backend->create_tensor(element::f32, shape);

    auto d0 = expf(-10) + expf(-1);
    auto d1 = expf(-20) + expf(-2);
    auto d2 = expf(-30) + expf(-3);
    auto d3 = expf(-40) + expf(-4);
    auto d4 = expf(-50) + expf(-5);
    auto d5 = expf(-60) + expf(-6);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    vector<float> expected{expf(-10) / d0,
                           expf(-20) / d1,
                           expf(-30) / d2,
                           expf(-40) / d3,
                           expf(-50) / d4,
                           expf(-60) / d5,
                           expf(-1) / d0,
                           expf(-2) / d1,
                           expf(-3) / d2,
                           expf(-4) / d3,
                           expf(-5) / d4,
                           expf(-6) / d5};

    EXPECT_TRUE(test::all_close(expected, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d_double) {
    Shape shape{2, 2, 3};
    auto A = make_shared<op::Parameter>(element::f64, shape);
    auto f = make_shared<Function>(make_shared<op::v1::Softmax>(A, 0), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f64, shape);
    copy_data(a, vector<double>{-10, -20, -30, -40, -50, -60, -1, -2, -3, -4, -5, -6});
    auto result = backend->create_tensor(element::f64, shape);

    auto d0 = exp(-10) + exp(-1);
    auto d1 = exp(-20) + exp(-2);
    auto d2 = exp(-30) + exp(-3);
    auto d3 = exp(-40) + exp(-4);
    auto d4 = exp(-50) + exp(-5);
    auto d5 = exp(-60) + exp(-6);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    vector<double> expected{exp(-10) / d0,
                            exp(-20) / d1,
                            exp(-30) / d2,
                            exp(-40) / d3,
                            exp(-50) / d4,
                            exp(-60) / d5,
                            exp(-1) / d0,
                            exp(-2) / d1,
                            exp(-3) / d2,
                            exp(-4) / d3,
                            exp(-5) / d4,
                            exp(-6) / d5};

    EXPECT_TRUE(test::all_close(expected, read_vector<double>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, softmax_2d_axis_1) {
    Shape shape{2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::v1::Softmax>(A, 1), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{-10, -20, -30, -40, -50, -60});
    auto result = backend->create_tensor(element::f32, shape);

    auto d0 = expf(-10) + expf(-20) + expf(-30);
    auto d1 = expf(-40) + expf(-50) + expf(-60);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    vector<float> expected{expf(-10) / d0,
                           expf(-20) / d0,
                           expf(-30) / d0,
                           expf(-40) / d1,
                           expf(-50) / d1,
                           expf(-60) / d1};
    EXPECT_TRUE(test::all_close_f(expected, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, softmax_2d_axis_0) {
    Shape shape{2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::v1::Softmax>(A, 0), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{-10, -20, -30, -40, -50, -60});
    auto result = backend->create_tensor(element::f32, shape);

    auto d0 = expf(-10) + expf(-40);
    auto d1 = expf(-20) + expf(-50);
    auto d2 = expf(-30) + expf(-60);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    vector<float> expected{expf(-10) / d0,
                           expf(-20) / d1,
                           expf(-30) / d2,
                           expf(-40) / d0,
                           expf(-50) / d1,
                           expf(-60) / d2};
    EXPECT_TRUE(test::all_close(expected, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d_trivial) {
    Shape shape{1, 2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::v1::Softmax>(A, 0), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{-10, -20, -30, -40, -50, -60});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    vector<float> expected{1, 1, 1, 1, 1, 1};
    EXPECT_TRUE(test::all_close(expected, read_vector<float>(result)));
}

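// The next two tests probe numerical stability. Softmax is shift-invariant,
// softmax(x) == softmax(x - max(x)), so implementations typically subtract
// the per-axis maximum before exponentiating; that is why the overflow test
// builds its expected values from expf(high - high) and expf(3 - high)
// rather than from expf(high), which would overflow to infinity in float.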
NGRAPH_TEST(${BACKEND_NAME}, softmax_underflow) {
    Shape shape{2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::v1::Softmax>(A, 0), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto low = std::numeric_limits<float>::lowest();

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{low, 1, 2, 3, 4, 5});
    auto result = backend->create_tensor(element::f32, shape);

    auto d0 = expf(low) + expf(3);
    auto d1 = expf(1) + expf(4);
    auto d2 = expf(2) + expf(5);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    vector<float> expected{expf(low) / d0, expf(1) / d1, expf(2) / d2, expf(3) / d0, expf(4) / d1, expf(5) / d2};
    EXPECT_TRUE(test::all_close_f(expected, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, softmax_overflow) {
    Shape shape{2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::v1::Softmax>(A, 0), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto high = std::numeric_limits<float>::max();

    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{high, 1, 2, 3, 4, 5});
    auto result = backend->create_tensor(element::f32, shape);

    auto d0 = expf(high - high) + expf(3 - high);
    auto d1 = expf(1) + expf(4);
    auto d2 = expf(2) + expf(5);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    vector<float> expected{expf(high - high) / d0,
                           expf(1) / d1,
                           expf(2) / d2,
                           expf(3 - high) / d0,
                           expf(4) / d1,
                           expf(5) / d2};
    EXPECT_TRUE(test::all_close_f(expected, read_vector<float>(result)));
}
@ -1,43 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>

// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif

#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "engines_util/test_engines.hpp"
#include "engines_util/test_case.hpp"
#include "util/test_control.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});

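// Reference semantics: SoftPlus(x) = ln(1 + exp(x)), a smooth approximation
// of Relu: SoftPlus(0) = ln(2) ~= 0.69314718, and for large x the result
// approaches x itself (SoftPlus(20) ~= 20).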
NGRAPH_TEST(${BACKEND_NAME}, softplus) {
    auto A = make_shared<op::Parameter>(element::f32, Shape{4});
    auto softplus = make_shared<op::v4::SoftPlus>(A);
    auto function = make_shared<Function>(NodeVector{softplus}, ParameterVector{A});

    auto test_case = test::TestCase<TestEngine>(function);
    test_case.add_input(vector<float>{-1.0, 0.0, 1.0, 20.0});
    test_case.add_expected_output(vector<float>{0.31326166, 0.69314718, 1.3132616, 20.0});
    test_case.run();
}
@ -1,78 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/test_control.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});

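// Reference semantics: Swish(x, beta) = x * sigmoid(beta * x)
// = x / (1 + exp(-beta * x)), with beta defaulting to 1 when the second
// input is omitted. The tests below compute the expected outputs with the
// same formula via std::transform instead of hard-coding them.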
NGRAPH_TEST(${BACKEND_NAME}, swish_2D_with_beta0_6) {
    Shape in_shape{2, 4};
    element::Type et = element::f32;
    auto beta = 0.6f;

    auto args0 = make_shared<op::Parameter>(et, in_shape);
    auto args1 = make_shared<op::Parameter>(et, Shape{});
    auto swish = make_shared<op::v4::Swish>(args0, args1);
    auto f = make_shared<Function>(swish, ParameterVector{args0, args1});

    vector<vector<float>> in_vec{vector<float>{0.4, -5.7, -6, 3, -0.9, 23, 5, 3.3}, vector<float>{beta}};
    vector<float> out_vec{in_vec[0]};
    std::transform(out_vec.begin(), out_vec.end(), out_vec.begin(), [&beta](float x) -> float {
        return (x / (1.0f + std::exp(x * beta * -1.0f)));
    });

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>(in_vec);
    test_case.add_expected_output<float>(in_shape, out_vec);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, swish_2D_without_beta) {
    Shape in_shape{2, 3};
    element::Type et = element::f32;

    auto args0 = make_shared<op::Parameter>(et, in_shape);
    auto swish = make_shared<op::v4::Swish>(args0);
    auto f = make_shared<Function>(swish, ParameterVector{args0});

    vector<float> in_vec{1, 8, -8, 17, -0.5, -1};
    vector<float> out_vec{in_vec};
    std::transform(out_vec.begin(), out_vec.end(), out_vec.begin(), [](float x) -> float {
        return (x / (1.0f + std::exp(x * -1.0f)));
    });

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>(in_vec);
    test_case.add_expected_output<float>(in_shape, out_vec);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, swish_4D_with_beta0_33) {
    Shape in_shape{2, 2, 1, 2};
    element::Type et = element::f32;
    auto beta = 0.33f;

    auto args0 = make_shared<op::Parameter>(et, in_shape);
    auto args1 = make_shared<op::Parameter>(et, Shape{});
    auto swish = make_shared<op::v4::Swish>(args0, args1);
    auto f = make_shared<Function>(swish, ParameterVector{args0, args1});

    vector<vector<float>> in_vec{vector<float>{0.1, 0.6, 20, -7, -5.3, 3.5, -9, 11}, vector<float>{beta}};
    vector<float> out_vec{in_vec[0]};
    std::transform(out_vec.begin(), out_vec.end(), out_vec.begin(), [&beta](float x) -> float {
        return (x / (1.0f + std::exp(x * beta * -1.0f)));
    });

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>(in_vec);
    test_case.add_expected_output<float>(in_shape, out_vec);
    test_case.run();
}

26
ngraph/test/type_prop/relu.cpp
Normal file
@ -0,0 +1,26 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"

using namespace std;
using namespace ngraph;

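// type_prop tests exercise static type and shape inference only: they
// construct a node and assert on the inferred element type and output
// shape, without compiling or running anything on a backend.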
TEST(type_prop, relu_2d) {
    auto param = make_shared<op::Parameter>(element::f32, Shape{2, 4});
    Shape relu_shape{2, 4};
    auto relu = make_shared<op::Relu>(param);
    ASSERT_EQ(relu->get_element_type(), element::f32);
    ASSERT_EQ(relu->get_shape(), relu_shape);
}

TEST(type_prop, relu_4d) {
    auto param = make_shared<op::Parameter>(element::f32, Shape{2, 2, 2, 2});
    Shape relu_shape{2, 2, 2, 2};
    auto relu = make_shared<op::Relu>(param);
    ASSERT_EQ(relu->get_element_type(), element::f32);
    ASSERT_EQ(relu->get_shape(), relu_shape);
}
36
ngraph/test/type_prop/sigmoid.cpp
Normal file
@ -0,0 +1,36 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"

using namespace std;
using namespace ngraph;

TEST(type_prop, sigmoid) {
    auto data = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6});
    auto sigmoid_func = make_shared<op::Sigmoid>(data);
    EXPECT_EQ(sigmoid_func->get_element_type(), element::f32);
    EXPECT_EQ(sigmoid_func->get_shape(), data->get_output_shape(0));
}

TEST(type_prop, sigmoid_partial) {
    auto data = make_shared<op::Parameter>(element::f32, PartialShape{1, Dimension::dynamic(), 6});
    auto sigmoid_func = make_shared<op::Sigmoid>(data);
    EXPECT_EQ(sigmoid_func->get_element_type(), element::f32);
    ASSERT_TRUE(sigmoid_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0)));

    // rank unknown
    auto sigmoid_partial = make_shared<op::Sigmoid>(make_shared<op::Parameter>(element::f32, PartialShape::dynamic()));
    ASSERT_TRUE(sigmoid_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
}

TEST(type_prop, sigmoid_partial_static_rank) {
    auto data = make_shared<op::Parameter>(element::f32, PartialShape{1, Dimension::dynamic(), 6});
    auto sigmoid_func = make_shared<op::Sigmoid>(data);
    EXPECT_EQ(sigmoid_func->get_element_type(), element::f32);
    ASSERT_TRUE(sigmoid_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0)));
    ASSERT_TRUE(sigmoid_func->get_output_partial_shape(0).rank().is_static());
}
32
ngraph/test/visitors/op/clamp.cpp
Normal file
@ -0,0 +1,32 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "util/visitor.hpp"

using namespace std;
using namespace ngraph;
using ngraph::test::NodeBuilder;
using ngraph::test::ValueMap;

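// Visitor ("attributes") tests round-trip a node through NodeBuilder, which
// serializes the op's attributes and recreates it via builder.create(). The
// expected_attr_count assertion pins down how many attributes the op
// exposes to the visitor; for Clamp these are min and max.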
TEST(attributes, clamp_op) {
    NodeBuilder::get_ops().register_factory<opset1::Clamp>();
    auto data = make_shared<op::Parameter>(element::f32, Shape{2, 4});

    double min = 0.4;
    double max = 5.6;

    const auto clamp = make_shared<opset1::Clamp>(data, min, max);
    NodeBuilder builder(clamp);
    auto g_clamp = ov::as_type_ptr<opset1::Clamp>(builder.create());

    const auto expected_attr_count = 2;
    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);

    EXPECT_EQ(g_clamp->get_min(), clamp->get_min());
    EXPECT_EQ(g_clamp->get_max(), clamp->get_max());
}
9
ngraph/test/visitors/op/exp.cpp
Normal file
@ -0,0 +1,9 @@
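// The unary visitor tests below (exp, hsigmoid, hswish) are typed gtest
// suites from unary_ops.hpp: each file instantiates UnaryOperatorVisitor
// for one op/element-type pair to check that an attribute-less unary op
// survives the visitor round trip.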
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "unary_ops.hpp"

using Type = ::testing::Types<UnaryOperatorType<ngraph::op::v0::Exp, ngraph::element::f32>>;

INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_atrribute, UnaryOperatorVisitor, Type, UnaryOperatorTypeName);
26
ngraph/test/visitors/op/hard_sigmoid.cpp
Normal file
@ -0,0 +1,26 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "util/visitor.hpp"

using namespace std;
using namespace ngraph;
using ngraph::test::NodeBuilder;

TEST(attributes, hardsigmoid_op) {
    NodeBuilder::get_ops().register_factory<opset1::HardSigmoid>();
    const auto data = make_shared<op::Parameter>(element::f32, Shape{2, 5});
    const auto alpha = make_shared<op::Parameter>(element::f32, Shape{});
    const auto beta = make_shared<op::Parameter>(element::f32, Shape{});

    const auto hardsigmoid = make_shared<opset1::HardSigmoid>(data, alpha, beta);
    NodeBuilder builder(hardsigmoid);

    const auto expected_attr_count = 0;
    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
}
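Unlike Clamp, HardSigmoid receives alpha and beta as graph inputs rather than attributes, so the visitor has nothing to serialize and expected_attr_count is 0. In a real graph those inputs would typically be Constants; a small sketch (values chosen arbitrarily):

#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset1.hpp"

int main() {
    using namespace ngraph;
    const auto data  = std::make_shared<op::Parameter>(element::f32, Shape{2, 5});
    const auto alpha = op::Constant::create(element::f32, Shape{}, {0.2f});
    const auto beta  = op::Constant::create(element::f32, Shape{}, {0.5f});
    const auto node  = std::make_shared<opset1::HardSigmoid>(data, alpha, beta);
    return node->get_input_size() == 3 ? 0 : 1;  // all three values arrive as inputs
}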
9
ngraph/test/visitors/op/hsigmoid.cpp
Normal file
@ -0,0 +1,9 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "unary_ops.hpp"

using Type = ::testing::Types<UnaryOperatorType<ngraph::op::v5::HSigmoid, ngraph::element::f32>>;

INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_attribute, UnaryOperatorVisitor, Type, UnaryOperatorTypeName);
9
ngraph/test/visitors/op/hswish.cpp
Normal file
@ -0,0 +1,9 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "unary_ops.hpp"

using Type = ::testing::Types<UnaryOperatorType<ngraph::op::v4::HSwish, ngraph::element::f32>>;

INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_attribute, UnaryOperatorVisitor, Type, UnaryOperatorTypeName);
33
ngraph/test/visitors/op/log_softmax.cpp
Normal file
@ -0,0 +1,33 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "ngraph/opsets/opset3.hpp"
#include "ngraph/opsets/opset4.hpp"
#include "ngraph/opsets/opset5.hpp"
#include "util/visitor.hpp"

using namespace std;
using namespace ngraph;
using ngraph::test::NodeBuilder;
using ngraph::test::ValueMap;

TEST(attributes, logsoftmax_op) {
    NodeBuilder::get_ops().register_factory<opset5::LogSoftmax>();
    auto data = make_shared<op::Parameter>(element::f32, Shape{3, 2, 3});

    int64_t axis = 2;

    const auto logsoftmax = make_shared<opset5::LogSoftmax>(data, axis);
    NodeBuilder builder(logsoftmax);
    auto g_logsoftmax = ov::as_type_ptr<opset5::LogSoftmax>(builder.create());

    const auto expected_attr_count = 1;
    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);

    EXPECT_EQ(g_logsoftmax->get_axis(), logsoftmax->get_axis());
}
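LogSoftmax is the only activation in this set with a real attribute to round-trip: the signed reduction axis. v5 LogSoftmax also accepts negative axes (counted from the back), and get_axis() appears to return the attribute as stored rather than a normalized value; a sketch under that assumption:

#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset5.hpp"

int main() {
    using namespace ngraph;
    const auto data = std::make_shared<op::Parameter>(element::f32, Shape{3, 2, 3});
    // -1 addresses the same dimension as axis 2 for this rank-3 input.
    const auto lsm = std::make_shared<opset5::LogSoftmax>(data, -1);
    return lsm->get_axis() == -1 ? 0 : 1;  // assumed: the raw attribute value is kept
}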
25
ngraph/test/visitors/op/prelu.cpp
Normal file
@ -0,0 +1,25 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "util/visitor.hpp"

using namespace std;
using namespace ngraph;
using ngraph::test::NodeBuilder;

TEST(attributes, prelu_op) {
    NodeBuilder::get_ops().register_factory<opset1::PRelu>();
    const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 1, 2});
    const auto slope = make_shared<op::Parameter>(element::f32, Shape{5});

    const auto prelu = make_shared<opset1::PRelu>(data, slope);
    NodeBuilder builder(prelu);

    const auto expected_attr_count = 0;
    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
}
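Note that the slope length (5) does not match any dimension of the data shape {1, 2, 1, 2}; this test only round-trips attributes through NodeBuilder and never evaluates the op, so the broadcast compatibility of the two inputs is never exercised. Like HardSigmoid, PRelu carries its slope as an input rather than an attribute, hence the zero attribute count.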
9
ngraph/test/visitors/op/relu.cpp
Normal file
@ -0,0 +1,9 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "unary_ops.hpp"

using Type = ::testing::Types<UnaryOperatorType<ngraph::op::v0::Relu, ngraph::element::f32>>;

INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_attribute, UnaryOperatorVisitor, Type, UnaryOperatorTypeName);
9
ngraph/test/visitors/op/sigmoid.cpp
Normal file
@ -0,0 +1,9 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "unary_ops.hpp"

using Type = ::testing::Types<UnaryOperatorType<ngraph::op::v0::Sigmoid, ngraph::element::f32>>;

INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_attribute, UnaryOperatorVisitor, Type, UnaryOperatorTypeName);