Migrate arithmetic1 to template plugin test (#8052)

* Migrate arithmetic1 to template plugin test

* Remove backend tests of arithmetic1
This commit is contained in:
David Nam 2021-10-21 01:16:51 +09:00 committed by GitHub
parent 5cd63f47e5
commit eb7d7e4413
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 1978 additions and 1261 deletions

View File

@ -0,0 +1,220 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "base_reference_test.hpp"
#include "openvino/op/add.hpp"
using namespace ov;
using namespace reference_tests;
namespace {
// Bundles one Add test case: the two input shapes, the shared element type,
// both input value vectors, and the precomputed elementwise sum.
struct AddParams {
template <class IT>
AddParams(const PartialShape& shape1,
const PartialShape& shape2,
const element::Type& iType,
const std::vector<IT>& iValues1,
const std::vector<IT>& iValues2,
const std::vector<IT>& oValues)
: pshape1(shape1),
pshape2(shape2),
inType(iType),
// Add never changes the element type, so output type mirrors input type.
outType(iType),
inputData1(CreateTensor(iType, iValues1)),
inputData2(CreateTensor(iType, iValues2)),
refData(CreateTensor(iType, oValues)) {}
PartialShape pshape1;
PartialShape pshape2;
element::Type inType;
element::Type outType;
runtime::Tensor inputData1;
runtime::Tensor inputData2;
runtime::Tensor refData;
};
// Parameterized fixture that executes a single Add node and compares the
// result against the hardcoded reference tensor carried by AddParams.
class ReferenceAddLayerTest : public testing::TestWithParam<AddParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& p = GetParam();
        function = CreateFunction(p.pshape1, p.pshape2, p.inType, p.outType);
        inputData = {p.inputData1, p.inputData2};
        refOutData = {p.refData};
    }
    // Encodes shapes and types into the generated gtest case name.
    static std::string getTestCaseName(const testing::TestParamInfo<AddParams>& obj) {
        const auto& p = obj.param;
        std::ostringstream name;
        name << "shape1=" << p.pshape1 << "_"
             << "shape2=" << p.pshape2 << "_"
             << "iType=" << p.inType << "_"
             << "oType=" << p.outType;
        return name.str();
    }

private:
    // Builds the graph: Parameter, Parameter -> Add -> Result.
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
                                                    const PartialShape& input_shape2,
                                                    const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto lhs = std::make_shared<op::v0::Parameter>(input_type, input_shape1);
        const auto rhs = std::make_shared<op::v0::Parameter>(input_type, input_shape2);
        const auto sum = std::make_shared<op::v1::Add>(lhs, rhs);
        return std::make_shared<Function>(NodeVector{sum}, ParameterVector{lhs, rhs});
    }
};
// Fixture exercising a chain of Adds where each node feeds both of its own
// inputs; the plugin's in-place optimizations must still produce 8 * (a + b).
class ReferenceAddInPlaceLayerTest : public testing::TestWithParam<AddParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& p = GetParam();
        function = CreateFunction(p.pshape1, p.pshape2, p.inType, p.outType);
        inputData = {p.inputData1, p.inputData2};
        refOutData = {p.refData};
    }
    // Encodes shapes and types into the generated gtest case name.
    static std::string getTestCaseName(const testing::TestParamInfo<AddParams>& obj) {
        const auto& p = obj.param;
        std::ostringstream name;
        name << "shape1=" << p.pshape1 << "_"
             << "shape2=" << p.pshape2 << "_"
             << "iType=" << p.inType << "_"
             << "oType=" << p.outType;
        return name.str();
    }

private:
    // Graph: r0 = a + b, then three self-additions: r_{i+1} = r_i + r_i.
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
                                                    const PartialShape& input_shape2,
                                                    const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto lhs = std::make_shared<op::v0::Parameter>(input_type, input_shape1);
        const auto rhs = std::make_shared<op::v0::Parameter>(input_type, input_shape2);
        std::shared_ptr<Node> node = std::make_shared<op::v1::Add>(lhs, rhs);
        for (int doubling = 0; doubling < 3; ++doubling) {
            node = std::make_shared<op::v1::Add>(node, node);
        }
        return std::make_shared<Function>(NodeVector{node}, ParameterVector{lhs, rhs});
    }
};
// Runs each plain-Add parameter set through the reference executor.
TEST_P(ReferenceAddLayerTest, AddWithHardcodedRefs) {
Exec();
}
// Runs each in-place-chain parameter set through the reference executor.
TEST_P(ReferenceAddInPlaceLayerTest, AddWithHardcodedRefs) {
Exec();
}
template <element::Type_t IN_ET>
std::vector<AddParams> generateParamsForAdd() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<AddParams> params{
AddParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{1, 2, 3, 4},
std::vector<T>{5, 6, 7, 8},
std::vector<T>{6, 8, 10, 12}),
AddParams(ov::PartialShape{1, 2},
ov::PartialShape{3, 2, 2},
IN_ET,
std::vector<T>{1, 2},
std::vector<T>{5, 6, 7, 8, 2, 3, 1, 5, 6, 7, 1, 3},
std::vector<T>{6, 8, 8, 10, 3, 5, 2, 7, 7, 9, 2, 5}),
AddParams(ov::PartialShape{1},
ov::PartialShape{1},
IN_ET,
std::vector<T>{2},
std::vector<T>{8},
std::vector<T>{10}),
AddParams(ov::PartialShape{2, 2},
ov::PartialShape{1},
IN_ET,
std::vector<T>{2, 4, 7, 8},
std::vector<T>{8},
std::vector<T>{10, 12, 15, 16}),
};
return params;
}
template <element::Type_t IN_ET>
std::vector<AddParams> generateParamsForAddInPlace() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<AddParams> params{
AddParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{1, 2, 3, 4},
std::vector<T>{5, 6, 7, 8},
std::vector<T>{48, 64, 80, 96})
};
return params;
}
std::vector<AddParams> generateCombinedParamsForAdd() {
const std::vector<std::vector<AddParams>> allTypeParams{
generateParamsForAdd<element::Type_t::f32>(),
generateParamsForAdd<element::Type_t::f16>(),
generateParamsForAdd<element::Type_t::bf16>(),
generateParamsForAdd<element::Type_t::i64>(),
generateParamsForAdd<element::Type_t::i32>(),
generateParamsForAdd<element::Type_t::i16>(),
generateParamsForAdd<element::Type_t::i8>(),
generateParamsForAdd<element::Type_t::u64>(),
generateParamsForAdd<element::Type_t::u32>(),
generateParamsForAdd<element::Type_t::u16>(),
generateParamsForAdd<element::Type_t::u8>()
};
std::vector<AddParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<AddParams> generateCombinedParamsForAddInPlace() {
const std::vector<std::vector<AddParams>> allTypeParams{
generateParamsForAddInPlace<element::Type_t::f32>(),
generateParamsForAddInPlace<element::Type_t::f16>(),
generateParamsForAddInPlace<element::Type_t::bf16>(),
generateParamsForAddInPlace<element::Type_t::i64>(),
generateParamsForAddInPlace<element::Type_t::i32>(),
generateParamsForAddInPlace<element::Type_t::i16>(),
generateParamsForAddInPlace<element::Type_t::i8>(),
generateParamsForAddInPlace<element::Type_t::u64>(),
generateParamsForAddInPlace<element::Type_t::u32>(),
generateParamsForAddInPlace<element::Type_t::u16>(),
generateParamsForAddInPlace<element::Type_t::u8>()
};
std::vector<AddParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
// Registers every Add reference case across all supported element types.
INSTANTIATE_TEST_SUITE_P(
    smoke_Add_With_Hardcoded_Refs,
    ReferenceAddLayerTest,
    ::testing::ValuesIn(generateCombinedParamsForAdd()),
    ReferenceAddLayerTest::getTestCaseName);
// Fix: the name generator must belong to the instantiated fixture — this
// previously used ReferenceAddLayerTest::getTestCaseName (copy-paste slip).
INSTANTIATE_TEST_SUITE_P(
    smoke_Add_In_Place_With_Hardcoded_Refs,
    ReferenceAddInPlaceLayerTest,
    ::testing::ValuesIn(generateCombinedParamsForAddInPlace()),
    ReferenceAddInPlaceLayerTest::getTestCaseName);
} // namespace

View File

@ -0,0 +1,362 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "base_reference_test.hpp"
#include "openvino/op/divide.hpp"
using namespace ov;
using namespace reference_tests;
namespace {
// Bundles one Divide test case: input shapes, shared element type, dividend
// and divisor value vectors, and the expected quotient.
struct DivideParams {
template <class IT>
DivideParams(const PartialShape& iShape1,
const PartialShape& iShape2,
const element::Type& iType,
const std::vector<IT>& iValues1,
const std::vector<IT>& iValues2,
const std::vector<IT>& oValues)
: pshape1(iShape1),
pshape2(iShape2),
inType(iType),
// Divide keeps the element type, so output type mirrors input type.
outType(iType),
inputData1(CreateTensor(iType, iValues1)),
inputData2(CreateTensor(iType, iValues2)),
refData(CreateTensor(iType, oValues)) {}
PartialShape pshape1;
PartialShape pshape2;
element::Type inType;
element::Type outType;
runtime::Tensor inputData1;
runtime::Tensor inputData2;
runtime::Tensor refData;
};
// Extends DivideParams with the op's pythondiv flag: true selects Python
// (floor) integer-division semantics, false selects C++ (truncating) ones.
struct DivideRoundingParams : public DivideParams {
template <class IT>
DivideRoundingParams(const PartialShape& iShape1,
const PartialShape& iShape2,
const element::Type& iType,
const std::vector<IT>& iValues1,
const std::vector<IT>& iValues2,
const std::vector<IT>& oValues,
const bool pythondiv)
: DivideParams(iShape1, iShape2, iType, iValues1, iValues2, oValues), pythonDivision(pythondiv) {}
bool pythonDivision;
};
// Parameterized fixture that executes a single Divide node (default, i.e.
// Python-rounding, mode) and compares against the hardcoded reference.
class ReferenceDivideLayerTest : public testing::TestWithParam<DivideParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& p = GetParam();
        function = CreateFunction(p.pshape1, p.pshape2, p.inType, p.outType);
        inputData = {p.inputData1, p.inputData2};
        refOutData = {p.refData};
    }
    // Encodes shapes and types into the generated gtest case name.
    static std::string getTestCaseName(const testing::TestParamInfo<DivideParams>& obj) {
        const auto& p = obj.param;
        std::ostringstream name;
        name << "iShape1=" << p.pshape1 << "_"
             << "iShape2=" << p.pshape2 << "_"
             << "iType=" << p.inType << "_"
             << "oType=" << p.outType;
        return name.str();
    }

private:
    // Builds the graph: Parameter, Parameter -> Divide -> Result.
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
                                                    const PartialShape& input_shape2,
                                                    const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto dividend = std::make_shared<op::v0::Parameter>(input_type, input_shape1);
        const auto divisor = std::make_shared<op::v0::Parameter>(input_type, input_shape2);
        const auto quotient = std::make_shared<op::v1::Divide>(dividend, divisor);
        return std::make_shared<Function>(NodeVector{quotient}, ParameterVector{dividend, divisor});
    }
};
// Variant of the Divide fixture that forwards the pythondiv flag so the
// truncating (C++) integer-rounding mode can be exercised explicitly.
class ReferenceDivideRoundingLayerTest : public testing::TestWithParam<DivideRoundingParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& p = GetParam();
        function = CreateFunction(p.pshape1, p.pshape2, p.inType, p.outType, p.pythonDivision);
        inputData = {p.inputData1, p.inputData2};
        refOutData = {p.refData};
    }
    // Encodes shapes and types into the generated gtest case name.
    static std::string getTestCaseName(const testing::TestParamInfo<DivideRoundingParams>& obj) {
        const auto& p = obj.param;
        std::ostringstream name;
        name << "iShape1=" << p.pshape1 << "_"
             << "iShape2=" << p.pshape2 << "_"
             << "iType=" << p.inType << "_"
             << "oType=" << p.outType;
        return name.str();
    }

private:
    // Builds Parameter, Parameter -> Divide(pythondiv) -> Result.
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
                                                    const PartialShape& input_shape2,
                                                    const element::Type& input_type,
                                                    const element::Type& expected_output_type,
                                                    const bool pythondiv) {
        const auto dividend = std::make_shared<op::v0::Parameter>(input_type, input_shape1);
        const auto divisor = std::make_shared<op::v0::Parameter>(input_type, input_shape2);
        const auto quotient = std::make_shared<op::v1::Divide>(dividend, divisor, pythondiv);
        return std::make_shared<Function>(NodeVector{quotient}, ParameterVector{dividend, divisor});
    }
};
// Runs each default-mode Divide parameter set through the reference executor.
TEST_P(ReferenceDivideLayerTest, DivideWithHardcodedRefs) {
Exec();
}
// Runs each explicit-rounding Divide parameter set through the executor.
TEST_P(ReferenceDivideRoundingLayerTest, DivideWithHardcodedRefs) {
Exec();
}
template <element::Type_t IN_ET>
std::vector<DivideParams> generateParamsForDivide() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<DivideParams> params{
DivideParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{2, 4, 8, 16},
std::vector<T>{1, 2, 4, 8},
std::vector<T>{2, 2, 2, 2})
};
return params;
}
template <element::Type_t IN_ET>
std::vector<DivideParams> generateParamsForDivideFloat32() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<DivideParams> params{
DivideParams(ov::PartialShape{1},
ov::PartialShape{1},
IN_ET,
std::vector<T>{18},
std::vector<T>{8},
std::vector<T>{2.25}),
DivideParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{2, 4, 8, 16},
std::vector<T>{0, 0, 0, 0},
std::vector<T>{std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity()})
};
return params;
}
template <element::Type_t IN_ET>
std::vector<DivideParams> generateParamsForDivideInt32() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<DivideParams> params{
DivideParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{0x40000140, 0x40000001, 8, 16},
std::vector<T>{2, 5, 4, 8},
std::vector<T>{536871072, 214748365, 2, 2}),
DivideParams(ov::PartialShape{1},
ov::PartialShape{1},
IN_ET,
std::vector<T>{18},
std::vector<T>{8},
std::vector<T>{2})
};
return params;
}
template <element::Type_t IN_ET>
std::vector<DivideParams> generateParamsForDivideBroadcast() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<DivideParams> params{
DivideParams(ov::PartialShape{3, 2, 1},
ov::PartialShape{1, 6},
IN_ET,
std::vector<T>{12, 24, 36, 48, 60, 72},
std::vector<T>{1, 2, 3, 4, 6, 1},
std::vector<T>{12, 6, 4, 3, 2, 12,
24, 12, 8, 6, 4, 24,
36, 18, 12, 9, 6, 36,
48, 24, 16, 12, 8, 48,
60, 30, 20, 15, 10, 60,
72, 36, 24, 18, 12, 72})
};
return params;
}
template <element::Type_t IN_ET>
std::vector<DivideParams> generateParamsForDividePythonRoundingInt32() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<DivideParams> params{
DivideParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{-10, -10, 10, 10},
std::vector<T>{-3, 3, -3, 3},
std::vector<T>{3, -4, -4, 3})
};
return params;
}
template <element::Type_t IN_ET>
std::vector<DivideRoundingParams> generateParamsForDivideCppRoundingInt32() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<DivideRoundingParams> params{
DivideRoundingParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{-10, -10, 10, 10},
std::vector<T>{-3, 3, -3, 3},
std::vector<T>{3, -3, -3, 3},
false)
};
return params;
}
std::vector<DivideParams> generateCombinedParamsForDivide() {
const std::vector<std::vector<DivideParams>> allTypeParams{
generateParamsForDivide<element::Type_t::f32>(),
generateParamsForDivide<element::Type_t::f16>(),
generateParamsForDivide<element::Type_t::bf16>(),
generateParamsForDivide<element::Type_t::i64>(),
generateParamsForDivide<element::Type_t::i32>(),
generateParamsForDivide<element::Type_t::u64>(),
generateParamsForDivide<element::Type_t::u32>()
};
std::vector<DivideParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<DivideParams> generateCombinedParamsForDivideFloat32() {
const std::vector<std::vector<DivideParams>> allTypeParams{
generateParamsForDivideFloat32<element::Type_t::f32>()
};
std::vector<DivideParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<DivideParams> generateCombinedParamsForDivideInt32() {
const std::vector<std::vector<DivideParams>> allTypeParams{
generateParamsForDivideInt32<element::Type_t::i32>()
};
std::vector<DivideParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<DivideParams> generateCombinedParamsForDivideBroadcast() {
const std::vector<std::vector<DivideParams>> allTypeParams{
generateParamsForDivideBroadcast<element::Type_t::f32>(),
generateParamsForDivideBroadcast<element::Type_t::i32>()
};
std::vector<DivideParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<DivideParams> generateCombinedParamsForDividePythonRoundingInt32() {
const std::vector<std::vector<DivideParams>> allTypeParams{
generateParamsForDividePythonRoundingInt32<element::Type_t::i32>()
};
std::vector<DivideParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<DivideRoundingParams> generateCombinedParamsForDivideCppRoundingInt32() {
const std::vector<std::vector<DivideRoundingParams>> allTypeParams{
generateParamsForDivideCppRoundingInt32<element::Type_t::i32>()
};
std::vector<DivideRoundingParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
// Registers all Divide reference suites. Every suite reuses
// ReferenceDivideLayerTest except the last, which needs the pythondiv flag
// carried by DivideRoundingParams / ReferenceDivideRoundingLayerTest.
INSTANTIATE_TEST_SUITE_P(
smoke_Divide_With_Hardcoded_Refs,
ReferenceDivideLayerTest,
::testing::ValuesIn(generateCombinedParamsForDivide()),
ReferenceDivideLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
smoke_Divide_Float32_With_Hardcoded_Refs,
ReferenceDivideLayerTest,
::testing::ValuesIn(generateCombinedParamsForDivideFloat32()),
ReferenceDivideLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
smoke_Divide_Int32_With_Hardcoded_Refs,
ReferenceDivideLayerTest,
::testing::ValuesIn(generateCombinedParamsForDivideInt32()),
ReferenceDivideLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
smoke_Divide_Broadcast_With_Hardcoded_Refs,
ReferenceDivideLayerTest,
::testing::ValuesIn(generateCombinedParamsForDivideBroadcast()),
ReferenceDivideLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
smoke_Divide_Python_Rounding_Int32_With_Hardcoded_Refs,
ReferenceDivideLayerTest,
::testing::ValuesIn(generateCombinedParamsForDividePythonRoundingInt32()),
ReferenceDivideLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
smoke_Divide_Cpp_Rounding_Int32_With_Hardcoded_Refs,
ReferenceDivideRoundingLayerTest,
::testing::ValuesIn(generateCombinedParamsForDivideCppRoundingInt32()),
ReferenceDivideRoundingLayerTest::getTestCaseName);
} // namespace

View File

@ -0,0 +1,194 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "base_reference_test.hpp"
#include "openvino/op/floor_mod.hpp"
using namespace ov;
using namespace reference_tests;
namespace {
// Bundles one FloorMod test case: input shapes, shared element type, both
// input value vectors, and the expected floor-modulus result.
struct FloorModParams {
template <class IT>
FloorModParams(const PartialShape& iShape1,
const PartialShape& iShape2,
const element::Type& iType,
const std::vector<IT>& iValues1,
const std::vector<IT>& iValues2,
const std::vector<IT>& oValues)
: pshape1(iShape1),
pshape2(iShape2),
inType(iType),
// FloorMod keeps the element type, so output type mirrors input type.
outType(iType),
inputData1(CreateTensor(iType, iValues1)),
inputData2(CreateTensor(iType, iValues2)),
refData(CreateTensor(iType, oValues)) {}
PartialShape pshape1;
PartialShape pshape2;
element::Type inType;
element::Type outType;
runtime::Tensor inputData1;
runtime::Tensor inputData2;
runtime::Tensor refData;
};
// Parameterized fixture that executes a single FloorMod node and compares
// the result against the hardcoded reference tensor.
class ReferenceFloorModLayerTest : public testing::TestWithParam<FloorModParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& p = GetParam();
        function = CreateFunction(p.pshape1, p.pshape2, p.inType, p.outType);
        inputData = {p.inputData1, p.inputData2};
        refOutData = {p.refData};
    }
    // Encodes shapes and types into the generated gtest case name.
    static std::string getTestCaseName(const testing::TestParamInfo<FloorModParams>& obj) {
        const auto& p = obj.param;
        std::ostringstream name;
        name << "iShape1=" << p.pshape1 << "_"
             << "iShape2=" << p.pshape2 << "_"
             << "iType=" << p.inType << "_"
             << "oType=" << p.outType;
        return name.str();
    }

private:
    // Builds the graph: Parameter, Parameter -> FloorMod -> Result.
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
                                                    const PartialShape& input_shape2,
                                                    const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto lhs = std::make_shared<op::v0::Parameter>(input_type, input_shape1);
        const auto rhs = std::make_shared<op::v0::Parameter>(input_type, input_shape2);
        const auto result = std::make_shared<op::v1::FloorMod>(lhs, rhs);
        return std::make_shared<Function>(NodeVector{result}, ParameterVector{lhs, rhs});
    }
};
// Runs each FloorMod parameter set through the reference executor.
// Fix: the test was named "DivideWithHardcodedRefs" — a copy-paste leftover
// from the Divide suite that produced misleading test IDs.
TEST_P(ReferenceFloorModLayerTest, FloorModWithHardcodedRefs) {
    Exec();
}
template <element::Type_t IN_ET>
std::vector<FloorModParams> generateParamsForFloorMod() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<FloorModParams> params{
FloorModParams(ov::PartialShape{4},
ov::PartialShape{4},
IN_ET,
std::vector<T>{7, -7, 7, -7},
std::vector<T>{3, 3, -3, -3},
std::vector<T>{1, 2, -2, -1})
};
return params;
}
template <element::Type_t IN_ET>
std::vector<FloorModParams> generateParamsForFloorModBroadcast() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<FloorModParams> params{
FloorModParams(ov::PartialShape{2, 1, 2},
ov::PartialShape{2, 1},
IN_ET,
std::vector<T>{1, 2, 3, 4},
std::vector<T>{2, 3},
std::vector<T>{1.0f, 0.0f, 1.0f, 2.0f, 1.0f, 0.0f, 0.0f, 1.0f}),
};
return params;
}
template <element::Type_t IN_ET>
std::vector<FloorModParams> generateParamsForFloorModScalar() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<FloorModParams> params{
FloorModParams(ov::PartialShape{1},
ov::PartialShape{1},
IN_ET,
std::vector<T>{2},
std::vector<T>{4},
std::vector<T>{2}),
};
return params;
}
std::vector<FloorModParams> generateCombinedParamsForFloorMod() {
const std::vector<std::vector<FloorModParams>> allTypeParams{
generateParamsForFloorMod<element::Type_t::f32>(),
generateParamsForFloorMod<element::Type_t::f16>(),
generateParamsForFloorMod<element::Type_t::bf16>(),
generateParamsForFloorMod<element::Type_t::i64>(),
generateParamsForFloorMod<element::Type_t::i32>(),
generateParamsForFloorMod<element::Type_t::i8>()
};
std::vector<FloorModParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<FloorModParams> generateCombinedParamsForFloorModBroadcast() {
const std::vector<std::vector<FloorModParams>> allTypeParams{
generateParamsForFloorModBroadcast<element::Type_t::f32>(),
generateParamsForFloorModBroadcast<element::Type_t::f16>()
};
std::vector<FloorModParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<FloorModParams> generateCombinedParamsForFloorModScalar() {
const std::vector<std::vector<FloorModParams>> allTypeParams{
generateParamsForFloorModScalar<element::Type_t::f32>(),
generateParamsForFloorModScalar<element::Type_t::f16>(),
generateParamsForFloorModScalar<element::Type_t::bf16>(),
generateParamsForFloorModScalar<element::Type_t::i64>(),
generateParamsForFloorModScalar<element::Type_t::i32>(),
generateParamsForFloorModScalar<element::Type_t::i8>(),
generateParamsForFloorModScalar<element::Type_t::u64>(),
generateParamsForFloorModScalar<element::Type_t::u32>(),
generateParamsForFloorModScalar<element::Type_t::u8>()
};
std::vector<FloorModParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
// Registers all FloorMod reference suites.
INSTANTIATE_TEST_SUITE_P(
smoke_FloorMod_With_Hardcoded_Refs,
ReferenceFloorModLayerTest,
::testing::ValuesIn(generateCombinedParamsForFloorMod()),
ReferenceFloorModLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
smoke_FloorMod_Broadcast_With_Hardcoded_Refs,
ReferenceFloorModLayerTest,
::testing::ValuesIn(generateCombinedParamsForFloorModBroadcast()),
ReferenceFloorModLayerTest::getTestCaseName);
// NOTE(review): "FloorMode" below looks like a typo for "FloorMod";
// renaming would change the generated test IDs, so it is left as-is.
INSTANTIATE_TEST_SUITE_P(
smoke_FloorMode_Scalar_With_Hardcoded_Refs,
ReferenceFloorModLayerTest,
::testing::ValuesIn(generateCombinedParamsForFloorModScalar()),
ReferenceFloorModLayerTest::getTestCaseName);
} // namespace

View File

@ -0,0 +1,217 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "base_reference_test.hpp"
#include "openvino/op/maximum.hpp"
using namespace ov;
using namespace reference_tests;
namespace {
// Bundles one Maximum test case: input shapes, shared element type, both
// input value vectors, and the expected elementwise maximum.
struct MaximumParams {
template <class IT>
MaximumParams(const PartialShape& iShape1,
const PartialShape& iShape2,
const element::Type& iType,
const std::vector<IT>& iValues1,
const std::vector<IT>& iValues2,
const std::vector<IT>& oValues)
: pshape1(iShape1),
pshape2(iShape2),
inType(iType),
// Maximum keeps the element type, so output type mirrors input type.
outType(iType),
inputData1(CreateTensor(iType, iValues1)),
inputData2(CreateTensor(iType, iValues2)),
refData(CreateTensor(iType, oValues)) {}
PartialShape pshape1;
PartialShape pshape2;
element::Type inType;
element::Type outType;
runtime::Tensor inputData1;
runtime::Tensor inputData2;
runtime::Tensor refData;
};
// Parameterized fixture that executes a single Maximum node and compares
// the result against the hardcoded reference tensor.
class ReferenceMaximumLayerTest : public testing::TestWithParam<MaximumParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& p = GetParam();
        function = CreateFunction(p.pshape1, p.pshape2, p.inType, p.outType);
        inputData = {p.inputData1, p.inputData2};
        refOutData = {p.refData};
    }
    // Encodes shapes and types into the generated gtest case name.
    static std::string getTestCaseName(const testing::TestParamInfo<MaximumParams>& obj) {
        const auto& p = obj.param;
        std::ostringstream name;
        name << "iShape1=" << p.pshape1 << "_"
             << "iShape2=" << p.pshape2 << "_"
             << "iType=" << p.inType << "_"
             << "oType=" << p.outType;
        return name.str();
    }

private:
    // Builds the graph: Parameter, Parameter -> Maximum -> Result.
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
                                                    const PartialShape& input_shape2,
                                                    const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto lhs = std::make_shared<op::v0::Parameter>(input_type, input_shape1);
        const auto rhs = std::make_shared<op::v0::Parameter>(input_type, input_shape2);
        const auto result = std::make_shared<op::v1::Maximum>(lhs, rhs);
        return std::make_shared<Function>(NodeVector{result}, ParameterVector{lhs, rhs});
    }
};
// Runs each Maximum parameter set through the reference executor.
TEST_P(ReferenceMaximumLayerTest, MaximumWithHardcodedRefs) {
Exec();
}
template <element::Type_t IN_ET>
std::vector<MaximumParams> generateParamsForMaximumFloat() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<MaximumParams> params{
MaximumParams(ov::PartialShape{2, 2, 2},
ov::PartialShape{2, 2, 2},
IN_ET,
std::vector<T>{1, 8, -8, 17, -0.5, 0.5, 2, 1},
std::vector<T>{1, 2, 4, 8, 0, 0, 1, 1.5},
std::vector<T>{1, 8, 4, 17, 0, 0.5, 2, 1.5})
};
return params;
}
template <element::Type_t IN_ET>
std::vector<MaximumParams> generateParamsForMaximumInt32() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<MaximumParams> params{
MaximumParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{0x40000140, 0x40000001, -8, 17},
std::vector<T>{0x40000170, 0x40000000, 4, 8},
std::vector<T>{0x40000170, 0x40000001, 4, 17})
};
return params;
}
template <element::Type_t IN_ET>
std::vector<MaximumParams> generateParamsForMaximumInt64() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<MaximumParams> params{
MaximumParams(ov::PartialShape{2, 2, 2},
ov::PartialShape{2, 2, 2},
IN_ET,
std::vector<T>{1, 8, -8, 17, -5, 67635216, 2, 17179887632},
std::vector<T>{1, 2, 4, 8, 0, 18448, 1, 28059},
std::vector<T>{1, 8, 4, 17, 0, 67635216, 2, 17179887632})
};
return params;
}
template <element::Type_t IN_ET>
std::vector<MaximumParams> generateParamsForMaximumUnsignedInt() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<MaximumParams> params{
MaximumParams(ov::PartialShape{2, 2, 2},
ov::PartialShape{2, 2, 2},
IN_ET,
std::vector<T>{1, 8, 7, 17, 5, 67635216, 2, 17179887},
std::vector<T>{1, 2, 4, 8, 0, 18448, 1, 28059},
std::vector<T>{1, 8, 7, 17, 5, 67635216, 2, 17179887})
};
return params;
}
std::vector<MaximumParams> generateCombinedParamsForMaximumFloat() {
const std::vector<std::vector<MaximumParams>> allTypeParams{
generateParamsForMaximumFloat<element::Type_t::f32>(),
generateParamsForMaximumFloat<element::Type_t::f16>()
};
std::vector<MaximumParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<MaximumParams> generateCombinedParamsForMaximumInt32() {
const std::vector<std::vector<MaximumParams>> allTypeParams{
generateParamsForMaximumInt32<element::Type_t::i32>()
};
std::vector<MaximumParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<MaximumParams> generateCombinedParamsForMaximumInt64() {
const std::vector<std::vector<MaximumParams>> allTypeParams{
generateParamsForMaximumInt64<element::Type_t::i64>()
};
std::vector<MaximumParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<MaximumParams> generateCombinedParamsForMaximumUnsignedInt() {
const std::vector<std::vector<MaximumParams>> allTypeParams{
generateParamsForMaximumUnsignedInt<element::Type_t::u64>(),
generateParamsForMaximumUnsignedInt<element::Type_t::u32>()
};
std::vector<MaximumParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
// Registers all Maximum reference suites.
INSTANTIATE_TEST_SUITE_P(
smoke_Maximum_Float_With_Hardcoded_Refs,
ReferenceMaximumLayerTest,
::testing::ValuesIn(generateCombinedParamsForMaximumFloat()),
ReferenceMaximumLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
smoke_Maximum_Int32_With_Hardcoded_Refs,
ReferenceMaximumLayerTest,
::testing::ValuesIn(generateCombinedParamsForMaximumInt32()),
ReferenceMaximumLayerTest::getTestCaseName);
// NOTE(review): "Maximume" in the two names below looks like a typo for
// "Maximum"; renaming would change the generated test IDs, so left as-is.
INSTANTIATE_TEST_SUITE_P(
smoke_Maximume_Int64_With_Hardcoded_Refs,
ReferenceMaximumLayerTest,
::testing::ValuesIn(generateCombinedParamsForMaximumInt64()),
ReferenceMaximumLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
smoke_Maximume_UnsignedInt_With_Hardcoded_Refs,
ReferenceMaximumLayerTest,
::testing::ValuesIn(generateCombinedParamsForMaximumUnsignedInt()),
ReferenceMaximumLayerTest::getTestCaseName);
} // namespace

View File

@ -0,0 +1,251 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "base_reference_test.hpp"
#include "openvino/op/mod.hpp"
using namespace ov;
using namespace reference_tests;
namespace {
// Bundles one Mod test case: input shapes, shared element type, both input
// value vectors, and the expected elementwise remainder.
struct ModParams {
template <class IT>
ModParams(const PartialShape& iShape1,
const PartialShape& iShape2,
const element::Type& iType,
const std::vector<IT>& iValues1,
const std::vector<IT>& iValues2,
const std::vector<IT>& oValues)
: pshape1(iShape1),
pshape2(iShape2),
inType(iType),
// Mod keeps the element type, so output type mirrors input type.
outType(iType),
inputData1(CreateTensor(iType, iValues1)),
inputData2(CreateTensor(iType, iValues2)),
refData(CreateTensor(iType, oValues)) {}
PartialShape pshape1;
PartialShape pshape2;
element::Type inType;
element::Type outType;
runtime::Tensor inputData1;
runtime::Tensor inputData2;
runtime::Tensor refData;
};
// Parameterized fixture that executes a single Mod node and compares the
// result against the hardcoded reference tensor.
class ReferenceModLayerTest : public testing::TestWithParam<ModParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& p = GetParam();
        function = CreateFunction(p.pshape1, p.pshape2, p.inType, p.outType);
        inputData = {p.inputData1, p.inputData2};
        refOutData = {p.refData};
    }
    // Encodes shapes and types into the generated gtest case name.
    static std::string getTestCaseName(const testing::TestParamInfo<ModParams>& obj) {
        const auto& p = obj.param;
        std::ostringstream name;
        name << "iShape1=" << p.pshape1 << "_"
             << "iShape2=" << p.pshape2 << "_"
             << "iType=" << p.inType << "_"
             << "oType=" << p.outType;
        return name.str();
    }

private:
    // Builds the graph: Parameter, Parameter -> Mod -> Result.
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
                                                    const PartialShape& input_shape2,
                                                    const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto lhs = std::make_shared<op::v0::Parameter>(input_type, input_shape1);
        const auto rhs = std::make_shared<op::v0::Parameter>(input_type, input_shape2);
        const auto remainder = std::make_shared<op::v1::Mod>(lhs, rhs);
        return std::make_shared<Function>(NodeVector{remainder}, ParameterVector{lhs, rhs});
    }
};
// Variant of the Mod fixture whose graph chains the op with itself,
// i.e. computes Mod(Mod(a, b), Mod(a, b)).
class ReferenceModInPlaceLayerTest : public testing::TestWithParam<ModParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& p = GetParam();
        function = CreateFunction(p.pshape1, p.pshape2, p.inType, p.outType);
        inputData = {p.inputData1, p.inputData2};
        refOutData = {p.refData};
    }

    // Encodes the case's shapes and element types into the gtest name.
    static std::string getTestCaseName(const testing::TestParamInfo<ModParams>& obj) {
        const auto& p = obj.param;
        std::ostringstream name;
        name << "iShape1=" << p.pshape1 << "_"
             << "iShape2=" << p.pshape2 << "_"
             << "iType=" << p.inType << "_"
             << "oType=" << p.outType;
        return name.str();
    }

private:
    // Function computing Mod(first, first) where first = Mod(lhs, rhs).
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
                                                    const PartialShape& input_shape2,
                                                    const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto lhs = std::make_shared<op::v0::Parameter>(input_type, input_shape1);
        const auto rhs = std::make_shared<op::v0::Parameter>(input_type, input_shape2);
        const auto first = std::make_shared<op::v1::Mod>(lhs, rhs);
        const auto chained = std::make_shared<op::v1::Mod>(first, first);
        return std::make_shared<Function>(NodeVector{chained}, ParameterVector{lhs, rhs});
    }
};
// Runs every parameterized Mod case against its hard-coded reference output.
TEST_P(ReferenceModLayerTest, ModWithHardcodedRefs) {
    Exec();
}

// Runs every chained Mod(mod, mod) case against its hard-coded reference output.
TEST_P(ReferenceModInPlaceLayerTest, ModWithHardcodedRefs) {
    Exec();
}
template <element::Type_t IN_ET>
std::vector<ModParams> generateParamsForMod() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<ModParams> params{
ModParams(ov::PartialShape{1, 2},
ov::PartialShape{1, 2},
IN_ET,
std::vector<T>{256, 56},
std::vector<T>{256, 56},
std::vector<T>{0, 0}),
ModParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{256, 56, 21, 14},
std::vector<T>{112, 56, 6, 8},
std::vector<T>{32, 0, 3, 6}),
ModParams(ov::PartialShape{1, 2},
ov::PartialShape{3, 2, 2},
IN_ET,
std::vector<T>{1, 2},
std::vector<T>{5, 6, 7, 8, 2, 3, 1, 5, 6, 7, 1, 3},
std::vector<T>{1, 2, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2}),
ModParams(ov::PartialShape{1},
ov::PartialShape{1},
IN_ET,
std::vector<T>{57},
std::vector<T>{13},
std::vector<T>{5}),
ModParams(ov::PartialShape{2, 2},
ov::PartialShape{1},
IN_ET,
std::vector<T>{2, 4, 7, 8},
std::vector<T>{8},
std::vector<T>{2, 4, 7, 0})
};
return params;
}
template <element::Type_t IN_ET>
std::vector<ModParams> generateParamsForModNegative() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<ModParams> params{
ModParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{-57, -14, -12, -6},
std::vector<T>{13, -7, 5, -5},
std::vector<T>{-5, 0, -2, -1})
};
return params;
}
template <element::Type_t IN_ET>
std::vector<ModParams> generateParamsForModInPlace() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<ModParams> params{
ModParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{1, 2, 3, 4},
std::vector<T>{5, 6, 7, 8},
std::vector<T>{0, 0, 0, 0})
};
return params;
}
std::vector<ModParams> generateCombinedParamsForMod() {
const std::vector<std::vector<ModParams>> allTypeParams{
generateParamsForMod<element::Type_t::f32>(),
generateParamsForMod<element::Type_t::f16>(),
generateParamsForMod<element::Type_t::i64>(),
generateParamsForMod<element::Type_t::i32>()
};
std::vector<ModParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<ModParams> generateCombinedParamsForModNegative() {
const std::vector<std::vector<ModParams>> allTypeParams{
generateParamsForModNegative<element::Type_t::f32>(),
generateParamsForModNegative<element::Type_t::f16>(),
generateParamsForModNegative<element::Type_t::i64>(),
generateParamsForModNegative<element::Type_t::i32>()
};
std::vector<ModParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<ModParams> generateCombinedParamsForModInPlace() {
const std::vector<std::vector<ModParams>> allTypeParams{
generateParamsForModInPlace<element::Type_t::f32>(),
generateParamsForModInPlace<element::Type_t::f16>(),
generateParamsForModInPlace<element::Type_t::i64>(),
generateParamsForModInPlace<element::Type_t::i32>()
};
std::vector<ModParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
// Registers the standard Mod cases.
INSTANTIATE_TEST_SUITE_P(
    smoke_Mod_With_Hardcoded_Refs,
    ReferenceModLayerTest,
    ::testing::ValuesIn(generateCombinedParamsForMod()),
    ReferenceModLayerTest::getTestCaseName);

// Registers the negative-operand cases; they reuse the plain Mod fixture
// since only the input data differs.
INSTANTIATE_TEST_SUITE_P(
    smoke_Mod_Negative_With_Hardcoded_Refs,
    ReferenceModLayerTest,
    ::testing::ValuesIn(generateCombinedParamsForModNegative()),
    ReferenceModLayerTest::getTestCaseName);

// Registers the chained Mod(mod, mod) cases on the in-place fixture.
INSTANTIATE_TEST_SUITE_P(
    smoke_Mod_InPlace_With_Hardcoded_Refs,
    ReferenceModInPlaceLayerTest,
    ::testing::ValuesIn(generateCombinedParamsForModInPlace()),
    ReferenceModInPlaceLayerTest::getTestCaseName);
} // namespace

View File

@ -0,0 +1,166 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "base_reference_test.hpp"
#include "openvino/op/multiply.hpp"
using namespace ov;
using namespace reference_tests;
namespace {
// Parameter bundle for one Multiply reference test case: operand shapes,
// shared element type, raw operand values, and expected output values.
struct MultiplyParams {
    // Builds the input/reference tensors from plain std::vectors of IT.
    template <class IT>
    MultiplyParams(const PartialShape& iShape1,
                   const PartialShape& iShape2,
                   const element::Type& iType,
                   const std::vector<IT>& iValues1,
                   const std::vector<IT>& iValues2,
                   const std::vector<IT>& oValues)
        : pshape1(iShape1),
          pshape2(iShape2),
          inType(iType),
          outType(iType),  // output element type always equals the input type here
          inputData1(CreateTensor(iType, iValues1)),
          inputData2(CreateTensor(iType, iValues2)),
          refData(CreateTensor(iType, oValues)) {}

    PartialShape pshape1;        // shape of the first operand
    PartialShape pshape2;        // shape of the second operand
    element::Type inType;        // element type of both inputs
    element::Type outType;       // element type of the output (== inType)
    runtime::Tensor inputData1;  // first operand values
    runtime::Tensor inputData2;  // second operand values
    runtime::Tensor refData;     // expected output values
};
// Parameterized fixture for a single Multiply node: builds the tested
// Function from the case's shapes/types and compares against the refs.
class ReferenceMultiplyLayerTest : public testing::TestWithParam<MultiplyParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& p = GetParam();
        function = CreateFunction(p.pshape1, p.pshape2, p.inType, p.outType);
        inputData = {p.inputData1, p.inputData2};
        refOutData = {p.refData};
    }

    // Encodes the case's shapes and element types into the gtest name.
    static std::string getTestCaseName(const testing::TestParamInfo<MultiplyParams>& obj) {
        const auto& p = obj.param;
        std::ostringstream name;
        name << "iShape1=" << p.pshape1 << "_"
             << "iShape2=" << p.pshape2 << "_"
             << "iType=" << p.inType << "_"
             << "oType=" << p.outType;
        return name.str();
    }

private:
    // Function computing Multiply(lhs, rhs) over two Parameter inputs.
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
                                                    const PartialShape& input_shape2,
                                                    const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto lhs = std::make_shared<op::v0::Parameter>(input_type, input_shape1);
        const auto rhs = std::make_shared<op::v0::Parameter>(input_type, input_shape2);
        const auto product = std::make_shared<op::v1::Multiply>(lhs, rhs);
        return std::make_shared<Function>(NodeVector{product}, ParameterVector{lhs, rhs});
    }
};
// Runs every parameterized Multiply case against its hard-coded reference output.
TEST_P(ReferenceMultiplyLayerTest, MultiplyWithHardcodedRefs) {
    Exec();
}
template <element::Type_t IN_ET>
std::vector<MultiplyParams> generateParamsForMultiply() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<MultiplyParams> params{
MultiplyParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{1, 2, 3, 4},
std::vector<T>{5, 6, 7, 8},
std::vector<T>{5, 12, 21, 32}),
MultiplyParams(ov::PartialShape{3, 2, 1},
ov::PartialShape{1, 6},
IN_ET,
std::vector<T>{12, 24, 36, 48, 60, 72},
std::vector<T>{1, 2, 3, 4, 6, 1},
std::vector<T>{12, 24, 36, 48, 72, 12, 24, 48, 72, 96, 144, 24,
36, 72, 108, 144, 216, 36, 48, 96, 144, 192, 288, 48,
60, 120, 180, 240, 360, 60, 72, 144, 216, 288, 432, 72}),
MultiplyParams(ov::PartialShape{1},
ov::PartialShape{1},
IN_ET,
std::vector<T>{2},
std::vector<T>{8},
std::vector<T>{16})
};
return params;
}
template <element::Type_t IN_ET>
std::vector<MultiplyParams> generateParamsForMultiplyFloat() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<MultiplyParams> params{
MultiplyParams(ov::PartialShape{1},
ov::PartialShape{1},
IN_ET,
std::vector<T>{3.1},
std::vector<T>{8},
std::vector<T>{24.8})
};
return params;
}
std::vector<MultiplyParams> generateCombinedParamsForMultiply() {
const std::vector<std::vector<MultiplyParams>> allTypeParams{
generateParamsForMultiply<element::Type_t::f32>(),
generateParamsForMultiply<element::Type_t::f16>(),
generateParamsForMultiply<element::Type_t::bf16>(),
generateParamsForMultiply<element::Type_t::i64>(),
generateParamsForMultiply<element::Type_t::i32>(),
generateParamsForMultiply<element::Type_t::u64>(),
generateParamsForMultiply<element::Type_t::u32>()
};
std::vector<MultiplyParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<MultiplyParams> generateCombinedParamsForMultiplyFloat() {
const std::vector<std::vector<MultiplyParams>> allTypeParams{
generateParamsForMultiplyFloat<element::Type_t::f32>(),
generateParamsForMultiplyFloat<element::Type_t::f16>()
};
std::vector<MultiplyParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
// Registers the standard Multiply cases.
INSTANTIATE_TEST_SUITE_P(
    smoke_Multiply_With_Hardcoded_Refs,
    ReferenceMultiplyLayerTest,
    ::testing::ValuesIn(generateCombinedParamsForMultiply()),
    ReferenceMultiplyLayerTest::getTestCaseName);

// Registers the floating-point-only Multiply cases on the same fixture.
INSTANTIATE_TEST_SUITE_P(
    smoke_Multiply_Float_With_Hardcoded_Refs,
    ReferenceMultiplyLayerTest,
    ::testing::ValuesIn(generateCombinedParamsForMultiplyFloat()),
    ReferenceMultiplyLayerTest::getTestCaseName);
} // namespace

View File

@ -0,0 +1,134 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "base_reference_test.hpp"
#include "openvino/op/power.hpp"
using namespace ov;
using namespace reference_tests;
namespace {
// Parameter bundle for one Power reference test case: operand shapes,
// shared element type, raw operand values, and expected output values.
struct PowerParams {
    // Builds the input/reference tensors from plain std::vectors of IT.
    template <class IT>
    PowerParams(const PartialShape& iShape1,
                const PartialShape& iShape2,
                const element::Type& iType,
                const std::vector<IT>& iValues1,
                const std::vector<IT>& iValues2,
                const std::vector<IT>& oValues)
        : pshape1(iShape1),
          pshape2(iShape2),
          inType(iType),
          outType(iType),  // output element type always equals the input type here
          inputData1(CreateTensor(iType, iValues1)),
          inputData2(CreateTensor(iType, iValues2)),
          refData(CreateTensor(iType, oValues)) {}

    PartialShape pshape1;        // shape of the base operand
    PartialShape pshape2;        // shape of the exponent operand
    element::Type inType;        // element type of both inputs
    element::Type outType;       // element type of the output (== inType)
    runtime::Tensor inputData1;  // base values
    runtime::Tensor inputData2;  // exponent values
    runtime::Tensor refData;     // expected output values
};
// Parameterized fixture for a single Power node: builds the tested Function
// from the case's shapes/types and compares against the refs.
class ReferencePowerLayerTest : public testing::TestWithParam<PowerParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& p = GetParam();
        function = CreateFunction(p.pshape1, p.pshape2, p.inType, p.outType);
        inputData = {p.inputData1, p.inputData2};
        refOutData = {p.refData};
    }

    // Encodes the case's shapes and element types into the gtest name.
    static std::string getTestCaseName(const testing::TestParamInfo<PowerParams>& obj) {
        const auto& p = obj.param;
        std::ostringstream name;
        name << "iShape1=" << p.pshape1 << "_"
             << "iShape2=" << p.pshape2 << "_"
             << "iType=" << p.inType << "_"
             << "oType=" << p.outType;
        return name.str();
    }

private:
    // Function computing Power(base, exponent) over two Parameter inputs.
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
                                                    const PartialShape& input_shape2,
                                                    const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto base = std::make_shared<op::v0::Parameter>(input_type, input_shape1);
        const auto exponent = std::make_shared<op::v0::Parameter>(input_type, input_shape2);
        const auto power_node = std::make_shared<op::v1::Power>(base, exponent);
        return std::make_shared<Function>(NodeVector{power_node}, ParameterVector{base, exponent});
    }
};
// Runs every parameterized Power case against its hard-coded reference output.
TEST_P(ReferencePowerLayerTest, PowerWithHardcodedRefs) {
    Exec();
}
template <element::Type_t IN_ET>
std::vector<PowerParams> generateParamsForPower() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<PowerParams> params{
PowerParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{1, 2, 3, 5},
std::vector<T>{2, 0, 6, 3},
std::vector<T>{1, 1, 729, 125}),
PowerParams(ov::PartialShape{2, 1, 5},
ov::PartialShape{2, 1},
IN_ET,
std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
std::vector<T>{1, 2},
std::vector<T>{1, 2, 3, 4, 5, 1, 4, 9, 16, 25, 6, 7, 8, 9, 10, 36, 49, 64, 81, 100}),
PowerParams(ov::PartialShape{1},
ov::PartialShape{1},
IN_ET,
std::vector<T>{2},
std::vector<T>{3},
std::vector<T>{8}),
PowerParams(ov::PartialShape{2, 2},
ov::PartialShape{1},
IN_ET,
std::vector<T>{2, 3, 4, 5},
std::vector<T>{2},
std::vector<T>{4, 9, 16, 25})
};
return params;
}
std::vector<PowerParams> generateCombinedParamsForPower() {
const std::vector<std::vector<PowerParams>> allTypeParams{
generateParamsForPower<element::Type_t::f32>(),
generateParamsForPower<element::Type_t::f16>(),
generateParamsForPower<element::Type_t::bf16>(),
generateParamsForPower<element::Type_t::i64>(),
generateParamsForPower<element::Type_t::i32>(),
generateParamsForPower<element::Type_t::u64>(),
generateParamsForPower<element::Type_t::u32>()
};
std::vector<PowerParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
// Registers the Power cases across all tested element types.
INSTANTIATE_TEST_SUITE_P(
    smoke_Power_With_Hardcoded_Refs,
    ReferencePowerLayerTest,
    ::testing::ValuesIn(generateCombinedParamsForPower()),
    ReferencePowerLayerTest::getTestCaseName);
} // namespace

View File

@ -0,0 +1,212 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "base_reference_test.hpp"
#include "openvino/op/squared_difference.hpp"
using namespace ov;
using namespace reference_tests;
namespace {
// Parameter bundle for one SquaredDifference reference test case: operand
// shapes, shared element type, raw operand values, and expected outputs.
struct SquaredDifferenceParams {
    // Builds the input/reference tensors from plain std::vectors of IT.
    template <class IT>
    SquaredDifferenceParams(const PartialShape& iShape1,
                            const PartialShape& iShape2,
                            const element::Type& iType,
                            const std::vector<IT>& iValues1,
                            const std::vector<IT>& iValues2,
                            const std::vector<IT>& oValues)
        : pshape1(iShape1),
          pshape2(iShape2),
          inType(iType),
          outType(iType),  // output element type always equals the input type here
          inputData1(CreateTensor(iType, iValues1)),
          inputData2(CreateTensor(iType, iValues2)),
          refData(CreateTensor(iType, oValues)) {}

    PartialShape pshape1;        // shape of the first operand
    PartialShape pshape2;        // shape of the second operand
    element::Type inType;        // element type of both inputs
    element::Type outType;       // element type of the output (== inType)
    runtime::Tensor inputData1;  // first operand values
    runtime::Tensor inputData2;  // second operand values
    runtime::Tensor refData;     // expected output values
};
// Parameterized fixture for a single SquaredDifference node: builds the
// tested Function from the case's shapes/types and compares against refs.
class ReferenceSquaredDifferenceLayerTest : public testing::TestWithParam<SquaredDifferenceParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& p = GetParam();
        function = CreateFunction(p.pshape1, p.pshape2, p.inType, p.outType);
        inputData = {p.inputData1, p.inputData2};
        refOutData = {p.refData};
    }

    // Encodes the case's shapes and element types into the gtest name.
    static std::string getTestCaseName(const testing::TestParamInfo<SquaredDifferenceParams>& obj) {
        const auto& p = obj.param;
        std::ostringstream name;
        name << "iShape1=" << p.pshape1 << "_"
             << "iShape2=" << p.pshape2 << "_"
             << "iType=" << p.inType << "_"
             << "oType=" << p.outType;
        return name.str();
    }

private:
    // Function computing SquaredDifference(lhs, rhs) over two Parameter inputs.
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
                                                    const PartialShape& input_shape2,
                                                    const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto lhs = std::make_shared<op::v0::Parameter>(input_type, input_shape1);
        const auto rhs = std::make_shared<op::v0::Parameter>(input_type, input_shape2);
        const auto sq_diff = std::make_shared<op::v0::SquaredDifference>(lhs, rhs);
        return std::make_shared<Function>(NodeVector{sq_diff}, ParameterVector{lhs, rhs});
    }
};
// Variant of the SquaredDifference fixture whose graph chains the op with
// itself, i.e. computes SquaredDifference(d, d) with d = SquaredDifference(a, b).
class ReferenceSquaredDifferenceInPlaceLayerTest : public testing::TestWithParam<SquaredDifferenceParams>,
                                                   public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& p = GetParam();
        function = CreateFunction(p.pshape1, p.pshape2, p.inType, p.outType);
        inputData = {p.inputData1, p.inputData2};
        refOutData = {p.refData};
    }

    // Encodes the case's shapes and element types into the gtest name.
    static std::string getTestCaseName(const testing::TestParamInfo<SquaredDifferenceParams>& obj) {
        const auto& p = obj.param;
        std::ostringstream name;
        name << "iShape1=" << p.pshape1 << "_"
             << "iShape2=" << p.pshape2 << "_"
             << "iType=" << p.inType << "_"
             << "oType=" << p.outType;
        return name.str();
    }

private:
    // Function computing SquaredDifference(first, first),
    // where first = SquaredDifference(lhs, rhs).
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
                                                    const PartialShape& input_shape2,
                                                    const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto lhs = std::make_shared<op::v0::Parameter>(input_type, input_shape1);
        const auto rhs = std::make_shared<op::v0::Parameter>(input_type, input_shape2);
        const auto first = std::make_shared<op::v0::SquaredDifference>(lhs, rhs);
        const auto chained = std::make_shared<op::v0::SquaredDifference>(first, first);
        return std::make_shared<Function>(NodeVector{chained}, ParameterVector{lhs, rhs});
    }
};
// Runs every parameterized SquaredDifference case against its hard-coded refs.
TEST_P(ReferenceSquaredDifferenceLayerTest, SquaredDifferenceWithHardcodedRefs) {
    Exec();
}

// Runs every chained SquaredDifference case against its hard-coded refs.
TEST_P(ReferenceSquaredDifferenceInPlaceLayerTest, SquaredDifferenceWithHardcodedRefs) {
    Exec();
}
template <element::Type_t IN_ET>
std::vector<SquaredDifferenceParams> generateParamsForSquaredDifference() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<SquaredDifferenceParams> params{
SquaredDifferenceParams(ov::PartialShape{1, 2},
ov::PartialShape{1, 2},
IN_ET,
std::vector<T>{256, 56},
std::vector<T>{256, 56},
std::vector<T>{0, 0}),
SquaredDifferenceParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{256, 56, -21, -14},
std::vector<T>{-112, 56, 6, -8},
std::vector<T>{135424, 0, 729, 36}),
SquaredDifferenceParams(ov::PartialShape{1, 2},
ov::PartialShape{3, 2, 2},
IN_ET,
std::vector<T>{1, 2},
std::vector<T>{5, 6, 7, 8, 2, 3, 1, 5, 6, 7, 1, 3},
std::vector<T>{16, 16, 36, 36, 1, 1, 0, 9, 25, 25, 0, 1}),
SquaredDifferenceParams(ov::PartialShape{1},
ov::PartialShape{1},
IN_ET,
std::vector<T>{57},
std::vector<T>{13},
std::vector<T>{1936}),
SquaredDifferenceParams(ov::PartialShape{2, 2},
ov::PartialShape{1},
IN_ET,
std::vector<T>{2, 4, 7, 8},
std::vector<T>{8},
std::vector<T>{36, 16, 1, 0})
};
return params;
}
template <element::Type_t IN_ET>
std::vector<SquaredDifferenceParams> generateParamsForSquaredDifferenceInPlace() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<SquaredDifferenceParams> params{
SquaredDifferenceParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{1, 2, 3, 4},
std::vector<T>{5, 6, 7, 8},
std::vector<T>{0, 0, 0, 0})
};
return params;
}
std::vector<SquaredDifferenceParams> generateCombinedParamsForSquaredDifference() {
const std::vector<std::vector<SquaredDifferenceParams>> allTypeParams{
generateParamsForSquaredDifference<element::Type_t::f32>(),
generateParamsForSquaredDifference<element::Type_t::f16>(),
generateParamsForSquaredDifference<element::Type_t::i64>(),
generateParamsForSquaredDifference<element::Type_t::i32>()
};
std::vector<SquaredDifferenceParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<SquaredDifferenceParams> generateCombinedParamsForSquaredDifferenceInPlace() {
const std::vector<std::vector<SquaredDifferenceParams>> allTypeParams{
generateParamsForSquaredDifferenceInPlace<element::Type_t::f32>(),
generateParamsForSquaredDifferenceInPlace<element::Type_t::f16>(),
generateParamsForSquaredDifferenceInPlace<element::Type_t::i64>(),
generateParamsForSquaredDifferenceInPlace<element::Type_t::i32>()};
std::vector<SquaredDifferenceParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
// Registers the standard SquaredDifference cases.
INSTANTIATE_TEST_SUITE_P(
    smoke_SquaredDifference_With_Hardcoded_Refs,
    ReferenceSquaredDifferenceLayerTest,
    ::testing::ValuesIn(generateCombinedParamsForSquaredDifference()),
    ReferenceSquaredDifferenceLayerTest::getTestCaseName);

// Registers the chained SquaredDifference cases on the in-place fixture.
INSTANTIATE_TEST_SUITE_P(
    smoke_SquaredDifferenceInPlace_With_Hardcoded_Refs,
    ReferenceSquaredDifferenceInPlaceLayerTest,
    ::testing::ValuesIn(generateCombinedParamsForSquaredDifferenceInPlace()),
    ReferenceSquaredDifferenceInPlaceLayerTest::getTestCaseName);
} // namespace

View File

@ -0,0 +1,169 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "base_reference_test.hpp"
#include "openvino/op/subtract.hpp"
using namespace ov;
using namespace reference_tests;
namespace {
// Parameter bundle for one Subtract reference test case: operand shapes,
// shared element type, raw operand values, and expected output values.
struct SubtractParams {
    // Builds the input/reference tensors from plain std::vectors of IT.
    template <class IT>
    SubtractParams(const PartialShape& iShape1,
                   const PartialShape& iShape2,
                   const element::Type& iType,
                   const std::vector<IT>& iValues1,
                   const std::vector<IT>& iValues2,
                   const std::vector<IT>& oValues)
        : pshape1(iShape1),
          pshape2(iShape2),
          inType(iType),
          outType(iType),  // output element type always equals the input type here
          inputData1(CreateTensor(iType, iValues1)),
          inputData2(CreateTensor(iType, iValues2)),
          refData(CreateTensor(iType, oValues)) {}

    PartialShape pshape1;        // shape of the minuend
    PartialShape pshape2;        // shape of the subtrahend
    element::Type inType;        // element type of both inputs
    element::Type outType;       // element type of the output (== inType)
    runtime::Tensor inputData1;  // minuend values
    runtime::Tensor inputData2;  // subtrahend values
    runtime::Tensor refData;     // expected output values
};
// Parameterized fixture for a single Subtract node: builds the tested
// Function from the case's shapes/types and compares against the refs.
class ReferenceSubtractLayerTest : public testing::TestWithParam<SubtractParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& p = GetParam();
        function = CreateFunction(p.pshape1, p.pshape2, p.inType, p.outType);
        inputData = {p.inputData1, p.inputData2};
        refOutData = {p.refData};
    }

    // Encodes the case's shapes and element types into the gtest name.
    static std::string getTestCaseName(const testing::TestParamInfo<SubtractParams>& obj) {
        const auto& p = obj.param;
        std::ostringstream name;
        name << "iShape1=" << p.pshape1 << "_"
             << "iShape2=" << p.pshape2 << "_"
             << "iType=" << p.inType << "_"
             << "oType=" << p.outType;
        return name.str();
    }

private:
    // Function computing Subtract(lhs, rhs) over two Parameter inputs.
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
                                                    const PartialShape& input_shape2,
                                                    const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto lhs = std::make_shared<op::v0::Parameter>(input_type, input_shape1);
        const auto rhs = std::make_shared<op::v0::Parameter>(input_type, input_shape2);
        const auto difference = std::make_shared<op::v1::Subtract>(lhs, rhs);
        return std::make_shared<Function>(NodeVector{difference}, ParameterVector{lhs, rhs});
    }
};
// Runs every parameterized Subtract case against its hard-coded reference output.
TEST_P(ReferenceSubtractLayerTest, SubtractWithHardcodedRefs) {
    Exec();
}
template <element::Type_t IN_ET>
std::vector<SubtractParams> generateParamsForSubtract() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<SubtractParams> params{
SubtractParams(ov::PartialShape{2, 2},
ov::PartialShape{2, 2},
IN_ET,
std::vector<T>{2, 4, 8, 16},
std::vector<T>{1, 2, 4, 8},
std::vector<T>{1, 2, 4, 8}),
SubtractParams(ov::PartialShape{3, 2, 1},
ov::PartialShape{1, 6},
IN_ET,
std::vector<T>{12, 24, 36, 48, 60, 72},
std::vector<T>{1, 2, 3, 4, 6, 1},
std::vector<T>{11, 10, 9, 8, 6, 11,
23, 22, 21, 20, 18, 23,
35, 34, 33, 32, 30, 35,
47, 46, 45, 44, 42, 47,
59, 58, 57, 56, 54, 59,
71, 70, 69, 68, 66, 71}),
SubtractParams(ov::PartialShape{1},
ov::PartialShape{1},
IN_ET,
std::vector<T>{8},
std::vector<T>{2},
std::vector<T>{6})
};
return params;
}
template <element::Type_t IN_ET>
std::vector<SubtractParams> generateParamsForSubtractFloat() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<SubtractParams> params{
SubtractParams(ov::PartialShape{1},
ov::PartialShape{1},
IN_ET,
std::vector<T>{3.1},
std::vector<T>{8},
std::vector<T>{-4.9})
};
return params;
}
std::vector<SubtractParams> generateCombinedParamsForSubtract() {
const std::vector<std::vector<SubtractParams>> allTypeParams{
generateParamsForSubtract<element::Type_t::f32>(),
generateParamsForSubtract<element::Type_t::f16>(),
generateParamsForSubtract<element::Type_t::bf16>(),
generateParamsForSubtract<element::Type_t::i64>(),
generateParamsForSubtract<element::Type_t::i32>(),
generateParamsForSubtract<element::Type_t::u64>(),
generateParamsForSubtract<element::Type_t::u32>()
};
std::vector<SubtractParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
std::vector<SubtractParams> generateCombinedParamsForSubtractFloat() {
const std::vector<std::vector<SubtractParams>> allTypeParams{
generateParamsForSubtractFloat<element::Type_t::f32>(),
generateParamsForSubtractFloat<element::Type_t::f16>()
};
std::vector<SubtractParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
INSTANTIATE_TEST_SUITE_P(
smoke_Subtract_With_Hardcoded_Refs,
ReferenceSubtractLayerTest,
::testing::ValuesIn(generateCombinedParamsForSubtract()),
ReferenceSubtractLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
smoke_Subtract_Float_With_Hardcoded_Refs,
ReferenceSubtractLayerTest,
::testing::ValuesIn(generateCombinedParamsForSubtractFloat()),
ReferenceSubtractLayerTest::getTestCaseName);
} // namespace

View File

@ -101,6 +101,7 @@ set(SRC
type_prop/acos.cpp type_prop/acos.cpp
type_prop/adaptive_avg_pool.cpp type_prop/adaptive_avg_pool.cpp
type_prop/adaptive_max_pool.cpp type_prop/adaptive_max_pool.cpp
type_prop/add.cpp
type_prop/asin.cpp type_prop/asin.cpp
type_prop/asinh.cpp type_prop/asinh.cpp
type_prop/assign.cpp type_prop/assign.cpp
@ -285,6 +286,7 @@ set(SRC
visitors/op/deformable_psroi_pooling.cpp visitors/op/deformable_psroi_pooling.cpp
visitors/op/depth_to_space.cpp visitors/op/depth_to_space.cpp
visitors/op/detection_output.cpp visitors/op/detection_output.cpp
visitors/op/divide.cpp
visitors/op/einsum.cpp visitors/op/einsum.cpp
visitors/op/elu.cpp visitors/op/elu.cpp
visitors/op/equal.cpp visitors/op/equal.cpp
@ -451,7 +453,6 @@ set(MULTI_TEST_SRC
backend/abs.in.cpp backend/abs.in.cpp
backend/adaptive_avg_pool.in.cpp backend/adaptive_avg_pool.in.cpp
backend/adaptive_max_pool.in.cpp backend/adaptive_max_pool.in.cpp
backend/add.in.cpp
backend/aliased_output.in.cpp backend/aliased_output.in.cpp
backend/api.in.cpp backend/api.in.cpp
backend/auto_broadcast.in.cpp backend/auto_broadcast.in.cpp
@ -468,7 +469,6 @@ set(MULTI_TEST_SRC
backend/deformable_psroi_pooling.in.cpp backend/deformable_psroi_pooling.in.cpp
backend/detection_output.in.cpp backend/detection_output.in.cpp
backend/dft.in.cpp backend/dft.in.cpp
backend/divide.in.cpp
backend/depth_to_space.in.cpp backend/depth_to_space.in.cpp
backend/dyn_reshape.in.cpp backend/dyn_reshape.in.cpp
backend/experimental_detectron_generate_proposals.in.cpp backend/experimental_detectron_generate_proposals.in.cpp
@ -479,7 +479,6 @@ set(MULTI_TEST_SRC
backend/experimental_detectron_prior_grid.in.cpp backend/experimental_detectron_prior_grid.in.cpp
backend/fake_quantize.in.cpp backend/fake_quantize.in.cpp
backend/floor.in.cpp backend/floor.in.cpp
backend/floor_mod.in.cpp
backend/function_name.in.cpp backend/function_name.in.cpp
backend/gather.in.cpp backend/gather.in.cpp
backend/gather_elements.in.cpp backend/gather_elements.in.cpp
@ -490,13 +489,10 @@ set(MULTI_TEST_SRC
backend/lrn.in.cpp backend/lrn.in.cpp
backend/matmul.in.cpp backend/matmul.in.cpp
backend/matrix_nms.in.cpp backend/matrix_nms.in.cpp
backend/maximum.in.cpp
backend/max_pool.in.cpp backend/max_pool.in.cpp
backend/mod.in.cpp
backend/multiclass_nms.in.cpp backend/multiclass_nms.in.cpp
backend/multiple_backends.in.cpp backend/multiple_backends.in.cpp
backend/multiple_result.in.cpp backend/multiple_result.in.cpp
backend/multiply.in.cpp
backend/negative.in.cpp backend/negative.in.cpp
backend/node_name.in.cpp backend/node_name.in.cpp
backend/normalize_l2.in.cpp backend/normalize_l2.in.cpp
@ -504,7 +500,6 @@ set(MULTI_TEST_SRC
backend/one_hot.in.cpp backend/one_hot.in.cpp
backend/pad.in.cpp backend/pad.in.cpp
backend/parameter_as_output.in.cpp backend/parameter_as_output.in.cpp
backend/power.in.cpp
backend/prior_box_clustered.in.cpp backend/prior_box_clustered.in.cpp
backend/prior_box.in.cpp backend/prior_box.in.cpp
backend/proposal.in.cpp backend/proposal.in.cpp
@ -525,9 +520,7 @@ set(MULTI_TEST_SRC
backend/space_to_batch.in.cpp backend/space_to_batch.in.cpp
backend/split.in.cpp backend/split.in.cpp
backend/sqrt.in.cpp backend/sqrt.in.cpp
backend/squared_difference.in.cpp
backend/squeeze.in.cpp backend/squeeze.in.cpp
backend/subtract.in.cpp
backend/tile.in.cpp backend/tile.in.cpp
backend/topk.in.cpp backend/topk.in.cpp
backend/transpose.in.cpp backend/transpose.in.cpp

View File

@ -1,129 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif
#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "engines_util/test_engines.hpp"
#include "engines_util/test_case.hpp"
#include "util/test_control.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
// Backend-template test (${BACKEND_NAME} substituted at build time):
// elementwise f32 Add of two {2, 2} tensors.
NGRAPH_TEST(${BACKEND_NAME}, add) {
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::v1::Add>(A, B), ParameterVector{A, B});

    vector<float> a{1, 2, 3, 4};
    vector<float> b{5, 6, 7, 8};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, b});
    test_case.add_expected_output<float>(shape, {6, 8, 10, 12});
    test_case.run();
}

// Identical graph and data as the `add` test above; only the test name differs.
NGRAPH_TEST(${BACKEND_NAME}, add_overload) {
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::v1::Add>(A, B), ParameterVector{A, B});

    vector<float> a{1, 2, 3, 4};
    vector<float> b{5, 6, 7, 8};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, b});
    test_case.add_expected_output<float>(shape, {6, 8, 10, 12});
    test_case.run();
}
// Chained Add graph: T4 = ((A+B)*2)*2*2 via repeated self-addition,
// so the expected output is 8 * (a + b).
NGRAPH_TEST(${BACKEND_NAME}, add_in_place) {
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto T = make_shared<op::v1::Add>(A, B);
    auto T2 = make_shared<op::v1::Add>(T, T);
    auto T3 = make_shared<op::v1::Add>(T2, T2);
    auto T4 = make_shared<op::v1::Add>(T3, T3);

    auto f = make_shared<Function>(T4, ParameterVector{A, B});

    vector<float> a{1, 2, 3, 4};
    vector<float> b{5, 6, 7, 8};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, b});
    test_case.add_expected_output<float>(shape, {48, 64, 80, 96});
    test_case.run();
}

// Add with numpy-style broadcast: {1, 2} operand against {3, 2, 2}.
NGRAPH_TEST(${BACKEND_NAME}, add_broadcast) {
    Shape shape_a{1, 2};
    Shape shape_b{3, 2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    auto f = make_shared<Function>(make_shared<op::v1::Add>(A, B), ParameterVector{A, B});

    vector<float> a{1, 2};
    vector<float> b{5, 6, 7, 8, 2, 3, 1, 5, 6, 7, 1, 3};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<float>({a, b});
    test_case.add_expected_output<float>(shape_b, {6, 8, 8, 10, 3, 5, 2, 7, 7, 9, 2, 5});
    test_case.run();
}
// Add applied to two rank-0 (scalar) f32 tensors.
NGRAPH_TEST(${BACKEND_NAME}, add_scalars) {
    const Shape scalar_shape{};
    const auto lhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Add>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{2}, {8}});
    test_case.add_expected_output<float>(scalar_shape, {10});
    test_case.run();
}
// Add broadcasting a scalar right-hand operand across a {2,2} left-hand tensor.
NGRAPH_TEST(${BACKEND_NAME}, add_vector_and_scalar) {
    const Shape lhs_shape{2, 2};
    const Shape rhs_shape{};
    const auto lhs = make_shared<op::Parameter>(element::f32, lhs_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, rhs_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Add>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{2, 4, 7, 8}, {8}});
    test_case.add_expected_output<float>(lhs_shape, {10, 12, 15, 16});
    test_case.run();
}

View File

@ -1,253 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif
#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on
#include "gtest/gtest.h"
#include "util/type_prop.hpp"
#include "runtime/backend.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "engines_util/execute_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
// Elementwise f32 division on 2x2 tensors driven directly through the backend API.
NGRAPH_TEST(${BACKEND_NAME}, divide) {
    const Shape tensor_shape{2, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Divide>(lhs, rhs), ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto tensor_a = backend->create_tensor(element::f32, tensor_shape);
    auto tensor_b = backend->create_tensor(element::f32, tensor_shape);
    auto tensor_out = backend->create_tensor(element::f32, tensor_shape);
    copy_data(tensor_a, vector<float>{2, 4, 8, 16});
    copy_data(tensor_b, vector<float>{1, 2, 4, 8});

    auto handle = backend->compile(func);
    handle->call_with_validate({tensor_out}, {tensor_a, tensor_b});
    EXPECT_TRUE(test::all_close_f((vector<float>{2, 2, 2, 2}), read_vector<float>(tensor_out)));
}
// Integer division on i32, including large values near 2^30 to catch overflow
// or precision bugs in backends that divide via floating point.
NGRAPH_TEST(${BACKEND_NAME}, divide_int32) {
    const Shape tensor_shape{2, 2};
    const auto lhs = make_shared<op::Parameter>(element::i32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::i32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Divide>(lhs, rhs), ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto tensor_a = backend->create_tensor(element::i32, tensor_shape);
    auto tensor_b = backend->create_tensor(element::i32, tensor_shape);
    auto tensor_out = backend->create_tensor(element::i32, tensor_shape);
    copy_data(tensor_a, vector<int32_t>{0x40000140, 0x40000001, 8, 16});
    copy_data(tensor_b, vector<int32_t>{2, 5, 4, 8});

    auto handle = backend->compile(func);
    handle->call_with_validate({tensor_out}, {tensor_a, tensor_b});
    EXPECT_EQ((vector<int32_t>{536871072, 214748365, 2, 2}), read_vector<int32_t>(tensor_out));
}
// Divide with pythondiv=false: mixed-sign integer division truncates toward
// zero, matching C++ semantics (-10 / -3 == 3, -10 / 3 == -3, ...).
NGRAPH_TEST(${BACKEND_NAME}, divide_cpp_rounding_int32) {
    const Shape tensor_shape{2, 2};
    const auto lhs = make_shared<op::Parameter>(element::i32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::i32, tensor_shape);
    // Third Divide argument disables python-style (floored) division.
    const auto func = make_shared<Function>(make_shared<op::v1::Divide>(lhs, rhs, false), ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto tensor_a = backend->create_tensor(element::i32, tensor_shape);
    auto tensor_b = backend->create_tensor(element::i32, tensor_shape);
    auto tensor_out = backend->create_tensor(element::i32, tensor_shape);
    copy_data(tensor_a, vector<int32_t>{-10, -10, 10, 10});
    copy_data(tensor_b, vector<int32_t>{-3, 3, -3, 3});

    auto handle = backend->compile(func);
    handle->call_with_validate({tensor_out}, {tensor_a, tensor_b});
    EXPECT_EQ((vector<int32_t>{3, -3, -3, 3}), read_vector<int32_t>(tensor_out));
}
// Default Divide behaviour: python-style floored division for mixed signs
// (-10 / 3 == -4, 10 / -3 == -4), unlike C++ truncation.
NGRAPH_TEST(${BACKEND_NAME}, divide_python_rounding_int32) {
    const Shape tensor_shape{2, 2};
    const auto lhs = make_shared<op::Parameter>(element::i32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::i32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Divide>(lhs, rhs), ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto tensor_a = backend->create_tensor(element::i32, tensor_shape);
    auto tensor_b = backend->create_tensor(element::i32, tensor_shape);
    auto tensor_out = backend->create_tensor(element::i32, tensor_shape);
    copy_data(tensor_a, vector<int32_t>{-10, -10, 10, 10});
    copy_data(tensor_b, vector<int32_t>{-3, 3, -3, 3});

    auto handle = backend->compile(func);
    handle->call_with_validate({tensor_out}, {tensor_a, tensor_b});
    EXPECT_EQ((vector<int32_t>{3, -4, -4, 3}), read_vector<int32_t>(tensor_out));
}
// Same computation as the `divide` test, constructed via the explicit
// op::v1::Divide node form.
NGRAPH_TEST(${BACKEND_NAME}, divide_overload) {
    const Shape tensor_shape{2, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Divide>(lhs, rhs), ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto tensor_a = backend->create_tensor(element::f32, tensor_shape);
    auto tensor_b = backend->create_tensor(element::f32, tensor_shape);
    auto tensor_out = backend->create_tensor(element::f32, tensor_shape);
    copy_data(tensor_a, vector<float>{2, 4, 8, 16});
    copy_data(tensor_b, vector<float>{1, 2, 4, 8});

    auto handle = backend->compile(func);
    handle->call_with_validate({tensor_out}, {tensor_a, tensor_b});
    EXPECT_TRUE(test::all_close_f((vector<float>{2, 2, 2, 2}), read_vector<float>(tensor_out)));
}
namespace {
template <typename Value>
void divide_broadcast() {
const auto element_type = ngraph::element::from<Value>();
const Shape shape_a{3, 2, 1};
const Shape shape_b{1, 6};
const Shape shape_o{3, 2, 6};
std::vector<Value> in_a{12, 24, 36, 48, 60, 72};
std::vector<Value> in_b{1, 2, 3, 4, 6, 1};
// clang-format off
std::vector<Value> out{12, 6, 4, 3, 2, 12,
24, 12, 8, 6, 4, 24,
36, 18, 12, 9, 6, 36,
48, 24, 16, 12, 8, 48,
60, 30, 20, 15, 10, 60,
72, 36, 24, 18, 12, 72};
// clang-format on
auto A = make_shared<op::Parameter>(element_type, shape_a);
auto B = make_shared<op::Parameter>(element_type, shape_b);
auto f = make_shared<Function>(make_shared<op::v1::Divide>(A, B), ParameterVector{A, B});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element_type, shape_a, in_a.data());
auto b = backend->create_tensor(element_type, shape_b, in_b.data());
auto result = backend->create_tensor(element_type, shape_o);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a, b});
EXPECT_EQ(out, read_vector<Value>(result));
}
} // namespace
NGRAPH_TEST(${BACKEND_NAME}, divide_int32_broadcast) {
divide_broadcast<int32_t>();
}
NGRAPH_TEST(${BACKEND_NAME}, divide_f32_broadcast) {
divide_broadcast<float>();
}
// Scalar (rank-0) i32 division: 18 / 8 == 2 under integer semantics.
NGRAPH_TEST(${BACKEND_NAME}, divide_int32_scalar) {
    const Shape scalar_shape{};
    const auto lhs = make_shared<op::Parameter>(element::i32, scalar_shape);
    const auto rhs = make_shared<op::Parameter>(element::i32, scalar_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Divide>(lhs, rhs), ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto tensor_a = backend->create_tensor(element::i32, scalar_shape);
    auto tensor_b = backend->create_tensor(element::i32, scalar_shape);
    auto tensor_out = backend->create_tensor(element::i32, scalar_shape);
    copy_data(tensor_a, vector<int32_t>{18});
    copy_data(tensor_b, vector<int32_t>{8});

    auto handle = backend->compile(func);
    handle->call_with_validate({tensor_out}, {tensor_a, tensor_b});
    EXPECT_EQ(vector<int32_t>{2}, read_vector<int32_t>(tensor_out));
}
// Scalar (rank-0) f32 division: 18 / 8 == 2.25 exactly.
NGRAPH_TEST(${BACKEND_NAME}, divide_f32_scalar) {
    const Shape scalar_shape{};
    const auto lhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Divide>(lhs, rhs), ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto tensor_a = backend->create_tensor(element::f32, scalar_shape);
    auto tensor_b = backend->create_tensor(element::f32, scalar_shape);
    auto tensor_out = backend->create_tensor(element::f32, scalar_shape);
    copy_data(tensor_a, vector<float>{18});
    copy_data(tensor_b, vector<float>{8});

    auto handle = backend->compile(func);
    handle->call_with_validate({tensor_out}, {tensor_a, tensor_b});
    EXPECT_TRUE(test::all_close_f((vector<float>{2.25}), read_vector<float>(tensor_out)));
}
// Floating-point division by zero must follow IEEE-754: positive / 0 -> +inf.
NGRAPH_TEST(${BACKEND_NAME}, divide_by_zero_float32) {
    const Shape tensor_shape{2, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Divide>(lhs, rhs), ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto tensor_a = backend->create_tensor(element::f32, tensor_shape);
    auto tensor_b = backend->create_tensor(element::f32, tensor_shape);
    auto tensor_out = backend->create_tensor(element::f32, tensor_shape);
    copy_data(tensor_a, vector<float>{2, 4, 8, 16});
    copy_data(tensor_b, vector<float>{0, 0, 0, 0});

    auto handle = backend->compile(func);
    handle->call_with_validate({tensor_out}, {tensor_a, tensor_b});
    const auto inf = std::numeric_limits<float>::infinity();
    EXPECT_EQ((vector<float>{inf, inf, inf, inf}), read_vector<float>(tensor_out));
}

View File

@ -1,112 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif
#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "engines_util/test_engines.hpp"
#include "engines_util/test_case.hpp"
#include "util/test_control.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
// FloorMod on i32: remainder of floored division, so the result follows the
// sign of the divisor (unlike C++ %).
NGRAPH_TEST(${BACKEND_NAME}, floor_mod_int32) {
    const Shape tensor_shape{4};
    const auto lhs = make_shared<op::Parameter>(element::i32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::i32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::FloorMod>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<int32_t>({{7, -7, 7, -7}, {3, 3, -3, -3}});
    test_case.add_expected_output<int32_t>(tensor_shape, {1, 2, -2, -1});
    test_case.run();
}
// FloorMod on i64: remainder of floored division, so the result follows the
// sign of the divisor (unlike C++ %).
NGRAPH_TEST(${BACKEND_NAME}, floor_mod_int64) {
    Shape shape{4};
    // Fix: this test previously created element::i32 parameters and int32_t
    // data, so despite its name it never exercised the int64 code path.
    auto A = make_shared<op::Parameter>(element::i64, shape);
    auto B = make_shared<op::Parameter>(element::i64, shape);
    auto f = make_shared<Function>(make_shared<op::FloorMod>(A, B), ParameterVector{A, B});

    std::vector<int64_t> a{7, -7, 7, -7};
    std::vector<int64_t> b{3, 3, -3, -3};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_multiple_inputs<int64_t>({a, b});
    test_case.add_expected_output<int64_t>(shape, {1, 2, -2, -1});
    test_case.run();
}
// FloorMod on f32 with mixed-sign operands; result keeps the divisor's sign.
NGRAPH_TEST(${BACKEND_NAME}, floor_mod_float) {
    const Shape tensor_shape{4};
    const auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::FloorMod>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{7, -7, 7, -7}, {3, 3, -3, -3}});
    test_case.add_expected_output<float>(tensor_shape, {1, 2, -2, -1});
    test_case.run();
}
// FloorMod with broadcasting: {2,1,2} against {2,1} producing a {2,2,2} result.
NGRAPH_TEST(${BACKEND_NAME}, floor_mod_broadcasted) {
    const Shape lhs_shape{2, 1, 2};
    const Shape rhs_shape{2, 1};
    const Shape out_shape{2, 2, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, lhs_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, rhs_shape);
    const auto func = make_shared<Function>(make_shared<op::FloorMod>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{1, 2, 3, 4}, {2, 3}});
    test_case.add_expected_output<float>(out_shape, {1.0f, 0.0f, 1.0f, 2.0f, 1.0f, 0.0f, 0.0f, 1.0f});
    test_case.run();
}
// FloorMod on rank-0 tensors: floor_mod(2, 4) == 2.
NGRAPH_TEST(${BACKEND_NAME}, floor_mod_scalars) {
    const Shape scalar_shape{};
    const auto lhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    const auto func = make_shared<Function>(make_shared<op::FloorMod>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{2}, {4}});
    test_case.add_expected_output<float>(scalar_shape, {2.0f});
    test_case.run();
}

View File

@ -1,77 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif
#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "engines_util/test_engines.hpp"
#include "engines_util/test_case.hpp"
#include "util/test_control.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
// Elementwise Maximum on f32 including negatives and signed zero neighbours.
NGRAPH_TEST(${BACKEND_NAME}, maximum) {
    const Shape tensor_shape{2, 2, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Maximum>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{1, 8, -8, 17, -0.5, 0.5, 2, 1}, {1, 2, 4, 8, 0, 0, 1, 1.5}});
    test_case.add_expected_output<float>(tensor_shape, {1, 8, 4, 17, 0, 0.5, 2, 1.5});
    test_case.run();
}
// Maximum on i32, with large values near 2^30 to catch backends that compare
// via lossy float conversion.
NGRAPH_TEST(${BACKEND_NAME}, maximum_int32) {
    const Shape tensor_shape{2, 2};
    const auto lhs = make_shared<op::Parameter>(element::i32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::i32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Maximum>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<int32_t>({{0x40000140, 0x40000001, -8, 17}, {0x40000170, 0x40000000, 4, 8}});
    test_case.add_expected_output<int32_t>(tensor_shape, {0x40000170, 0x40000001, 4, 17});
    test_case.run();
}
// Maximum on i64 with values above the 32-bit range (17179887632 > 2^32).
NGRAPH_TEST(${BACKEND_NAME}, maximum_int64) {
    const Shape tensor_shape{2, 2, 2};
    const auto lhs = make_shared<op::Parameter>(element::i64, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::i64, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Maximum>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<int64_t>({{1, 8, -8, 17, -5, 67635216, 2, 17179887632}, {1, 2, 4, 8, 0, 18448, 1, 280592}});
    test_case.add_expected_output<int64_t>(tensor_shape, {1, 8, 4, 17, 0, 67635216, 2, 17179887632});
    test_case.run();
}

View File

@ -1,142 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif
#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "engines_util/test_engines.hpp"
#include "engines_util/test_case.hpp"
#include "util/test_control.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
// Mod with identical operands: every remainder is zero.
NGRAPH_TEST(${BACKEND_NAME}, mod_no_broadcast) {
    const Shape tensor_shape{1, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Mod>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{256, 56}, {256, 56}});
    test_case.add_expected_output<float>(tensor_shape, {0, 0});
    test_case.run();
}
// Mod producing non-zero remainders (256 % 112 == 32, 21 % 6 == 3, ...).
NGRAPH_TEST(${BACKEND_NAME}, mod_no_broadcast_remainder) {
    const Shape tensor_shape{2, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Mod>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{256, 56, 21, 14}, {112, 56, 6, 8}});
    test_case.add_expected_output<float>(tensor_shape, {32, 0, 3, 6});
    test_case.run();
}
// Mod with a {1,2} operand broadcast against a {3,2,2} operand.
NGRAPH_TEST(${BACKEND_NAME}, mod_broadcast) {
    const Shape lhs_shape{1, 2};
    const Shape rhs_shape{3, 2, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, lhs_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, rhs_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Mod>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{1, 2}, {5, 6, 7, 8, 2, 3, 1, 5, 6, 7, 1, 3}});
    test_case.add_expected_output<float>(rhs_shape, {1, 2, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2});
    test_case.run();
}
// Mod on rank-0 tensors: 57 % 13 == 5.
NGRAPH_TEST(${BACKEND_NAME}, mod_scalars) {
    const Shape scalar_shape{};
    const auto lhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Mod>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{57}, {13}});
    test_case.add_expected_output<float>(scalar_shape, {5});
    test_case.run();
}
// Mod with negative operands: the result takes the sign of the dividend
// (truncated-division remainder, e.g. -57 % 13 == -5).
NGRAPH_TEST(${BACKEND_NAME}, mod_negative_numbers) {
    const Shape tensor_shape{2, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Mod>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{-57, -14, -12, -6}, {13, -7, 5, -5}});
    test_case.add_expected_output<float>(tensor_shape, {-5, 0, -2, -1});
    test_case.run();
}
// Mod broadcasting a scalar divisor across a {2,2} dividend.
NGRAPH_TEST(${BACKEND_NAME}, mod_vector_and_scalar) {
    const Shape lhs_shape{2, 2};
    const Shape rhs_shape{};
    const auto lhs = make_shared<op::Parameter>(element::f32, lhs_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, rhs_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Mod>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{2, 4, 7, 8}, {8}});
    test_case.add_expected_output<float>(lhs_shape, {2, 4, 7, 0});
    test_case.run();
}
// Mod of a value with itself is zero; feeds the first Mod's output into a
// second Mod as both operands to exercise in-place result reuse.
NGRAPH_TEST(${BACKEND_NAME}, mod_in_place) {
    const Shape tensor_shape{2, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto first_mod = make_shared<op::v1::Mod>(lhs, rhs);
    const auto second_mod = make_shared<op::v1::Mod>(first_mod, first_mod);
    const auto func = make_shared<Function>(second_mod, ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{1, 2, 3, 4}, {5, 6, 7, 8}});
    test_case.add_expected_output<float>(tensor_shape, {0, 0, 0, 0});
    test_case.run();
}

View File

@ -1,151 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif
#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "engines_util/test_engines.hpp"
#include "engines_util/test_case.hpp"
#include "util/test_control.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
// Elementwise Multiply on two 2x2 f32 tensors.
NGRAPH_TEST(${BACKEND_NAME}, multiply) {
    const Shape tensor_shape{2, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Multiply>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{1, 2, 3, 4}, {5, 6, 7, 8}});
    test_case.add_expected_output<float>(tensor_shape, {5, 12, 21, 32});
    test_case.run();
}
// Same computation as the `multiply` test, built via the explicit
// op::v1::Multiply node form.
NGRAPH_TEST(${BACKEND_NAME}, multiply_overload) {
    const Shape tensor_shape{2, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Multiply>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{1, 2, 3, 4}, {5, 6, 7, 8}});
    test_case.add_expected_output<float>(tensor_shape, {5, 12, 21, 32});
    test_case.run();
}
namespace {
// Shared driver for the broadcast tests below: multiplies a {3,2,1} tensor by
// a {1,6} tensor (numpy-style broadcast to {3,2,6}) and checks the products
// elementwise.  `Value` selects the element type under test.
// Fix: renamed from the misspelled `multiply_broadcst`; both call sites are in
// this file and updated together.
template <typename Value>
void multiply_broadcast() {
    const auto element_type = ngraph::element::from<Value>();
    const Shape shape_a{3, 2, 1};
    const Shape shape_b{1, 6};
    const Shape shape_o{3, 2, 6};

    std::vector<Value> in_a{12, 24, 36, 48, 60, 72};
    std::vector<Value> in_b{1, 2, 3, 4, 6, 1};
    // clang-format off
    std::vector<Value> out{12,  24,  36,  48,  72,  12,
                           24,  48,  72,  96, 144,  24,
                           36,  72, 108, 144, 216,  36,
                           48,  96, 144, 192, 288,  48,
                           60, 120, 180, 240, 360,  60,
                           72, 144, 216, 288, 432,  72};
    // clang-format on

    auto A = make_shared<op::Parameter>(element_type, shape_a);
    auto B = make_shared<op::Parameter>(element_type, shape_b);
    auto f = make_shared<Function>(make_shared<op::v1::Multiply>(A, B), ParameterVector{A, B});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element_type, shape_a, in_a.data());
    auto b = backend->create_tensor(element_type, shape_b, in_b.data());
    auto result = backend->create_tensor(element_type, shape_o);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_EQ(out, read_vector<Value>(result));
}
}  // namespace

NGRAPH_TEST(${BACKEND_NAME}, multiply_int32_broadcast) {
    multiply_broadcast<int32_t>();
}

NGRAPH_TEST(${BACKEND_NAME}, multiply_f32_broadcast) {
    multiply_broadcast<float>();
}
// Scalar (rank-0) i32 multiplication: 2 * 8 == 16.
NGRAPH_TEST(${BACKEND_NAME}, multiply_int32_scalar) {
    const Shape scalar_shape{};
    const auto lhs = make_shared<op::Parameter>(element::i32, scalar_shape);
    const auto rhs = make_shared<op::Parameter>(element::i32, scalar_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Multiply>(lhs, rhs), ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto tensor_a = backend->create_tensor(element::i32, scalar_shape);
    auto tensor_b = backend->create_tensor(element::i32, scalar_shape);
    auto tensor_out = backend->create_tensor(element::i32, scalar_shape);
    copy_data(tensor_a, vector<int32_t>{2});
    copy_data(tensor_b, vector<int32_t>{8});

    auto handle = backend->compile(func);
    handle->call_with_validate({tensor_out}, {tensor_a, tensor_b});
    EXPECT_EQ(vector<int32_t>{16}, read_vector<int32_t>(tensor_out));
}
// Scalar (rank-0) f32 multiplication: 3.1 * 8 == 24.8 within float tolerance.
NGRAPH_TEST(${BACKEND_NAME}, multiply_f32_scalar) {
    const Shape scalar_shape{};
    const auto lhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Multiply>(lhs, rhs), ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto tensor_a = backend->create_tensor(element::f32, scalar_shape);
    auto tensor_b = backend->create_tensor(element::f32, scalar_shape);
    auto tensor_out = backend->create_tensor(element::f32, scalar_shape);
    copy_data(tensor_a, vector<float>{3.1});
    copy_data(tensor_b, vector<float>{8});

    auto handle = backend->compile(func);
    handle->call_with_validate({tensor_out}, {tensor_a, tensor_b});
    EXPECT_TRUE(test::all_close_f((vector<float>{24.8}), read_vector<float>(tensor_out)));
}

View File

@ -1,99 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif
#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "engines_util/test_engines.hpp"
#include "engines_util/test_case.hpp"
#include "util/test_control.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
// Elementwise Power on f32, including exponent zero (2^0 == 1).
NGRAPH_TEST(${BACKEND_NAME}, power) {
    const Shape tensor_shape{2, 2};
    const auto base = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto exponent = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Power>(base, exponent), ParameterVector{base, exponent});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{1, 2, 3, 5}, {2, 0, 6, 3}});
    test_case.add_expected_output<float>(tensor_shape, {1, 1, 729, 125});
    test_case.run();
}
// Power with broadcasting: {2,1,5} bases against {2,1} exponents -> {2,2,5}.
NGRAPH_TEST(${BACKEND_NAME}, power_broadcasted) {
    const Shape base_shape{2, 1, 5};
    const Shape exp_shape{2, 1};
    const Shape out_shape{2, 2, 5};
    const auto base = make_shared<op::Parameter>(element::f32, base_shape);
    const auto exponent = make_shared<op::Parameter>(element::f32, exp_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Power>(base, exponent), ParameterVector{base, exponent});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, {1, 2}});
    test_case.add_expected_output<float>(out_shape,
                                         {1, 2, 3, 4, 5, 1, 4, 9, 16, 25, 6, 7, 8, 9, 10, 36, 49, 64, 81, 100});
    test_case.run();
}
// Power on rank-0 tensors: 2^3 == 8.
NGRAPH_TEST(${BACKEND_NAME}, power_scalars) {
    const Shape scalar_shape{};
    const auto base = make_shared<op::Parameter>(element::f32, scalar_shape);
    const auto exponent = make_shared<op::Parameter>(element::f32, scalar_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Power>(base, exponent), ParameterVector{base, exponent});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{2}, {3}});
    test_case.add_expected_output<float>(scalar_shape, {8});
    test_case.run();
}
// Power broadcasting a scalar exponent across a {2,2} base tensor (squares).
NGRAPH_TEST(${BACKEND_NAME}, power_vector_and_scalar) {
    const Shape base_shape{2, 2};
    const Shape exp_shape{};
    const auto base = make_shared<op::Parameter>(element::f32, base_shape);
    const auto exponent = make_shared<op::Parameter>(element::f32, exp_shape);
    const auto func = make_shared<Function>(make_shared<op::v1::Power>(base, exponent), ParameterVector{base, exponent});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{2, 3, 4, 5}, {2}});
    test_case.add_expected_output<float>(base_shape, {4, 9, 16, 25});
    test_case.run();
}

View File

@ -1,127 +0,0 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif
#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "engines_util/test_engines.hpp"
#include "engines_util/test_case.hpp"
#include "util/test_control.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
// SquaredDifference of identical operands: (x - x)^2 == 0 everywhere.
NGRAPH_TEST(${BACKEND_NAME}, squared_difference_no_broadcast) {
    const Shape tensor_shape{1, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::SquaredDifference>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{256, 56}, {256, 56}});
    test_case.add_expected_output<float>(tensor_shape, {0, 0});
    test_case.run();
}
// SquaredDifference with mixed-sign operands, e.g. (256 - (-112))^2 == 135424.
NGRAPH_TEST(${BACKEND_NAME}, squared_difference_negative_numbers) {
    const Shape tensor_shape{2, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto func = make_shared<Function>(make_shared<op::SquaredDifference>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{256, 56, -21, -14}, {-112, 56, 6, -8}});
    test_case.add_expected_output<float>(tensor_shape, {135424, 0, 729, 36});
    test_case.run();
}
// SquaredDifference with a {1,2} operand broadcast against a {3,2,2} operand.
NGRAPH_TEST(${BACKEND_NAME}, squared_difference_broadcast) {
    const Shape lhs_shape{1, 2};
    const Shape rhs_shape{3, 2, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, lhs_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, rhs_shape);
    const auto func = make_shared<Function>(make_shared<op::SquaredDifference>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{1, 2}, {5, 6, 7, 8, 2, 3, 1, 5, 6, 7, 1, 3}});
    test_case.add_expected_output<float>(rhs_shape, {16, 16, 36, 36, 1, 1, 0, 9, 25, 25, 0, 1});
    test_case.run();
}
// SquaredDifference on rank-0 tensors: (57 - 13)^2 == 1936.
NGRAPH_TEST(${BACKEND_NAME}, squared_difference_scalars) {
    const Shape scalar_shape{};
    const auto lhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    const auto func = make_shared<Function>(make_shared<op::SquaredDifference>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{57}, {13}});
    test_case.add_expected_output<float>(scalar_shape, {1936});
    test_case.run();
}
// SquaredDifference broadcasting a scalar operand across a {2,2} tensor.
NGRAPH_TEST(${BACKEND_NAME}, squared_difference_vector_and_scalar) {
    const Shape lhs_shape{2, 2};
    const Shape rhs_shape{};
    const auto lhs = make_shared<op::Parameter>(element::f32, lhs_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, rhs_shape);
    const auto func = make_shared<Function>(make_shared<op::SquaredDifference>(lhs, rhs), ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{2, 4, 7, 8}, {8}});
    test_case.add_expected_output<float>(lhs_shape, {36, 16, 1, 0});
    test_case.run();
}
// (x - x)^2 == 0: feeds the first SquaredDifference's output into a second
// one as both operands to exercise in-place result reuse.
NGRAPH_TEST(${BACKEND_NAME}, squared_difference_in_place) {
    const Shape tensor_shape{2, 2};
    const auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto first_sqd = make_shared<op::SquaredDifference>(lhs, rhs);
    const auto second_sqd = make_shared<op::SquaredDifference>(first_sqd, first_sqd);
    const auto func = make_shared<Function>(second_sqd, ParameterVector{lhs, rhs});

    auto test_case = test::TestCase<TestEngine>(func);
    test_case.add_multiple_inputs<float>({{1, 2, 3, 4}, {5, 6, 7, 8}});
    test_case.add_expected_output<float>(tensor_shape, {0, 0, 0, 0});
    test_case.run();
}

View File

@ -1,162 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif
#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on
#include "gtest/gtest.h"
#include "runtime/backend.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "engines_util/execute_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
NGRAPH_TEST(${BACKEND_NAME}, subtract) {
    // Elementwise f32 subtraction on a {2,2} tensor: {2,4,8,16} - {1,2,4,8}.
    Shape shape{2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::v1::Subtract>(lhs, rhs), ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors and fill the inputs.
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{2, 4, 8, 16});
    auto b = backend->create_tensor(element::f32, shape);
    copy_data(b, vector<float>{1, 2, 4, 8});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f(vector<float>{1, 2, 4, 8}, read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, subtract_overload) {
    // Same arithmetic as `subtract`; the Subtract node is built through the
    // explicitly qualified std::make_shared spelling.
    Shape shape{2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(std::make_shared<op::v1::Subtract>(lhs, rhs), ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors and fill the inputs.
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{2, 4, 8, 16});
    auto b = backend->create_tensor(element::f32, shape);
    copy_data(b, vector<float>{1, 2, 4, 8});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f(vector<float>{1, 2, 4, 8}, read_vector<float>(result)));
}
namespace {
// Broadcast a {3,2,1} minuend against a {1,6} subtrahend and verify the
// {3,2,6} elementwise difference for the given element type.
// (Renamed from the misspelled `subtract_broadcst`; the helper is file-local,
// so the rename cannot affect other translation units.)
template <typename Value>
void subtract_broadcast() {
    const auto element_type = ngraph::element::from<Value>();
    const Shape shape_a{3, 2, 1};
    const Shape shape_b{1, 6};
    const Shape shape_o{3, 2, 6};

    std::vector<Value> in_a{12, 24, 36, 48, 60, 72};
    std::vector<Value> in_b{1, 2, 3, 4, 6, 1};
    // clang-format off
    std::vector<Value> out{11, 10,  9,  8,  6, 11,
                           23, 22, 21, 20, 18, 23,
                           35, 34, 33, 32, 30, 35,
                           47, 46, 45, 44, 42, 47,
                           59, 58, 57, 56, 54, 59,
                           71, 70, 69, 68, 66, 71};
    // clang-format on

    auto A = make_shared<op::Parameter>(element_type, shape_a);
    auto B = make_shared<op::Parameter>(element_type, shape_b);
    auto f = make_shared<Function>(make_shared<op::v1::Subtract>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input tensors wrap the host vectors directly; only the output is backend-owned.
    auto a = backend->create_tensor(element_type, shape_a, in_a.data());
    auto b = backend->create_tensor(element_type, shape_b, in_b.data());
    auto result = backend->create_tensor(element_type, shape_o);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_EQ(out, read_vector<Value>(result));
}
}  // namespace

NGRAPH_TEST(${BACKEND_NAME}, subtract_int32_broadcast) {
    subtract_broadcast<int32_t>();
}

NGRAPH_TEST(${BACKEND_NAME}, subtract_f32_broadcast) {
    subtract_broadcast<float>();
}
NGRAPH_TEST(${BACKEND_NAME}, subtract_int32_scalar) {
    // Rank-0 i32 subtraction: 2 - 8 == -6.
    Shape scalar_shape{};
    auto lhs = make_shared<op::Parameter>(element::i32, scalar_shape);
    auto rhs = make_shared<op::Parameter>(element::i32, scalar_shape);
    auto f = make_shared<Function>(make_shared<op::v1::Subtract>(lhs, rhs), ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors and fill the inputs.
    auto a = backend->create_tensor(element::i32, scalar_shape);
    copy_data(a, vector<int32_t>{2});
    auto b = backend->create_tensor(element::i32, scalar_shape);
    copy_data(b, vector<int32_t>{8});
    auto result = backend->create_tensor(element::i32, scalar_shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_EQ(vector<int32_t>{-6}, read_vector<int32_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, subtract_f32_scalar) {
    // Rank-0 f32 subtraction: 3.1 - 8 == -4.9 (compared with float tolerance).
    Shape scalar_shape{};
    auto lhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    auto rhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    auto f = make_shared<Function>(make_shared<op::v1::Subtract>(lhs, rhs), ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors and fill the inputs.
    auto a = backend->create_tensor(element::f32, scalar_shape);
    copy_data(a, vector<float>{3.1});
    auto b = backend->create_tensor(element::f32, scalar_shape);
    copy_data(b, vector<float>{8});
    auto result = backend->create_tensor(element::f32, scalar_shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f(vector<float>{-4.9}, read_vector<float>(result)));
}

View File

@ -0,0 +1,9 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "arithmetic_ops.hpp"
// Instantiate the shared ArithmeticOperator type-prop test suite for op::v1::Add.
using Type = ::testing::Types<ngraph::op::v1::Add>;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_add, ArithmeticOperator, Type);

View File

@ -0,0 +1,42 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/*
#include "binary_ops.hpp"
#include "ngraph/opsets/opset1.hpp"
using Type = ::testing::Types<BinaryOperatorType<ngraph::opset1::Divide, ngraph::element::f32>>;
INSTANTIATE_TYPED_TEST_SUITE_P(visitor_with_auto_broadcast, BinaryOperatorVisitor, Type, BinaryOperatorTypeName);
*/
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "util/visitor.hpp"
using namespace std;
using namespace ngraph;
using ngraph::test::NodeBuilder;
using ngraph::test::ValueMap;
TEST(attributes, divide) {
    // Serialize a Divide node through NodeBuilder and verify that its
    // attributes survive the round trip.
    NodeBuilder::get_ops().register_factory<opset1::Divide>();
    const auto input_a = make_shared<op::Parameter>(element::f32, Shape{2, 4});
    const auto input_b = make_shared<op::Parameter>(element::f32, Shape{2, 4});

    const bool python_division = true;
    const auto broadcast_spec = op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY);
    const auto divide = make_shared<opset1::Divide>(input_a, input_b, python_division, broadcast_spec);

    NodeBuilder builder(divide);
    const auto g_divide = ov::as_type_ptr<opset1::Divide>(builder.create());

    const auto expected_attr_count = 2;  // the pythondiv flag and the auto-broadcast spec
    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
    EXPECT_EQ(g_divide->is_pythondiv(), divide->is_pythondiv());
    EXPECT_EQ(g_divide->get_autob(), divide->get_autob());
}