Migrate ngraph backend test of constant operation (#7996)
* Add test cases for PReLU in cpu plugin * For case when slope is vector * Add Constant template plugin reference tests * Update CMakeLists.txt and delete constant.in.cpp * Add tests of tensor_2constant and constant_multi_use * Add test of constant_equality_bool * Remove wrong comments * Remove some of strange if * Merge to one CreateFunction * Remove test names and update test for types * Add bf16 and f64 tests * Add missing type tests * Clear actualOutData to allow multiple use of Validate() * Update SetUp and CreateFunction to support CentOS CI * Remove inputData = {}
This commit is contained in:
parent
7c56155dae
commit
bd756df2f5
@ -64,6 +64,7 @@ void CommonReferenceTest::Infer() {
|
||||
|
||||
void CommonReferenceTest::Validate() {
|
||||
ASSERT_EQ(executableNetwork.outputs().size(), refOutData.size());
|
||||
actualOutData.clear();
|
||||
for (const auto& output : executableNetwork.outputs()) {
|
||||
actualOutData.emplace_back(inferRequest.get_tensor(output));
|
||||
}
|
||||
|
309
docs/template_plugin/tests/functional/op_reference/constant.cpp
Normal file
309
docs/template_plugin/tests/functional/op_reference/constant.cpp
Normal file
@ -0,0 +1,309 @@
|
||||
// Copyright (C) 2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <random>
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/abs.hpp"
|
||||
#include "openvino/op/equal.hpp"
|
||||
#include "base_reference_test.hpp"
|
||||
#include "functional_test_utils/skip_tests_config.hpp"
|
||||
|
||||
using namespace reference_tests;
|
||||
using namespace ov;
|
||||
|
||||
namespace {
|
||||
struct ConstantParams {
|
||||
template <class IT, class OT>
|
||||
ConstantParams(const PartialShape& inputShape,
|
||||
const element::Type& inType, const element::Type& refType,
|
||||
const std::vector<IT>& inputData, const std::vector<OT>& refData,
|
||||
const std::string& test_name = "")
|
||||
: inputShape(inputShape),
|
||||
inType(inType),
|
||||
refType(refType),
|
||||
inputData(CreateTensor(inType, inputData)),
|
||||
refData(CreateTensor(refType, refData)),
|
||||
testcaseName(test_name) {}
|
||||
|
||||
PartialShape inputShape;
|
||||
element::Type inType;
|
||||
element::Type refType;
|
||||
runtime::Tensor inputData;
|
||||
runtime::Tensor refData;
|
||||
std::string testcaseName;
|
||||
};
|
||||
|
||||
// Base fixture: builds a Function holding a single Constant node and compares
// its evaluated output against the reference tensor.
class ReferenceConstantLayerTest : public testing::TestWithParam<ConstantParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& params = GetParam();
        function = CreateFunction(params);
        refOutData = {params.refData};
    }

    // Produces names like "iShape=[2,2,2]_iType=f32_oType=f32_tensor_constant".
    static std::string getTestCaseName(const testing::TestParamInfo<ConstantParams>& obj) {
        const auto& param = obj.param;
        std::ostringstream result;
        result << "iShape=" << param.inputShape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.refType;
        if (!param.testcaseName.empty()) {
            result << "_" << param.testcaseName;
        }
        return result.str();
    }

private:
    // One Constant, no Parameters: the graph's only output is the constant itself.
    static std::shared_ptr<Function> CreateFunction(const ParamType& params) {
        const auto constant =
            op::v0::Constant::create(params.inType, params.inputShape.to_shape(), params.inputData.data());
        return std::make_shared<Function>(constant, ParameterVector{});
    }
};
|
||||
|
||||
class ReferenceConstantLayerTest_2Constant : public ReferenceConstantLayerTest {
|
||||
public:
|
||||
void SetUp() override {
|
||||
auto params = GetParam();
|
||||
function = CreateFunction(params);
|
||||
refOutData = {params.refData, params.refData};
|
||||
}
|
||||
|
||||
private:
|
||||
static std::shared_ptr<Function> CreateFunction(const ParamType& params) {
|
||||
auto A = op::v0::Constant::create(params.inType, params.inputShape.to_shape(), params.inputData.data());
|
||||
auto B = op::v0::Constant::create(params.inType, params.inputShape.to_shape(), params.inputData.data());
|
||||
return std::make_shared<Function>(NodeVector{A, B}, ParameterVector{});
|
||||
}
|
||||
};
|
||||
|
||||
class ReferenceConstantLayerTest_WithOp : public ReferenceConstantLayerTest {
|
||||
public:
|
||||
void SetUp() override {
|
||||
auto params = GetParam();
|
||||
function = CreateFunction(params);
|
||||
refOutData = {params.refData};
|
||||
}
|
||||
|
||||
private:
|
||||
static std::shared_ptr<Function> CreateFunction(const ParamType& params) {
|
||||
auto A = op::v0::Constant::create(params.inType, params.inputShape.to_shape(), params.inputData.data());
|
||||
return std::make_shared<Function>(std::make_shared<op::v0::Abs>(A), ParameterVector{});
|
||||
}
|
||||
};
|
||||
|
||||
class ReferenceConstantLayerTest_MultiUse : public ReferenceConstantLayerTest {
|
||||
public:
|
||||
void SetUp() override {
|
||||
auto params = GetParam();
|
||||
function = CreateFunction(params);
|
||||
refOutData = {params.refData};
|
||||
}
|
||||
|
||||
private:
|
||||
static std::shared_ptr<Function> CreateFunction(const ParamType& params) {
|
||||
const auto A = std::make_shared<op::v0::Constant>(
|
||||
params.inType,
|
||||
params.inputShape.to_shape(),
|
||||
std::vector<std::string>{std::to_string(*reinterpret_cast<int*>(params.inputData.data()))});
|
||||
return std::make_shared<Function>(A, ParameterVector{});
|
||||
}
|
||||
};
|
||||
|
||||
class ReferenceConstantLayerTest_EqualityBool : public ReferenceConstantLayerTest {
|
||||
public:
|
||||
void SetUp() override {
|
||||
auto params = GetParam();
|
||||
function = CreateFunction(params);
|
||||
refOutData = {params.refData};
|
||||
}
|
||||
|
||||
protected:
|
||||
static std::shared_ptr<Function> CreateFunction(const ParamType& params) {
|
||||
auto A = op::v0::Constant::create(params.inType, params.inputShape.to_shape(), params.inputData.data());
|
||||
auto B = op::v0::Constant::create(params.inType, params.inputShape.to_shape(), {true, true, true, true});
|
||||
return std::make_shared<Function>(std::make_shared<op::v1::Equal>(A, B), ParameterVector{});
|
||||
}
|
||||
};
|
||||
|
||||
// Test bodies: Exec() compiles, infers and validates against refOutData.

TEST_P(ReferenceConstantLayerTest, CompareWithHardcodedRefs) {
    Exec();
}

TEST_P(ReferenceConstantLayerTest_2Constant, CompareWithHardcodedRefs) {
    Exec();
}

TEST_P(ReferenceConstantLayerTest_WithOp, CompareWithHardcodedRefs) {
    Exec();
}

TEST_P(ReferenceConstantLayerTest_MultiUse, CompareWithHardcodedRefs) {
    Exec();
    // Run inference a second time on the same compiled model to prove the
    // constant survives multiple executions (the "multi use" scenario).
    Infer();
    Validate();
}

TEST_P(ReferenceConstantLayerTest_EqualityBool, CompareWithHardcodedRefs) {
    Exec();
}
|
||||
|
||||
template <element::Type_t IN_ET>
|
||||
std::vector<ConstantParams> generateConstantParams() {
|
||||
using T = typename element_type_traits<IN_ET>::value_type;
|
||||
std::vector<ConstantParams> constantParams {
|
||||
// tensor_constant
|
||||
ConstantParams({2, 2, 2}, IN_ET, IN_ET,
|
||||
std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8},
|
||||
std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8},
|
||||
"tensor_constant"),
|
||||
};
|
||||
return constantParams;
|
||||
}
|
||||
|
||||
std::vector<ConstantParams> generateConstantDefinedTypeParams() {
|
||||
std::vector<ConstantParams> constantParams {
|
||||
// scalar_constant_float32
|
||||
ConstantParams({}, element::Type_t::f32, element::Type_t::f32,
|
||||
std::vector<float>{4.75},
|
||||
std::vector<float>{4.75f},
|
||||
"scalar_constant_float32"),
|
||||
// scalar_constant_int64
|
||||
ConstantParams({}, element::Type_t::i64, element::Type_t::i64,
|
||||
std::vector<int64_t>{0x4000000000000001},
|
||||
std::vector<int64_t>{0x4000000000000001},
|
||||
"scalar_constant_int64"),
|
||||
// tensor_constant_float32
|
||||
ConstantParams({2, 2}, element::Type_t::f32, element::Type_t::f32,
|
||||
std::vector<float>{4.75, 4.5, -5.25, 0.0},
|
||||
std::vector<float>{4.75f, 4.5f, -5.25f, 0.0f},
|
||||
"tensor_constant_float32"),
|
||||
// tensor_constant_int64
|
||||
ConstantParams({2}, element::Type_t::i64, element::Type_t::i64,
|
||||
std::vector<int64_t>{0x4000000000000001, 0x4000000000000002},
|
||||
std::vector<int64_t>{0x4000000000000001, 0x4000000000000002},
|
||||
"tensor_constant_int64"),
|
||||
};
|
||||
return constantParams;
|
||||
}
|
||||
|
||||
std::vector<ConstantParams> generateConstantCombinedParams() {
|
||||
const std::vector<std::vector<ConstantParams>> constantTypeParams {
|
||||
generateConstantParams<element::Type_t::i8>(),
|
||||
generateConstantParams<element::Type_t::i16>(),
|
||||
generateConstantParams<element::Type_t::i32>(),
|
||||
generateConstantParams<element::Type_t::i64>(),
|
||||
generateConstantParams<element::Type_t::u8>(),
|
||||
generateConstantParams<element::Type_t::u16>(),
|
||||
generateConstantParams<element::Type_t::u32>(),
|
||||
generateConstantParams<element::Type_t::u64>(),
|
||||
generateConstantParams<element::Type_t::bf16>(),
|
||||
generateConstantParams<element::Type_t::f16>(),
|
||||
generateConstantParams<element::Type_t::f32>(),
|
||||
generateConstantParams<element::Type_t::f64>(),
|
||||
generateConstantDefinedTypeParams(),
|
||||
};
|
||||
std::vector<ConstantParams> combinedParams;
|
||||
|
||||
for (const auto& params : constantTypeParams) {
|
||||
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
|
||||
}
|
||||
return combinedParams;
|
||||
}
|
||||
|
||||
std::vector<ConstantParams> generateConstant2ConstantCombinedParams() {
|
||||
const std::vector<std::vector<ConstantParams>> constantTypeParams {
|
||||
generateConstantParams<element::Type_t::i8>(),
|
||||
generateConstantParams<element::Type_t::i16>(),
|
||||
generateConstantParams<element::Type_t::i32>(),
|
||||
generateConstantParams<element::Type_t::i64>(),
|
||||
generateConstantParams<element::Type_t::u8>(),
|
||||
generateConstantParams<element::Type_t::u16>(),
|
||||
generateConstantParams<element::Type_t::u32>(),
|
||||
generateConstantParams<element::Type_t::u64>(),
|
||||
generateConstantParams<element::Type_t::bf16>(),
|
||||
generateConstantParams<element::Type_t::f16>(),
|
||||
generateConstantParams<element::Type_t::f32>(),
|
||||
generateConstantParams<element::Type_t::f64>(),
|
||||
};
|
||||
std::vector<ConstantParams> combinedParams;
|
||||
|
||||
for (const auto& params : constantTypeParams) {
|
||||
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
|
||||
}
|
||||
return combinedParams;
|
||||
}
|
||||
|
||||
template <element::Type_t IN_ET>
|
||||
std::vector<ConstantParams> generateConstantWithOpParams() {
|
||||
using T = typename element_type_traits<IN_ET>::value_type;
|
||||
std::vector<ConstantParams> constantParams {
|
||||
// tensor_constant_with_op
|
||||
ConstantParams({2, 2, 2}, IN_ET, IN_ET,
|
||||
std::vector<T>{-1, 2, 3, -4, 5, -6, -7, 8},
|
||||
std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8},
|
||||
"tensor_constant_with_op"),
|
||||
};
|
||||
return constantParams;
|
||||
}
|
||||
|
||||
std::vector<ConstantParams> generateConstantWithOpCombinedParams() {
|
||||
const std::vector<std::vector<ConstantParams>> constantTypeParams {
|
||||
generateConstantWithOpParams<element::Type_t::i8>(),
|
||||
generateConstantWithOpParams<element::Type_t::i16>(),
|
||||
generateConstantWithOpParams<element::Type_t::i32>(),
|
||||
generateConstantWithOpParams<element::Type_t::i64>(),
|
||||
generateConstantWithOpParams<element::Type_t::bf16>(),
|
||||
generateConstantWithOpParams<element::Type_t::f16>(),
|
||||
generateConstantWithOpParams<element::Type_t::f32>(),
|
||||
generateConstantWithOpParams<element::Type_t::f64>(),
|
||||
};
|
||||
std::vector<ConstantParams> combinedParams;
|
||||
|
||||
for (const auto& params : constantTypeParams) {
|
||||
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
|
||||
}
|
||||
return combinedParams;
|
||||
}
|
||||
|
||||
std::vector<ConstantParams> generateConstantDefinedTypeMultiUseCombinedParams() {
|
||||
const std::vector<ConstantParams> combinedParams {
|
||||
// constant_multi_use
|
||||
ConstantParams({}, element::Type_t::i32, element::Type_t::i32,
|
||||
std::vector<int32_t>{388},
|
||||
std::vector<int32_t>{388},
|
||||
"constant_multi_use"),
|
||||
};
|
||||
return combinedParams;
|
||||
}
|
||||
|
||||
std::vector<ConstantParams> generateConstantDefinedTypeEqualityBoolCombinedParams() {
|
||||
const std::vector<ConstantParams> combinedParams {
|
||||
// constant_equality_bool
|
||||
ConstantParams({4}, element::Type_t::boolean, element::Type_t::boolean,
|
||||
std::vector<char>{true, false, true, false},
|
||||
std::vector<char>{true, false, true, false},
|
||||
"constant_equality_bool"),
|
||||
};
|
||||
return combinedParams;
|
||||
}
|
||||
|
||||
// Suite instantiations. All variants reuse the base getTestCaseName (the
// name depends only on ConstantParams, which every suite shares).

INSTANTIATE_TEST_SUITE_P(smoke_Constant_With_Hardcoded_Refs, ReferenceConstantLayerTest,
                         testing::ValuesIn(generateConstantCombinedParams()),
                         ReferenceConstantLayerTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Constant_With_Hardcoded_Refs, ReferenceConstantLayerTest_2Constant,
                         testing::ValuesIn(generateConstant2ConstantCombinedParams()),
                         ReferenceConstantLayerTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Constant_With_Hardcoded_Refs, ReferenceConstantLayerTest_WithOp,
                         testing::ValuesIn(generateConstantWithOpCombinedParams()),
                         ReferenceConstantLayerTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Constant_With_Hardcoded_Refs, ReferenceConstantLayerTest_MultiUse,
                         testing::ValuesIn(generateConstantDefinedTypeMultiUseCombinedParams()),
                         ReferenceConstantLayerTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Constant_With_Hardcoded_Refs, ReferenceConstantLayerTest_EqualityBool,
                         testing::ValuesIn(generateConstantDefinedTypeEqualityBoolCombinedParams()),
                         ReferenceConstantLayerTest::getTestCaseName);
|
||||
} // namespace
|
@ -473,7 +473,6 @@ set(MULTI_TEST_SRC
|
||||
backend/broadcast.in.cpp
|
||||
backend/builder_reduce_ops_opset1.in.cpp
|
||||
backend/concat.in.cpp
|
||||
backend/constant.in.cpp
|
||||
backend/ctc_greedy_decoder.in.cpp
|
||||
backend/ctc_greedy_decoder_seq_len.in.cpp
|
||||
backend/depth_to_space.in.cpp
|
||||
|
@ -1,237 +0,0 @@
|
||||
// Copyright (C) 2018-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "engines_util/execute_tools.hpp"
|
||||
#include "gtest/gtest.h"
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "ngraph/runtime/tensor.hpp"
|
||||
#include "runtime/backend.hpp"
|
||||
#include "util/all_close.hpp"
|
||||
#include "util/all_close_f.hpp"
|
||||
#include "util/ndarray.hpp"
|
||||
#include "util/test_control.hpp"
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
|
||||
using namespace std;
|
||||
using namespace ngraph;
|
||||
|
||||
static string s_manifest = "${MANIFEST}";
|
||||
|
||||
// A single f32 constant: compiling and running the function must reproduce
// the constant's data exactly.
NGRAPH_TEST(${BACKEND_NAME}, tensor_constant) {
    const Shape shape{2, 2, 2};
    const auto constant = op::Constant::create(element::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
    const auto f = make_shared<Function>(constant, ParameterVector{});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Output tensor only — the function takes no inputs.
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {});
    EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4, 5, 6, 7, 8}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
// The same Constant node wired to two function outputs; both results must
// carry the constant's data.
NGRAPH_TEST(${BACKEND_NAME}, tensor_2constant) {
    const Shape shape{2, 2, 2};
    const auto constant = op::Constant::create(element::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
    const auto f = make_shared<Function>(NodeVector{constant, constant}, ParameterVector{});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // One output tensor per function result, no inputs.
    auto result0 = backend->create_tensor(element::f32, shape);
    auto result1 = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result0, result1}, {});
    EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4, 5, 6, 7, 8}),
                                  read_vector<float>(result0),
                                  MIN_FLOAT_TOLERANCE_BITS));
    EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4, 5, 6, 7, 8}),
                                  read_vector<float>(result1),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
// Constant feeding an Abs op: output is the element-wise absolute value.
NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_with_op) {
    const Shape shape{2, 2, 2};
    const auto constant = op::Constant::create(element::f32, shape, {-1, 2, 3, -4, 5, -6, -7, 8});
    const auto f = make_shared<Function>(make_shared<op::Abs>(constant), ParameterVector{});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Output tensor only — the function takes no inputs.
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {});
    EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4, 5, 6, 7, 8}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
// A compiled function holding a string-constructed Constant must be callable
// more than once and return the same value each time.
NGRAPH_TEST(${BACKEND_NAME}, constant_multi_use) {
    const auto constant = make_shared<op::Constant>(element::i32, Shape{}, std::vector<std::string>{"388"});
    const auto f = make_shared<Function>(constant, ParameterVector{});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto handle = backend->compile(f);

    // First execution.
    std::shared_ptr<runtime::Tensor> r1 = backend->create_tensor(element::i32, Shape{});
    handle->call_with_validate({r1}, std::vector<std::shared_ptr<runtime::Tensor>>{});
    EXPECT_EQ(read_vector<int>(r1), std::vector<int>{388});

    // Second execution of the same handle into a fresh tensor.
    std::shared_ptr<runtime::Tensor> r2 = backend->create_tensor(element::i32, Shape{});
    handle->call_with_validate({r2}, std::vector<std::shared_ptr<runtime::Tensor>>{});
    EXPECT_EQ(read_vector<int>(r2), std::vector<int>{388});
}
|
||||
|
||||
// Scalar (rank-0) f32 constant round-trip.
NGRAPH_TEST(${BACKEND_NAME}, scalar_constant_float32) {
    const auto scalar = op::Constant::create(element::f32, Shape{}, {4.75});
    const auto f = make_shared<Function>(scalar, ParameterVector{});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Output tensor only — the function takes no inputs.
    auto result = backend->create_tensor(element::f32, Shape{});

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {});
    EXPECT_TRUE(test::all_close_f(vector<float>{4.75f}, read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
}

// Scalar i64 constant with a value outside 32-bit range, to catch any
// accidental narrowing in the backend.
NGRAPH_TEST(${BACKEND_NAME}, scalar_constant_int64) {
    const auto scalar = op::Constant::create(element::i64, Shape{}, {0x4000000000000001});
    const auto f = make_shared<Function>(scalar, ParameterVector{});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Output tensor only — the function takes no inputs.
    auto result = backend->create_tensor(element::i64, Shape{});

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {});
    EXPECT_EQ(vector<int64_t>{0x4000000000000001}, read_vector<int64_t>(result));
}
|
||||
|
||||
// 2x2 f32 constant including a negative value and zero.
NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_float32) {
    const Shape shape{2, 2};
    const auto constant = op::Constant::create(element::f32, shape, {4.75, 4.5, -5.25, 0.0});
    const auto f = make_shared<Function>(constant, ParameterVector{});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Output tensor only — the function takes no inputs.
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {});
    EXPECT_TRUE(test::all_close_f((vector<float>{4.75f, 4.5f, -5.25f, 0.0f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}

// 1-D i64 constant with values outside 32-bit range.
NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_int64) {
    const Shape shape{2};
    const auto constant = op::Constant::create(element::i64, shape, {0x4000000000000001, 0x4000000000000002});
    const auto f = make_shared<Function>(constant, ParameterVector{});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto result = backend->create_tensor(element::i64, shape);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {});
    EXPECT_EQ((vector<int64_t>{0x4000000000000001, 0x4000000000000002}), read_vector<int64_t>(result));
}
|
||||
|
||||
// Boolean Equal over two constants: {t,f,t,f} == {t,t,t,t} -> {t,f,t,f}.
// (Removed the stale commented-out Parameter-based variant that had been
// left in the test body.)
NGRAPH_TEST(${BACKEND_NAME}, constant_equality_bool) {
    Shape shape{4};
    auto A = op::Constant::create(element::boolean, shape, {true, false, true, false});
    auto B = op::Constant::create(element::boolean, shape, {true, true, true, true});
    auto f = make_shared<Function>(make_shared<op::v1::Equal>(A, B), ParameterVector{});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Output tensor only — the function takes no inputs.
    auto result = backend->create_tensor(element::boolean, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {});
    EXPECT_EQ((vector<char>{true, false, true, false}), read_vector<char>(result));
}
|
||||
|
||||
namespace {
|
||||
std::vector<uint8_t> read_raw_data(std::shared_ptr<ngraph::runtime::Tensor> tv) {
|
||||
const size_t mem_size = tv->get_size_in_bytes();
|
||||
std::vector<uint8_t> rc(mem_size);
|
||||
tv->read(rc.data(), mem_size);
|
||||
return rc;
|
||||
}
|
||||
|
||||
void run_constant_equality_for_low_precision(const Shape& shape,
|
||||
const std::vector<uint8_t>& data,
|
||||
element::Type element_type) {
|
||||
const void* raw_data = data.data();
|
||||
auto A = op::Constant::create(element_type, shape, raw_data);
|
||||
|
||||
const auto constant_raw_data = static_cast<const uint8_t*>(A->get_data_ptr());
|
||||
EXPECT_EQ(std::memcmp(raw_data, constant_raw_data, data.size()), 0) << "wrong data hold in Constant";
|
||||
|
||||
auto f = make_shared<Function>(A, ParameterVector{});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto result = backend->create_tensor(element_type, shape);
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {});
|
||||
EXPECT_EQ(data, read_raw_data(result));
|
||||
}
|
||||
} // namespace
|
||||
|
||||
// u4: 12 elements packed into 6 bytes, all bits meaningful.
NGRAPH_TEST(${BACKEND_NAME}, constant_equality_u4_2x2x3) {
    const Shape shape{2, 2, 3};
    const std::vector<uint8_t> data{0x12, 0x34, 0x56, 0x78, 0x9a, 0xFF};
    constexpr auto element_type = element::u4;

    run_constant_equality_for_low_precision(shape, data, element_type);
}

// u4: 3 elements in 2 bytes — the last 8 bits contain rubbish (padding).
NGRAPH_TEST(${BACKEND_NAME}, constant_equality_u4_1x3) {
    const Shape shape{1, 3};
    const std::vector<uint8_t> data{0x12, 0x34};
    constexpr auto element_type = element::u4;

    run_constant_equality_for_low_precision(shape, data, element_type);
}

// 10 one-bit elements in 2 bytes — the last 6 bits contain rubbish (padding).
// NOTE(review): the test name says u4 but element::u1 is used; the "last 6
// bits" comment matches u1, so the name looks like a copy-paste slip —
// confirm before renaming (manifests may reference the current name).
NGRAPH_TEST(${BACKEND_NAME}, constant_equality_u4_1x10) {
    const Shape shape{1, 10};
    const std::vector<uint8_t> data{0x12, 0x34};
    constexpr auto element_type = element::u1;

    run_constant_equality_for_low_precision(shape, data, element_type);
}

// i4: 12 elements packed into 6 bytes, all bits meaningful.
NGRAPH_TEST(${BACKEND_NAME}, constant_equality_i4_2x2x3) {
    const Shape shape{2, 2, 3};
    const std::vector<uint8_t> data{0x12, 0x34, 0x56, 0x78, 0x9a, 0xFF};
    constexpr auto element_type = element::i4;

    run_constant_equality_for_low_precision(shape, data, element_type);
}

// i4: 3 elements in 2 bytes — the last 8 bits contain rubbish (padding).
NGRAPH_TEST(${BACKEND_NAME}, constant_equality_i4_1x3) {
    const Shape shape{1, 3};
    const std::vector<uint8_t> data{0x12, 0x34};
    constexpr auto element_type = element::i4;

    run_constant_equality_for_low_precision(shape, data, element_type);
}
|
Loading…
Reference in New Issue
Block a user