Remove legacy API from common test utils (#19647)
* Remove legacy API from common test utils
* Fixed code style
* Fixed build
* Try to fix Windows build
* Fixed GNA build
parent 497f42bd82
commit 7becaf8494
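The diff below is largely mechanical: test code that reached the NDArray/TestCase helpers through the legacy `ngraph::test`/`test` namespace is rewritten to spell them as `ov::test`. As a minimal sketch of the before/after pattern (the `run_migrated_test` wrapper, the header paths, and the `add_expected_output`/`run` calls are illustrative assumptions, not lines taken from this commit):

```cpp
#include <memory>
#include <string>
#include <vector>

#include "common_test_utils/ndarray.hpp"    // assumed header providing ov::test::NDArray
#include "common_test_utils/test_case.hpp"  // ov::test::TestCase, as included by the tests below
#include "openvino/core/model.hpp"

// Hypothetical helper showing the migrated style used throughout this commit.
void run_migrated_test(const std::shared_ptr<ov::Model>& function, const std::string& s_device) {
    std::vector<std::vector<float>> inputs;

    // Before: inputs.emplace_back(test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
    // After: the same helper addressed through the ov::test namespace.
    inputs.emplace_back(ov::test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());

    auto expected_output = ov::test::NDArray<float, 2>({{2, 4}, {6, 8}}).get_vector();

    auto test_case = ov::test::TestCase(function, s_device);
    test_case.add_multiple_inputs(inputs);
    test_case.add_expected_output(expected_output);  // assumed overload taking a flat vector
    test_case.run();
}
```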
@@ -12,7 +12,6 @@
#include "openvino/core/axis_vector.hpp"

using namespace ov;
using namespace ngraph;

namespace {
using ElementValue = int32_t;
@@ -32,8 +31,8 @@ AxisVector get_axis_order(AxisOrder order, size_t size) {

struct TestParams {
AxisOrder order;
ngraph::test::NDArrayBase<ElementValue> input;
ngraph::test::NDArrayBase<ElementValue> output;
ov::test::NDArrayBase<ElementValue> input;
ov::test::NDArrayBase<ElementValue> output;
};

struct ReshapeOptKernel : ::testing::TestWithParam<TestParams> {};

@@ -25,6 +25,7 @@
// clang-format on
#include "common_test_utils/file_utils.hpp"
#include "common_test_utils/ov_test_utils.hpp"
#include "ngraph/file_util.hpp"
#include "default_opset.hpp"
#include "openvino/opsets/opset12.hpp"
#include "common_test_utils/test_case.hpp"
@@ -765,20 +766,21 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_gemm_abc) {

Inputs inputs;
inputs.emplace_back(
test::NDArray<float, 2>({{1, 2, 3, 4, 5, 6}, {7, 8, 9, 10, 11, 12}, {13, 14, 15, 16, 17, 18}}).get_vector());
ov::test::NDArray<float, 2>({{1, 2, 3, 4, 5, 6}, {7, 8, 9, 10, 11, 12}, {13, 14, 15, 16, 17, 18}})
.get_vector());

inputs.emplace_back(test::NDArray<float, 2>({{19, 20, 21, 22},
{23, 24, 25, 26},
{27, 28, 29, 30},
{31, 32, 33, 34},
{35, 36, 37, 38},
{39, 40, 41, 42}})
inputs.emplace_back(ov::test::NDArray<float, 2>({{19, 20, 21, 22},
{23, 24, 25, 26},
{27, 28, 29, 30},
{31, 32, 33, 34},
{35, 36, 37, 38},
{39, 40, 41, 42}})
.get_vector());

inputs.emplace_back(test::NDArray<float, 2>({{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}).get_vector());
inputs.emplace_back(ov::test::NDArray<float, 2>({{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}).get_vector());

auto expected_output =
test::NDArray<float, 2>({{340, 350.5, 361, 371.5}, {862, 890.5, 919, 947.5}, {1384, 1430.5, 1477, 1523.5}})
ov::test::NDArray<float, 2>({{340, 350.5, 361, 371.5}, {862, 890.5, 919, 947.5}, {1384, 1430.5, 1477, 1523.5}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -793,11 +795,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_matmul) {

std::vector<std::vector<float>> inputs;

inputs.emplace_back(test::NDArray<float, 2>({{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}).get_vector());
inputs.emplace_back(ov::test::NDArray<float, 2>({{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}).get_vector());

inputs.emplace_back(test::NDArray<float, 2>({{13, 14, 15}, {16, 17, 18}, {19, 20, 21}, {22, 23, 24}}).get_vector());
inputs.emplace_back(
ov::test::NDArray<float, 2>({{13, 14, 15}, {16, 17, 18}, {19, 20, 21}, {22, 23, 24}}).get_vector());

auto expected_output = test::NDArray<float, 2>({{190, 200, 210}, {470, 496, 522}, {750, 792, 834}}).get_vector();
auto expected_output =
ov::test::NDArray<float, 2>({{190, 200, 210}, {470, 496, 522}, {750, 792, 834}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -997,11 +1001,11 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_sub) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/sub.onnx"));

Inputs inputs;
inputs.emplace_back(test::NDArray<float, 3>({{{1, 2, 3}}}).get_vector());
inputs.emplace_back(ov::test::NDArray<float, 3>({{{1, 2, 3}}}).get_vector());

inputs.emplace_back(test::NDArray<float, 3>({{{4, 5, 7}}}).get_vector());
inputs.emplace_back(ov::test::NDArray<float, 3>({{{4, 5, 7}}}).get_vector());

auto expected_output = test::NDArray<float, 3>({{{-3, -3, -4}}}).get_vector();
auto expected_output = ov::test::NDArray<float, 3>({{{-3, -3, -4}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -1014,10 +1018,10 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_div) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/div.onnx"));

Inputs inputs;
inputs.emplace_back(test::NDArray<float, 3>({{{1, 2, 3}}}).get_vector());
inputs.emplace_back(test::NDArray<float, 3>({{{1, 4, 12}}}).get_vector());
inputs.emplace_back(ov::test::NDArray<float, 3>({{{1, 2, 3}}}).get_vector());
inputs.emplace_back(ov::test::NDArray<float, 3>({{{1, 4, 12}}}).get_vector());

auto expected_output = test::NDArray<float, 3>({{{1, 0.5, 0.25}}}).get_vector();
auto expected_output = ov::test::NDArray<float, 3>({{{1, 0.5, 0.25}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -1030,17 +1034,18 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_add_bcast) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/add_bcast.onnx"));

Inputs inputs;
inputs.emplace_back(test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
.get_vector());
inputs.emplace_back(
ov::test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
.get_vector());

inputs.emplace_back(test::NDArray<float, 1>({1, 2, 3, 4, 5}).get_vector());
inputs.emplace_back(ov::test::NDArray<float, 1>({1, 2, 3, 4, 5}).get_vector());

auto expected_output =
test::NDArray<float, 4>({{{{2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}},
{{2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}},
{{2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}}}})
ov::test::NDArray<float, 4>({{{{2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}},
{{2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}},
{{2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}}}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -1177,10 +1182,11 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_log_sum) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/reduce_log_sum.onnx"));

// input data shape (1, 1, 4, 4)
Inputs inputs{test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};
Inputs inputs{
ov::test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};

// output data shape (1,)
auto expected_output = test::NDArray<float, 4>({{{{2.77258872f}}}}).get_vector();
auto expected_output = ov::test::NDArray<float, 4>({{{{2.77258872f}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -1194,10 +1200,11 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_log_sum_exp) {
"onnx/reduce_log_sum_exp.onnx"));

// input data shape (1, 1, 4, 4)
Inputs inputs{test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};
Inputs inputs{
ov::test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};

// output data shape (1,)
auto expected_output = test::NDArray<float, 4>({{{{3.77258872f}}}}).get_vector();
auto expected_output = ov::test::NDArray<float, 4>({{{{3.77258872f}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -1210,10 +1217,11 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_l1) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/reduce_l1.onnx"));

// input data shape (1, 1, 4, 4)
Inputs inputs{test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};
Inputs inputs{
ov::test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};

// output data shape (1,)
auto expected_output = test::NDArray<float, 4>({{{{16}}}}).get_vector();
auto expected_output = ov::test::NDArray<float, 4>({{{{16}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -1226,10 +1234,11 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_l2) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/reduce_l2.onnx"));

// input data shape (1, 1, 4, 4)
Inputs inputs{test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};
Inputs inputs{
ov::test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};

// output data shape (1,)
auto expected_output = test::NDArray<float, 4>({{{{4}}}}).get_vector();
auto expected_output = ov::test::NDArray<float, 4>({{{{4}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -1243,10 +1252,10 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_max) {

// input data shape (1, 1, 4, 4)
Inputs inputs{
test::NDArray<float, 4>({{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, {13, 14, 15, 16}}}}).get_vector()};
ov::test::NDArray<float, 4>({{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, {13, 14, 15, 16}}}}).get_vector()};

// output data shape (1,)
auto expected_output = test::NDArray<float, 4>({{{{16}}}}).get_vector();
auto expected_output = ov::test::NDArray<float, 4>({{{{16}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -1266,10 +1275,11 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_mean) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/reduce_mean.onnx"));

// input data shape (1, 1, 4, 4)
Inputs inputs{test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};
Inputs inputs{
ov::test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};

// output data shape (1,)
auto expected_output = test::NDArray<float, 4>({{{{1}}}}).get_vector();
auto expected_output = ov::test::NDArray<float, 4>({{{{1}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -1283,10 +1293,10 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_min) {

// input data shape (1, 1, 4, 4)
Inputs inputs{
test::NDArray<float, 4>({{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, {13, 14, 15, 16}}}}).get_vector()};
ov::test::NDArray<float, 4>({{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, {13, 14, 15, 16}}}}).get_vector()};

// output data shape (1,)
auto expected_output = test::NDArray<float, 4>({{{{1}}}}).get_vector();
auto expected_output = ov::test::NDArray<float, 4>({{{{1}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -1299,10 +1309,11 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_prod) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/reduce_prod.onnx"));

// input data shape (1, 1, 4, 4)
Inputs inputs{test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};
Inputs inputs{
ov::test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};

// output data shape (1,)
auto expected_output = test::NDArray<float, 4>({{{{1}}}}).get_vector();
auto expected_output = ov::test::NDArray<float, 4>({{{{1}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -1315,10 +1326,11 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_sum) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/reduce_sum.onnx"));

// input data shape (1, 1, 4, 4)
Inputs inputs{test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};
Inputs inputs{
ov::test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};

// output data shape (1,)
auto expected_output = test::NDArray<float, 4>({{{{16}}}}).get_vector();
auto expected_output = ov::test::NDArray<float, 4>({{{{16}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -1344,10 +1356,11 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_sum_square) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/reduce_sum_square.onnx"));

// input data shape (1, 1, 4, 4)
Inputs inputs{test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};
Inputs inputs{
ov::test::NDArray<float, 4>({{{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}}).get_vector()};

// output data shape (1,)
auto expected_output = test::NDArray<float, 4>({{{{16}}}}).get_vector();
auto expected_output = ov::test::NDArray<float, 4>({{{{16}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -1360,10 +1373,10 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_sum_13_axes_as_constant) {
SERIALIZED_ZOO,
"onnx/reduce_sum_13_axes_as_constant.onnx"));

Inputs inputs{test::NDArray<float, 4>({{{{1.0f, 1.0f, 1.0f, 1.0f},
{1.0f, 1.0f, 1.0f, 1.0f},
{1.0f, 1.0f, 1.0f, 1.0f},
{1.0f, 1.0f, 1.0f, 1.0f}}}})
Inputs inputs{ov::test::NDArray<float, 4>({{{{1.0f, 1.0f, 1.0f, 1.0f},
{1.0f, 1.0f, 1.0f, 1.0f},
{1.0f, 1.0f, 1.0f, 1.0f},
{1.0f, 1.0f, 1.0f, 1.0f}}}})
.get_vector()};

auto test_case = ov::test::TestCase(function, s_device);
@@ -1380,7 +1393,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_sum_13_axes_as_constant_single_
SERIALIZED_ZOO,
"onnx/reduce_sum_13_axes_as_constant_single_axis.onnx"));

Inputs inputs{test::NDArray<float, 3>({{{1, 2, 3}, {4, 5, 6}}, {{7, 8, 9}, {10, 11, 12}}}).get_vector()};
Inputs inputs{ov::test::NDArray<float, 3>({{{1, 2, 3}, {4, 5, 6}}, {{7, 8, 9}, {10, 11, 12}}}).get_vector()};

auto test_case = ov::test::TestCase(function, s_device);

@@ -1397,10 +1410,10 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_sum_13_axes_as_constant_keepdim
"onnx/reduce_sum_13_axes_as_constant_keepdims_off.onnx"));

// input data shape (1, 1, 4, 4)
Inputs inputs{test::NDArray<float, 4>({{{{1.0f, 1.0f, 1.0f, 1.0f},
{1.0f, 1.0f, 1.0f, 1.0f},
{1.0f, 1.0f, 1.0f, 1.0f},
{1.0f, 1.0f, 1.0f, 1.0f}}}})
Inputs inputs{ov::test::NDArray<float, 4>({{{{1.0f, 1.0f, 1.0f, 1.0f},
{1.0f, 1.0f, 1.0f, 1.0f},
{1.0f, 1.0f, 1.0f, 1.0f},
{1.0f, 1.0f, 1.0f, 1.0f}}}})
.get_vector()};

auto test_case = ov::test::TestCase(function, s_device);
@@ -2174,10 +2187,11 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_shape) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/shape.onnx"));

Inputs inputs;
inputs.emplace_back(test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
.get_vector());
inputs.emplace_back(
ov::test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
.get_vector());

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -2191,13 +2205,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_elu) {

Inputs inputs;
inputs.emplace_back(
test::NDArray<float, 3>({{{-9, -8, -7, -6, -5}, {-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-1, -1, -1, -1, -1}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
ov::test::NDArray<float, 3>({{{-9, -8, -7, -6, -5}, {-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-1, -1, -1, -1, -1}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
.get_vector());

auto expected_output =
test::NDArray<float, 3>(
ov::test::NDArray<float, 3>(
{{{-1.999753180391830f, -1.999329074744190f, -1.998176236068890f, -1.995042495646670f, -1.986524106001830f},
{-1.963368722222530f, -1.900425863264270f, -1.729329433526770f, -1.264241117657120f, 0},
{1, 2, 3, 4, 5},
@@ -2224,13 +2238,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_leaky_relu) {

Inputs inputs;
inputs.emplace_back(
test::NDArray<float, 3>({{{-9, -8, -7, -6, -5}, {-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-1, -1, -1, -1, -1}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
ov::test::NDArray<float, 3>({{{-9, -8, -7, -6, -5}, {-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-1, -1, -1, -1, -1}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
.get_vector());

auto expected_output =
test::NDArray<float, 3>(
ov::test::NDArray<float, 3>(
{{{-0.9f, -0.8f, -0.7f, -0.6f, -0.5f}, {-0.4f, -0.3f, -0.2f, -0.1f, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-0.4f, -0.3f, -0.2f, -0.1f, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-0.1f, -0.1f, -0.1f, -0.1f, -0.1f}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
@@ -2248,20 +2262,21 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_prelu_nd) {

Inputs inputs;
inputs.emplace_back(
test::NDArray<float, 3>({{{-9, -8, -7, -6, -5}, {-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-1, -1, -1, -1, -1}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
ov::test::NDArray<float, 3>({{{-9, -8, -7, -6, -5}, {-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-1, -1, -1, -1, -1}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
.get_vector());

inputs.emplace_back(test::NDArray<float, 3>({{{1, 0, 1, 0, 1}, {0, 1, 0, 1, 0}, {1, 0, 1, 0, 1}, {0, 1, 0, 1, 0}},
{{0, 1, 0, 1, 0}, {1, 0, 1, 0, 1}, {0, 1, 0, 1, 0}, {1, 0, 1, 0, 1}},
{{1, 0, 1, 0, 1}, {0, 1, 0, 1, 0}, {1, 0, 1, 0, 1}, {0, 1, 0, 1, 0}}})
.get_vector());
inputs.emplace_back(
ov::test::NDArray<float, 3>({{{1, 0, 1, 0, 1}, {0, 1, 0, 1, 0}, {1, 0, 1, 0, 1}, {0, 1, 0, 1, 0}},
{{0, 1, 0, 1, 0}, {1, 0, 1, 0, 1}, {0, 1, 0, 1, 0}, {1, 0, 1, 0, 1}},
{{1, 0, 1, 0, 1}, {0, 1, 0, 1, 0}, {1, 0, 1, 0, 1}, {0, 1, 0, 1, 0}}})
.get_vector());

auto expected_output =
test::NDArray<float, 3>({{{-9, 0, -7, 0, -5}, {0, -3, 0, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{0, -3, 0, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {0, -1, 0, -1, 0}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
ov::test::NDArray<float, 3>({{{-9, 0, -7, 0, -5}, {0, -3, 0, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{0, -3, 0, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {0, -1, 0, -1, 0}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -2376,13 +2391,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_selu) {

Inputs inputs;
inputs.emplace_back(
test::NDArray<float, 3>({{{-9, -8, -7, -6, -5}, {-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-1, -1, -1, -1, -1}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
ov::test::NDArray<float, 3>({{{-9, -8, -7, -6, -5}, {-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-1, -1, -1, -1, -1}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
.get_vector());

auto expected_output =
test::NDArray<float, 3>(
ov::test::NDArray<float, 3>(
{{{-5.99925954117548f, -5.99798722423258f, -5.99452870820667f, -5.98512748694000f, -5.95957231800549f},
{-5.89010616666759f, -5.70127758979282f, -5.18798830058032f, -3.79272335297135f, 0},
{3, 6, 9, 12, 15},
@@ -2409,13 +2424,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_sigmoid) {

Inputs inputs;
inputs.emplace_back(
test::NDArray<float, 3>({{{-9, -8, -7, -6, -5}, {-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-1, -1, -1, -1, -1}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
ov::test::NDArray<float, 3>({{{-9, -8, -7, -6, -5}, {-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-1, -1, -1, -1, -1}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
.get_vector());

auto expected_output =
test::NDArray<float, 3>(
ov::test::NDArray<float, 3>(
{{{0.00012339457598623f,
0.00033535013046648f,
0.00091105119440065f,
@@ -2446,13 +2461,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_tanh) {

Inputs inputs;
inputs.emplace_back(
test::NDArray<float, 3>({{{-9, -8, -7, -6, -5}, {-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-1, -1, -1, -1, -1}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
ov::test::NDArray<float, 3>({{{-9, -8, -7, -6, -5}, {-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-1, -1, -1, -1, -1}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
.get_vector());

auto expected_output =
test::NDArray<float, 3>(
ov::test::NDArray<float, 3>(
{{{-0.999999969540041f, -0.999999774929676f, -0.999998336943945f, -0.999987711650796f, -0.999909204262595f},
{-0.999329299739067f, -0.995054753686731f, -0.964027580075817f, -0.761594155955765f, 0},
{0.761594155955765f, 0.964027580075817f, 0.995054753686731f, 0.999329299739067f, 0.999909204262595f},
@@ -2479,15 +2494,15 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_thresholded_relu) {

Inputs inputs;
inputs.emplace_back(
test::NDArray<float, 3>({{{-9, -8, -7, -6, -5}, {-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-1, -1, -1, -1, -1}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
ov::test::NDArray<float, 3>({{{-9, -8, -7, -6, -5}, {-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{-4, -3, -2, -1, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{1, 1, 1, 1, 1}, {-1, -1, -1, -1, -1}, {0, 0, 0, 0, 0}, {2, 2, 2, 2, 2}}})
.get_vector());

auto expected_output =
test::NDArray<float, 3>({{{0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{0, 0, 0, 0, 0}, {0, 0, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}}})
ov::test::NDArray<float, 3>({{{0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 3, 4, 5}, {6, 7, 8, 9, 10}},
{{0, 0, 0, 0, 0}, {0, 0, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}},
{{0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -2502,9 +2517,9 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_matmul_vec_ten3d) {

Inputs inputs;
inputs.emplace_back(std::vector<float>{0.f, 1.f});
inputs.emplace_back(test::NDArray<float, 3>{{{0.f}, {1.f}}, {{2.f}, {3.f}}, {{4.f}, {5.f}}}.get_vector());
inputs.emplace_back(ov::test::NDArray<float, 3>{{{0.f}, {1.f}}, {{2.f}, {3.f}}, {{4.f}, {5.f}}}.get_vector());

auto expected_output = test::NDArray<float, 2>{{1.f}, {3.f}, {5.f}}.get_vector();
auto expected_output = ov::test::NDArray<float, 2>{{1.f}, {3.f}, {5.f}}.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -2574,15 +2589,15 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_sum_opset8) {

Inputs inputs;
inputs.emplace_back(std::vector<float>{1.0f, 2.0f, 3.0f});
inputs.emplace_back(test::NDArray<float, 2>{{10.0f}, {20.0f}, {30.0f}}.get_vector());
inputs.emplace_back(test::NDArray<float, 3>{{{100.0f}}, {{200.0f}}, {{300.0f}}}.get_vector());
inputs.emplace_back(ov::test::NDArray<float, 2>{{10.0f}, {20.0f}, {30.0f}}.get_vector());
inputs.emplace_back(ov::test::NDArray<float, 3>{{{100.0f}}, {{200.0f}}, {{300.0f}}}.get_vector());

auto expected_output =
test::NDArray<float, 3>{{{111.0f, 112.0f, 113.0f}, {121.0f, 122.0f, 123.0f}, {131.0f, 132.0f, 133.0f}},
ov::test::NDArray<float, 3>{{{111.0f, 112.0f, 113.0f}, {121.0f, 122.0f, 123.0f}, {131.0f, 132.0f, 133.0f}},

{{211.0f, 212.0f, 213.0f}, {221.0f, 222.0f, 223.0f}, {231.0f, 232.0f, 233.0f}},
{{211.0f, 212.0f, 213.0f}, {221.0f, 222.0f, 223.0f}, {231.0f, 232.0f, 233.0f}},

{{311.0f, 312.0f, 313.0f}, {321.0f, 322.0f, 323.0f}, {331.0f, 332.0f, 333.0f}}}
{{311.0f, 312.0f, 313.0f}, {321.0f, 322.0f, 323.0f}, {331.0f, 332.0f, 333.0f}}}
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -2903,13 +2918,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_erf) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/erf.onnx"));

Inputs inputs;
inputs.emplace_back(test::NDArray<float, 2>{
inputs.emplace_back(ov::test::NDArray<float, 2>{
{-std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity()},
{-3.141592f, 0.0f},
{0.5f, 1.0f}}.get_vector());

const std::vector<float> expected_output =
test::NDArray<float, 2>{{-1.0f, 1.0f}, {-0.99999112f, 0.0f}, {0.52049988f, 0.84270079f}}.get_vector();
ov::test::NDArray<float, 2>{{-1.0f, 1.0f}, {-0.99999112f, 0.0f}, {0.52049988f, 0.84270079f}}.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -5512,15 +5527,15 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_unsqueeze_ai_onnx_domain) {
SERIALIZED_ZOO,
"onnx/unsqueeze_ai_onnx_domain.onnx"));

auto input = test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
auto input = ov::test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
.get_vector();

auto expected_output =
test::NDArray<float, 4>({{{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}}})
ov::test::NDArray<float, 4>({{{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -5534,15 +5549,15 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_unsqueeze_default_domain) {
SERIALIZED_ZOO,
"onnx/unsqueeze_default_domain.onnx"));

auto input = test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
auto input = ov::test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
.get_vector();

auto expected_output =
test::NDArray<float, 4>({{{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}}})
ov::test::NDArray<float, 4>({{{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -5556,14 +5571,14 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_unsqueeze_default_domain_opset13) {
SERIALIZED_ZOO,
"onnx/unsqueeze_default_domain_opset13.onnx"));

auto input = test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
auto input = ov::test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
.get_vector();
auto expected_output =
test::NDArray<float, 4>({{{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}}})
ov::test::NDArray<float, 4>({{{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -5577,14 +5592,14 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_unsqueeze_ai_onnx_domain_opset13) {
SERIALIZED_ZOO,
"onnx/unsqueeze_ai_onnx_domain_opset13.onnx"));

auto input = test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
auto input = ov::test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
.get_vector();
auto expected_output =
test::NDArray<float, 4>({{{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}}})
ov::test::NDArray<float, 4>({{{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -6458,10 +6473,10 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_squeeze_default_domain_opset13) {
SERIALIZED_ZOO,
"onnx/squeeze_default_domain_opset13.onnx"));

auto input =
test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}}).get_vector();
auto input = ov::test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
.get_vector();
auto expected_output =
test::NDArray<float, 2>({{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}).get_vector();
ov::test::NDArray<float, 2>({{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_input(input);

@@ -18,6 +18,7 @@
#include "common_test_utils/test_case.hpp"
#include "onnx_import/onnx.hpp"
#include "onnx_utils.hpp"
#include "ngraph/file_util.hpp"
#include "common_test_utils/test_control.hpp"

OPENVINO_SUPPRESS_DEPRECATED_START

@@ -20,6 +20,7 @@
#include "common_test_utils/test_control.hpp"
#include "common_test_utils/test_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/file_util.hpp"
#include "ngraph/ngraph.hpp"
#include "onnx_import/onnx.hpp"
#include "onnx_utils.hpp"
@@ -42,21 +43,22 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_conv2d_strides_padding) {

Inputs inputs;
// data (1, 1, 7, 5) input tensor
inputs.emplace_back(test::NDArray<float, 4>{{{{{0.f, 1.f, 2.f, 3.f, 4.f},
{5.f, 6.f, 7.f, 8.f, 9.f},
{10.f, 11.f, 12.f, 13.f, 14.f},
{15.f, 16.f, 17.f, 18.f, 19.f},
{20.f, 21.f, 22.f, 23.f, 24.f},
{25.f, 26.f, 27.f, 28.f, 29.f},
{30.f, 31.f, 32.f, 33.f, 34.f}}}}}
inputs.emplace_back(ov::test::NDArray<float, 4>{{{{{0.f, 1.f, 2.f, 3.f, 4.f},
{5.f, 6.f, 7.f, 8.f, 9.f},
{10.f, 11.f, 12.f, 13.f, 14.f},
{15.f, 16.f, 17.f, 18.f, 19.f},
{20.f, 21.f, 22.f, 23.f, 24.f},
{25.f, 26.f, 27.f, 28.f, 29.f},
{30.f, 31.f, 32.f, 33.f, 34.f}}}}}
.get_vector());

// filters (1, 1, 3, 3) aka convolution weights
inputs.emplace_back(test::NDArray<float, 4>{{{{{1.f, 1.f, 1.f}, {1.f, 1.f, 1.f}, {1.f, 1.f, 1.f}}}}}.get_vector());
inputs.emplace_back(
ov::test::NDArray<float, 4>{{{{{1.f, 1.f, 1.f}, {1.f, 1.f, 1.f}, {1.f, 1.f, 1.f}}}}}.get_vector());

// (1, 1, 4, 3)
auto expected_output =
test::NDArray<float, 4>(
ov::test::NDArray<float, 4>(
{{{{12.f, 27.f, 24.f}, {63.f, 108.f, 81.f}, {123.f, 198.f, 141.f}, {112.f, 177.f, 124.f}}}})
.get_vector();

@@ -74,20 +76,21 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_conv2d_strides_no_padding) {

Inputs inputs;
// data (1, 1, 7, 5) input tensor
inputs.emplace_back(test::NDArray<float, 4>{{{{{0.f, 1.f, 2.f, 3.f, 4.f},
{5.f, 6.f, 7.f, 8.f, 9.f},
{10.f, 11.f, 12.f, 13.f, 14.f},
{15.f, 16.f, 17.f, 18.f, 19.f},
{20.f, 21.f, 22.f, 23.f, 24.f},
{25.f, 26.f, 27.f, 28.f, 29.f},
{30.f, 31.f, 32.f, 33.f, 34.f}}}}}
inputs.emplace_back(ov::test::NDArray<float, 4>{{{{{0.f, 1.f, 2.f, 3.f, 4.f},
{5.f, 6.f, 7.f, 8.f, 9.f},
{10.f, 11.f, 12.f, 13.f, 14.f},
{15.f, 16.f, 17.f, 18.f, 19.f},
{20.f, 21.f, 22.f, 23.f, 24.f},
{25.f, 26.f, 27.f, 28.f, 29.f},
{30.f, 31.f, 32.f, 33.f, 34.f}}}}}
.get_vector());

// filters (1, 1, 3, 3) aka convolution weights
inputs.emplace_back(test::NDArray<float, 4>{{{{{1.f, 1.f, 1.f}, {1.f, 1.f, 1.f}, {1.f, 1.f, 1.f}}}}}.get_vector());
inputs.emplace_back(
ov::test::NDArray<float, 4>{{{{{1.f, 1.f, 1.f}, {1.f, 1.f, 1.f}, {1.f, 1.f, 1.f}}}}}.get_vector());

// (1, 1, 3, 2)
auto expected_output = test::NDArray<float, 4>({{{{54.f, 72.f}, {144.f, 162.f}, {234.f, 252.f}}}}).get_vector();
auto expected_output = ov::test::NDArray<float, 4>({{{{54.f, 72.f}, {144.f, 162.f}, {234.f, 252.f}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -104,21 +107,22 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_conv2d_strides_assymetric_padding) {

Inputs inputs;
// data (1, 1, 7, 5) input tensor
inputs.emplace_back(test::NDArray<float, 4>{{{{{0.f, 1.f, 2.f, 3.f, 4.f},
{5.f, 6.f, 7.f, 8.f, 9.f},
{10.f, 11.f, 12.f, 13.f, 14.f},
{15.f, 16.f, 17.f, 18.f, 19.f},
{20.f, 21.f, 22.f, 23.f, 24.f},
{25.f, 26.f, 27.f, 28.f, 29.f},
{30.f, 31.f, 32.f, 33.f, 34.f}}}}}
inputs.emplace_back(ov::test::NDArray<float, 4>{{{{{0.f, 1.f, 2.f, 3.f, 4.f},
{5.f, 6.f, 7.f, 8.f, 9.f},
{10.f, 11.f, 12.f, 13.f, 14.f},
{15.f, 16.f, 17.f, 18.f, 19.f},
{20.f, 21.f, 22.f, 23.f, 24.f},
{25.f, 26.f, 27.f, 28.f, 29.f},
{30.f, 31.f, 32.f, 33.f, 34.f}}}}}
.get_vector());

// filters (1, 1, 3, 3) aka convolution weights
inputs.emplace_back(test::NDArray<float, 4>{{{{{1.f, 1.f, 1.f}, {1.f, 1.f, 1.f}, {1.f, 1.f, 1.f}}}}}.get_vector());
inputs.emplace_back(
ov::test::NDArray<float, 4>{{{{{1.f, 1.f, 1.f}, {1.f, 1.f, 1.f}, {1.f, 1.f, 1.f}}}}}.get_vector());

// (1, 1, 4, 2)
auto expected_output =
test::NDArray<float, 4>({{{{21.f, 33.f}, {99.f, 117.f}, {189.f, 207.f}, {171.f, 183.f}}}}).get_vector();
ov::test::NDArray<float, 4>({{{{21.f, 33.f}, {99.f, 117.f}, {189.f, 207.f}, {171.f, 183.f}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -142,20 +146,20 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_conv2d_dilation_assymetric_pads_stride
Inputs inputs;
// {2, 1, 1, 1}
inputs.emplace_back(
test::NDArray<float, 4>({{{{-0.09103918075561523f}}}, {{{-0.32513630390167236f}}}}).get_vector());
ov::test::NDArray<float, 4>({{{{-0.09103918075561523f}}}, {{{-0.32513630390167236f}}}}).get_vector());
// {2, 1, 3, 3}
inputs.emplace_back(
test::NDArray<float, 4>({{{{0.4312484860420227f, -0.12559029459953308f, 0.44889551401138306f},
{-0.3100617825984955f, 0.13522827625274658f, -0.06791308522224426f},
{0.22671669721603394f, -0.17391827702522278f, -0.31299442052841187f}}},
{{{-0.31545522809028625f, 0.06560015678405762f, 0.2656586766242981f},
{0.41363757848739624f, 0.31231558322906494f, -0.376018226146698f},
{-0.005708813667297363f, 0.34922850131988525f, 0.45095211267471313f}}}})
ov::test::NDArray<float, 4>({{{{0.4312484860420227f, -0.12559029459953308f, 0.44889551401138306f},
{-0.3100617825984955f, 0.13522827625274658f, -0.06791308522224426f},
{0.22671669721603394f, -0.17391827702522278f, -0.31299442052841187f}}},
{{{-0.31545522809028625f, 0.06560015678405762f, 0.2656586766242981f},
{0.41363757848739624f, 0.31231558322906494f, -0.376018226146698f},
{-0.005708813667297363f, 0.34922850131988525f, 0.45095211267471313f}}}})
.get_vector());

// {2, 2, 1, 2}
auto expected_output =
test::NDArray<float, 4>(
ov::test::NDArray<float, 4>(
{{{{-0.012311071157455444f, 0.02822777070105076f}}, {{-0.028432954102754593f, -0.037657227367162704f}}},
{{{-0.04396762326359749f, 0.10081233829259872f}}, {{-0.10154513269662857f, -0.13448859751224518f}}}})
.get_vector();
@@ -298,12 +302,12 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_average_pool_2d) {
// input data shape (1, 1, 4, 4)
Inputs inputs;
inputs.push_back(
test::NDArray<float, 4>(
ov::test::NDArray<float, 4>(
{{{{0.f, 1.f, 2.f, 3.f}, {4.f, 5.f, 6.f, 7.f}, {8.f, 9.f, 10.f, 11.f}, {12.f, 13.f, 14.f, 15.f}}}})
.get_vector());

// (1, 1, 2, 2)
auto expected_output = test::NDArray<float, 4>({{{{2.5f, 4.5f}, {10.5f, 12.5f}}}}).get_vector();
auto expected_output = ov::test::NDArray<float, 4>({{{{2.5f, 4.5f}, {10.5f, 12.5f}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -320,13 +324,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_average_pool_2d_pads) {
// input data shape (1, 1, 4, 4)
Inputs inputs;
inputs.push_back(
test::NDArray<float, 4>(
ov::test::NDArray<float, 4>(
{{{{0.f, 1.f, 2.f, 3.f}, {4.f, 5.f, 6.f, 7.f}, {8.f, 9.f, 10.f, 11.f}, {12.f, 13.f, 14.f, 15.f}}}})
.get_vector());

// (1, 1, 3, 3)
auto expected_output =
test::NDArray<float, 4>({{{{0.f, 1.5f, 3.f}, {6.f, 7.5f, 9.f}, {12.f, 13.5f, 15.f}}}}).get_vector();
ov::test::NDArray<float, 4>({{{{0.f, 1.5f, 3.f}, {6.f, 7.5f, 9.f}, {12.f, 13.5f, 15.f}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);
@@ -370,13 +374,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_max_pool_2d_pads) {
// input data shape (1, 1, 4, 4)
Inputs inputs;
inputs.push_back(
test::NDArray<float, 4>(
ov::test::NDArray<float, 4>(
{{{{0.f, 1.f, 2.f, 3.f}, {4.f, 5.f, 6.f, 7.f}, {8.f, 9.f, 10.f, 11.f}, {12.f, 13.f, 14.f, 15.f}}}})
.get_vector());

// (1, 1, 3, 3)
auto expected_output =
test::NDArray<float, 4>({{{{0.f, 2.f, 3.f}, {8.f, 10.f, 11.f}, {12.f, 14.f, 15.f}}}}).get_vector();
ov::test::NDArray<float, 4>({{{{0.f, 2.f, 3.f}, {8.f, 10.f, 11.f}, {12.f, 14.f, 15.f}}}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);

@@ -20,6 +20,7 @@
#include "common_test_utils/test_control.hpp"
#include "common_test_utils/test_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/file_util.hpp"
#include "ngraph/ngraph.hpp"
#include "onnx_import/onnx.hpp"
#include "onnx_utils.hpp"
@@ -36,8 +37,8 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_affine) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/affine.onnx"));

// input/output shape (1, 3)
auto input = test::NDArray<float, 2>{{{0.f, 1.f, 2.f}}}.get_vector();
auto expected_output = test::NDArray<float, 2>{{{50.f, 50.5f, 51.f}}}.get_vector();
auto input = ov::test::NDArray<float, 2>{{{0.f, 1.f, 2.f}}}.get_vector();
auto expected_output = ov::test::NDArray<float, 2>{{{50.f, 50.5f, 51.f}}}.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_input(Shape{1, 3}, input);
@@ -50,14 +51,14 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_crop) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/crop.onnx"));

// input shape (1, 1, 4, 4)
auto input = test::NDArray<float, 4>({{{{19.f, 20.f, 21.f, 22.f},
{23.f, 24.f, 25.f, 26.f},
{27.f, 28.f, 29.f, 30.f},
{31.f, 32.f, 33.f, 34.f}}}})
auto input = ov::test::NDArray<float, 4>({{{{19.f, 20.f, 21.f, 22.f},
{23.f, 24.f, 25.f, 26.f},
{27.f, 28.f, 29.f, 30.f},
{31.f, 32.f, 33.f, 34.f}}}})
.get_vector();

// output shape (1, 1, 2, 2)
auto expected_output = test::NDArray<float, 4>{{{{24.f, 25.f}, {28.f, 29.f}}}}.get_vector();
auto expected_output = ov::test::NDArray<float, 4>{{{{24.f, 25.f}, {28.f, 29.f}}}}.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_input(Shape{1, 1, 4, 4}, input);
@@ -70,14 +71,14 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_crop_with_scale) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/crop_with_scale.onnx"));

// input shape (1, 1, 4, 4)
auto input = test::NDArray<float, 4>({{{{19.f, 20.f, 21.f, 22.f},
{23.f, 24.f, 25.f, 26.f},
{27.f, 28.f, 29.f, 30.f},
{31.f, 32.f, 33.f, 34.f}}}})
auto input = ov::test::NDArray<float, 4>({{{{19.f, 20.f, 21.f, 22.f},
{23.f, 24.f, 25.f, 26.f},
{27.f, 28.f, 29.f, 30.f},
{31.f, 32.f, 33.f, 34.f}}}})
.get_vector();

// output shape (1, 1, 2, 3)
auto expected_output = test::NDArray<float, 4>{{{{24.f, 25.f, 26.f}, {28.f, 29.f, 30.f}}}}.get_vector();
auto expected_output = ov::test::NDArray<float, 4>{{{{24.f, 25.f, 26.f}, {28.f, 29.f, 30.f}}}}.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_input(Shape{1, 1, 4, 4}, input);

@@ -29,6 +29,7 @@
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/constant_folding.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/file_util.hpp"
#include "onnx_import/core/null_node.hpp"
#include "onnx_import/onnx.hpp"
#include "onnx_import/onnx_utils.hpp"

@@ -18,6 +18,7 @@
#include "common_test_utils/test_case.hpp"
#include "onnx_import/onnx.hpp"
#include "common_test_utils/test_control.hpp"
#include "ngraph/file_util.hpp"
#include "onnx_utils.hpp"

OPENVINO_SUPPRESS_DEPRECATED_START

@@ -21,6 +21,7 @@
#include "common_test_utils/test_control.hpp"
#include "common_test_utils/test_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/file_util.hpp"
#include "ngraph/ngraph.hpp"
#include "onnx_import/onnx.hpp"
#include "onnx_utils.hpp"

@@ -19,6 +19,7 @@
#include "common_test_utils/test_control.hpp"
#include "common_test_utils/test_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/file_util.hpp"
#include "ngraph/ngraph.hpp"
#include "onnx_import/onnx.hpp"
#include "onnx_utils.hpp"
@@ -39,13 +40,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reshape_reduced_dims) {
"onnx/reshape_reduced_dims.onnx"));

// input data shape (2, 3, 4)
auto input = test::NDArray<float, 3>({{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}},
{{12, 13, 14, 15}, {16, 17, 18, 19}, {20, 21, 22, 23}}})
auto input = ov::test::NDArray<float, 3>({{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}},
{{12, 13, 14, 15}, {16, 17, 18, 19}, {20, 21, 22, 23}}})
.get_vector();

// output data shape (2, 12)
auto expected_output = test::NDArray<float, 2>({{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}})
auto expected_output = ov::test::NDArray<float, 2>({{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -60,15 +61,15 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reshape_reordered_dims) {
"onnx/reshape_reordered_dims.onnx"));

// input data shape (2, 3, 4)
auto input = test::NDArray<float, 3>({{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}},
{{12, 13, 14, 15}, {16, 17, 18, 19}, {20, 21, 22, 23}}})
auto input = ov::test::NDArray<float, 3>({{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}},
{{12, 13, 14, 15}, {16, 17, 18, 19}, {20, 21, 22, 23}}})
.get_vector();

// output data shape (4, 2, 3)
auto expected_output = test::NDArray<float, 3>({{{0, 1, 2}, {3, 4, 5}},
{{6, 7, 8}, {9, 10, 11}},
{{12, 13, 14}, {15, 16, 17}},
{{18, 19, 20}, {21, 22, 23}}})
auto expected_output = ov::test::NDArray<float, 3>({{{0, 1, 2}, {3, 4, 5}},
{{6, 7, 8}, {9, 10, 11}},
{{12, 13, 14}, {15, 16, 17}},
{{18, 19, 20}, {21, 22, 23}}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -83,14 +84,14 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reshape_extended_dims) {
"onnx/reshape_extended_dims.onnx"));

// input data shape (2, 3, 4)
auto input = test::NDArray<float, 3>({{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}},
{{12, 13, 14, 15}, {16, 17, 18, 19}, {20, 21, 22, 23}}})
auto input = ov::test::NDArray<float, 3>({{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}},
{{12, 13, 14, 15}, {16, 17, 18, 19}, {20, 21, 22, 23}}})
.get_vector();

// output data shape (3, 2, 2, 2)
auto expected_output = test::NDArray<float, 4>({{{{0, 1}, {2, 3}}, {{4, 5}, {6, 7}}},
{{{8, 9}, {10, 11}}, {{12, 13}, {14, 15}}},
{{{16, 17}, {18, 19}}, {{20, 21}, {22, 23}}}})
auto expected_output = ov::test::NDArray<float, 4>({{{{0, 1}, {2, 3}}, {{4, 5}, {6, 7}}},
{{{8, 9}, {10, 11}}, {{12, 13}, {14, 15}}},
{{{16, 17}, {18, 19}}, {{20, 21}, {22, 23}}}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -105,14 +106,14 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reshape_single_dim) {
"onnx/reshape_single_dim.onnx"));

// input data shape (2, 3, 4)
auto input = test::NDArray<float, 3>({{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}},
{{12, 13, 14, 15}, {16, 17, 18, 19}, {20, 21, 22, 23}}})
auto input = ov::test::NDArray<float, 3>({{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}},
{{12, 13, 14, 15}, {16, 17, 18, 19}, {20, 21, 22, 23}}})
.get_vector();

// output data shape (24, )
auto expected_output =
test::NDArray<float, 1>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23})
.get_vector();
auto expected_output = ov::test::NDArray<float, 1>(
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_input(Shape{2, 3, 4}, input);
@@ -127,29 +128,29 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reshape_negative_dim) {
"onnx/reshape_negative_dim.onnx"));

// 2x3x4
auto input = test::NDArray<float, 3>({{{0.5488135f, 0.71518934f, 0.60276335f, 0.5448832f},
{0.4236548f, 0.6458941f, 0.4375872f, 0.891773f},
{0.96366274f, 0.3834415f, 0.79172504f, 0.5288949f}},
auto input = ov::test::NDArray<float, 3>({{{0.5488135f, 0.71518934f, 0.60276335f, 0.5448832f},
{0.4236548f, 0.6458941f, 0.4375872f, 0.891773f},
{0.96366274f, 0.3834415f, 0.79172504f, 0.5288949f}},

{{0.56804454f, 0.92559665f, 0.07103606f, 0.0871293f},
{0.0202184f, 0.83261985f, 0.77815676f, 0.87001216f},
{0.9786183f, 0.7991586f, 0.46147937f, 0.7805292f}}})
{{0.56804454f, 0.92559665f, 0.07103606f, 0.0871293f},
{0.0202184f, 0.83261985f, 0.77815676f, 0.87001216f},
{0.9786183f, 0.7991586f, 0.46147937f, 0.7805292f}}})
.get_vector();

// 2x6x2
auto expected_output = test::NDArray<float, 3>({{{0.5488135f, 0.71518934f},
{0.60276335f, 0.5448832f},
{0.4236548f, 0.6458941f},
{0.4375872f, 0.891773f},
{0.96366274f, 0.3834415f},
{0.79172504f, 0.5288949f}},
auto expected_output = ov::test::NDArray<float, 3>({{{0.5488135f, 0.71518934f},
{0.60276335f, 0.5448832f},
{0.4236548f, 0.6458941f},
{0.4375872f, 0.891773f},
{0.96366274f, 0.3834415f},
{0.79172504f, 0.5288949f}},

{{0.56804454f, 0.92559665f},
{0.07103606f, 0.0871293f},
{0.0202184f, 0.83261985f},
{0.77815676f, 0.87001216f},
{0.9786183f, 0.7991586f},
{0.46147937f, 0.7805292f}}})
{{0.56804454f, 0.92559665f},
{0.07103606f, 0.0871293f},
{0.0202184f, 0.83261985f},
{0.77815676f, 0.87001216f},
{0.9786183f, 0.7991586f},
{0.46147937f, 0.7805292f}}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -164,13 +165,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reshape_negative_with_zero_dim) {
"onnx/reshape_negative_with_zero_dims.onnx"));

// input data shape (2, 3, 4)
auto input = test::NDArray<float, 3>({{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}},
{{12, 13, 14, 15}, {16, 17, 18, 19}, {20, 21, 22, 23}}})
auto input = ov::test::NDArray<float, 3>({{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}},
{{12, 13, 14, 15}, {16, 17, 18, 19}, {20, 21, 22, 23}}})
.get_vector();

// output data shape (2, 6, 2)
auto expected_output = test::NDArray<float, 3>({{{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}, {10, 11}},
{{12, 13}, {14, 15}, {16, 17}, {18, 19}, {20, 21}, {22, 23}}})
auto expected_output = ov::test::NDArray<float, 3>({{{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}, {10, 11}},
{{12, 13}, {14, 15}, {16, 17}, {18, 19}, {20, 21}, {22, 23}}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -185,13 +186,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reshape_output_shape_as_input) {
"onnx/reshape_output_shape_as_input.onnx"));

// input data shape (2, 3, 4)
auto input = test::NDArray<float, 3>({{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}},
{{12, 13, 14, 15}, {16, 17, 18, 19}, {20, 21, 22, 23}}})
auto input = ov::test::NDArray<float, 3>({{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}},
{{12, 13, 14, 15}, {16, 17, 18, 19}, {20, 21, 22, 23}}})
.get_vector();

// output data shape (2, 6, 2)
auto expected_output = test::NDArray<float, 3>({{{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}, {10, 11}},
{{12, 13}, {14, 15}, {16, 17}, {18, 19}, {20, 21}, {22, 23}}})
auto expected_output = ov::test::NDArray<float, 3>({{{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}, {10, 11}},
{{12, 13}, {14, 15}, {16, 17}, {18, 19}, {20, 21}, {22, 23}}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -352,12 +353,12 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_squeeze) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/squeeze.onnx"));

// {1, 4, 1, 1, 2}
auto input = test::NDArray<float, 5>({{{{{1.0f, 2.0f}}}, {{{3.0f, 4.0f}}}, {{{5.0f, 6.0f}}}, {{{7.0f, 8.0f}}}}})
auto input = ov::test::NDArray<float, 5>({{{{{1.0f, 2.0f}}}, {{{3.0f, 4.0f}}}, {{{5.0f, 6.0f}}}, {{{7.0f, 8.0f}}}}})
.get_vector();

// {4, 2}
auto expected_output =
test::NDArray<float, 2>({{1.0f, 2.0f}, {3.0f, 4.0f}, {5.0f, 6.0f}, {7.0f, 8.0f}}).get_vector();
ov::test::NDArray<float, 2>({{1.0f, 2.0f}, {3.0f, 4.0f}, {5.0f, 6.0f}, {7.0f, 8.0f}}).get_vector();

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_input(Shape{1, 4, 1, 1, 2}, input);
@@ -393,15 +394,15 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_unsqueeze) {
auto function = onnx_import::import_onnx_model(
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/unsqueeze.onnx"));

auto input = test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
auto input = ov::test::NDArray<float, 3>({{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
.get_vector();

auto expected_output =
test::NDArray<float, 4>({{{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}}})
ov::test::NDArray<float, 4>({{{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}}})
.get_vector();

auto test_case = ov::test::TestCase(function, s_device);
@@ -415,15 +416,15 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_unsqueeze_negative_axes) {
SERIALIZED_ZOO,
"onnx/unsqueeze_negative_axes.onnx"));

auto input = test::NDArray<float, 4>({{{{-1.8427763f, -1.0467733f, 0.50550157f, 1.4897262f, 0.33057404f}},
{{1.9244908f, -0.3804572f, 0.76275414f, -0.8183123f, 0.93889356f}},
{{-0.05270234f, 0.7113202f, -0.45783648f, -1.3378475f, 0.26926285f}}}})
auto input = ov::test::NDArray<float, 4>({{{{-1.8427763f, -1.0467733f, 0.50550157f, 1.4897262f, 0.33057404f}},
{{1.9244908f, -0.3804572f, 0.76275414f, -0.8183123f, 0.93889356f}},
{{-0.05270234f, 0.7113202f, -0.45783648f, -1.3378475f, 0.26926285f}}}})
|
||||
.get_vector();
|
||||
|
||||
auto expected_output =
|
||||
test::NDArray<float, 5>({{{{{-1.8427763f, -1.0467733f, 0.50550157f, 1.4897262f, 0.33057404f}}},
|
||||
{{{1.9244908f, -0.3804572f, 0.76275414f, -0.8183123f, 0.93889356f}}},
|
||||
{{{-0.05270234f, 0.7113202f, -0.45783648f, -1.3378475f, 0.26926285f}}}}})
|
||||
ov::test::NDArray<float, 5>({{{{{-1.8427763f, -1.0467733f, 0.50550157f, 1.4897262f, 0.33057404f}}},
|
||||
{{{1.9244908f, -0.3804572f, 0.76275414f, -0.8183123f, 0.93889356f}}},
|
||||
{{{-0.05270234f, 0.7113202f, -0.45783648f, -1.3378475f, 0.26926285f}}}}})
|
||||
.get_vector();
|
||||
|
||||
auto test_case = ov::test::TestCase(function, s_device);
|
||||
@ -438,10 +439,10 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_concat) {
|
||||
|
||||
Inputs inputs;
|
||||
|
||||
inputs.emplace_back(test::NDArray<float, 1>({1, 2}).get_vector());
|
||||
inputs.emplace_back(test::NDArray<float, 1>({3, 4}).get_vector());
|
||||
inputs.emplace_back(ov::test::NDArray<float, 1>({1, 2}).get_vector());
|
||||
inputs.emplace_back(ov::test::NDArray<float, 1>({3, 4}).get_vector());
|
||||
|
||||
auto expected_output = test::NDArray<float, 1>({1, 2, 3, 4}).get_vector();
|
||||
auto expected_output = ov::test::NDArray<float, 1>({1, 2, 3, 4}).get_vector();
|
||||
|
||||
auto test_case = ov::test::TestCase(function, s_device);
|
||||
test_case.add_multiple_inputs(inputs);
|
||||
@ -456,10 +457,10 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_concat_negative_axis) {
|
||||
|
||||
Inputs inputs;
|
||||
|
||||
inputs.emplace_back(test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
|
||||
inputs.emplace_back(test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());
|
||||
inputs.emplace_back(ov::test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
|
||||
inputs.emplace_back(ov::test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());
|
||||
|
||||
auto expected_output = test::NDArray<float, 2>({{1, 2}, {3, 4}, {5, 6}, {7, 8}}).get_vector();
|
||||
auto expected_output = ov::test::NDArray<float, 2>({{1, 2}, {3, 4}, {5, 6}, {7, 8}}).get_vector();
|
||||
|
||||
auto test_case = ov::test::TestCase(function, s_device);
|
||||
test_case.add_multiple_inputs(inputs);
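For orientation, a minimal sketch of the pattern these ONNX tests share, assuming a `function` imported as above; the 2x2 values are illustrative only:
Inputs inputs;
// NDArray flattens the nested initializer list row-major: get_vector() here yields {1, 2, 3, 4}.
inputs.emplace_back(ov::test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
auto test_case = ov::test::TestCase(function, s_device);
test_case.add_multiple_inputs(inputs);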
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include "common_test_utils/test_control.hpp"
|
||||
#include "common_test_utils/test_tools.hpp"
|
||||
#include "gtest/gtest.h"
|
||||
#include "ngraph/file_util.hpp"
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "onnx_import/onnx.hpp"
|
||||
#include "onnx_utils.hpp"
|
||||
|
@ -4,6 +4,8 @@
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "ngraph/file_util.hpp"
|
||||
|
||||
// clang-format off
|
||||
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
|
||||
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include "common_test_utils/test_case.hpp"
|
||||
#include "gtest/gtest.h"
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "ngraph/file_util.hpp"
|
||||
#include "common_test_utils/test_control.hpp"
|
||||
#include "onnx_utils.hpp"
|
||||
|
||||
|
@ -6,6 +6,7 @@
|
||||
#include "common_test_utils/test_case.hpp"
|
||||
#include "common_test_utils/test_control.hpp"
|
||||
#include "gtest/gtest.h"
|
||||
#include "ngraph/file_util.hpp"
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "onnx_import/onnx.hpp"
|
||||
#include "onnx_import/onnx_utils.hpp"
|
||||
|
@ -17,9 +17,13 @@
|
||||
|
||||
#ifdef _WIN32
|
||||
// Copied from linux libc sys/stat.h:
|
||||
#ifndef S_ISREG
|
||||
# define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
|
||||
#endif
|
||||
#ifndef S_ISDIR
|
||||
# define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
using namespace ::testing;
|
||||
using namespace std;
|
||||
|
@ -22,7 +22,7 @@ function(add_common_utils ADD_TARGET_NAME)
|
||||
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
|
||||
EXCLUDED_SOURCE_PATHS
|
||||
${TARGET_EXCLUDED_SOURCE_PATHS}
|
||||
ADD_CPPLINT
|
||||
ADD_CLANG_FORMAT
|
||||
DEVELOPER_PACKAGE
|
||||
tests
|
||||
LINK_LIBRARIES
|
||||
|
@ -21,8 +21,8 @@ namespace utils {
|
||||
/// \returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
|
||||
template <typename T>
|
||||
typename std::enable_if<std::is_floating_point<T>::value, ::testing::AssertionResult>::type all_close(
|
||||
const T * const a,
|
||||
const T * const b,
|
||||
const T* const a,
|
||||
const T* const b,
|
||||
size_t size,
|
||||
T rtol = static_cast<T>(1e-5),
|
||||
T atol = static_cast<T>(1e-8)) {
|
||||
@ -52,8 +52,8 @@ typename std::enable_if<std::is_floating_point<T>::value, ::testing::AssertionRe
|
||||
/// \returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
|
||||
template <typename T>
|
||||
typename std::enable_if<std::is_integral<T>::value, ::testing::AssertionResult>::type all_close(
|
||||
const T * const a,
|
||||
const T * const b,
|
||||
const T* const a,
|
||||
const T* const b,
|
||||
size_t size,
|
||||
T rtol = static_cast<T>(1e-5),
|
||||
T atol = static_cast<T>(1e-8)) {
|
||||
@ -70,7 +70,6 @@ typename std::enable_if<std::is_integral<T>::value, ::testing::AssertionResult>:
|
||||
return rc ? ::testing::AssertionSuccess() : ar_fail;
|
||||
}
|
||||
|
||||
|
||||
/// \brief Same as numpy.allclose
|
||||
/// \param a First tensor to compare
|
||||
/// \param b Second tensor to compare
|
||||
@ -89,7 +88,6 @@ typename std::enable_if<std::is_floating_point<T>::value, ::testing::AssertionRe
|
||||
return all_close(a.data(), b.data(), a.size(), rtol, atol);
|
||||
}
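As an illustration of the overloads above, a hedged sketch with made-up data; the rtol/atol used are the defaults declared in this header:
std::vector<float> a{1.0f, 2.0f, 3.0f};
std::vector<float> b{1.0f, 2.0f + 1e-7f, 3.0f};
// Holds because |a_i - b_i| <= atol + rtol * |b_i| for every element, with rtol = 1e-5 and atol = 1e-8.
EXPECT_TRUE(ov::test::utils::all_close(a, b));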
|
||||
|
||||
|
||||
/// \brief Same as numpy.allclose
|
||||
/// \param a First tensor to compare
|
||||
/// \param b Second tensor to compare
|
||||
|
@ -4,19 +4,19 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "openvino/core/partial_shape.hpp"
|
||||
|
||||
#include <algorithm>
|
||||
#include <chrono>
|
||||
#include <fstream>
|
||||
#include <iterator>
|
||||
#include <memory>
|
||||
#include <ostream>
|
||||
#include <set>
|
||||
#include <sstream>
|
||||
#include <fstream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "openvino/core/partial_shape.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
|
@ -5,24 +5,20 @@
|
||||
#pragma once
|
||||
|
||||
#include <cmath>
|
||||
#include <utility>
|
||||
#include <random>
|
||||
#include <utility>
|
||||
|
||||
#include "common_test_utils/common_utils.hpp"
|
||||
#include "gtest/gtest.h"
|
||||
#include "ie_blob.h"
|
||||
#include "openvino/core/type/element_type_traits.hpp"
|
||||
#include "openvino/runtime/tensor.hpp"
|
||||
#include "ngraph/type/bfloat16.hpp"
|
||||
#include "ngraph/type/float16.hpp"
|
||||
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
OPENVINO_SUPPRESS_DEPRECATED_START
|
||||
|
||||
inline void fill_data(float *data, size_t size, size_t duty_ratio = 10) {
|
||||
inline void fill_data(float* data, size_t size, size_t duty_ratio = 10) {
|
||||
for (size_t i = 0; i < size; i++) {
|
||||
if ((i / duty_ratio) % 2 == 1) {
|
||||
data[i] = 0.0f;
|
||||
@ -54,7 +50,9 @@ inline std::vector<float> generate_float_numbers(std::size_t vec_len, float min,
|
||||
* @param blob tensor to fill in
|
||||
* @param values src tensor which should be broadcast
|
||||
*/
|
||||
void fill_data_with_broadcast(InferenceEngine::Blob::Ptr &blob, InferenceEngine::Blob::Ptr &values);
|
||||
OPENVINO_SUPPRESS_DEPRECATED_START
|
||||
void fill_data_with_broadcast(InferenceEngine::Blob::Ptr& blob, InferenceEngine::Blob::Ptr& values);
|
||||
OPENVINO_SUPPRESS_DEPRECATED_END
|
||||
void fill_data_with_broadcast(ov::Tensor& tensor, ov::Tensor& values);
|
||||
|
||||
/**
|
||||
@ -64,7 +62,9 @@ void fill_data_with_broadcast(ov::Tensor& tensor, ov::Tensor& values);
|
||||
* @param axis Axis to apply values
|
||||
* @param values data to broadcast
|
||||
*/
|
||||
void fill_data_with_broadcast(InferenceEngine::Blob::Ptr &blob, size_t axis, std::vector<float> values);
|
||||
OPENVINO_SUPPRESS_DEPRECATED_START
|
||||
void fill_data_with_broadcast(InferenceEngine::Blob::Ptr& blob, size_t axis, std::vector<float> values);
|
||||
OPENVINO_SUPPRESS_DEPRECATED_END
|
||||
void fill_data_with_broadcast(ov::Tensor& tensor, size_t axis, std::vector<float> values);
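A short usage sketch of the ov::Tensor overload just above; the shape, axis, and values are assumptions for illustration:
ov::Tensor tensor(ov::element::f32, ov::Shape{1, 3, 2, 2});
// Broadcast one value per channel along axis 1, so each 2x2 plane is filled with 0.1, 0.2 or 0.3.
ov::test::utils::fill_data_with_broadcast(tensor, 1, {0.1f, 0.2f, 0.3f});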
|
||||
/**
|
||||
* Make a view blob with new shape. It will reinterpret original tensor data as a tensor with new shape.
|
||||
@ -76,22 +76,31 @@ void fill_data_with_broadcast(ov::Tensor& tensor, size_t axis, std::vector<float
|
||||
* @param new_shape new shape for the view blob
|
||||
* @return the new blob view
|
||||
*/
|
||||
InferenceEngine::Blob::Ptr
|
||||
make_reshape_view(const InferenceEngine::Blob::Ptr &blob, InferenceEngine::SizeVector new_shape);
|
||||
OPENVINO_SUPPRESS_DEPRECATED_START
|
||||
InferenceEngine::Blob::Ptr make_reshape_view(const InferenceEngine::Blob::Ptr& blob,
|
||||
InferenceEngine::SizeVector new_shape);
|
||||
OPENVINO_SUPPRESS_DEPRECATED_END
|
||||
|
||||
/**
|
||||
* Calculate size of buffer required for provided tensor descriptor.
|
||||
* @param tdesc provided tensor descriptor
|
||||
* @return size in bytes
|
||||
*/
|
||||
size_t byte_size(const InferenceEngine::TensorDesc &tdesc);
|
||||
OPENVINO_SUPPRESS_DEPRECATED_START
|
||||
size_t byte_size(const InferenceEngine::TensorDesc& tdesc);
|
||||
OPENVINO_SUPPRESS_DEPRECATED_END
|
||||
|
||||
ov::Tensor make_tensor_with_precision_convert(const ov::Tensor& tensor, ov::element::Type prc);
|
||||
|
||||
|
||||
template<typename T>
|
||||
inline void fill_roi_raw_ptr(T* data, size_t data_size, const uint32_t range, const int32_t height, const int32_t width, const float omega,
|
||||
const bool is_roi_max_mode, const int32_t seed = 1) {
|
||||
template <typename T>
|
||||
inline void fill_roi_raw_ptr(T* data,
|
||||
size_t data_size,
|
||||
const uint32_t range,
|
||||
const int32_t height,
|
||||
const int32_t width,
|
||||
const float omega,
|
||||
const bool is_roi_max_mode,
|
||||
const int32_t seed = 1) {
|
||||
std::default_random_engine random(seed);
|
||||
std::uniform_int_distribution<int32_t> distribution(0, range);
|
||||
|
||||
@ -129,23 +138,33 @@ inline void fill_roi_raw_ptr(T* data, size_t data_size, const uint32_t range, co
|
||||
}
|
||||
}
|
||||
|
||||
template<InferenceEngine::Precision::ePrecision PRC>
|
||||
inline void
|
||||
fill_data_roi(InferenceEngine::Blob::Ptr &blob, const uint32_t range, const int height, const int width, const float omega,
|
||||
const bool is_roi_max_mode, const int seed = 1, void (*propGenerator)(InferenceEngine::Blob::Ptr &) = nullptr) {
|
||||
OPENVINO_SUPPRESS_DEPRECATED_START
|
||||
template <InferenceEngine::Precision::ePrecision PRC>
|
||||
inline void fill_data_roi(InferenceEngine::Blob::Ptr& blob,
|
||||
const uint32_t range,
|
||||
const int height,
|
||||
const int width,
|
||||
const float omega,
|
||||
const bool is_roi_max_mode,
|
||||
const int seed = 1,
|
||||
void (*propGenerator)(InferenceEngine::Blob::Ptr&) = nullptr) {
|
||||
if (propGenerator != nullptr) {
|
||||
propGenerator(blob);
|
||||
return;
|
||||
}
|
||||
using T = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
|
||||
auto *data = blob->buffer().as<T *>();
|
||||
auto* data = blob->buffer().as<T*>();
|
||||
fill_roi_raw_ptr<T>(data, blob->size(), range, height, width, omega, is_roi_max_mode, seed);
|
||||
}
|
||||
|
||||
template<InferenceEngine::Precision::ePrecision PRC>
|
||||
inline void
|
||||
fill_data_roi(ov::runtime::Tensor& tensor, const uint32_t range, const int height, const int width, const float omega,
|
||||
const bool is_roi_max_mode, const int seed = 1) {
|
||||
template <InferenceEngine::Precision::ePrecision PRC>
|
||||
inline void fill_data_roi(ov::runtime::Tensor& tensor,
|
||||
const uint32_t range,
|
||||
const int height,
|
||||
const int width,
|
||||
const float omega,
|
||||
const bool is_roi_max_mode,
|
||||
const int seed = 1) {
|
||||
using T = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
|
||||
auto* data = static_cast<T*>(tensor.data());
|
||||
std::default_random_engine random(seed);
|
||||
@ -184,11 +203,15 @@ fill_data_roi(ov::runtime::Tensor& tensor, const uint32_t range, const int heigh
|
||||
data[i + 4] = static_cast<T>(max_y);
|
||||
}
|
||||
}
|
||||
OPENVINO_SUPPRESS_DEPRECATED_END
|
||||
|
||||
template<class T>
|
||||
void inline
|
||||
fill_data_random(T *pointer, std::size_t size, const uint32_t range = 10, double_t start_from = 0, const int32_t k = 1,
|
||||
const int seed = 1) {
|
||||
template <class T>
|
||||
void inline fill_data_random(T* pointer,
|
||||
std::size_t size,
|
||||
const uint32_t range = 10,
|
||||
double_t start_from = 0,
|
||||
const int32_t k = 1,
|
||||
const int seed = 1) {
|
||||
if (range == 0) {
|
||||
for (std::size_t i = 0; i < size; i++) {
|
||||
pointer[i] = static_cast<T>(start_from);
|
||||
@ -197,7 +220,7 @@ fill_data_random(T *pointer, std::size_t size, const uint32_t range = 10, double
|
||||
}
|
||||
|
||||
testing::internal::Random random(seed);
|
||||
const uint32_t k_range = k * range; // range with respect to k
|
||||
const uint32_t k_range = k * range; // range with respect to k
|
||||
random.Generate(k_range);
|
||||
|
||||
if (start_from < 0 && !std::is_signed<T>::value) {
|
||||
@ -245,10 +268,10 @@ void inline fill_random_unique_sequence(T* rawBlobDataPtr,
|
||||
while (elems.size() != size) {
|
||||
auto value = static_cast<float>(dist(generator));
|
||||
value /= static_cast<float>(k);
|
||||
if (std::is_same<ngraph::float16, T>::value) {
|
||||
elems.insert(static_cast<T>(ngraph::float16(value).to_bits()));
|
||||
} else if (std::is_same<ngraph::bfloat16, T>::value) {
|
||||
elems.insert(static_cast<T>(ngraph::bfloat16(value).to_bits()));
|
||||
if (std::is_same<ov::float16, T>::value) {
|
||||
elems.insert(static_cast<T>(ov::float16(value).to_bits()));
|
||||
} else if (std::is_same<ov::bfloat16, T>::value) {
|
||||
elems.insert(static_cast<T>(ov::bfloat16(value).to_bits()));
|
||||
} else {
|
||||
elems.insert(static_cast<T>(value));
|
||||
}
|
||||
@ -266,7 +289,11 @@ void inline fill_random_unique_sequence(T* rawBlobDataPtr,
|
||||
* - With k = 2 the resolution of the numbers will be 1/2, so the outputs are only .0 or .50
|
||||
* - With k = 4 the resolution of the numbers will be 1/4, so the outputs are only .0, .25, .50, .75, etc.
|
||||
*/
|
||||
void fill_tensor_random(ov::Tensor& tensor, const double range = 10, const double start_from = 0, const int32_t k = 1, const int seed = 1);
|
||||
void fill_tensor_random(ov::Tensor& tensor,
|
||||
const double range = 10,
|
||||
const double start_from = 0,
|
||||
const int32_t k = 1,
|
||||
const int seed = 1);
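A minimal sketch of calling this helper, with illustrative parameters:
ov::Tensor tensor(ov::element::f32, ov::Shape{2, 3});
// range = 10, start_from = 5, k = 2: values fall in [5, 15) with a resolution of 1/2.
ov::test::utils::fill_tensor_random(tensor, 10, 5, 2);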
|
||||
|
||||
/** @brief Fill blob with random data.
|
||||
*
|
||||
@ -278,11 +305,15 @@ void fill_tensor_random(ov::Tensor& tensor, const double range = 10, const doubl
|
||||
* - With k = 2 the resolution of the numbers will be 1/2, so the outputs are only .0 or .50
|
||||
* - With k = 4 the resolution of the numbers will be 1/4, so the outputs are only .0, .25, .50, .75, etc.
|
||||
*/
|
||||
template<InferenceEngine::Precision::ePrecision PRC>
|
||||
void inline fill_data_random(InferenceEngine::Blob::Ptr &blob, const uint32_t range = 10, int32_t start_from = 0,
|
||||
const int32_t k = 1, const int seed = 1) {
|
||||
OPENVINO_SUPPRESS_DEPRECATED_START
|
||||
template <InferenceEngine::Precision::ePrecision PRC>
|
||||
void inline fill_data_random(InferenceEngine::Blob::Ptr& blob,
|
||||
const uint32_t range = 10,
|
||||
int32_t start_from = 0,
|
||||
const int32_t k = 1,
|
||||
const int seed = 1) {
|
||||
using T = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
|
||||
auto *rawBlobDataPtr = blob->buffer().as<T *>();
|
||||
auto* rawBlobDataPtr = blob->buffer().as<T*>();
|
||||
if (PRC == InferenceEngine::Precision::U4 || PRC == InferenceEngine::Precision::I4 ||
|
||||
PRC == InferenceEngine::Precision::BIN) {
|
||||
fill_data_random(rawBlobDataPtr, blob->byteSize(), range, start_from, k, seed);
|
||||
@ -290,6 +321,7 @@ void inline fill_data_random(InferenceEngine::Blob::Ptr &blob, const uint32_t ra
|
||||
fill_data_random(rawBlobDataPtr, blob->size(), range, start_from, k, seed);
|
||||
}
|
||||
}
|
||||
OPENVINO_SUPPRESS_DEPRECATED_END
|
||||
|
||||
/** @brief Fill blob with a sorted sequence of unique elements randomly generated.
|
||||
*
|
||||
@ -304,14 +336,15 @@ void inline fill_data_random(InferenceEngine::Blob::Ptr &blob, const uint32_t ra
|
||||
* - With k = 2 the resolution of the numbers will be 1/2, so the outputs are only .0 or .50
|
||||
* - With k = 4 the resolution of the numbers will be 1/4, so the outputs are only .0, .25, .50, .75, etc.
|
||||
*/
|
||||
template<InferenceEngine::Precision::ePrecision PRC>
|
||||
void inline fill_random_unique_sequence(InferenceEngine::Blob::Ptr &blob,
|
||||
OPENVINO_SUPPRESS_DEPRECATED_START
|
||||
template <InferenceEngine::Precision::ePrecision PRC>
|
||||
void inline fill_random_unique_sequence(InferenceEngine::Blob::Ptr& blob,
|
||||
uint64_t range,
|
||||
int64_t start_from = 0,
|
||||
const int64_t k = 1,
|
||||
const int32_t seed = 1) {
|
||||
using T = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
|
||||
auto *rawBlobDataPtr = blob->buffer().as<T *>();
|
||||
auto* rawBlobDataPtr = blob->buffer().as<T*>();
|
||||
|
||||
if (start_from < 0 && !std::is_signed<T>::value) {
|
||||
start_from = 0;
|
||||
@ -329,16 +362,21 @@ void inline fill_random_unique_sequence(InferenceEngine::Blob::Ptr &blob,
|
||||
auto value = static_cast<float>(dist(generator));
|
||||
value /= static_cast<float>(k);
|
||||
if (PRC == InferenceEngine::Precision::FP16) {
|
||||
elems.insert(static_cast<T>(ngraph::float16(value).to_bits()));
|
||||
elems.insert(static_cast<T>(ov::float16(value).to_bits()));
|
||||
} else {
|
||||
elems.insert(static_cast<T>(value));
|
||||
}
|
||||
}
|
||||
std::copy(elems.begin(), elems.end(), rawBlobDataPtr);
|
||||
}
|
||||
OPENVINO_SUPPRESS_DEPRECATED_END
|
||||
|
||||
template<typename T>
|
||||
void inline fill_data_ptr_consistently(T* data, size_t size, const uint32_t range = 10, int32_t start_from = 0, const int32_t k = 1) {
|
||||
template <typename T>
|
||||
void inline fill_data_ptr_consistently(T* data,
|
||||
size_t size,
|
||||
const uint32_t range = 10,
|
||||
int32_t start_from = 0,
|
||||
const int32_t k = 1) {
|
||||
int64_t value = start_from;
|
||||
const int64_t maxValue = start_from + range;
|
||||
for (size_t i = 0; i < size; i++) {
|
||||
@ -351,51 +389,57 @@ void inline fill_data_ptr_consistently(T* data, size_t size, const uint32_t rang
|
||||
}
|
||||
}
|
||||
|
||||
template<InferenceEngine::Precision::ePrecision PRC>
|
||||
void inline fill_data_consistently(InferenceEngine::Blob::Ptr &blob, const uint32_t range = 10, int32_t start_from = 0,
|
||||
OPENVINO_SUPPRESS_DEPRECATED_START
|
||||
template <InferenceEngine::Precision::ePrecision PRC>
|
||||
void inline fill_data_consistently(InferenceEngine::Blob::Ptr& blob,
|
||||
const uint32_t range = 10,
|
||||
int32_t start_from = 0,
|
||||
const int32_t k = 1) {
|
||||
using T = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
|
||||
auto *rawBlobDataPtr = blob->buffer().as<T *>();
|
||||
auto* rawBlobDataPtr = blob->buffer().as<T*>();
|
||||
if (start_from < 0 && !std::is_signed<T>::value) {
|
||||
start_from = 0;
|
||||
}
|
||||
fill_data_ptr_consistently(rawBlobDataPtr, blob->size(), range, start_from, k);
|
||||
}
|
||||
|
||||
template<InferenceEngine::Precision::ePrecision PRC>
|
||||
void inline
|
||||
fill_data_random_float(InferenceEngine::Blob::Ptr &blob, const uint32_t range, int32_t start_from, const int32_t k,
|
||||
const int seed = 1) {
|
||||
template <InferenceEngine::Precision::ePrecision PRC>
|
||||
void inline fill_data_random_float(InferenceEngine::Blob::Ptr& blob,
|
||||
const uint32_t range,
|
||||
int32_t start_from,
|
||||
const int32_t k,
|
||||
const int seed = 1) {
|
||||
using T = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
|
||||
std::default_random_engine random(seed);
|
||||
// 1/k is the resolution of the floating point numbers
|
||||
std::uniform_int_distribution<int32_t> distribution(k * start_from, k * (start_from + range));
|
||||
|
||||
auto *rawBlobDataPtr = blob->buffer().as<T *>();
|
||||
auto* rawBlobDataPtr = blob->buffer().as<T*>();
|
||||
for (size_t i = 0; i < blob->size(); i++) {
|
||||
auto value = static_cast<float>(distribution(random));
|
||||
value /= static_cast<float>(k);
|
||||
if (PRC == InferenceEngine::Precision::FP16) {
|
||||
rawBlobDataPtr[i] = static_cast<T>(ngraph::float16(value).to_bits());
|
||||
rawBlobDataPtr[i] = static_cast<T>(ov::float16(value).to_bits());
|
||||
} else if (PRC == InferenceEngine::Precision::BF16) {
|
||||
rawBlobDataPtr[i] = static_cast<T>(ngraph::bfloat16(value).to_bits());
|
||||
rawBlobDataPtr[i] = static_cast<T>(ov::bfloat16(value).to_bits());
|
||||
} else {
|
||||
rawBlobDataPtr[i] = static_cast<T>(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
template <typename T>
|
||||
void inline fill_data_ptr_normal_random_float(T* data,
|
||||
size_t size,
|
||||
const float mean,
|
||||
const float stddev,
|
||||
const int seed = 1) {
|
||||
size_t size,
|
||||
const float mean,
|
||||
const float stddev,
|
||||
const int seed = 1) {
|
||||
std::default_random_engine random(seed);
|
||||
std::normal_distribution<> normal_d{mean, stddev};
|
||||
for (size_t i = 0; i < size; i++) {
|
||||
auto value = static_cast<float>(normal_d(random));
|
||||
if (typeid(T) == typeid(typename InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type)) {
|
||||
if (typeid(T) ==
|
||||
typeid(typename InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type)) {
|
||||
data[i] = static_cast<T>(ov::float16(value).to_bits());
|
||||
} else {
|
||||
data[i] = static_cast<T>(value);
|
||||
@ -403,26 +447,26 @@ void inline fill_data_ptr_normal_random_float(T* data,
|
||||
}
|
||||
}
|
||||
|
||||
template<InferenceEngine::Precision::ePrecision PRC>
|
||||
void inline fill_data_normal_random_float(InferenceEngine::Blob::Ptr &blob,
|
||||
template <InferenceEngine::Precision::ePrecision PRC>
|
||||
void inline fill_data_normal_random_float(InferenceEngine::Blob::Ptr& blob,
|
||||
const float mean,
|
||||
const float stddev,
|
||||
const int seed = 1) {
|
||||
using T = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
|
||||
auto *rawBlobDataPtr = blob->buffer().as<T *>();
|
||||
auto* rawBlobDataPtr = blob->buffer().as<T*>();
|
||||
fill_data_ptr_normal_random_float<T>(rawBlobDataPtr, blob->size(), mean, stddev, seed);
|
||||
}
|
||||
|
||||
template<InferenceEngine::Precision::ePrecision PRC, typename T>
|
||||
void inline fill_data_float_array(InferenceEngine::Blob::Ptr &blob, const T values[], const size_t size) {
|
||||
template <InferenceEngine::Precision::ePrecision PRC, typename T>
|
||||
void inline fill_data_float_array(InferenceEngine::Blob::Ptr& blob, const T values[], const size_t size) {
|
||||
using Type = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
|
||||
|
||||
auto *rawBlobDataPtr = blob->buffer().as<T *>();
|
||||
auto* rawBlobDataPtr = blob->buffer().as<T*>();
|
||||
for (size_t i = 0; i < std::min(size, blob->size()); i++) {
|
||||
auto value = values[i];
|
||||
if (typeid(Type) ==
|
||||
typeid(typename InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type)) {
|
||||
rawBlobDataPtr[i] = static_cast<Type>(ngraph::float16(value).to_bits());
|
||||
rawBlobDataPtr[i] = static_cast<Type>(ov::float16(value).to_bits());
|
||||
|
||||
} else {
|
||||
rawBlobDataPtr[i] = static_cast<Type>(value);
|
||||
@ -430,8 +474,8 @@ void inline fill_data_float_array(InferenceEngine::Blob::Ptr &blob, const T valu
|
||||
}
|
||||
}
|
||||
|
||||
template<>
|
||||
void inline fill_data_random<InferenceEngine::Precision::FP32>(InferenceEngine::Blob::Ptr &blob,
|
||||
template <>
|
||||
void inline fill_data_random<InferenceEngine::Precision::FP32>(InferenceEngine::Blob::Ptr& blob,
|
||||
const uint32_t range,
|
||||
int32_t start_from,
|
||||
const int32_t k,
|
||||
@ -439,45 +483,43 @@ void inline fill_data_random<InferenceEngine::Precision::FP32>(InferenceEngine::
|
||||
fill_data_random_float<InferenceEngine::Precision::FP32>(blob, range, start_from, k, seed);
|
||||
}
|
||||
|
||||
template<>
|
||||
void inline fill_data_random<InferenceEngine::Precision::FP16>(InferenceEngine::Blob::Ptr &blob,
|
||||
template <>
|
||||
void inline fill_data_random<InferenceEngine::Precision::FP16>(InferenceEngine::Blob::Ptr& blob,
|
||||
const uint32_t range,
|
||||
int32_t start_from,
|
||||
const int32_t k, const int seed) {
|
||||
const int32_t k,
|
||||
const int seed) {
|
||||
fill_data_random_float<InferenceEngine::Precision::FP16>(blob, range, start_from, k, seed);
|
||||
}
|
||||
|
||||
template<>
|
||||
void inline fill_data_random<InferenceEngine::Precision::BF16>(InferenceEngine::Blob::Ptr &blob,
|
||||
template <>
|
||||
void inline fill_data_random<InferenceEngine::Precision::BF16>(InferenceEngine::Blob::Ptr& blob,
|
||||
const uint32_t range,
|
||||
int32_t start_from,
|
||||
const int32_t k, const int seed) {
|
||||
const int32_t k,
|
||||
const int seed) {
|
||||
fill_data_random_float<InferenceEngine::Precision::BF16>(blob, range, start_from, k, seed);
|
||||
}
|
||||
OPENVINO_SUPPRESS_DEPRECATED_END
|
||||
|
||||
template<typename T>
|
||||
typename std::enable_if<std::is_signed<T>::value, T>::type
|
||||
inline ie_abs(const T &val) {
|
||||
template <typename T>
|
||||
typename std::enable_if<std::is_signed<T>::value, T>::type inline ie_abs(const T& val) {
|
||||
return std::abs(val);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
typename std::enable_if<std::is_unsigned<T>::value, T>::type
|
||||
inline ie_abs(const T &val) {
|
||||
template <typename T>
|
||||
typename std::enable_if<std::is_unsigned<T>::value, T>::type inline ie_abs(const T& val) {
|
||||
return val;
|
||||
}
|
||||
|
||||
inline ngraph::bfloat16 ie_abs(const ngraph::bfloat16 &val) {
|
||||
return ngraph::bfloat16::from_bits(val.to_bits() & 0x7FFF);
|
||||
inline ov::bfloat16 ie_abs(const ov::bfloat16& val) {
|
||||
return ov::bfloat16::from_bits(val.to_bits() & 0x7FFF);
|
||||
}
|
||||
|
||||
inline ngraph::float16 ie_abs(const ngraph::float16 &val) {
|
||||
return ngraph::float16::from_bits(val.to_bits() & 0x7FFF);
|
||||
inline ov::float16 ie_abs(const ov::float16& val) {
|
||||
return ov::float16::from_bits(val.to_bits() & 0x7FFF);
|
||||
}
|
||||
|
||||
OPENVINO_SUPPRESS_DEPRECATED_END
|
||||
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
||||
|
||||
|
@ -4,33 +4,33 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <regex>
|
||||
#include <fstream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <sys/stat.h>
|
||||
|
||||
#include <fstream>
|
||||
#include <regex>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "common_test_utils/common_utils.hpp"
|
||||
#include "common_test_utils/test_constants.hpp"
|
||||
#include "common_test_utils/w_dirent.h"
|
||||
#include "common_test_utils/common_utils.hpp"
|
||||
|
||||
#include "openvino/runtime/internal_properties.hpp"
|
||||
#include "openvino/runtime/iplugin.hpp"
|
||||
#include "openvino/util/file_util.hpp"
|
||||
#include "openvino/util/shared_object.hpp"
|
||||
#include "openvino/runtime/internal_properties.hpp"
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <direct.h>
|
||||
#define rmdir(dir) _rmdir(dir)
|
||||
# include <direct.h>
|
||||
# define rmdir(dir) _rmdir(dir)
|
||||
#else // _WIN32
|
||||
#include <unistd.h>
|
||||
# include <unistd.h>
|
||||
#endif // _WIN32
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
|
||||
template<class T>
|
||||
template <class T>
|
||||
inline std::string to_string_c_locale(T value) {
|
||||
std::stringstream val_stream;
|
||||
val_stream.imbue(std::locale("C"));
|
||||
@ -38,25 +38,26 @@ inline std::string to_string_c_locale(T value) {
|
||||
return val_stream.str();
|
||||
}
|
||||
|
||||
inline std::string makePath(const std::string &folder, const std::string &file) {
|
||||
if (folder.empty()) return file;
|
||||
inline std::string makePath(const std::string& folder, const std::string& file) {
|
||||
if (folder.empty())
|
||||
return file;
|
||||
return folder + FileSeparator + file;
|
||||
}
|
||||
|
||||
inline long long fileSize(const char *fileName) {
|
||||
inline long long fileSize(const char* fileName) {
|
||||
std::ifstream in(fileName, std::ios_base::binary | std::ios_base::ate);
|
||||
return in.tellg();
|
||||
}
|
||||
|
||||
inline long long fileSize(const std::string &fileName) {
|
||||
inline long long fileSize(const std::string& fileName) {
|
||||
return fileSize(fileName.c_str());
|
||||
}
|
||||
|
||||
inline bool fileExists(const char *fileName) {
|
||||
inline bool fileExists(const char* fileName) {
|
||||
return fileSize(fileName) >= 0;
|
||||
}
|
||||
|
||||
inline bool fileExists(const std::string &fileName) {
|
||||
inline bool fileExists(const std::string& fileName) {
|
||||
return fileExists(fileName.c_str());
|
||||
}
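For illustration, a small sketch combining the path helpers above; the directory and file name are hypothetical:
const std::string xml_path = ov::test::utils::makePath("some_dir", "model.xml");
if (ov::test::utils::fileExists(xml_path)) {
    const long long bytes = ov::test::utils::fileSize(xml_path);  // non-negative once the file exists
    (void)bytes;
}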
|
||||
|
||||
@ -72,7 +73,7 @@ inline void removeFile(const std::string& path) {
|
||||
}
|
||||
}
|
||||
|
||||
inline void removeIRFiles(const std::string &xmlFilePath, const std::string &binFileName) {
|
||||
inline void removeIRFiles(const std::string& xmlFilePath, const std::string& binFileName) {
|
||||
if (fileExists(xmlFilePath)) {
|
||||
std::remove(xmlFilePath.c_str());
|
||||
}
|
||||
@ -87,8 +88,8 @@ inline void removeIRFiles(const std::string &xmlFilePath, const std::string &bin
|
||||
// < 0 - error
|
||||
// >= 0 - count of removed files
|
||||
inline int removeFilesWithExt(std::string path, std::string ext) {
|
||||
struct dirent *ent;
|
||||
DIR *dir = opendir(path.c_str());
|
||||
struct dirent* ent;
|
||||
DIR* dir = opendir(path.c_str());
|
||||
int ret = 0;
|
||||
if (dir != nullptr) {
|
||||
while ((ent = readdir(dir)) != NULL) {
|
||||
@ -114,8 +115,8 @@ inline int removeFilesWithExt(std::string path, std::string ext) {
|
||||
// Return value:
|
||||
// vector of strings representing file paths
|
||||
inline std::vector<std::string> listFilesWithExt(const std::string& path, const std::string& ext) {
|
||||
struct dirent *ent;
|
||||
DIR *dir = opendir(path.c_str());
|
||||
struct dirent* ent;
|
||||
DIR* dir = opendir(path.c_str());
|
||||
std::vector<std::string> res;
|
||||
if (dir != nullptr) {
|
||||
while ((ent = readdir(dir)) != NULL) {
|
||||
@ -131,11 +132,11 @@ inline std::vector<std::string> listFilesWithExt(const std::string& path, const
|
||||
return res;
|
||||
}
|
||||
|
||||
inline int removeDir(const std::string &path) {
|
||||
inline int removeDir(const std::string& path) {
|
||||
return rmdir(path.c_str());
|
||||
}
|
||||
|
||||
inline bool directoryExists(const std::string &path) {
|
||||
inline bool directoryExists(const std::string& path) {
|
||||
struct stat sb;
|
||||
|
||||
if (stat(path.c_str(), &sb) == 0 && S_ISDIR(sb.st_mode)) {
|
||||
@ -145,7 +146,6 @@ inline bool directoryExists(const std::string &path) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
inline void directoryFileListRecursive(const std::string& name, std::vector<std::string>& file_list) {
|
||||
struct CloseDir {
|
||||
void operator()(DIR* d) const noexcept {
|
||||
@ -156,7 +156,7 @@ inline void directoryFileListRecursive(const std::string& name, std::vector<std:
|
||||
};
|
||||
using Dir = std::unique_ptr<DIR, CloseDir>;
|
||||
Dir directory(opendir(name.c_str()));
|
||||
struct dirent *entire;
|
||||
struct dirent* entire;
|
||||
if (directory) {
|
||||
const std::string current_dir{"."};
|
||||
const std::string parent_dir{".."};
|
||||
@ -219,7 +219,7 @@ inline std::vector<std::string> getFileListByPatternRecursive(const std::vector<
|
||||
};
|
||||
|
||||
std::vector<std::string> result;
|
||||
for (auto &&folderPath : folderPaths) {
|
||||
for (auto&& folderPath : folderPaths) {
|
||||
if (!directoryExists(folderPath)) {
|
||||
std::string msg = "Input directory (" + folderPath + ") doesn't not exist!";
|
||||
throw std::runtime_error(msg);
|
||||
|
@ -12,8 +12,7 @@ class TestMatcher : public ov::pass::pattern::Matcher {
|
||||
|
||||
public:
|
||||
TestMatcher() = default;
|
||||
bool match_value(const ov::Output<ov::Node>& pattern_value,
|
||||
const ov::Output<ov::Node>& graph_value) override {
|
||||
bool match_value(const ov::Output<ov::Node>& pattern_value, const ov::Output<ov::Node>& graph_value) override {
|
||||
if (ov::is_type<ov::op::v0::Parameter>(pattern_value.get_node_shared_ptr())) {
|
||||
bool result = pattern_value == graph_value;
|
||||
if (result) {
|
||||
@ -28,7 +27,7 @@ public:
|
||||
public:
|
||||
bool match(const std::shared_ptr<ov::Node>& pattern_node, const std::shared_ptr<ov::Node>& graph_node) {
|
||||
OPENVINO_ASSERT(pattern_node && graph_node); // the same condition throws an exception in the
|
||||
// non-test version of `match`
|
||||
// non-test version of `match`
|
||||
OPENVINO_DEBUG << "Starting match pattern = " << pattern_node->get_name()
|
||||
<< " , graph_node = " << graph_node->get_name();
|
||||
|
||||
|
@ -12,7 +12,7 @@
|
||||
|
||||
#include "openvino/core/shape.hpp"
|
||||
|
||||
namespace ngraph {
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace init {
|
||||
// Recursively define types for N-deep initializer lists
|
||||
@ -73,7 +73,8 @@ typename std::enable_if<(N > 1), void>::type fill_shape(ov::Shape& shape, const
|
||||
}
|
||||
|
||||
template <typename T, size_t N>
|
||||
typename std::enable_if<(N > 1), void>::type check_shape(const ov::Shape& shape, const NestedInitializerList<T, N>& inits) {
|
||||
typename std::enable_if<(N > 1), void>::type check_shape(const ov::Shape& shape,
|
||||
const NestedInitializerList<T, N>& inits) {
|
||||
if (shape.at(shape.size() - N) != inits.size()) {
|
||||
throw std::invalid_argument("Initializers do not match shape");
|
||||
}
|
||||
@ -188,4 +189,4 @@ public:
|
||||
}
|
||||
};
|
||||
} // namespace test
|
||||
} // namespace ngraph
|
||||
} // namespace ov
|
||||
|
@ -9,13 +9,12 @@
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
ov::Tensor create_and_fill_tensor(
|
||||
const ov::element::Type element_type,
|
||||
const ov::Shape &shape,
|
||||
const uint32_t range = 10,
|
||||
const double_t start_from = 0,
|
||||
const int32_t resolution = 1,
|
||||
const int seed = 1);
|
||||
ov::Tensor create_and_fill_tensor(const ov::element::Type element_type,
|
||||
const ov::Shape& shape,
|
||||
const uint32_t range = 10,
|
||||
const double_t start_from = 0,
|
||||
const int32_t resolution = 1,
|
||||
const int seed = 1);
|
||||
|
||||
template <class T>
|
||||
static ov::runtime::Tensor create_tensor(const ov::element::Type& element_type,
|
||||
@ -23,37 +22,33 @@ static ov::runtime::Tensor create_tensor(const ov::element::Type& element_type,
|
||||
const std::vector<T>& values,
|
||||
const size_t size = 0) {
|
||||
const size_t real_size = size ? size : values.size() * sizeof(T) / element_type.size();
|
||||
ov::runtime::Tensor tensor { element_type, shape };
|
||||
ov::runtime::Tensor tensor{element_type, shape};
|
||||
std::memcpy(tensor.data(), values.data(), std::min(real_size * element_type.size(), sizeof(T) * values.size()));
|
||||
return tensor;
|
||||
}
|
||||
|
||||
ov::runtime::Tensor create_and_fill_tensor_unique_sequence(
|
||||
const ov::element::Type element_type,
|
||||
const ov::Shape& shape,
|
||||
const int32_t start_from = 0,
|
||||
const int32_t resolution = 1,
|
||||
const int seed = 1);
|
||||
ov::runtime::Tensor create_and_fill_tensor_unique_sequence(const ov::element::Type element_type,
|
||||
const ov::Shape& shape,
|
||||
const int32_t start_from = 0,
|
||||
const int32_t resolution = 1,
|
||||
const int seed = 1);
|
||||
|
||||
ov::runtime::Tensor create_and_fill_tensor_normal_distribution(
|
||||
const ov::element::Type element_type,
|
||||
const ov::Shape& shape,
|
||||
const float mean,
|
||||
const float stddev,
|
||||
const int seed = 1);
|
||||
ov::runtime::Tensor create_and_fill_tensor_normal_distribution(const ov::element::Type element_type,
|
||||
const ov::Shape& shape,
|
||||
const float mean,
|
||||
const float stddev,
|
||||
const int seed = 1);
|
||||
|
||||
ov::runtime::Tensor create_and_fill_tensor_consistently(
|
||||
const ov::element::Type element_type,
|
||||
const ov::Shape& shape,
|
||||
const uint32_t range,
|
||||
const int32_t start_from,
|
||||
const int32_t resolution);
|
||||
ov::runtime::Tensor create_and_fill_tensor_consistently(const ov::element::Type element_type,
|
||||
const ov::Shape& shape,
|
||||
const uint32_t range,
|
||||
const int32_t start_from,
|
||||
const int32_t resolution);
|
||||
|
||||
void compare(
|
||||
const ov::Tensor &expected,
|
||||
const ov::Tensor &actual,
|
||||
const double abs_threshold = std::numeric_limits<double>::max(),
|
||||
const double rel_threshold = std::numeric_limits<double>::max());
|
||||
void compare(const ov::Tensor& expected,
|
||||
const ov::Tensor& actual,
|
||||
const double abs_threshold = std::numeric_limits<double>::max(),
|
||||
const double rel_threshold = std::numeric_limits<double>::max());
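A hedged sketch of how these declarations are typically used together; the element type, shape, and thresholds are illustrative:
auto expected = ov::test::utils::create_and_fill_tensor(ov::element::f32, ov::Shape{1, 3, 4, 4}, 10);
auto actual = expected;  // ov::Tensor copies share the same memory, so this comparison trivially passes
ov::test::utils::compare(expected, actual, 1e-4, 1e-4);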
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
||||
|
@ -5,11 +5,11 @@
|
||||
#pragma once
|
||||
|
||||
#include "common_test_utils/graph_comparator.hpp"
|
||||
#include "common_test_utils/test_common.hpp"
|
||||
#include "openvino/core/dimension.hpp"
|
||||
#include "openvino/core/model.hpp"
|
||||
#include "openvino/pass/manager.hpp"
|
||||
#include "openvino/pass/pass.hpp"
|
||||
#include "common_test_utils/test_common.hpp"
|
||||
#include "transformations/init_node_info.hpp"
|
||||
|
||||
#define DYN ov::Dimension::dynamic()
|
||||
|
@ -9,7 +9,6 @@
|
||||
#include "common_test_utils/all_close.hpp"
|
||||
#include "common_test_utils/all_close_f.hpp"
|
||||
#include "common_test_utils/test_tools.hpp"
|
||||
#include "ngraph/file_util.hpp"
|
||||
#include "openvino/runtime/core.hpp"
|
||||
#include "openvino/util/file_util.hpp"
|
||||
|
||||
@ -29,7 +28,7 @@ public:
|
||||
OPENVINO_ASSERT(input_pshape.compatible(shape),
|
||||
"Provided input shape ",
|
||||
shape,
|
||||
" is not compatible with nGraph function's expected input shape ",
|
||||
" is not compatible with OpenVINO model's expected input shape ",
|
||||
input_pshape,
|
||||
" for input ",
|
||||
m_input_index);
|
||||
|
@ -14,7 +14,7 @@ namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
class PostgreSQLLink;
|
||||
} //namespace utils
|
||||
} // namespace utils
|
||||
|
||||
class TestsCommon : virtual public ::testing::Test {
|
||||
/// \brief Holds a pointer to the PostgreSQL interface implementation (see postgres_link.hpp).
|
||||
|
@ -28,22 +28,22 @@ const char DEVICE_SUFFIX_SEPARATOR = '.';
|
||||
const unsigned int maxFileNameLength = 140;
|
||||
|
||||
#ifdef _WIN32
|
||||
#if defined(__MINGW32__) || defined(__MINGW64__)
|
||||
const char pre[] = "lib";
|
||||
#else
|
||||
const char pre[] = "";
|
||||
#endif
|
||||
const char ext[] = ".dll";
|
||||
const char FileSeparator[] = "\\";
|
||||
# if defined(__MINGW32__) || defined(__MINGW64__)
|
||||
const char pre[] = "lib";
|
||||
# else
|
||||
const char pre[] = "";
|
||||
# endif
|
||||
const char ext[] = ".dll";
|
||||
const char FileSeparator[] = "\\";
|
||||
#else
|
||||
#if defined __APPLE__
|
||||
const char pre[] = "lib";
|
||||
const char ext[] = ".so";
|
||||
#else
|
||||
const char pre[] = "lib";
|
||||
const char ext[] = ".so";
|
||||
#endif
|
||||
const char FileSeparator[] = "/";
|
||||
# if defined __APPLE__
|
||||
const char pre[] = "lib";
|
||||
const char ext[] = ".so";
|
||||
# else
|
||||
const char pre[] = "lib";
|
||||
const char ext[] = ".so";
|
||||
# endif
|
||||
const char FileSeparator[] = "/";
|
||||
#endif
|
||||
|
||||
} // namespace utils
|
||||
|
@ -22,31 +22,35 @@ std::string combine_test_backend_and_case(const std::string& backend_name, const
|
||||
|
||||
#define OPENVINO_GTEST_TEST_(backend_name, test_case_name, test_name, parent_class, parent_id) \
|
||||
class OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name) : public parent_class { \
|
||||
public: \
|
||||
public: \
|
||||
OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)() {} \
|
||||
\
|
||||
private: \
|
||||
void TestBody() override; \
|
||||
static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_; \
|
||||
\
|
||||
private: \
|
||||
void TestBody() override; \
|
||||
static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_; \
|
||||
GTEST_DISALLOW_COPY_AND_ASSIGN_(OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)); \
|
||||
}; \
|
||||
\
|
||||
}; \
|
||||
\
|
||||
::testing::TestInfo* const OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)::test_info_ = \
|
||||
::testing::internal::MakeAndRegisterTestInfo( \
|
||||
::ov::combine_test_backend_and_case(#backend_name, #test_case_name).c_str(), \
|
||||
::ov::prepend_disabled(#backend_name, #test_name, s_manifest).c_str(), \
|
||||
nullptr, \
|
||||
nullptr, \
|
||||
::testing::internal::CodeLocation(__FILE__, __LINE__), \
|
||||
(parent_id), \
|
||||
parent_class::SetUpTestCase, \
|
||||
parent_class::TearDownTestCase, \
|
||||
new ::testing::internal::TestFactoryImpl< \
|
||||
::testing::internal::MakeAndRegisterTestInfo( \
|
||||
::ov::combine_test_backend_and_case(#backend_name, #test_case_name).c_str(), \
|
||||
::ov::prepend_disabled(#backend_name, #test_name, s_manifest).c_str(), \
|
||||
nullptr, \
|
||||
nullptr, \
|
||||
::testing::internal::CodeLocation(__FILE__, __LINE__), \
|
||||
(parent_id), \
|
||||
parent_class::SetUpTestCase, \
|
||||
parent_class::TearDownTestCase, \
|
||||
new ::testing::internal::TestFactoryImpl< \
|
||||
OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)>); \
|
||||
void OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)::TestBody()
|
||||
|
||||
#define OPENVINO_TEST(test_case_name, test_name) \
|
||||
OPENVINO_GTEST_TEST_(test_case_name, test_case_name, test_name, ::testing::Test, ::testing::internal::GetTestTypeId())
|
||||
OPENVINO_GTEST_TEST_(test_case_name, \
|
||||
test_case_name, \
|
||||
test_name, \
|
||||
::testing::Test, \
|
||||
::testing::internal::GetTestTypeId())
|
||||
|
||||
// OPENVINO_TEST_F facilitates the use of the same configuration parameters for multiple
|
||||
// unit tests similar to the original TEST_F, but with the introduction of a new 0th
|
||||
@ -68,10 +72,10 @@ std::string combine_test_backend_and_case(const std::string& backend_name, const
|
||||
// (rather than the BACKENDNAME.* that worked before the use of OPENVINO_TEST_F)
|
||||
#define OPENVINO_TEST_F(backend_name, test_fixture, test_name) \
|
||||
OPENVINO_GTEST_TEST_(backend_name, \
|
||||
test_fixture, \
|
||||
test_name, \
|
||||
test_fixture, \
|
||||
::testing::internal::GetTypeId<test_fixture>())
|
||||
test_fixture, \
|
||||
test_name, \
|
||||
test_fixture, \
|
||||
::testing::internal::GetTypeId<test_fixture>())
|
||||
|
||||
// OPENVINO_TEST_P combined with OPENVINO_INSTANTIATE_TEST_SUITE_P facilitate the generation
|
||||
// of value parameterized tests (similar to the original TEST_P and INSTANTIATE_TEST_SUITE_P
|
||||
@ -81,10 +85,10 @@ std::string combine_test_backend_and_case(const std::string& backend_name, const
|
||||
// Start by defining a class derived from ::testing::TestWithParam<T>, which you'll pass
|
||||
// for the test_case_name parameter.
|
||||
// Then use OPENVINO_INSTANTIATE_TEST_SUITE_P to define each generation of test cases (see below).
|
||||
#define OPENVINO_TEST_P(backend_name, test_case_name, test_name) \
|
||||
class OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name) : public test_case_name { \
|
||||
#define OPENVINO_TEST_P(backend_name, test_case_name, test_name) \
|
||||
class OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name) : public test_case_name { \
|
||||
public: \
|
||||
OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)() {} \
|
||||
OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)() {} \
|
||||
void TestBody() override; \
|
||||
\
|
||||
private: \
|
||||
@ -95,16 +99,16 @@ std::string combine_test_backend_and_case(const std::string& backend_name, const
|
||||
::testing::internal::CodeLocation(__FILE__, __LINE__)) \
|
||||
->AddTestPattern( \
|
||||
#backend_name "/" #test_case_name, \
|
||||
::ov::prepend_disabled(#backend_name "/" #test_case_name, #test_name, s_manifest).c_str(), \
|
||||
::ov::prepend_disabled(#backend_name "/" #test_case_name, #test_name, s_manifest).c_str(), \
|
||||
new ::testing::internal::TestMetaFactory< \
|
||||
OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)>()); \
|
||||
OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)>()); \
|
||||
return 0; \
|
||||
} \
|
||||
static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
|
||||
GTEST_DISALLOW_COPY_AND_ASSIGN_(OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)); \
|
||||
GTEST_DISALLOW_COPY_AND_ASSIGN_(OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)); \
|
||||
}; \
|
||||
int OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)::gtest_registering_dummy_ = \
|
||||
OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)::AddToRegistry(); \
|
||||
int OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)::gtest_registering_dummy_ = \
|
||||
OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)::AddToRegistry(); \
|
||||
void OPENVINO_GTEST_TEST_CLASS_NAME_(backend_name, test_case_name, test_name)::TestBody()
|
||||
|
||||
// Use OPENVINO_INSTANTIATE_TEST_SUITE_P to create a generated set of test case variations.
|
||||
@ -140,7 +144,7 @@ std::string combine_test_backend_and_case(const std::string& backend_name, const
|
||||
// the filter to run all the tests for a given backend should be:
|
||||
// --gtest_filter=BACKENDNAME*.*
|
||||
// (rather than the BACKENDNAME.* that worked before the use of OPENVINO_TEST_P)
|
||||
#define OPENVINO_INSTANTIATE_TEST_SUITE_P(backend_name, prefix, test_suite_name, generator) \
|
||||
#define OPENVINO_INSTANTIATE_TEST_SUITE_P(backend_name, prefix, test_suite_name, generator) \
|
||||
static ::testing::internal::ParamGenerator<test_suite_name::ParamType> \
|
||||
gtest_##prefix##backend_name##test_suite_name##_EvalGenerator_() { \
|
||||
return generator; \
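A sketch of how these macros are meant to be combined; the fixture, test, and generator names are made up for illustration:
class MyParamFixture : public ::testing::TestWithParam<int> {};

OPENVINO_TEST_P(${BACKEND_NAME}, MyParamFixture, param_is_non_negative) {
    EXPECT_GE(GetParam(), 0);
}

OPENVINO_INSTANTIATE_TEST_SUITE_P(${BACKEND_NAME}, my_values, MyParamFixture, ::testing::Values(0, 1, 2));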
|
||||
|
@ -8,34 +8,11 @@ namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
|
||||
enum ComparisonTypes {
|
||||
EQUAL,
|
||||
NOT_EQUAL,
|
||||
IS_FINITE,
|
||||
IS_INF,
|
||||
IS_NAN,
|
||||
LESS,
|
||||
LESS_EQUAL,
|
||||
GREATER,
|
||||
GREATER_EQUAL
|
||||
};
|
||||
enum ComparisonTypes { EQUAL, NOT_EQUAL, IS_FINITE, IS_INF, IS_NAN, LESS, LESS_EQUAL, GREATER, GREATER_EQUAL };
|
||||
|
||||
enum ConversionTypes {
|
||||
CONVERT,
|
||||
CONVERT_LIKE
|
||||
};
|
||||
enum ConversionTypes { CONVERT, CONVERT_LIKE };
|
||||
|
||||
enum ReductionType {
|
||||
Mean,
|
||||
Max,
|
||||
Min,
|
||||
Prod,
|
||||
Sum,
|
||||
LogicalOr,
|
||||
LogicalAnd,
|
||||
L1,
|
||||
L2
|
||||
};
|
||||
enum ReductionType { Mean, Max, Min, Prod, Sum, LogicalOr, LogicalAnd, L1, L2 };
|
||||
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
|
@ -4,12 +4,11 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <numeric>
|
||||
#include <exception>
|
||||
#include <fstream>
|
||||
#include <numeric>
|
||||
#include <vector>
|
||||
|
||||
|
||||
/// \brief Reads a binary file to a vector.
|
||||
///
|
||||
/// \param[in] path The path where the file is located.
|
||||
|
@ -4,10 +4,10 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <fstream>
|
||||
#include <algorithm>
|
||||
#include <fstream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "common_test_utils/common_utils.hpp"
|
||||
#include "common_test_utils/w_dirent.h"
|
||||
@ -20,11 +20,11 @@ namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
|
||||
inline void fixSlashes(std::string &str) {
|
||||
inline void fixSlashes(std::string& str) {
|
||||
std::replace(str.begin(), str.end(), '/', '\\');
|
||||
}
|
||||
|
||||
inline void fixSlashes(std::wstring &str) {
|
||||
inline void fixSlashes(std::wstring& str) {
|
||||
std::replace(str.begin(), str.end(), L'/', L'\\');
|
||||
}
|
||||
|
||||
@ -33,15 +33,15 @@ inline std::wstring stringToWString(std::string input) {
|
||||
}
|
||||
|
||||
inline bool copyFile(std::wstring source_path, std::wstring dest_path) {
|
||||
#ifndef _WIN32
|
||||
# ifndef _WIN32
|
||||
std::ifstream source(ov::util::wstring_to_string(source_path), std::ios::binary);
|
||||
std::ofstream dest(ov::util::wstring_to_string(dest_path), std::ios::binary);
|
||||
#else
|
||||
# else
|
||||
fixSlashes(source_path);
|
||||
fixSlashes(dest_path);
|
||||
std::ifstream source(source_path.c_str(), std::ios::binary);
|
||||
std::ofstream dest(dest_path.c_str(), std::ios::binary);
|
||||
#endif
|
||||
# endif
|
||||
bool result = source && dest;
|
||||
std::istreambuf_iterator<char> begin_source(source);
|
||||
std::istreambuf_iterator<char> end_source;
|
||||
@ -70,17 +70,18 @@ inline std::wstring addUnicodePostfixToPath(std::string source_path, std::wstrin
|
||||
inline void removeFile(std::wstring path) {
|
||||
int result = 0;
|
||||
if (!path.empty()) {
|
||||
#ifdef _WIN32
|
||||
# ifdef _WIN32
|
||||
result = _wremove(path.c_str());
|
||||
#else
|
||||
# else
|
||||
result = remove(ov::util::wstring_to_string(path).c_str());
|
||||
#endif
|
||||
# endif
|
||||
}
|
||||
(void)result;
|
||||
}
|
||||
|
||||
inline bool endsWith(const std::wstring& source, const std::wstring& expectedSuffix) {
|
||||
return expectedSuffix.size() <= source.size() && source.compare(source.size() - expectedSuffix.size(), expectedSuffix.size(), expectedSuffix) == 0;
|
||||
return expectedSuffix.size() <= source.size() &&
|
||||
source.compare(source.size() - expectedSuffix.size(), expectedSuffix.size(), expectedSuffix) == 0;
|
||||
}
|
||||
|
||||
// Removes all files with extension=ext from the given directory
|
||||
@ -89,9 +90,9 @@ inline bool endsWith(const std::wstring& source, const std::wstring& expectedSuf
|
||||
// >= 0 - count of removed files
|
||||
inline int removeFilesWithExt(std::wstring path, std::wstring ext) {
|
||||
int ret = 0;
|
||||
#ifdef _WIN32
|
||||
struct _wdirent *ent;
|
||||
_WDIR *dir = _wopendir(path.c_str());
|
||||
# ifdef _WIN32
|
||||
struct _wdirent* ent;
|
||||
_WDIR* dir = _wopendir(path.c_str());
|
||||
if (dir != nullptr) {
|
||||
while ((ent = _wreaddir(dir)) != NULL) {
|
||||
auto file = ::FileUtils::makePath(path, std::wstring(ent->wd_name));
|
||||
@ -108,11 +109,11 @@ inline int removeFilesWithExt(std::wstring path, std::wstring ext) {
|
||||
}
|
||||
_wclosedir(dir);
|
||||
}
|
||||
#else
|
||||
struct dirent *ent;
|
||||
# else
|
||||
struct dirent* ent;
|
||||
auto path_mb = ov::util::wstring_to_string(path);
|
||||
auto ext_mb = ov::util::wstring_to_string(ext);
|
||||
DIR *dir = opendir(path_mb.c_str());
|
||||
DIR* dir = opendir(path_mb.c_str());
|
||||
if (dir != nullptr) {
|
||||
while ((ent = readdir(dir)) != NULL) {
|
||||
std::string file = ::FileUtils::makePath(path_mb, std::string(ent->d_name));
|
||||
@ -129,34 +130,34 @@ inline int removeFilesWithExt(std::wstring path, std::wstring ext) {
|
||||
}
|
||||
closedir(dir);
|
||||
}
|
||||
#endif
|
||||
# endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
inline int removeDir(std::wstring path) {
|
||||
int result = 0;
|
||||
if (!path.empty()) {
|
||||
#ifdef _WIN32
|
||||
# ifdef _WIN32
|
||||
result = _wrmdir(path.c_str());
|
||||
#else
|
||||
# else
|
||||
result = rmdir(ov::util::wstring_to_string(path).c_str());
|
||||
#endif
|
||||
# endif
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
inline bool directoryExists(const std::wstring &path) {
|
||||
#ifdef _WIN32
|
||||
inline bool directoryExists(const std::wstring& path) {
|
||||
# ifdef _WIN32
|
||||
struct _stat64i32 sb;
|
||||
if (_wstat(path.c_str(), &sb) == 0 && S_ISDIR(sb.st_mode)) {
|
||||
return true;
|
||||
}
|
||||
#else
|
||||
# else
|
||||
struct stat sb;
|
||||
if (stat(ov::util::wstring_to_string(path).c_str(), &sb) == 0 && S_ISDIR(sb.st_mode)) {
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
# endif
|
||||
|
||||
return false;
|
||||
}
|
||||
|
@ -6,50 +6,52 @@
|
||||
|
||||
#if defined(_WIN32)
|
||||
|
||||
#ifndef WIN32_LEAN_AND_MEAN
|
||||
# define WIN32_LEAN_AND_MEAN
|
||||
# define WIN32_LEAN_AND_MEAN_UNDEF
|
||||
#endif
|
||||
# ifndef WIN32_LEAN_AND_MEAN
|
||||
# define WIN32_LEAN_AND_MEAN
|
||||
# define WIN32_LEAN_AND_MEAN_UNDEF
|
||||
# endif
|
||||
|
||||
#ifndef NOMINMAX
|
||||
# define NOMINMAX
|
||||
# define NOMINMAX_UNDEF
|
||||
#endif
|
||||
# ifndef NOMINMAX
|
||||
# define NOMINMAX
|
||||
# define NOMINMAX_UNDEF
|
||||
# endif
|
||||
|
||||
#if defined(_M_IX86) && !defined(_X86_) && !defined(_AMD64_)
|
||||
# define _X86_
|
||||
#endif
|
||||
# if defined(_M_IX86) && !defined(_X86_) && !defined(_AMD64_)
|
||||
# define _X86_
|
||||
# endif
|
||||
|
||||
#if defined(_M_X64) && !defined(_X86_) && !defined(_AMD64_)
|
||||
# define _AMD64_
|
||||
#endif
|
||||
# if defined(_M_X64) && !defined(_X86_) && !defined(_AMD64_)
|
||||
# define _AMD64_
|
||||
# endif
|
||||
|
||||
#if defined(_M_ARM) && !defined(_ARM_) && !defined(_ARM64_)
|
||||
# define _ARM_
|
||||
#endif
|
||||
# if defined(_M_ARM) && !defined(_ARM_) && !defined(_ARM64_)
|
||||
# define _ARM_
|
||||
# endif
|
||||
|
||||
#if defined(_M_ARM64) && !defined(_ARM_) && !defined(_ARM64_)
|
||||
# define _ARM64_
|
||||
#endif
|
||||
# if defined(_M_ARM64) && !defined(_ARM_) && !defined(_ARM64_)
|
||||
# define _ARM64_
|
||||
# endif
|
||||
|
||||
#include <string>
|
||||
#include <windef.h>
|
||||
#include <fileapi.h>
|
||||
#include <winbase.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
// clang-format off
|
||||
# include <string>
|
||||
# include <windef.h>
|
||||
# include <fileapi.h>
|
||||
# include <winbase.h>
|
||||
# include <sys/types.h>
|
||||
# include <sys/stat.h>
|
||||
// clang-format on
|
||||
|
||||
// Copied from linux libc sys/stat.h:
|
||||
#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
|
||||
#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
|
||||
# define S_ISREG(m) (((m)&S_IFMT) == S_IFREG)
|
||||
# define S_ISDIR(m) (((m)&S_IFMT) == S_IFDIR)
|
||||
|
||||
struct dirent {
|
||||
char *d_name;
|
||||
char* d_name;
|
||||
|
||||
explicit dirent(const wchar_t *wsFilePath) {
|
||||
explicit dirent(const wchar_t* wsFilePath) {
|
||||
size_t i;
|
||||
auto slen = wcslen(wsFilePath);
|
||||
d_name = static_cast<char *>(malloc(slen + 1));
|
||||
d_name = static_cast<char*>(malloc(slen + 1));
|
||||
wcstombs_s(&i, d_name, slen + 1, wsFilePath, slen);
|
||||
}
|
||||
~dirent() {
|
||||
@ -60,22 +62,23 @@ struct dirent {
|
||||
class DIR {
|
||||
WIN32_FIND_DATAA FindFileData;
|
||||
HANDLE hFind;
|
||||
dirent *next;
|
||||
dirent* next;
|
||||
|
||||
static inline bool endsWith(const std::string &src, const char *with) {
|
||||
static inline bool endsWith(const std::string& src, const char* with) {
|
||||
int wl = static_cast<int>(strlen(with));
|
||||
int so = static_cast<int>(src.length()) - wl;
|
||||
if (so < 0) return false;
|
||||
if (so < 0)
|
||||
return false;
|
||||
return 0 == strncmp(with, &src[so], wl);
|
||||
}
|
||||
|
||||
public:
|
||||
DIR(const DIR &other) = delete;
|
||||
DIR(DIR &&other) = delete;
|
||||
DIR& operator=(const DIR &other) = delete;
|
||||
DIR& operator=(DIR &&other) = delete;
|
||||
DIR(const DIR& other) = delete;
|
||||
DIR(DIR&& other) = delete;
|
||||
DIR& operator=(const DIR& other) = delete;
|
||||
DIR& operator=(DIR&& other) = delete;
|
||||
|
||||
explicit DIR(const char *dirPath) : next(nullptr) {
|
||||
explicit DIR(const char* dirPath) : next(nullptr) {
|
||||
std::string ws = dirPath;
|
||||
if (endsWith(ws, "\\"))
|
||||
ws += "*";
|
||||
@ -86,7 +89,8 @@ public:
|
||||
}
|
||||
|
||||
~DIR() {
|
||||
if (!next) delete next;
|
||||
if (!next)
|
||||
delete next;
|
||||
next = nullptr;
|
||||
FindClose(hFind);
|
||||
}
|
||||
@ -96,10 +100,12 @@ public:
|
||||
}
|
||||
|
||||
dirent* nextEnt() {
|
||||
if (next != nullptr) delete next;
|
||||
if (next != nullptr)
|
||||
delete next;
|
||||
next = nullptr;
|
||||
|
||||
if (!FindFileData.dwReserved0) return nullptr;
|
||||
if (!FindFileData.dwReserved0)
|
||||
return nullptr;
|
||||
|
||||
wchar_t wbuf[4096];
|
||||
|
||||
@ -112,11 +118,11 @@ public:
|
||||
};
|
||||
|
||||
struct _wdirent {
|
||||
wchar_t *wd_name;
|
||||
wchar_t* wd_name;
|
||||
|
||||
explicit _wdirent(const wchar_t *wsFilePath) {
|
||||
explicit _wdirent(const wchar_t* wsFilePath) {
|
||||
auto slen = wcslen(wsFilePath);
|
||||
wd_name = static_cast<wchar_t *>(malloc(sizeof(wchar_t) * (slen + 1)));
|
||||
wd_name = static_cast<wchar_t*>(malloc(sizeof(wchar_t) * (slen + 1)));
|
||||
wcscpy_s(wd_name, slen + 1, wsFilePath);
|
||||
}
|
||||
~_wdirent() {
|
||||
@ -127,22 +133,23 @@ struct _wdirent {
|
||||
class _WDIR {
|
||||
WIN32_FIND_DATAW FindFileData;
|
||||
HANDLE hFind;
|
||||
_wdirent *next;
|
||||
_wdirent* next;
|
||||
|
||||
static inline bool endsWith(const std::wstring &src, const wchar_t *with) {
|
||||
static inline bool endsWith(const std::wstring& src, const wchar_t* with) {
|
||||
int wl = static_cast<int>(wcslen(with));
|
||||
int so = static_cast<int>(src.length()) - wl;
|
||||
if (so < 0) return false;
|
||||
if (so < 0)
|
||||
return false;
|
||||
return 0 == wcsncmp(with, &src[so], wl);
|
||||
}
|
||||
|
||||
public:
|
||||
_WDIR(const _WDIR &other) = delete;
|
||||
_WDIR(_WDIR &&other) = delete;
|
||||
_WDIR& operator=(const _WDIR &other) = delete;
|
||||
_WDIR& operator=(_WDIR &&other) = delete;
|
||||
_WDIR(const _WDIR& other) = delete;
|
||||
_WDIR(_WDIR&& other) = delete;
|
||||
_WDIR& operator=(const _WDIR& other) = delete;
|
||||
_WDIR& operator=(_WDIR&& other) = delete;
|
||||
|
||||
explicit _WDIR(const wchar_t *dirPath) : next(nullptr) {
|
||||
explicit _WDIR(const wchar_t* dirPath) : next(nullptr) {
|
||||
std::wstring ws = dirPath;
|
||||
if (endsWith(ws, L"\\"))
|
||||
ws += L"*";
|
||||
@ -153,7 +160,8 @@ public:
|
||||
}
|
||||
|
||||
~_WDIR() {
|
||||
if (!next) delete next;
|
||||
if (!next)
|
||||
delete next;
|
||||
next = nullptr;
|
||||
FindClose(hFind);
|
||||
}
|
||||
@ -163,10 +171,12 @@ public:
|
||||
}
|
||||
|
||||
_wdirent* nextEnt() {
|
||||
if (next != nullptr) delete next;
|
||||
if (next != nullptr)
|
||||
delete next;
|
||||
next = nullptr;
|
||||
|
||||
if (!FindFileData.dwReserved0) return nullptr;
|
||||
if (!FindFileData.dwReserved0)
|
||||
return nullptr;
|
||||
|
||||
std::wstring buf(FindFileData.cFileName);
|
||||
next = new _wdirent(buf.c_str());
|
||||
@ -175,8 +185,7 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
static DIR* opendir(const char *dirPath) {
|
||||
static DIR* opendir(const char* dirPath) {
|
||||
auto dp = new DIR(dirPath);
|
||||
if (!dp->isValid()) {
|
||||
delete dp;
|
||||
@ -185,7 +194,7 @@ static DIR* opendir(const char *dirPath) {
|
||||
return dp;
|
||||
}
|
||||
|
||||
static _WDIR* _wopendir(const wchar_t *dirPath) {
|
||||
static _WDIR* _wopendir(const wchar_t* dirPath) {
|
||||
auto dp = new _WDIR(dirPath);
|
||||
if (!dp->isValid()) {
|
||||
delete dp;
|
||||
@ -194,35 +203,35 @@ static _WDIR* _wopendir(const wchar_t *dirPath) {
|
||||
return dp;
|
||||
}
|
||||
|
||||
static struct dirent* readdir(DIR *dp) {
|
||||
static struct dirent* readdir(DIR* dp) {
|
||||
return dp->nextEnt();
|
||||
}
|
||||
|
||||
static struct _wdirent* _wreaddir(_WDIR *dp) {
|
||||
static struct _wdirent* _wreaddir(_WDIR* dp) {
|
||||
return dp->nextEnt();
|
||||
}
|
||||
|
||||
static void closedir(DIR *dp) {
|
||||
static void closedir(DIR* dp) {
|
||||
delete dp;
|
||||
}
|
||||
|
||||
static void _wclosedir(_WDIR *dp) {
|
||||
static void _wclosedir(_WDIR* dp) {
|
||||
delete dp;
|
||||
}
|
||||
|
||||
#ifdef WIN32_LEAN_AND_MEAN_UNDEF
|
||||
# undef WIN32_LEAN_AND_MEAN
|
||||
# undef WIN32_LEAN_AND_MEAN_UNDEF
|
||||
#endif
|
||||
# ifdef WIN32_LEAN_AND_MEAN_UNDEF
|
||||
# undef WIN32_LEAN_AND_MEAN
|
||||
# undef WIN32_LEAN_AND_MEAN_UNDEF
|
||||
# endif
|
||||
|
||||
#ifdef NOMINMAX_UNDEF
|
||||
# undef NOMINMAX_UNDEF
|
||||
# undef NOMINMAX
|
||||
#endif
|
||||
# ifdef NOMINMAX_UNDEF
|
||||
# undef NOMINMAX_UNDEF
|
||||
# undef NOMINMAX
|
||||
# endif
|
||||
|
||||
#else
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <dirent.h>
|
||||
# include <dirent.h>
|
||||
# include <sys/types.h>
|
||||
|
||||
#endif
|
||||
|
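Note: the header above re-implements a minimal POSIX-style directory API (dirent/DIR, opendir/readdir/closedir and their wide-character twins) on top of the Win32 FindFirstFile family, so the test helpers can walk directories with one code path on every platform. A hedged usage sketch of that API follows; it assumes either a POSIX system or this shim is in scope, and the function and path names are illustrative, not part of the commit.

#include <dirent.h>  // on Windows the shim above supplies these names instead
#include <cstdio>

// Print every entry in a directory; returns -1 if it cannot be opened.
int list_dir(const char* dirPath) {
    DIR* dir = opendir(dirPath);
    if (dir == nullptr)
        return -1;
    while (dirent* ent = readdir(dir)) {
        std::printf("%s\n", ent->d_name);
    }
    closedir(dir);
    return 0;
}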
@ -15,29 +15,23 @@ namespace utils {
return ::testing::AssertionFailure() << "Cannot compare tensors with different element types";
}

#define all_close_ov_type(type)\
case ov::element::type:\
return all_close<ov::element_type_traits<ov::element::type>::value_type>(a, b, \
static_cast<ov::element_type_traits<ov::element::type>::value_type>(rtol), \
static_cast<ov::element_type_traits<ov::element::type>::value_type>(atol));\
#define all_close_ov_type(type) \
case ov::element::type: \
return all_close<ov::element_type_traits<ov::element::type>::value_type>( \
a, \
b, \
static_cast<ov::element_type_traits<ov::element::type>::value_type>(rtol), \
static_cast<ov::element_type_traits<ov::element::type>::value_type>(atol));

switch (a.get_element_type()) {
all_close_ov_type(u8)
all_close_ov_type(u16)
all_close_ov_type(u32)
all_close_ov_type(u64)
all_close_ov_type(i8)
all_close_ov_type(i16)
all_close_ov_type(i32)
all_close_ov_type(i64)
// all_close_ov_type(bf16)
// all_close_ov_type(f16)
all_close_ov_type(f32)
all_close_ov_type(f64)
all_close_ov_type(boolean)
default:
return ::testing::AssertionFailure()
<< "Cannot compare tensors with unsupported element type: " << a.get_element_type();
all_close_ov_type(u8) all_close_ov_type(u16) all_close_ov_type(u32) all_close_ov_type(u64) all_close_ov_type(i8)
all_close_ov_type(i16) all_close_ov_type(i32) all_close_ov_type(i64)
// all_close_ov_type(bf16)
// all_close_ov_type(f16)
all_close_ov_type(f32) all_close_ov_type(f64) all_close_ov_type(boolean) default
: return ::testing::AssertionFailure()
<< "Cannot compare tensors with unsupported element type: "
<< a.get_element_type();
}
}
} // namespace utils

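Note: the all_close_ov_type macro above is an X-macro style dispatcher: each listed ov::element type expands into a switch case that instantiates the templated all_close with the matching C++ type. A stripped-down sketch of the same pattern, with illustrative names that are not part of the commit:

#include <cstddef>
#include <cstdint>
#include <iostream>

// Templated worker; the macro below stamps out one switch case per element type.
template <typename T>
double sum_as(const void* data, std::size_t count) {
    auto ptr = static_cast<const T*>(data);
    double acc = 0.0;
    for (std::size_t i = 0; i < count; ++i)
        acc += static_cast<double>(ptr[i]);
    return acc;
}

enum class Elem { i32, f32 };

double sum(Elem type, const void* data, std::size_t count) {
#define SUM_CASE(e, cpp_type) \
    case Elem::e:             \
        return sum_as<cpp_type>(data, count);
    switch (type) {
        SUM_CASE(i32, int32_t)
        SUM_CASE(f32, float)
    }
#undef SUM_CASE
    return 0.0;  // unreachable for valid enum values
}

int main() {
    float v[] = {1.5f, 2.5f};
    std::cout << sum(Elem::f32, v, 2) << "\n";  // prints 4
}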
@ -8,8 +8,6 @@
|
||||
#include "openvino/core/type/element_type_traits.hpp"
|
||||
#include "precomp.hpp"
|
||||
|
||||
#include "common_test_utils/float_util.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
@ -153,10 +151,7 @@ bool close_f(double a, double b, int tolerance_bits, double min_signal) {
|
||||
return (distance <= tolerance) || (distance == DOUBLE_BELOW_MIN_SIGNAL);
|
||||
}
|
||||
|
||||
std::vector<uint32_t> float_distances(const float* const a,
|
||||
const float* const b,
|
||||
size_t size,
|
||||
float min_signal) {
|
||||
std::vector<uint32_t> float_distances(const float* const a, const float* const b, size_t size, float min_signal) {
|
||||
std::vector<uint32_t> distances(size);
|
||||
for (size_t i = 0; i < size; ++i) {
|
||||
distances[i] = float_distance(a[i], b[i], min_signal);
|
||||
@ -165,10 +160,7 @@ std::vector<uint32_t> float_distances(const float* const a,
|
||||
return distances;
|
||||
}
|
||||
|
||||
std::vector<uint64_t> float_distances(const double* const a,
|
||||
const double* const b,
|
||||
size_t size,
|
||||
double min_signal) {
|
||||
std::vector<uint64_t> float_distances(const double* const a, const double* const b, size_t size, double min_signal) {
|
||||
std::vector<uint64_t> distances(size);
|
||||
for (size_t i = 0; i < size; ++i) {
|
||||
distances[i] = float_distance(a[i], b[i], min_signal);
|
||||
@ -480,12 +472,8 @@ uint32_t matching_mantissa_bits(uint64_t distance) {
|
||||
return all_close_f(a.data(), b.data(), a.size(), tolerance_bits, min_signal);
|
||||
}
|
||||
|
||||
|
||||
template<typename T>
|
||||
::testing::AssertionResult all_close_f(const ov::Tensor& a,
|
||||
const ov::Tensor& b,
|
||||
int tolerance_bits,
|
||||
float min_signal) {
|
||||
template <typename T>
|
||||
::testing::AssertionResult all_close_f(const ov::Tensor& a, const ov::Tensor& b, int tolerance_bits, float min_signal) {
|
||||
if (a.get_size() != b.get_size()) {
|
||||
return ::testing::AssertionFailure() << "a.size() != b.size() for all_close_f comparison.";
|
||||
}
|
||||
@ -493,38 +481,37 @@ template<typename T>
|
||||
return ::testing::AssertionSuccess() << "No elements to compare";
|
||||
}
|
||||
|
||||
return all_close_f(static_cast<const T*>(a.data()), static_cast<const T*>(b.data()), a.get_size(), tolerance_bits, min_signal);
|
||||
return all_close_f(static_cast<const T*>(a.data()),
|
||||
static_cast<const T*>(b.data()),
|
||||
a.get_size(),
|
||||
tolerance_bits,
|
||||
min_signal);
|
||||
}
|
||||
|
||||
|
||||
::testing::AssertionResult all_close_f(const ov::Tensor& a,
|
||||
const ov::Tensor& b,
|
||||
int tolerance_bits,
|
||||
float min_signal) {
|
||||
if (a.get_element_type() != b.get_element_type()) {
|
||||
::testing::AssertionResult all_close_f(const ov::Tensor& a, const ov::Tensor& b, int tolerance_bits, float min_signal) {
|
||||
if (a.get_element_type() != b.get_element_type()) {
|
||||
return ::testing::AssertionFailure() << "Cannot compare tensors with different element types";
|
||||
}
|
||||
|
||||
#define all_close_f_ov_type(type)\
|
||||
case ov::element::type:\
|
||||
return all_close_f<ov::element_type_traits<ov::element::type>::value_type>(a, b, tolerance_bits, min_signal);\
|
||||
#define all_close_f_ov_type(type) \
|
||||
case ov::element::type: \
|
||||
return all_close_f<ov::element_type_traits<ov::element::type>::value_type>(a, b, tolerance_bits, min_signal);
|
||||
|
||||
switch (a.get_element_type()) {
|
||||
// all_close_f_ov_type(u8)
|
||||
// all_close_f_ov_type(u16)
|
||||
// all_close_f_ov_type(u32)
|
||||
// all_close_f_ov_type(u64)
|
||||
// all_close_f_ov_type(i8)
|
||||
// all_close_f_ov_type(i16)
|
||||
// all_close_f_ov_type(i32)
|
||||
// all_close_f_ov_type(i64)
|
||||
// all_close_f_ov_type(bf16)
|
||||
// all_close_f_ov_type(f16)
|
||||
all_close_f_ov_type(f32)
|
||||
all_close_f_ov_type(f64)
|
||||
default:
|
||||
return ::testing::AssertionFailure()
|
||||
<< "Cannot compare tensors with unsupported element type: " << a.get_element_type();
|
||||
// all_close_f_ov_type(u8)
|
||||
// all_close_f_ov_type(u16)
|
||||
// all_close_f_ov_type(u32)
|
||||
// all_close_f_ov_type(u64)
|
||||
// all_close_f_ov_type(i8)
|
||||
// all_close_f_ov_type(i16)
|
||||
// all_close_f_ov_type(i32)
|
||||
// all_close_f_ov_type(i64)
|
||||
// all_close_f_ov_type(bf16)
|
||||
// all_close_f_ov_type(f16)
|
||||
all_close_f_ov_type(f32) all_close_f_ov_type(f64) default
|
||||
: return ::testing::AssertionFailure()
|
||||
<< "Cannot compare tensors with unsupported element type: "
|
||||
<< a.get_element_type();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4,10 +4,9 @@
|
||||
|
||||
#include "common_test_utils/data_utils.hpp"
|
||||
|
||||
#include "debug.h" // to allow putting vector into exception string stream
|
||||
|
||||
#include "ie_blob.h"
|
||||
#include "blob_factory.hpp"
|
||||
#include "debug.h" // to allow putting vector into exception string stream
|
||||
#include "ie_blob.h"
|
||||
#include "openvino/core/deprecated.hpp"
|
||||
#include "openvino/core/type/element_type_traits.hpp"
|
||||
#include "openvino/runtime/tensor.hpp"
|
||||
@ -29,8 +28,10 @@ bool isDenseBlob(const InferenceEngine::Blob::Ptr& blob) {
|
||||
IE_ASSERT(dims.size() == strs.size()) << " isDenseBlob: inconsistent tensor descriptor";
|
||||
|
||||
auto size = dims.size();
|
||||
if (size == 0) return true;
|
||||
if (size == 1) return strs[0] == 1;
|
||||
if (size == 0)
|
||||
return true;
|
||||
if (size == 1)
|
||||
return strs[0] == 1;
|
||||
|
||||
for (auto i = size - 1; i > 0; i--) {
|
||||
if (strs[i - 1] != strs[i - 1] * dims[i])
|
||||
@ -40,20 +41,44 @@ bool isDenseBlob(const InferenceEngine::Blob::Ptr& blob) {
|
||||
return true;
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
void copy_7D(void *src_raw_ptr, std::vector<size_t> &src_str, void *dst_raw_ptr, std::vector<size_t> &dst_str, std::vector<size_t> &dims) {
|
||||
template <typename T>
|
||||
void copy_7D(void* src_raw_ptr,
|
||||
std::vector<size_t>& src_str,
|
||||
void* dst_raw_ptr,
|
||||
std::vector<size_t>& dst_str,
|
||||
std::vector<size_t>& dims) {
|
||||
auto src_ptr = static_cast<T*>(src_raw_ptr);
|
||||
auto dst_ptr = static_cast<T*>(dst_raw_ptr);
|
||||
|
||||
for (size_t d0 = 0; d0 < dims[0]; d0++) { auto src_ptr_0 = src_ptr + src_str[0]*d0; auto dst_ptr_0 = dst_ptr + dst_str[0]*d0;
|
||||
for (size_t d1 = 0; d1 < dims[1]; d1++) { auto src_ptr_1 = src_ptr_0 + src_str[1]*d1; auto dst_ptr_1 = dst_ptr_0 + dst_str[1]*d1;
|
||||
for (size_t d2 = 0; d2 < dims[2]; d2++) { auto src_ptr_2 = src_ptr_1 + src_str[2]*d2; auto dst_ptr_2 = dst_ptr_1 + dst_str[2]*d2;
|
||||
for (size_t d3 = 0; d3 < dims[3]; d3++) { auto src_ptr_3 = src_ptr_2 + src_str[3]*d3; auto dst_ptr_3 = dst_ptr_2 + dst_str[3]*d3;
|
||||
for (size_t d4 = 0; d4 < dims[4]; d4++) { auto src_ptr_4 = src_ptr_3 + src_str[4]*d4; auto dst_ptr_4 = dst_ptr_3 + dst_str[4]*d4;
|
||||
for (size_t d5 = 0; d5 < dims[5]; d5++) { auto src_ptr_5 = src_ptr_4 + src_str[5]*d5; auto dst_ptr_5 = dst_ptr_4 + dst_str[5]*d5;
|
||||
for (size_t d6 = 0; d6 < dims[6]; d6++) { auto src_ptr_6 = src_ptr_5 + src_str[6]*d6; auto dst_ptr_6 = dst_ptr_5 + dst_str[6]*d6;
|
||||
*dst_ptr_6 = *src_ptr_6;
|
||||
}}}}}}}
|
||||
for (size_t d0 = 0; d0 < dims[0]; d0++) {
|
||||
auto src_ptr_0 = src_ptr + src_str[0] * d0;
|
||||
auto dst_ptr_0 = dst_ptr + dst_str[0] * d0;
|
||||
for (size_t d1 = 0; d1 < dims[1]; d1++) {
|
||||
auto src_ptr_1 = src_ptr_0 + src_str[1] * d1;
|
||||
auto dst_ptr_1 = dst_ptr_0 + dst_str[1] * d1;
|
||||
for (size_t d2 = 0; d2 < dims[2]; d2++) {
|
||||
auto src_ptr_2 = src_ptr_1 + src_str[2] * d2;
|
||||
auto dst_ptr_2 = dst_ptr_1 + dst_str[2] * d2;
|
||||
for (size_t d3 = 0; d3 < dims[3]; d3++) {
|
||||
auto src_ptr_3 = src_ptr_2 + src_str[3] * d3;
|
||||
auto dst_ptr_3 = dst_ptr_2 + dst_str[3] * d3;
|
||||
for (size_t d4 = 0; d4 < dims[4]; d4++) {
|
||||
auto src_ptr_4 = src_ptr_3 + src_str[4] * d4;
|
||||
auto dst_ptr_4 = dst_ptr_3 + dst_str[4] * d4;
|
||||
for (size_t d5 = 0; d5 < dims[5]; d5++) {
|
||||
auto src_ptr_5 = src_ptr_4 + src_str[5] * d5;
|
||||
auto dst_ptr_5 = dst_ptr_4 + dst_str[5] * d5;
|
||||
for (size_t d6 = 0; d6 < dims[6]; d6++) {
|
||||
auto src_ptr_6 = src_ptr_5 + src_str[6] * d6;
|
||||
auto dst_ptr_6 = dst_ptr_5 + dst_str[6] * d6;
|
||||
*dst_ptr_6 = *src_ptr_6;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void fill_data_with_broadcast(InferenceEngine::Blob::Ptr& blob, InferenceEngine::Blob::Ptr& values) {
|
||||
@ -82,7 +107,7 @@ void fill_data_with_broadcast(InferenceEngine::Blob::Ptr& blob, InferenceEngine:
|
||||
|
||||
IE_ASSERT(compatible);
|
||||
|
||||
auto fill_strides_like_plain = [] (ov::Shape dims) {
|
||||
auto fill_strides_like_plain = [](ov::Shape dims) {
|
||||
ov::Shape str(dims.size());
|
||||
if (str.empty())
|
||||
return str;
|
||||
@ -90,12 +115,14 @@ void fill_data_with_broadcast(InferenceEngine::Blob::Ptr& blob, InferenceEngine:
|
||||
str.back() = 1;
|
||||
|
||||
// stride[i] = stride[i+1]*d[i+1]
|
||||
std::transform(dims.rbegin(), dims.rend() - 1, str.rbegin(), str.rbegin() + 1,
|
||||
[] (size_t d, size_t s) { return d * s; });
|
||||
std::transform(dims.rbegin(), dims.rend() - 1, str.rbegin(), str.rbegin() + 1, [](size_t d, size_t s) {
|
||||
return d * s;
|
||||
});
|
||||
|
||||
// zeroing broadcast dimension equal 1
|
||||
std::transform(str.begin(), str.end(), dims.begin(), str.begin(),
|
||||
[] (size_t s, size_t d) { return d == 1 ? 0 : s; });
|
||||
std::transform(str.begin(), str.end(), dims.begin(), str.begin(), [](size_t s, size_t d) {
|
||||
return d == 1 ? 0 : s;
|
||||
});
|
||||
|
||||
return str;
|
||||
};
|
||||
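Note: the fill_strides_like_plain lambda above first builds plain row-major strides (stride[i] = stride[i+1] * dims[i+1], innermost stride 1) and then zeroes the stride of every dimension of size 1; those zero strides are what make the strided copy broadcast. A standalone sketch of the same two steps, with an illustrative free-function name:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Same rule as the lambda above: row-major strides, then zero the broadcast dims.
std::vector<std::size_t> strides_like_plain(std::vector<std::size_t> dims) {
    std::vector<std::size_t> str(dims.size());
    if (str.empty())
        return str;
    str.back() = 1;
    // stride[i] = stride[i+1] * dims[i+1]
    std::transform(dims.rbegin(), dims.rend() - 1, str.rbegin(), str.rbegin() + 1,
                   [](std::size_t d, std::size_t s) { return d * s; });
    // dimensions of size 1 get a zero stride so they broadcast
    std::transform(str.begin(), str.end(), dims.begin(), str.begin(),
                   [](std::size_t s, std::size_t d) { return d == 1 ? 0 : s; });
    return str;
}

int main() {
    for (std::size_t s : strides_like_plain({2, 1, 4}))
        std::cout << s << ' ';  // prints: 4 0 1
}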
@ -103,7 +130,7 @@ void fill_data_with_broadcast(InferenceEngine::Blob::Ptr& blob, InferenceEngine:
|
||||
SizeVector src_strides = fill_strides_like_plain(src_dims);
|
||||
SizeVector dst_strides = fill_strides_like_plain(dst_dims);
|
||||
|
||||
auto get_data = [] (InferenceEngine::Blob::Ptr &blob) {
|
||||
auto get_data = [](InferenceEngine::Blob::Ptr& blob) {
|
||||
auto mem_blob = dynamic_cast<InferenceEngine::MemoryBlob*>(blob.get());
|
||||
auto mem = mem_blob->rwmap();
|
||||
return mem.as<float*>();
|
||||
@ -113,30 +140,30 @@ void fill_data_with_broadcast(InferenceEngine::Blob::Ptr& blob, InferenceEngine:
|
||||
auto src_ptr = get_data(values);
|
||||
|
||||
switch (blob->getTensorDesc().getPrecision()) {
|
||||
case InferenceEngine::Precision::U64:
|
||||
case InferenceEngine::Precision::I64:
|
||||
copy_7D<uint64_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
case InferenceEngine::Precision::FP32:
|
||||
case InferenceEngine::Precision::I32:
|
||||
copy_7D<uint32_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
case InferenceEngine::Precision::I16:
|
||||
case InferenceEngine::Precision::U16:
|
||||
case InferenceEngine::Precision::FP16:
|
||||
case InferenceEngine::Precision::BF16:
|
||||
copy_7D<uint16_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
case InferenceEngine::Precision::U8:
|
||||
case InferenceEngine::Precision::I8:
|
||||
copy_7D<uint8_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
default:
|
||||
IE_THROW() << "Unsupported precision by fill_data_with_broadcast function";
|
||||
case InferenceEngine::Precision::U64:
|
||||
case InferenceEngine::Precision::I64:
|
||||
copy_7D<uint64_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
case InferenceEngine::Precision::FP32:
|
||||
case InferenceEngine::Precision::I32:
|
||||
copy_7D<uint32_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
case InferenceEngine::Precision::I16:
|
||||
case InferenceEngine::Precision::U16:
|
||||
case InferenceEngine::Precision::FP16:
|
||||
case InferenceEngine::Precision::BF16:
|
||||
copy_7D<uint16_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
case InferenceEngine::Precision::U8:
|
||||
case InferenceEngine::Precision::I8:
|
||||
copy_7D<uint8_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
default:
|
||||
IE_THROW() << "Unsupported precision by fill_data_with_broadcast function";
|
||||
}
|
||||
}
|
||||
|
||||
template<InferenceEngine::Precision::ePrecision SRC_E, InferenceEngine::Precision::ePrecision DST_E>
|
||||
template <InferenceEngine::Precision::ePrecision SRC_E, InferenceEngine::Precision::ePrecision DST_E>
|
||||
void copy_with_convert(InferenceEngine::Blob::Ptr& src_blob, InferenceEngine::Blob::Ptr& dst_blob) {
|
||||
using SRC_TYPE = typename InferenceEngine::PrecisionTrait<SRC_E>::value_type;
|
||||
using DST_TYPE = typename InferenceEngine::PrecisionTrait<DST_E>::value_type;
|
||||
@ -151,7 +178,8 @@ void copy_with_convert(InferenceEngine::Blob::Ptr& src_blob, InferenceEngine::Bl
|
||||
std::copy(src_ptr, src_ptr + src_size, dst_ptr);
|
||||
}
|
||||
|
||||
InferenceEngine::Blob::Ptr make_with_precision_convert(InferenceEngine::Blob::Ptr& blob, InferenceEngine::Precision prc) {
|
||||
InferenceEngine::Blob::Ptr make_with_precision_convert(InferenceEngine::Blob::Ptr& blob,
|
||||
InferenceEngine::Precision prc) {
|
||||
IE_ASSERT(isDenseBlob(blob));
|
||||
auto td = blob->getTensorDesc();
|
||||
td.setPrecision(prc);
|
||||
@ -159,11 +187,22 @@ InferenceEngine::Blob::Ptr make_with_precision_convert(InferenceEngine::Blob::Pt
|
||||
auto new_blob = make_blob_with_precision(td);
|
||||
new_blob->allocate();
|
||||
|
||||
#define CASE(_PRC) case InferenceEngine::Precision::_PRC: \
|
||||
copy_with_convert<InferenceEngine::Precision::FP32, InferenceEngine::Precision::_PRC> (blob, new_blob); break
|
||||
#define CASE(_PRC) \
|
||||
case InferenceEngine::Precision::_PRC: \
|
||||
copy_with_convert<InferenceEngine::Precision::FP32, InferenceEngine::Precision::_PRC>(blob, new_blob); \
|
||||
break
|
||||
switch (prc) {
|
||||
CASE(FP32); CASE(I64); CASE(U64); CASE(I32); CASE(U32); CASE(I16); CASE(U16); CASE(I8); CASE(U8);
|
||||
default: IE_THROW() << "Unsupported precision case";
|
||||
CASE(FP32);
|
||||
CASE(I64);
|
||||
CASE(U64);
|
||||
CASE(I32);
|
||||
CASE(U32);
|
||||
CASE(I16);
|
||||
CASE(U16);
|
||||
CASE(I8);
|
||||
CASE(U8);
|
||||
default:
|
||||
IE_THROW() << "Unsupported precision case";
|
||||
}
|
||||
#undef CASE
|
||||
|
||||
@ -188,7 +227,8 @@ void fill_data_with_broadcast(InferenceEngine::Blob::Ptr& blob, size_t axis, std
|
||||
fill_data_with_broadcast(blob, values_blob);
|
||||
}
|
||||
|
||||
InferenceEngine::Blob::Ptr make_reshape_view(const InferenceEngine::Blob::Ptr &blob, InferenceEngine::SizeVector new_shape) {
|
||||
InferenceEngine::Blob::Ptr make_reshape_view(const InferenceEngine::Blob::Ptr& blob,
|
||||
InferenceEngine::SizeVector new_shape) {
|
||||
using InferenceEngine::TensorDesc;
|
||||
auto new_size = std::accumulate(new_shape.begin(), new_shape.end(), 1, std::multiplies<size_t>());
|
||||
IE_ASSERT(new_size == blob->size());
|
||||
@ -197,19 +237,19 @@ InferenceEngine::Blob::Ptr make_reshape_view(const InferenceEngine::Blob::Ptr &b
|
||||
auto orig_mem = orig_mem_blob->rwmap();
|
||||
auto orig_ptr = orig_mem.as<float*>();
|
||||
|
||||
auto new_tdesc = TensorDesc(blob->getTensorDesc().getPrecision(), new_shape, TensorDesc::getLayoutByDims(new_shape));
|
||||
auto new_tdesc =
|
||||
TensorDesc(blob->getTensorDesc().getPrecision(), new_shape, TensorDesc::getLayoutByDims(new_shape));
|
||||
auto new_blob = make_blob_with_precision(new_tdesc, orig_ptr);
|
||||
return new_blob;
|
||||
}
|
||||
|
||||
size_t byte_size(const InferenceEngine::TensorDesc &tdesc) {
|
||||
size_t byte_size(const InferenceEngine::TensorDesc& tdesc) {
|
||||
auto prc = tdesc.getPrecision();
|
||||
auto dims = tdesc.getDims();
|
||||
return prc.size() * std::accumulate(std::begin(dims), std::end(dims), (size_t)1, std::multiplies<size_t>());
|
||||
}
|
||||
OPENVINO_SUPPRESS_DEPRECATED_END
|
||||
|
||||
|
||||
void fill_data_with_broadcast(ov::Tensor& tensor, ov::Tensor& values) {
|
||||
constexpr size_t MAX_N_DIMS = 7; // Suppose it's enough
|
||||
|
||||
@ -235,7 +275,7 @@ void fill_data_with_broadcast(ov::Tensor& tensor, ov::Tensor& values) {
|
||||
|
||||
OPENVINO_ASSERT(compatible);
|
||||
|
||||
auto fill_strides_like_plain = [] (ov::Shape dims) {
|
||||
auto fill_strides_like_plain = [](ov::Shape dims) {
|
||||
ov::Shape str(dims.size());
|
||||
if (str.empty())
|
||||
return str;
|
||||
@ -243,12 +283,14 @@ void fill_data_with_broadcast(ov::Tensor& tensor, ov::Tensor& values) {
|
||||
str.back() = 1;
|
||||
|
||||
// stride[i] = stride[i+1]*d[i+1]
|
||||
std::transform(dims.rbegin(), dims.rend() - 1, str.rbegin(), str.rbegin() + 1,
|
||||
[] (size_t d, size_t s) { return d * s; });
|
||||
std::transform(dims.rbegin(), dims.rend() - 1, str.rbegin(), str.rbegin() + 1, [](size_t d, size_t s) {
|
||||
return d * s;
|
||||
});
|
||||
|
||||
// zeroing broadcast dimension equal 1
|
||||
std::transform(str.begin(), str.end(), dims.begin(), str.begin(),
|
||||
[] (size_t s, size_t d) { return d == 1 ? 0 : s; });
|
||||
std::transform(str.begin(), str.end(), dims.begin(), str.begin(), [](size_t s, size_t d) {
|
||||
return d == 1 ? 0 : s;
|
||||
});
|
||||
|
||||
return str;
|
||||
};
|
||||
@ -261,30 +303,30 @@ void fill_data_with_broadcast(ov::Tensor& tensor, ov::Tensor& values) {
|
||||
|
||||
using namespace ov::element;
|
||||
switch (tensor.get_element_type()) {
|
||||
case u64:
|
||||
case i64:
|
||||
copy_7D<uint64_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
case f32:
|
||||
case i32:
|
||||
copy_7D<uint32_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
case i16:
|
||||
case u16:
|
||||
case f16:
|
||||
case bf16:
|
||||
copy_7D<uint16_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
case u8:
|
||||
case i8:
|
||||
copy_7D<uint8_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
default:
|
||||
OPENVINO_THROW("Unsupported precision by fill_data_with_broadcast function");
|
||||
case u64:
|
||||
case i64:
|
||||
copy_7D<uint64_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
case f32:
|
||||
case i32:
|
||||
copy_7D<uint32_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
case i16:
|
||||
case u16:
|
||||
case f16:
|
||||
case bf16:
|
||||
copy_7D<uint16_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
case u8:
|
||||
case i8:
|
||||
copy_7D<uint8_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
|
||||
break;
|
||||
default:
|
||||
OPENVINO_THROW("Unsupported precision by fill_data_with_broadcast function");
|
||||
}
|
||||
}
|
||||
|
||||
template<ov::element::Type_t SRC_E, ov::element::Type_t DST_E, typename std::enable_if<SRC_E != DST_E, int>::type = 0>
|
||||
template <ov::element::Type_t SRC_E, ov::element::Type_t DST_E, typename std::enable_if<SRC_E != DST_E, int>::type = 0>
|
||||
void copy_tensor_with_convert(const ov::Tensor& src_tensor, ov::Tensor& dst_tensor) {
|
||||
using SRC_TYPE = typename ov::fundamental_type_for<SRC_E>;
|
||||
using DST_TYPE = typename ov::fundamental_type_for<DST_E>;
|
||||
@ -296,12 +338,14 @@ void copy_tensor_with_convert(const ov::Tensor& src_tensor, ov::Tensor& dst_tens
|
||||
|
||||
auto dst_ptr = dst_tensor.data<DST_TYPE>();
|
||||
|
||||
auto converter = [] (SRC_TYPE value) {return static_cast<DST_TYPE>(value);};
|
||||
auto converter = [](SRC_TYPE value) {
|
||||
return static_cast<DST_TYPE>(value);
|
||||
};
|
||||
|
||||
std::transform(src_ptr, src_ptr + src_size, dst_ptr, converter);
|
||||
}
|
||||
|
||||
template<ov::element::Type_t SRC_E, ov::element::Type_t DST_E, typename std::enable_if<SRC_E == DST_E, int>::type = 0>
|
||||
template <ov::element::Type_t SRC_E, ov::element::Type_t DST_E, typename std::enable_if<SRC_E == DST_E, int>::type = 0>
|
||||
void copy_tensor_with_convert(const ov::Tensor& src_tensor, ov::Tensor& dst_tensor) {
|
||||
src_tensor.copy_to(dst_tensor);
|
||||
}
|
||||
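Note: copy_tensor_with_convert above is two overloads chosen at compile time with std::enable_if on the element type pair: mismatched types go through an element-wise static_cast, identical types fall back to a plain copy. A minimal sketch of that selection mechanism with illustrative names, not part of the commit:

#include <algorithm>
#include <type_traits>
#include <vector>

// Different types: convert element by element.
template <typename Src, typename Dst, typename std::enable_if<!std::is_same<Src, Dst>::value, int>::type = 0>
void copy_convert(const std::vector<Src>& src, std::vector<Dst>& dst) {
    dst.resize(src.size());
    std::transform(src.begin(), src.end(), dst.begin(), [](Src v) { return static_cast<Dst>(v); });
}

// Same type: plain copy, no conversion.
template <typename Src, typename Dst, typename std::enable_if<std::is_same<Src, Dst>::value, int>::type = 0>
void copy_convert(const std::vector<Src>& src, std::vector<Dst>& dst) {
    dst = src;
}

Calling copy_convert with a float source and an int32_t destination selects the converting overload; with matching element types the plain-copy overload is taken, exactly one candidate being viable for any pair.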
@ -310,30 +354,46 @@ ov::Tensor make_tensor_with_precision_convert(const ov::Tensor& tensor, ov::elem
|
||||
ov::Tensor new_tensor(prc, tensor.get_shape());
|
||||
auto src_prc = tensor.get_element_type();
|
||||
|
||||
#define CASE0(SRC_PRC, DST_PRC) case ov::element::DST_PRC : \
|
||||
copy_tensor_with_convert<ov::element::SRC_PRC, ov::element::DST_PRC> (tensor, new_tensor); break;
|
||||
#define CASE0(SRC_PRC, DST_PRC) \
|
||||
case ov::element::DST_PRC: \
|
||||
copy_tensor_with_convert<ov::element::SRC_PRC, ov::element::DST_PRC>(tensor, new_tensor); \
|
||||
break;
|
||||
|
||||
#define CASE(SRC_PRC) \
|
||||
case ov::element::SRC_PRC: \
|
||||
switch (prc) { \
|
||||
CASE0(SRC_PRC, bf16) \
|
||||
CASE0(SRC_PRC, f16) \
|
||||
CASE0(SRC_PRC, f32) \
|
||||
CASE0(SRC_PRC, f64) \
|
||||
CASE0(SRC_PRC, i8) \
|
||||
CASE0(SRC_PRC, i16) \
|
||||
CASE0(SRC_PRC, i32) \
|
||||
CASE0(SRC_PRC, i64) \
|
||||
CASE0(SRC_PRC, u8) \
|
||||
CASE0(SRC_PRC, u16) \
|
||||
CASE0(SRC_PRC, u32) \
|
||||
CASE0(SRC_PRC, u64) \
|
||||
default: OPENVINO_THROW("Unsupported precision case: ", prc.c_type_string()); \
|
||||
} break;
|
||||
#define CASE(SRC_PRC) \
|
||||
case ov::element::SRC_PRC: \
|
||||
switch (prc) { \
|
||||
CASE0(SRC_PRC, bf16) \
|
||||
CASE0(SRC_PRC, f16) \
|
||||
CASE0(SRC_PRC, f32) \
|
||||
CASE0(SRC_PRC, f64) \
|
||||
CASE0(SRC_PRC, i8) \
|
||||
CASE0(SRC_PRC, i16) \
|
||||
CASE0(SRC_PRC, i32) \
|
||||
CASE0(SRC_PRC, i64) \
|
||||
CASE0(SRC_PRC, u8) \
|
||||
CASE0(SRC_PRC, u16) \
|
||||
CASE0(SRC_PRC, u32) \
|
||||
CASE0(SRC_PRC, u64) \
|
||||
default: \
|
||||
OPENVINO_THROW("Unsupported precision case: ", prc.c_type_string()); \
|
||||
} \
|
||||
break;
|
||||
|
||||
switch (src_prc) {
|
||||
CASE(f64); CASE(f32); CASE(f16); CASE(bf16); CASE(i64); CASE(u64); CASE(i32); CASE(u32); CASE(i16); CASE(u16); CASE(i8); CASE(u8);
|
||||
default: OPENVINO_THROW("Unsupported precision case: ", src_prc.c_type_string());
|
||||
CASE(f64);
|
||||
CASE(f32);
|
||||
CASE(f16);
|
||||
CASE(bf16);
|
||||
CASE(i64);
|
||||
CASE(u64);
|
||||
CASE(i32);
|
||||
CASE(u32);
|
||||
CASE(i16);
|
||||
CASE(u16);
|
||||
CASE(i8);
|
||||
CASE(u8);
|
||||
default:
|
||||
OPENVINO_THROW("Unsupported precision case: ", src_prc.c_type_string());
|
||||
}
|
||||
#undef CASE0
|
||||
#undef CASE
|
||||
@ -356,44 +416,61 @@ void fill_data_with_broadcast(ov::Tensor& tensor, size_t axis, std::vector<float
|
||||
fill_data_with_broadcast(tensor, values_tensor);
|
||||
}
|
||||
|
||||
template<ov::element::Type_t DT>
|
||||
void fill_tensor_random(ov::Tensor& tensor, const uint32_t range, const int32_t start_from, const int32_t k, const int seed) {
|
||||
template <ov::element::Type_t DT>
|
||||
void fill_tensor_random(ov::Tensor& tensor,
|
||||
const uint32_t range,
|
||||
const int32_t start_from,
|
||||
const int32_t k,
|
||||
const int seed) {
|
||||
using T = typename ov::element_type_traits<DT>::value_type;
|
||||
auto *rawBlobDataPtr = static_cast<T*>(tensor.data());
|
||||
if (DT == ov::element::u4 || DT == ov::element::i4 ||
|
||||
DT == ov::element::u1) {
|
||||
auto* rawBlobDataPtr = static_cast<T*>(tensor.data());
|
||||
if (DT == ov::element::u4 || DT == ov::element::i4 || DT == ov::element::u1) {
|
||||
fill_data_random(rawBlobDataPtr, tensor.get_byte_size(), range, start_from, k, seed);
|
||||
} else {
|
||||
fill_data_random(rawBlobDataPtr, tensor.get_size(), range, start_from, k, seed);
|
||||
}
|
||||
}
|
||||
|
||||
template<ov::element::Type_t DT>
|
||||
void fill_tensor_random_float(ov::Tensor& tensor, const double range, const double start_from, const int32_t k, const int seed) {
|
||||
template <ov::element::Type_t DT>
|
||||
void fill_tensor_random_float(ov::Tensor& tensor,
|
||||
const double range,
|
||||
const double start_from,
|
||||
const int32_t k,
|
||||
const int seed) {
|
||||
using T = typename ov::element_type_traits<DT>::value_type;
|
||||
std::default_random_engine random(seed);
|
||||
// 1/k is the resolution of the floating point numbers
|
||||
std::uniform_real_distribution<double> distribution(k * start_from, k * (start_from + range));
|
||||
|
||||
auto *rawBlobDataPtr = static_cast<T*>(tensor.data());
|
||||
auto* rawBlobDataPtr = static_cast<T*>(tensor.data());
|
||||
for (size_t i = 0; i < tensor.get_size(); i++) {
|
||||
auto value = static_cast<float>(distribution(random));
|
||||
value /= static_cast<float>(k);
|
||||
if (DT == ov::element::Type_t::f16) {
|
||||
rawBlobDataPtr[i] = static_cast<T>(ngraph::float16(value).to_bits());
|
||||
rawBlobDataPtr[i] = static_cast<T>(ov::float16(value).to_bits());
|
||||
} else if (DT == ov::element::Type_t::bf16) {
|
||||
rawBlobDataPtr[i] = static_cast<T>(ngraph::bfloat16(value).to_bits());
|
||||
rawBlobDataPtr[i] = static_cast<T>(ov::bfloat16(value).to_bits());
|
||||
} else {
|
||||
rawBlobDataPtr[i] = static_cast<T>(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
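Note: in fill_tensor_random_float above, k acts as a resolution knob: the distribution is stretched to [k*start_from, k*(start_from+range)] and the drawn value is divided back by k, so the result stays inside [start_from, start_from + range]; in the integer-backed fillers the same k yields values on a 1/k grid. A small sketch of the grid version, with illustrative names and assuming k > 0 and range > 0:

#include <cstddef>
#include <cstdint>
#include <random>
#include <vector>

// Draw n values from [start_from, start_from + range) restricted to multiples of 1/k.
std::vector<float> random_on_grid(std::size_t n, uint32_t range, int32_t start_from, int32_t k, int seed) {
    std::mt19937 gen(seed);
    std::uniform_int_distribution<int64_t> dist(int64_t{k} * start_from,
                                                int64_t{k} * (start_from + int64_t{range}) - 1);
    std::vector<float> out(n);
    for (auto& v : out)
        v = static_cast<float>(dist(gen)) / static_cast<float>(k);  // step size 1/k
    return out;
}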
void fill_tensor_random(ov::Tensor& tensor, const double range, const double start_from, const int32_t k, const int seed) {
|
||||
void fill_tensor_random(ov::Tensor& tensor,
|
||||
const double range,
|
||||
const double start_from,
|
||||
const int32_t k,
|
||||
const int seed) {
|
||||
auto element_type = tensor.get_element_type();
|
||||
|
||||
#define CASE(X) case X: fill_tensor_random<X>(tensor, static_cast<uint32_t>(range), static_cast<int32_t>(start_from), k, seed); break;
|
||||
#define CASE_FLOAT(X) case X: fill_tensor_random_float<X>(tensor, range, start_from, k, seed); break;
|
||||
#define CASE(X) \
|
||||
case X: \
|
||||
fill_tensor_random<X>(tensor, static_cast<uint32_t>(range), static_cast<int32_t>(start_from), k, seed); \
|
||||
break;
|
||||
#define CASE_FLOAT(X) \
|
||||
case X: \
|
||||
fill_tensor_random_float<X>(tensor, range, start_from, k, seed); \
|
||||
break;
|
||||
|
||||
switch (element_type) {
|
||||
CASE_FLOAT(ov::element::f64)
|
||||
@ -411,8 +488,8 @@ void fill_tensor_random(ov::Tensor& tensor, const double range, const double sta
|
||||
CASE(ov::element::i16)
|
||||
CASE(ov::element::i32)
|
||||
CASE(ov::element::i64)
|
||||
default:
|
||||
OPENVINO_THROW("Wrong precision specified: ", element_type);
|
||||
default:
|
||||
OPENVINO_THROW("Wrong precision specified: ", element_type);
|
||||
}
|
||||
#undef CASE
|
||||
#undef CASE_FLOAT
|
||||
|
@ -7,20 +7,20 @@
|
||||
#include "precomp.hpp"
|
||||
|
||||
#ifdef __APPLE__
|
||||
# include <mach-o/dyld.h>
|
||||
# include <mach-o/dyld.h>
|
||||
#endif
|
||||
|
||||
#ifdef _WIN32
|
||||
# ifndef NOMINMAX
|
||||
# define NOMINMAX
|
||||
# endif
|
||||
# include <windows.h>
|
||||
# include <direct.h>
|
||||
# include <stdlib.h>
|
||||
# ifndef NOMINMAX
|
||||
# define NOMINMAX
|
||||
# endif
|
||||
# include <direct.h>
|
||||
# include <stdlib.h>
|
||||
# include <windows.h>
|
||||
#else
|
||||
# include <dlfcn.h>
|
||||
# include <unistd.h>
|
||||
# include <limits.h>
|
||||
# include <dlfcn.h>
|
||||
# include <limits.h>
|
||||
# include <unistd.h>
|
||||
#endif
|
||||
|
||||
namespace ov {
|
||||
@ -35,7 +35,7 @@ std::string getExecutableDirectory() {
|
||||
#elif defined(__APPLE__)
|
||||
Dl_info info;
|
||||
dladdr(reinterpret_cast<void*>(getExecutableDirectory), &info);
|
||||
const char * buffer = info.dli_fname;
|
||||
const char* buffer = info.dli_fname;
|
||||
int len = std::strlen(buffer);
|
||||
#else
|
||||
char buffer[PATH_MAX];
|
||||
@ -51,7 +51,7 @@ std::string getExecutableDirectory() {
|
||||
std::string getCurrentWorkingDir() {
|
||||
std::string path;
|
||||
#ifdef _WIN32
|
||||
char * buffer = _getcwd(NULL, 0);
|
||||
char* buffer = _getcwd(NULL, 0);
|
||||
if (buffer != NULL) {
|
||||
path = std::string(buffer);
|
||||
free(buffer);
|
||||
|
@ -895,6 +895,7 @@ void check_rt_info(const std::shared_ptr<ov::Model>& f) {
|
||||
|
||||
namespace attributes {
|
||||
namespace detail {
|
||||
OPENVINO_SUPPRESS_DEPRECATED_START
|
||||
void ReadAndStoreAttributes::on_adapter(const std::string& name, ov::ValueAccessor<void>& adapter) {
|
||||
if (auto inputs = ov::as_type<ov::AttributeAdapter<SubGraphOpInputDescription>>(&adapter)) {
|
||||
insert(name, inputs->get());
|
||||
@ -922,6 +923,7 @@ void ReadAndStoreAttributes::on_adapter(const std::string& name, ov::ValueAccess
|
||||
adapter.get_type_info().name + "']";
|
||||
}
|
||||
}
|
||||
OPENVINO_SUPPRESS_DEPRECATED_END
|
||||
template <typename AttrValue>
|
||||
void ReadAndCompareAttributes::verify(const std::string& name, const AttrValue& attr_value) {
|
||||
if (should_return()) {
|
||||
@ -940,6 +942,7 @@ void ReadAndCompareAttributes::verify(const std::string& name, const AttrValue&
|
||||
}
|
||||
}
|
||||
|
||||
OPENVINO_SUPPRESS_DEPRECATED_START
|
||||
void ReadAndCompareAttributes::verify_mem_buf(const std::string& name,
|
||||
const std::shared_ptr<ngraph::runtime::AlignedBuffer>& buffer) {
|
||||
if (should_return()) {
|
||||
@ -958,6 +961,7 @@ void ReadAndCompareAttributes::verify_mem_buf(const std::string& name,
|
||||
return;
|
||||
}
|
||||
}
|
||||
OPENVINO_SUPPRESS_DEPRECATED_END
|
||||
|
||||
void ReadAndCompareAttributes::verify_function(const std::string& name, ModelAccessor& adapter) {
|
||||
if (should_return()) {
|
||||
@ -976,6 +980,7 @@ void ReadAndCompareAttributes::verify_function(const std::string& name, ModelAcc
|
||||
}
|
||||
}
|
||||
|
||||
OPENVINO_SUPPRESS_DEPRECATED_START
|
||||
void ReadAndCompareAttributes::verify_others(const std::string& name, ov::ValueAccessor<void>& adapter) {
|
||||
if (auto inputs = ov::as_type<ov::AttributeAdapter<SubGraphOpInputDescription>>(&adapter)) {
|
||||
verify(name, inputs->get());
|
||||
@ -1000,6 +1005,7 @@ void ReadAndCompareAttributes::verify_others(const std::string& name, ov::ValueA
|
||||
adapter.get_type_info().name + "']";
|
||||
}
|
||||
}
|
||||
OPENVINO_SUPPRESS_DEPRECATED_END
|
||||
|
||||
} // namespace detail
|
||||
|
||||
|
@ -11,18 +11,22 @@
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
ov::Tensor create_and_fill_tensor(
|
||||
const ov::element::Type element_type,
|
||||
const ov::Shape& shape,
|
||||
const uint32_t range,
|
||||
const double_t start_from,
|
||||
const int32_t resolution,
|
||||
const int seed) {
|
||||
ov::Tensor create_and_fill_tensor(const ov::element::Type element_type,
|
||||
const ov::Shape& shape,
|
||||
const uint32_t range,
|
||||
const double_t start_from,
|
||||
const int32_t resolution,
|
||||
const int seed) {
|
||||
auto tensor = ov::Tensor{element_type, shape};
|
||||
#define CASE(X) case X: fill_data_random( \
|
||||
tensor.data<element_type_traits<X>::value_type>(), \
|
||||
shape_size(shape), \
|
||||
range, start_from, resolution, seed); break;
|
||||
#define CASE(X) \
|
||||
case X: \
|
||||
fill_data_random(tensor.data<element_type_traits<X>::value_type>(), \
|
||||
shape_size(shape), \
|
||||
range, \
|
||||
start_from, \
|
||||
resolution, \
|
||||
seed); \
|
||||
break;
|
||||
switch (element_type) {
|
||||
CASE(ov::element::Type_t::boolean)
|
||||
CASE(ov::element::Type_t::i8)
|
||||
@ -37,34 +41,38 @@ ov::Tensor create_and_fill_tensor(
|
||||
CASE(ov::element::Type_t::f16)
|
||||
CASE(ov::element::Type_t::f32)
|
||||
CASE(ov::element::Type_t::f64)
|
||||
case ov::element::Type_t::u1:
|
||||
case ov::element::Type_t::i4:
|
||||
case ov::element::Type_t::u4:
|
||||
fill_data_random(
|
||||
static_cast<uint8_t*>(tensor.data()),
|
||||
tensor.get_byte_size(),
|
||||
range, start_from, resolution, seed); break;
|
||||
default: OPENVINO_THROW("Unsupported element type: ", element_type);
|
||||
case ov::element::Type_t::u1:
|
||||
case ov::element::Type_t::i4:
|
||||
case ov::element::Type_t::u4:
|
||||
fill_data_random(static_cast<uint8_t*>(tensor.data()),
|
||||
tensor.get_byte_size(),
|
||||
range,
|
||||
start_from,
|
||||
resolution,
|
||||
seed);
|
||||
break;
|
||||
default:
|
||||
OPENVINO_THROW("Unsupported element type: ", element_type);
|
||||
}
|
||||
#undef CASE
|
||||
return tensor;
|
||||
}
|
||||
|
||||
ov::Tensor create_and_fill_tensor_unique_sequence(const ov::element::Type element_type,
|
||||
const ov::Shape& shape,
|
||||
const int32_t start_from,
|
||||
const int32_t resolution,
|
||||
const int seed) {
|
||||
const ov::Shape& shape,
|
||||
const int32_t start_from,
|
||||
const int32_t resolution,
|
||||
const int seed) {
|
||||
auto tensor = ov::Tensor{element_type, shape};
|
||||
auto range = shape_size(shape) * 2;
|
||||
#define CASE(X) \
|
||||
case X: \
|
||||
#define CASE(X) \
|
||||
case X: \
|
||||
fill_random_unique_sequence(tensor.data<element_type_traits<X>::value_type>(), \
|
||||
shape_size(shape), \
|
||||
range, \
|
||||
start_from, \
|
||||
resolution, \
|
||||
seed); \
|
||||
shape_size(shape), \
|
||||
range, \
|
||||
start_from, \
|
||||
resolution, \
|
||||
seed); \
|
||||
break;
|
||||
|
||||
switch (element_type) {
|
||||
@ -85,11 +93,11 @@ ov::Tensor create_and_fill_tensor_unique_sequence(const ov::element::Type elemen
|
||||
case ov::element::Type_t::i4:
|
||||
case ov::element::Type_t::u4:
|
||||
fill_random_unique_sequence(static_cast<uint8_t*>(tensor.data()),
|
||||
tensor.get_byte_size(),
|
||||
range,
|
||||
start_from,
|
||||
resolution,
|
||||
seed);
|
||||
tensor.get_byte_size(),
|
||||
range,
|
||||
start_from,
|
||||
resolution,
|
||||
seed);
|
||||
break;
|
||||
default:
|
||||
OPENVINO_THROW("Unsupported element type: ", element_type);
|
||||
@ -98,17 +106,20 @@ ov::Tensor create_and_fill_tensor_unique_sequence(const ov::element::Type elemen
|
||||
return tensor;
|
||||
}
|
||||
|
||||
ov::runtime::Tensor create_and_fill_tensor_normal_distribution(
|
||||
const ov::element::Type element_type,
|
||||
const ov::Shape& shape,
|
||||
const float mean,
|
||||
const float stddev,
|
||||
const int seed) {
|
||||
ov::runtime::Tensor create_and_fill_tensor_normal_distribution(const ov::element::Type element_type,
|
||||
const ov::Shape& shape,
|
||||
const float mean,
|
||||
const float stddev,
|
||||
const int seed) {
|
||||
auto tensor = ov::runtime::Tensor{element_type, shape};
|
||||
#define CASE(X) case X: fill_data_ptr_normal_random_float( \
|
||||
tensor.data<element_type_traits<X>::value_type>(), \
|
||||
shape_size(shape), \
|
||||
mean, stddev, seed); break;
|
||||
#define CASE(X) \
|
||||
case X: \
|
||||
fill_data_ptr_normal_random_float(tensor.data<element_type_traits<X>::value_type>(), \
|
||||
shape_size(shape), \
|
||||
mean, \
|
||||
stddev, \
|
||||
seed); \
|
||||
break;
|
||||
switch (element_type) {
|
||||
CASE(ov::element::Type_t::boolean)
|
||||
CASE(ov::element::Type_t::i8)
|
||||
@ -123,28 +134,36 @@ ov::runtime::Tensor create_and_fill_tensor_normal_distribution(
|
||||
CASE(ov::element::Type_t::f16)
|
||||
CASE(ov::element::Type_t::f32)
|
||||
CASE(ov::element::Type_t::f64)
|
||||
case ov::element::Type_t::u1:
|
||||
case ov::element::Type_t::i4:
|
||||
case ov::element::Type_t::u4:
|
||||
fill_data_ptr_normal_random_float(
|
||||
static_cast<uint8_t*>(tensor.data()),
|
||||
tensor.get_byte_size(),
|
||||
mean, stddev, seed); break;
|
||||
default: OPENVINO_THROW("Unsupported element type: ", element_type);
|
||||
case ov::element::Type_t::u1:
|
||||
case ov::element::Type_t::i4:
|
||||
case ov::element::Type_t::u4:
|
||||
fill_data_ptr_normal_random_float(static_cast<uint8_t*>(tensor.data()),
|
||||
tensor.get_byte_size(),
|
||||
mean,
|
||||
stddev,
|
||||
seed);
|
||||
break;
|
||||
default:
|
||||
OPENVINO_THROW("Unsupported element type: ", element_type);
|
||||
}
|
||||
#undef CASE
|
||||
return tensor;
|
||||
}
|
||||
|
||||
ov::runtime::Tensor create_and_fill_tensor_consistently(
|
||||
const ov::element::Type element_type,
|
||||
const ov::Shape& shape,
|
||||
const uint32_t range,
|
||||
const int32_t start_from,
|
||||
const int32_t resolution) {
|
||||
ov::runtime::Tensor create_and_fill_tensor_consistently(const ov::element::Type element_type,
|
||||
const ov::Shape& shape,
|
||||
const uint32_t range,
|
||||
const int32_t start_from,
|
||||
const int32_t resolution) {
|
||||
auto tensor = ov::runtime::Tensor{element_type, shape};
|
||||
#define CASE(X) case X: fill_data_ptr_consistently(tensor.data<element_type_traits<X>::value_type>(), \
|
||||
tensor.get_size(), range, start_from, resolution); break;
|
||||
#define CASE(X) \
|
||||
case X: \
|
||||
fill_data_ptr_consistently(tensor.data<element_type_traits<X>::value_type>(), \
|
||||
tensor.get_size(), \
|
||||
range, \
|
||||
start_from, \
|
||||
resolution); \
|
||||
break;
|
||||
switch (element_type) {
|
||||
CASE(ov::element::Type_t::boolean)
|
||||
CASE(ov::element::Type_t::i8)
|
||||
@ -159,13 +178,17 @@ tensor.get_size(), range, start_from, resolution); break;
|
||||
CASE(ov::element::Type_t::f16)
|
||||
CASE(ov::element::Type_t::f32)
|
||||
CASE(ov::element::Type_t::f64)
|
||||
case ov::element::Type_t::u1:
|
||||
case ov::element::Type_t::i4:
|
||||
case ov::element::Type_t::u4:
|
||||
fill_data_ptr_consistently(
|
||||
static_cast<uint8_t*>(tensor.data()),
|
||||
tensor.get_byte_size(), range, start_from, resolution); break;
|
||||
default: OPENVINO_THROW("Unsupported element type: ", element_type);
|
||||
case ov::element::Type_t::u1:
|
||||
case ov::element::Type_t::i4:
|
||||
case ov::element::Type_t::u4:
|
||||
fill_data_ptr_consistently(static_cast<uint8_t*>(tensor.data()),
|
||||
tensor.get_byte_size(),
|
||||
range,
|
||||
start_from,
|
||||
resolution);
|
||||
break;
|
||||
default:
|
||||
OPENVINO_THROW("Unsupported element type: ", element_type);
|
||||
}
|
||||
#undef CASE
|
||||
return tensor;
|
||||
@ -205,16 +228,16 @@ inline double calculate_median(std::vector<double>& abs_values) {
auto expected_shape = abs_values.size();
if (expected_shape % 2) {
std::nth_element(abs_values.begin(), abs_values.begin() + expected_shape / 2, abs_values.end());
abs_median = abs_values[expected_shape / 2];
abs_median = abs_values[expected_shape / 2];
} else {
std::nth_element(abs_values.begin(), abs_values.begin() + expected_shape / 2, abs_values.end());
std::nth_element(abs_values.begin(), abs_values.begin() + (expected_shape - 1) / 2, abs_values.end());
abs_median = (abs_values[(expected_shape - 1) / 2] + abs_values[expected_shape / 2]) / 2.0;
abs_median = (abs_values[(expected_shape - 1) / 2] + abs_values[expected_shape / 2]) / 2.0;
}
return abs_median;
}

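Note: calculate_median above avoids a full sort: std::nth_element partially orders the vector so the element at the middle index lands in its sorted position, and for an even count a second nth_element fetches the other middle element before averaging. The same approach in isolation; the helper name is illustrative, not part of the commit.

#include <algorithm>
#include <cstddef>
#include <vector>

// Median of a non-empty vector; the input is reordered in place.
double median(std::vector<double>& v) {
    const std::size_t n = v.size();
    std::nth_element(v.begin(), v.begin() + n / 2, v.end());
    double upper = v[n / 2];
    if (n % 2 != 0)
        return upper;
    std::nth_element(v.begin(), v.begin() + (n - 1) / 2, v.end());
    return (v[(n - 1) / 2] + upper) / 2.0;
}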
template<typename ExpectedT, typename ActualT>
|
||||
template <typename ExpectedT, typename ActualT>
|
||||
void compare(const ov::Tensor& expected,
|
||||
const ov::Tensor& actual,
|
||||
const double abs_threshold_ = std::numeric_limits<double>::max(),
|
||||
@ -224,7 +247,7 @@ void compare(const ov::Tensor& expected,
|
||||
if (expected_shape != actual_shape) {
|
||||
std::ostringstream out_stream;
|
||||
out_stream << "Expected and actual shape are different: " << expected_shape << " " << actual_shape;
|
||||
throw std::runtime_error(out_stream.str());
|
||||
throw std::runtime_error(out_stream.str());
|
||||
}
|
||||
if (shape_size(actual_shape) == 0) {
|
||||
return;
|
||||
@ -281,52 +304,55 @@ void compare(const ov::Tensor& expected,
|
||||
|
||||
if (!(less_or_equal(abs_error.max, abs_threshold) && less_or_equal(rel_error.max, rel_threshold))) {
|
||||
std::ostringstream out_stream;
|
||||
out_stream << "abs_max < abs_threshold && rel_max < rel_threshold" <<
|
||||
"\n\t abs_max: " << abs_error.max <<
|
||||
"\n\t\t coordinate " << abs_error.max_coordinate<<
|
||||
"; abs errors count " << abs_error.count << "; abs mean " <<
|
||||
abs_error.mean << "; abs threshold " << abs_threshold <<
|
||||
"\n\t rel_max: " << rel_error.max <<
|
||||
"\n\t\t coordinate " << rel_error.max_coordinate <<
|
||||
"; rel errors count " << rel_error.count << "; rel mean " <<
|
||||
rel_error.mean << "; rel threshold " << rel_threshold;
|
||||
out_stream << "abs_max < abs_threshold && rel_max < rel_threshold"
|
||||
<< "\n\t abs_max: " << abs_error.max << "\n\t\t coordinate " << abs_error.max_coordinate
|
||||
<< "; abs errors count " << abs_error.count << "; abs mean " << abs_error.mean << "; abs threshold "
|
||||
<< abs_threshold << "\n\t rel_max: " << rel_error.max << "\n\t\t coordinate "
|
||||
<< rel_error.max_coordinate << "; rel errors count " << rel_error.count << "; rel mean "
|
||||
<< rel_error.mean << "; rel threshold " << rel_threshold;
|
||||
throw std::runtime_error(out_stream.str());
|
||||
}
|
||||
}
|
||||
|
||||
void compare(
|
||||
const ov::Tensor& expected,
|
||||
const ov::Tensor& actual,
|
||||
const double abs_threshold,
|
||||
const double rel_threshold) {
|
||||
#define CASE0(X, Y) case Y : compare< \
|
||||
element_type_traits<X>::value_type, \
|
||||
element_type_traits<Y>::value_type>( \
|
||||
expected, actual, abs_threshold, rel_threshold); break;
|
||||
void compare(const ov::Tensor& expected,
|
||||
const ov::Tensor& actual,
|
||||
const double abs_threshold,
|
||||
const double rel_threshold) {
|
||||
#define CASE0(X, Y) \
|
||||
case Y: \
|
||||
compare<element_type_traits<X>::value_type, element_type_traits<Y>::value_type>(expected, \
|
||||
actual, \
|
||||
abs_threshold, \
|
||||
rel_threshold); \
|
||||
break;
|
||||
|
||||
#define CASE(X) \
|
||||
case X: \
|
||||
switch (actual.get_element_type()) { \
|
||||
CASE0(X, ov::element::Type_t::boolean) \
|
||||
CASE0(X, ov::element::Type_t::bf16) \
|
||||
CASE0(X, ov::element::Type_t::f16) \
|
||||
CASE0(X, ov::element::Type_t::f32) \
|
||||
CASE0(X, ov::element::Type_t::f64) \
|
||||
CASE0(X, ov::element::Type_t::i4) \
|
||||
CASE0(X, ov::element::Type_t::i8) \
|
||||
CASE0(X, ov::element::Type_t::i16) \
|
||||
CASE0(X, ov::element::Type_t::i32) \
|
||||
CASE0(X, ov::element::Type_t::i64) \
|
||||
CASE0(X, ov::element::Type_t::u1) \
|
||||
CASE0(X, ov::element::Type_t::u4) \
|
||||
CASE0(X, ov::element::Type_t::u8) \
|
||||
CASE0(X, ov::element::Type_t::u16) \
|
||||
CASE0(X, ov::element::Type_t::u32) \
|
||||
CASE0(X, ov::element::Type_t::u64) \
|
||||
default: OPENVINO_THROW("Unsupported element type: ", \
|
||||
"expected ", expected.get_element_type(), \
|
||||
", actual ", actual.get_element_type()); \
|
||||
} break;
|
||||
#define CASE(X) \
|
||||
case X: \
|
||||
switch (actual.get_element_type()) { \
|
||||
CASE0(X, ov::element::Type_t::boolean) \
|
||||
CASE0(X, ov::element::Type_t::bf16) \
|
||||
CASE0(X, ov::element::Type_t::f16) \
|
||||
CASE0(X, ov::element::Type_t::f32) \
|
||||
CASE0(X, ov::element::Type_t::f64) \
|
||||
CASE0(X, ov::element::Type_t::i4) \
|
||||
CASE0(X, ov::element::Type_t::i8) \
|
||||
CASE0(X, ov::element::Type_t::i16) \
|
||||
CASE0(X, ov::element::Type_t::i32) \
|
||||
CASE0(X, ov::element::Type_t::i64) \
|
||||
CASE0(X, ov::element::Type_t::u1) \
|
||||
CASE0(X, ov::element::Type_t::u4) \
|
||||
CASE0(X, ov::element::Type_t::u8) \
|
||||
CASE0(X, ov::element::Type_t::u16) \
|
||||
CASE0(X, ov::element::Type_t::u32) \
|
||||
CASE0(X, ov::element::Type_t::u64) \
|
||||
default: \
|
||||
OPENVINO_THROW("Unsupported element type: ", \
|
||||
"expected ", \
|
||||
expected.get_element_type(), \
|
||||
", actual ", \
|
||||
actual.get_element_type()); \
|
||||
} \
|
||||
break;
|
||||
|
||||
switch (expected.get_element_type()) {
|
||||
CASE(ov::element::Type_t::boolean)
|
||||
@ -345,7 +371,8 @@ void compare(
|
||||
CASE(ov::element::Type_t::u16)
|
||||
CASE(ov::element::Type_t::u32)
|
||||
CASE(ov::element::Type_t::u64)
|
||||
default: OPENVINO_THROW("Unsupported element type: ", expected.get_element_type());
|
||||
default:
|
||||
OPENVINO_THROW("Unsupported element type: ", expected.get_element_type());
|
||||
}
|
||||
#undef CASE0
|
||||
#undef CASE
|
||||
|
@ -32,8 +32,7 @@ private:
} // namespace pass
} // namespace ov

TransformationTestsF::TransformationTestsF()
: comparator(FunctionsComparator::no_default()) {
TransformationTestsF::TransformationTestsF() : comparator(FunctionsComparator::no_default()) {
m_unh = std::make_shared<ov::pass::UniqueNamesHolder>();
comparator.enable(FunctionsComparator::CmpValues::NODES);
comparator.enable(FunctionsComparator::CmpValues::PRECISIONS);

@ -5,31 +5,31 @@
#pragma once

#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <climits>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <numeric>
#include <ostream>
#include <queue>
#include <set>
#include <sstream>
#include <string>
#include <thread>
#include <tuple>
#include <type_traits>
#include <typeinfo>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include <cassert>
#include <cctype>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <bitset>
#include <limits>
#include <queue>
#include <fstream>
#include <unordered_map>
#include <climits>
#include <thread>
#include <vector>
@ -44,18 +44,26 @@ std::string TestsCommon::GetTimestamp() {
|
||||
}
|
||||
|
||||
std::string TestsCommon::GetTestName() const {
|
||||
std::string test_name =
|
||||
::testing::UnitTest::GetInstance()->current_test_info()->name();
|
||||
std::replace_if(test_name.begin(), test_name.end(),
|
||||
[](char c) { return !std::isalnum(c); }, '_');
|
||||
std::string test_name = ::testing::UnitTest::GetInstance()->current_test_info()->name();
|
||||
std::replace_if(
|
||||
test_name.begin(),
|
||||
test_name.end(),
|
||||
[](char c) {
|
||||
return !std::isalnum(c);
|
||||
},
|
||||
'_');
|
||||
return test_name;
|
||||
}
|
||||
|
||||
std::string TestsCommon::GetFullTestName() const {
|
||||
std::string suite_name =
|
||||
::testing::UnitTest::GetInstance()->current_test_info()->test_suite_name();
|
||||
std::replace_if(suite_name.begin(), suite_name.end(),
|
||||
[](char c) { return !std::isalnum(c); }, '_');
|
||||
std::string suite_name = ::testing::UnitTest::GetInstance()->current_test_info()->test_suite_name();
|
||||
std::replace_if(
|
||||
suite_name.begin(),
|
||||
suite_name.end(),
|
||||
[](char c) {
|
||||
return !std::isalnum(c);
|
||||
},
|
||||
'_');
|
||||
|
||||
std::string test_name = GetTestName();
|
||||
|
||||
|
@ -8,15 +8,15 @@ namespace ov {
namespace test {
namespace utils {

const char *DEVICE_AUTO = "AUTO";
const char *DEVICE_CPU = "CPU";
const char *DEVICE_GNA = "GNA";
const char *DEVICE_GPU = "GPU";
const char *DEVICE_KEEMBAY = "NPU";
const char *DEVICE_BATCH = "BATCH";
const char *DEVICE_MULTI = "MULTI";
const char *DEVICE_TEMPLATE = "TEMPLATE";
const char *DEVICE_HETERO = "HETERO";
const char* DEVICE_AUTO = "AUTO";
const char* DEVICE_CPU = "CPU";
const char* DEVICE_GNA = "GNA";
const char* DEVICE_GPU = "GPU";
const char* DEVICE_KEEMBAY = "NPU";
const char* DEVICE_BATCH = "BATCH";
const char* DEVICE_MULTI = "MULTI";
const char* DEVICE_TEMPLATE = "TEMPLATE";
const char* DEVICE_HETERO = "HETERO";

} // namespace utils
} // namespace test
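For context (an editor's sketch, not part of this commit): these strings are the device names handed to the runtime when a test compiles a model. The helper below assumes the constants are exposed via common_test_utils/test_constants.hpp and uses the standard ov::Core API.

#include <memory>

#include "common_test_utils/test_constants.hpp"
#include "openvino/runtime/core.hpp"

// Hypothetical helper: compile a model on the reference TEMPLATE plugin used by many functional tests.
ov::CompiledModel compile_on_template(const std::shared_ptr<ov::Model>& model) {
    ov::Core core;
    return core.compile_model(model, ov::test::utils::DEVICE_TEMPLATE);
}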
@ -10,16 +10,14 @@ namespace ov {
namespace test {
namespace utils {

const std::vector<std::wstring> test_unicode_postfix_vector = {
L"unicode_Яㅎあ",
L"ひらがな日本語",
L"大家有天分",
L"עפצקרשתםןףץ",
L"ث خ ذ ض ظ غ",
L"그것이정당하다",
L"АБВГДЕЁЖЗИЙ",
L"СТУФХЦЧШЩЬЮЯ"
};
const std::vector<std::wstring> test_unicode_postfix_vector = {L"unicode_Яㅎあ",
L"ひらがな日本語",
L"大家有天分",
L"עפצקרשתםןףץ",
L"ث خ ذ ض ظ غ",
L"그것이정당하다",
L"АБВГДЕЁЖЗИЙ",
L"СТУФХЦЧШЩЬЮЯ"};

} // namespace utils
} // namespace test
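For context (an editor's sketch; the helper name and extension are illustrative, not from the repo): these postfixes are appended to file names when tests exercise wide-character paths, along the lines of:

#include <string>
#include <vector>

// Build wide-character file names by appending each unicode postfix to a base name.
std::vector<std::wstring> make_unicode_paths(const std::wstring& base,
                                             const std::vector<std::wstring>& postfixes) {
    std::vector<std::wstring> paths;
    paths.reserve(postfixes.size());
    for (const auto& postfix : postfixes) {
        paths.push_back(base + L"_" + postfix + L".xml");
    }
    return paths;
}

// Usage sketch: make_unicode_paths(L"model", ov::test::utils::test_unicode_postfix_vector);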
@ -4,8 +4,7 @@

#include <gtest/gtest.h>

#include <common_test_utils/graph_comparator.hpp>

#include "common_test_utils/graph_comparator.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/convolution.hpp"
@ -15,24 +14,25 @@
#include "openvino/op/squeeze.hpp"
#include "openvino/op/tensor_iterator.hpp"
#include "openvino/op/unsqueeze.hpp"
#include "openvino/op/util/variable.hpp"

TEST(GraphComparatorTests, AllEnablePositiveCheck) {
FunctionsComparator comparator(FunctionsComparator::no_default());
std::shared_ptr<ov::Model> function, function_ref;
{
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add}, ngraph::ParameterVector{ input });
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
function = function_ref->clone();
}
comparator.enable(FunctionsComparator::NAMES)
.enable(FunctionsComparator::NODES)
.enable(FunctionsComparator::CONST_VALUES)
.enable(FunctionsComparator::PRECISIONS)
.enable(FunctionsComparator::ATTRIBUTES)
.enable(FunctionsComparator::RUNTIME_KEYS)
.enable(FunctionsComparator::TENSOR_NAMES);
.enable(FunctionsComparator::NODES)
.enable(FunctionsComparator::CONST_VALUES)
.enable(FunctionsComparator::PRECISIONS)
.enable(FunctionsComparator::ATTRIBUTES)
.enable(FunctionsComparator::RUNTIME_KEYS)
.enable(FunctionsComparator::TENSOR_NAMES);

auto res = comparator.compare(function, function_ref);
ASSERT_TRUE(res.valid) << res.message;
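For context (an editor's summary, not part of the diff): most of the remaining hunks in this file apply the same mechanical substitution shown in the test above; a compact before/after sketch of that pattern, using the names from that test:

// Before (legacy ngraph API, removed by this commit):
//   auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
//   function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{add},
//                                                     ngraph::ParameterVector{input});
// After (ov:: equivalents, same graph):
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
auto function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});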
@ -42,16 +42,16 @@ TEST(GraphComparatorTests, CheckbyDefault) {
|
||||
FunctionsComparator comparator(FunctionsComparator::with_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto input2 = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto input2 = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, input2);
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input, input2 });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input, input2});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {12});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {12});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
ASSERT_FALSE(res.valid) << res.message;
|
||||
@ -61,18 +61,18 @@ TEST(GraphComparatorTests, CheckResultsNumber) {
|
||||
FunctionsComparator comparator(FunctionsComparator::with_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto input2 = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto input2 = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, input2);
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input, input2 });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input, input2});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {12});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {12});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
auto result1 = std::make_shared<ov::op::v0::Result>(constant);
|
||||
auto result2 = std::make_shared<ov::op::v0::Result>(add);
|
||||
function = std::make_shared<ngraph::Function>(ngraph::ResultVector{ result1, result2 }, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::ResultVector{result1, result2}, ov::ParameterVector{input});
|
||||
}
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
ASSERT_FALSE(res.valid) << res.message;
|
||||
@ -82,25 +82,24 @@ TEST(GraphComparatorTests, NamesCheckPositive) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
input->set_friendly_name("new_name1");
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
|
||||
constant->set_friendly_name("new_name2");
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
add->set_friendly_name("new_name3");
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
input->set_friendly_name("new_name1");
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
|
||||
constant->set_friendly_name("new_name2");
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
add->set_friendly_name("new_name3");
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::NAMES)
|
||||
.enable(FunctionsComparator::NODES);
|
||||
comparator.enable(FunctionsComparator::NAMES).enable(FunctionsComparator::NODES);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
ASSERT_TRUE(res.valid) << res.message;
|
||||
}
|
||||
@ -109,25 +108,24 @@ TEST(GraphComparatorTests, NamesCheckNegative) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
input->set_friendly_name("new_name1");
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
|
||||
constant->set_friendly_name("new_name2");
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
add->set_friendly_name("new_name3");
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
input->set_friendly_name("new_name1");
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
|
||||
constant->set_friendly_name("new_name2");
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
add->set_friendly_name("new_name3_different");
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::NAMES)
|
||||
.enable(FunctionsComparator::NODES);
|
||||
comparator.enable(FunctionsComparator::NAMES).enable(FunctionsComparator::NODES);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
ASSERT_FALSE(res.valid) << res.message;
|
||||
}
|
||||
@ -136,16 +134,16 @@ TEST(GraphComparatorTests, ConstCheckWithoutEnable) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {12});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {12});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::NODES);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
@ -156,19 +154,18 @@ TEST(GraphComparatorTests, ConstCheckNegative) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {12});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {12});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::CONST_VALUES)
|
||||
.enable(FunctionsComparator::NODES);
|
||||
comparator.enable(FunctionsComparator::CONST_VALUES).enable(FunctionsComparator::NODES);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
ASSERT_FALSE(res.valid) << res.message;
|
||||
}
|
||||
@ -177,15 +174,14 @@ TEST(GraphComparatorTests, TensorNamesCheckNegative) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
function = function_ref->clone();
|
||||
add->get_input_tensor(0).set_names({"new_name"});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::TENSOR_NAMES)
|
||||
.enable(FunctionsComparator::NODES);
|
||||
comparator.enable(FunctionsComparator::TENSOR_NAMES).enable(FunctionsComparator::NODES);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
ASSERT_FALSE(res.valid) << res.message;
|
||||
}
|
||||
@ -194,10 +190,10 @@ TEST(GraphComparatorTests, TensorNamesCheckWithoutEnable) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
function = function_ref->clone();
|
||||
add->get_input_tensor(0).set_names({"new_name"});
|
||||
}
|
||||
@ -210,35 +206,36 @@ TEST(GraphComparatorTests, CheckAttributesNegative) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{ 1, 3, 12, 12 });
|
||||
auto const_weights = ov::op::v0::Constant::create(ov::element::f16,
|
||||
ov::Shape{ 1, 3, 3, 3 },
|
||||
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9 });
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 12, 12});
|
||||
auto const_weights = ov::op::v0::Constant::create(
|
||||
ov::element::f16,
|
||||
ov::Shape{1, 3, 3, 3},
|
||||
{1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9});
|
||||
auto convert_ins1 = std::make_shared<ov::op::v0::Convert>(const_weights, ov::element::f32);
|
||||
auto conv = std::make_shared<ov::op::v1::Convolution>(input,
|
||||
convert_ins1,
|
||||
ov::Strides{ 1, 1 },
|
||||
ov::CoordinateDiff{ 1, 1 },
|
||||
ov::CoordinateDiff{ 1, 1 },
|
||||
ov::Strides{ 1, 1 });
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ conv }, ngraph::ParameterVector{ input });
|
||||
ov::Strides{1, 1},
|
||||
ov::CoordinateDiff{1, 1},
|
||||
ov::CoordinateDiff{1, 1},
|
||||
ov::Strides{1, 1});
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{conv}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{ 1, 3, 12, 12 });
|
||||
auto const_weights = ov::op::v0::Constant::create(ov::element::f16,
|
||||
ov::Shape{ 1, 3, 3, 3 },
|
||||
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9 });
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 12, 12});
|
||||
auto const_weights = ov::op::v0::Constant::create(
|
||||
ov::element::f16,
|
||||
ov::Shape{1, 3, 3, 3},
|
||||
{1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9});
|
||||
auto convert_ins1 = std::make_shared<ov::op::v0::Convert>(const_weights, ov::element::f32);
|
||||
auto conv = std::make_shared<ov::op::v1::Convolution>(input,
|
||||
convert_ins1,
|
||||
ov::Strides{ 1, 1 },
|
||||
ov::CoordinateDiff{ 0, 0 },
|
||||
ov::CoordinateDiff{ 0, 0 },
|
||||
ov::Strides{ 1, 1 });
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ conv }, ngraph::ParameterVector{ input });
|
||||
ov::Strides{1, 1},
|
||||
ov::CoordinateDiff{0, 0},
|
||||
ov::CoordinateDiff{0, 0},
|
||||
ov::Strides{1, 1});
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{conv}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::ATTRIBUTES)
|
||||
.enable(FunctionsComparator::NODES);
|
||||
comparator.enable(FunctionsComparator::ATTRIBUTES).enable(FunctionsComparator::NODES);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
ASSERT_FALSE(res.valid) << res.message;
|
||||
}
|
||||
@ -247,19 +244,18 @@ TEST(GraphComparatorTests, CheckPrecisionsNegative) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::f32, {3}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::f32, {3}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::PRECISIONS)
|
||||
.enable(FunctionsComparator::NODES);
|
||||
comparator.enable(FunctionsComparator::PRECISIONS).enable(FunctionsComparator::NODES);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
ASSERT_FALSE(res.valid) << res.message;
|
||||
}
|
||||
@ -268,16 +264,16 @@ TEST(GraphComparatorTests, CheckPrecisionsWithoutEnable) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::f32, {3}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::f32, {3}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::NODES);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
@ -288,20 +284,19 @@ TEST(GraphComparatorTests, CheckRTInfo) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
add->get_rt_info()["my_info"] = 42;
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::RUNTIME_KEYS)
|
||||
.enable(FunctionsComparator::NODES);
|
||||
comparator.enable(FunctionsComparator::RUNTIME_KEYS).enable(FunctionsComparator::NODES);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
ASSERT_FALSE(res.valid) << res.message;
|
||||
}
|
||||
@ -310,20 +305,19 @@ TEST(GraphComparatorTests, CheckRTInfoReverse) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
add->get_rt_info()["my_info"] = 42;
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::RUNTIME_KEYS)
|
||||
.enable(FunctionsComparator::NODES);
|
||||
comparator.enable(FunctionsComparator::RUNTIME_KEYS).enable(FunctionsComparator::NODES);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
ASSERT_TRUE(res.valid) << res.message;
|
||||
}
|
||||
@ -332,20 +326,19 @@ TEST(GraphComparatorTests, CheckRTInfoInput) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
add->input(0).get_rt_info()["my_info"] = 42;
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::RUNTIME_KEYS)
|
||||
.enable(FunctionsComparator::NODES);
|
||||
comparator.enable(FunctionsComparator::RUNTIME_KEYS).enable(FunctionsComparator::NODES);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
ASSERT_FALSE(res.valid) << res.message;
|
||||
}
|
||||
@ -354,20 +347,19 @@ TEST(GraphComparatorTests, CheckRTInfoOutput) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
add->output(0).get_rt_info()["my_info"] = 42;
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {3}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{3});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {3}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::RUNTIME_KEYS)
|
||||
.enable(FunctionsComparator::NODES);
|
||||
comparator.enable(FunctionsComparator::RUNTIME_KEYS).enable(FunctionsComparator::NODES);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
ASSERT_FALSE(res.valid) << res.message;
|
||||
}
|
||||
@ -376,29 +368,28 @@ TEST(GraphComparatorTests, CheckTensorIteratorPositive) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto X = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{2, 1, 16});
|
||||
auto Y = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 128});
|
||||
auto X = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 1, 16});
|
||||
auto Y = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 128});
|
||||
|
||||
auto Xi = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 16});
|
||||
auto Yi = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 128});
|
||||
auto Xi = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1, 16});
|
||||
auto Yi = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 128});
|
||||
|
||||
// Body
|
||||
auto axis = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0});
|
||||
auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0});
|
||||
auto squeeze = std::make_shared<ov::op::v0::Squeeze>(Xi, axis);
|
||||
|
||||
auto w_val = std::vector<float>(384*16, 0);
|
||||
auto r_val = std::vector<float>(384*128, 0);
|
||||
auto w_val = std::vector<float>(384 * 16, 0);
|
||||
auto r_val = std::vector<float>(384 * 128, 0);
|
||||
auto b_val = std::vector<float>(384, 0);
|
||||
auto W = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{384, 16}, w_val);
|
||||
auto R = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{384, 128}, r_val);
|
||||
auto B = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{384}, b_val);
|
||||
auto W = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{384, 16}, w_val);
|
||||
auto R = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{384, 128}, r_val);
|
||||
auto B = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{384}, b_val);
|
||||
|
||||
auto gru_cell = std::make_shared<ov::op::v3::GRUCell>(squeeze, Yi, W, R, B, 128);
|
||||
auto res_1 = std::make_shared<ov::op::v0::Result>(gru_cell);
|
||||
auto unsqueeze = std::make_shared<ov::op::v0::Unsqueeze>(gru_cell, axis);
|
||||
auto res_2 = std::make_shared<ov::op::v0::Result>(unsqueeze);
|
||||
auto body = std::make_shared<ngraph::Function>(ngraph::OutputVector{res_1, res_2},
|
||||
ngraph::ParameterVector{Xi, Yi});
|
||||
auto body = std::make_shared<ov::Model>(ov::OutputVector{res_1, res_2}, ov::ParameterVector{Xi, Yi});
|
||||
|
||||
auto tensor_iterator = std::make_shared<ov::op::v0::TensorIterator>();
|
||||
tensor_iterator->set_body(body);
|
||||
@ -410,8 +401,7 @@ TEST(GraphComparatorTests, CheckTensorIteratorPositive) {
|
||||
auto out1 = tensor_iterator->get_concatenated_slices(res_2, 0, 1, 1, -1, 0);
|
||||
|
||||
auto res_ti_1 = std::make_shared<ov::op::v0::Result>(tensor_iterator->output(1));
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{res_ti_1},
|
||||
ngraph::ParameterVector{X, Y});
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{res_ti_1}, ov::ParameterVector{X, Y});
|
||||
function = function_ref->clone();
|
||||
}
|
||||
comparator.enable(FunctionsComparator::NODES);
|
||||
@ -433,8 +423,8 @@ std::shared_ptr<ov::Model> make_check_loop_model(bool different_body) {
|
||||
auto M_body = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
|
||||
auto body_condition = std::make_shared<ov::op::v0::Constant>(ov::element::boolean, ov::Shape{1}, true);
|
||||
|
||||
auto trip_count = std::make_shared<ov::op::v0::Constant>(ngraph::element::i64, ov::Shape{1}, 3);
|
||||
auto exec_condition = std::make_shared<ov::op::v0::Constant>(ngraph::element::boolean, ov::Shape{1}, true);
|
||||
auto trip_count = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{1}, 3);
|
||||
auto exec_condition = std::make_shared<ov::op::v0::Constant>(ov::element::boolean, ov::Shape{1}, true);
|
||||
// Body
|
||||
auto sum = std::make_shared<ov::op::v1::Add>(Xi, Yi);
|
||||
std::shared_ptr<ov::Node> Zo;
|
||||
@ -492,8 +482,8 @@ TEST(GraphComparatorTests, CheckSinksPositive) {
|
||||
auto arg = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1});
|
||||
auto init_const = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1, 1}, {0});
|
||||
const std::string variable_name("variable0");
|
||||
auto variable = std::make_shared<ngraph::Variable>(ngraph::VariableInfo{ov::PartialShape::dynamic(),
|
||||
ov::element::dynamic, variable_name});
|
||||
auto variable = std::make_shared<ov::op::util::Variable>(
|
||||
ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, variable_name});
|
||||
|
||||
auto read = std::make_shared<ov::op::v6::ReadValue>(init_const, variable);
|
||||
auto read2 = std::make_shared<ov::op::v6::ReadValue>(init_const, variable);
|
||||
@ -505,7 +495,8 @@ TEST(GraphComparatorTests, CheckSinksPositive) {
|
||||
auto res = std::make_shared<ov::op::v0::Result>(add);
|
||||
auto res2 = std::make_shared<ov::op::v0::Result>(add2);
|
||||
|
||||
function_ref = std::make_shared<ov::Model>(ov::ResultVector({res, res2}), ov::SinkVector({assign, assign2}),
|
||||
function_ref = std::make_shared<ov::Model>(ov::ResultVector({res, res2}),
|
||||
ov::SinkVector({assign, assign2}),
|
||||
ov::ParameterVector({arg}));
|
||||
function = function_ref->clone();
|
||||
}
|
||||
@ -521,8 +512,8 @@ TEST(GraphComparatorTests, CheckSinksNegative) {
|
||||
auto arg = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1});
|
||||
auto init_const = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1, 1}, {0});
|
||||
const std::string variable_name("variable0");
|
||||
auto variable = std::make_shared<ngraph::Variable>(ngraph::VariableInfo{ov::PartialShape::dynamic(),
|
||||
ov::element::dynamic, variable_name});
|
||||
auto variable = std::make_shared<ov::op::util::Variable>(
|
||||
ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, variable_name});
|
||||
|
||||
auto read = std::make_shared<ov::op::v6::ReadValue>(init_const, variable);
|
||||
auto read2 = std::make_shared<ov::op::v6::ReadValue>(init_const, variable);
|
||||
@ -534,7 +525,8 @@ TEST(GraphComparatorTests, CheckSinksNegative) {
|
||||
auto res = std::make_shared<ov::op::v0::Result>(add);
|
||||
auto res2 = std::make_shared<ov::op::v0::Result>(add2);
|
||||
|
||||
function_ref = std::make_shared<ov::Model>(ov::ResultVector({res, res2}), ov::SinkVector({assign, assign2}),
|
||||
function_ref = std::make_shared<ov::Model>(ov::ResultVector({res, res2}),
|
||||
ov::SinkVector({assign, assign2}),
|
||||
ov::ParameterVector({arg}));
|
||||
}
|
||||
|
||||
@ -542,8 +534,8 @@ TEST(GraphComparatorTests, CheckSinksNegative) {
|
||||
auto arg = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1});
|
||||
auto init_const = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1, 1}, {0});
|
||||
const std::string variable_name("variable_different");
|
||||
auto variable = std::make_shared<ngraph::Variable>(ngraph::VariableInfo{ov::PartialShape::dynamic(),
|
||||
ov::element::dynamic, variable_name});
|
||||
auto variable = std::make_shared<ov::op::util::Variable>(
|
||||
ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, variable_name});
|
||||
|
||||
auto read = std::make_shared<ov::op::v6::ReadValue>(init_const, variable);
|
||||
auto read2 = std::make_shared<ov::op::v6::ReadValue>(init_const, variable);
|
||||
@ -555,7 +547,8 @@ TEST(GraphComparatorTests, CheckSinksNegative) {
|
||||
auto res = std::make_shared<ov::op::v0::Result>(add);
|
||||
auto res2 = std::make_shared<ov::op::v0::Result>(add2);
|
||||
|
||||
function = std::make_shared<ov::Model>(ov::ResultVector({res, res2}), ov::SinkVector({assign, assign2}),
|
||||
function = std::make_shared<ov::Model>(ov::ResultVector({res, res2}),
|
||||
ov::SinkVector({assign, assign2}),
|
||||
ov::ParameterVector({arg}));
|
||||
}
|
||||
comparator.enable(FunctionsComparator::NODES);
|
||||
@ -567,10 +560,10 @@ TEST(GraphComparatorTests, DisableCheck) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
function = function_ref->clone();
|
||||
}
|
||||
comparator.enable(FunctionsComparator::NODES);
|
||||
@ -583,16 +576,16 @@ TEST(GraphComparatorTests, CheckAccuracyPositive) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add}, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add}, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::ACCURACY);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
@ -603,16 +596,16 @@ TEST(GraphComparatorTests, CheckAccuracyNegative) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {12});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {12});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add}, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {200});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {200});
|
||||
auto add = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add}, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::ACCURACY);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
@ -623,32 +616,34 @@ TEST(GraphComparatorTests, CheckAccuracyNotEnabled) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{ 1, 3, 12, 12 });
|
||||
auto const_weights = ov::op::v0::Constant::create(ov::element::f16,
|
||||
ov::Shape{ 1, 3, 3, 3 },
|
||||
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9 });
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 12, 12});
|
||||
auto const_weights = ov::op::v0::Constant::create(
|
||||
ov::element::f16,
|
||||
ov::Shape{1, 3, 3, 3},
|
||||
{1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9});
|
||||
auto convert_ins1 = std::make_shared<ov::op::v0::Convert>(const_weights, ov::element::f32);
|
||||
auto conv = std::make_shared<ov::op::v1::Convolution>(input,
|
||||
convert_ins1,
|
||||
ov::Strides{ 1, 1 },
|
||||
ov::CoordinateDiff{ 1, 1 },
|
||||
ov::CoordinateDiff{ 1, 1 },
|
||||
ov::Strides{ 1, 1 });
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ conv }, ngraph::ParameterVector{ input });
|
||||
ov::Strides{1, 1},
|
||||
ov::CoordinateDiff{1, 1},
|
||||
ov::CoordinateDiff{1, 1},
|
||||
ov::Strides{1, 1});
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{conv}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{ 1, 3, 12, 12 });
|
||||
auto const_weights = ov::op::v0::Constant::create(ov::element::f16,
|
||||
ov::Shape{ 1, 3, 3, 3 },
|
||||
{ 1, 9, 3, 4, 5, 6, 7, 8, 9, 1, 12, 3, 9, 5, 0, 7, 8, 9, 1, 2, 12, 4, 9, 6, 7, 8, 9 });
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 12, 12});
|
||||
auto const_weights = ov::op::v0::Constant::create(
|
||||
ov::element::f16,
|
||||
ov::Shape{1, 3, 3, 3},
|
||||
{1, 9, 3, 4, 5, 6, 7, 8, 9, 1, 12, 3, 9, 5, 0, 7, 8, 9, 1, 2, 12, 4, 9, 6, 7, 8, 9});
|
||||
auto convert_ins1 = std::make_shared<ov::op::v0::Convert>(const_weights, ov::element::f32);
|
||||
auto conv = std::make_shared<ov::op::v1::Convolution>(input,
|
||||
convert_ins1,
|
||||
ov::Strides{ 1, 1 },
|
||||
ov::CoordinateDiff{ 1, 1 },
|
||||
ov::CoordinateDiff{ 1, 1 },
|
||||
ov::Strides{ 1, 1 });
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ conv }, ngraph::ParameterVector{ input });
|
||||
ov::Strides{1, 1},
|
||||
ov::CoordinateDiff{1, 1},
|
||||
ov::CoordinateDiff{1, 1},
|
||||
ov::Strides{1, 1});
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{conv}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::NODES);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
@ -659,20 +654,20 @@ TEST(GraphComparatorTests, CheckConsumersCountPositive) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
|
||||
auto add_1 = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
auto add_2 = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
auto mul = std::make_shared<ov::op::v1::Multiply>(add_1, add_2);
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ mul }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{mul}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
|
||||
auto add_1 = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
auto add_2 = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
auto mul = std::make_shared<ov::op::v1::Multiply>(add_1, add_2);
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ mul }, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{mul}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::NODES).enable(FunctionsComparator::CONSUMERS_COUNT);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||
@ -683,21 +678,21 @@ TEST(GraphComparatorTests, CheckConsumersCountNegative) {
|
||||
FunctionsComparator comparator(FunctionsComparator::no_default());
|
||||
std::shared_ptr<ov::Model> function, function_ref;
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
auto constant = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
|
||||
auto add_1 = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
auto add_2 = std::make_shared<ov::op::v1::Add>(input, constant);
|
||||
auto mul = std::make_shared<ov::op::v1::Multiply>(add_1, add_2);
|
||||
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ mul }, ngraph::ParameterVector{ input });
|
||||
function_ref = std::make_shared<ov::Model>(ov::NodeVector{mul}, ov::ParameterVector{input});
|
||||
}
|
||||
{
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ov::Shape{1});
|
||||
auto constant_1 = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
|
||||
auto constant_2 = ov::op::v0::Constant::create(ngraph::element::i64, {1}, {0});
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1});
|
||||
auto constant_1 = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
|
||||
auto constant_2 = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
|
||||
auto add_1 = std::make_shared<ov::op::v1::Add>(input, constant_1);
|
||||
auto add_2 = std::make_shared<ov::op::v1::Add>(input, constant_2);
|
||||
auto mul = std::make_shared<ov::op::v1::Multiply>(add_1, add_2);
|
||||
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ mul }, ngraph::ParameterVector{ input });
|
||||
function = std::make_shared<ov::Model>(ov::NodeVector{mul}, ov::ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::NODES).enable(FunctionsComparator::CONSUMERS_COUNT);
|
||||
auto res = comparator.compare(function, function_ref);
|
||||