[ONNX FE] Fix Windows warnings (#16141)
This commit is contained in: parent a3958d6ddf, commit a004601774
@@ -2,12 +2,6 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
-if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
-    ie_add_compiler_flags(/wd4267)
-    ie_add_compiler_flags(/wd4018)
-    ie_add_compiler_flags(/wd4244)
-endif()
-
 ov_add_frontend(NAME onnx
                 LINKABLE_FRONTEND
                 PROTOBUF_LITE
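The hunk above stops suppressing MSVC's conversion warnings for the ONNX frontend build (C4267: size_t narrowing, C4018: signed/unsigned comparison, C4244: possible loss of data), so the source changes that follow have to be warning-clean on their own. A minimal illustrative sketch, not taken from the commit, of the patterns those warnings flag and the explicit-cast style used throughout the diff:

    // Sketch only: the warning-prone forms are kept in comments.
    #include <cstddef>
    #include <numeric>
    #include <vector>

    void warning_clean_sketch() {
        std::vector<float> data(8);

        // std::iota(data.begin(), data.end(), 0);     // int start value converted to float (C4244)
        std::iota(data.begin(), data.end(), 0.f);      // start value is already a float

        // int n = data.size();                        // size_t narrowed to int (C4267)
        const auto n = static_cast<int>(data.size());  // narrowing made explicit and intentional

        for (int i = 0; i < n; ++i) {                  // int bound, so no signed/unsigned compare (C4018)
            data[static_cast<std::size_t>(i)] += 1.f;
        }
    }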
@@ -328,7 +328,7 @@ std::shared_ptr<Function> Graph::create_function() {
     const auto& onnx_outputs = m_model->get_graph().output();
     for (std::size_t i{0}; i < function->get_output_size(); ++i) {
         const auto& result_node = function->get_output_op(i);
-        const std::string onnx_output_name = onnx_outputs.Get(i).name();
+        const std::string onnx_output_name = onnx_outputs.Get(static_cast<int>(i)).name();
         result_node->set_friendly_name(onnx_output_name + "/sink_port_0");
         const auto& previous_operation = result_node->get_input_node_shared_ptr(0);
         previous_operation->set_friendly_name(onnx_output_name);
@@ -386,7 +386,7 @@ OutputVector Graph::make_ng_nodes(const Node& onnx_node) {
 
     const size_t outputs_size = std::accumulate(std::begin(ng_subgraph_outputs),
                                                 std::end(ng_subgraph_outputs),
-                                                0,
+                                                static_cast<size_t>(0),
                                                 [](const size_t lhs, const Output<ov::Node>& rhs) {
                                                     return lhs + rhs.get_node()->get_output_size();
                                                 });
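The change just above matters because std::accumulate deduces its accumulator type from the initial value: with a literal 0 the fold runs in int and each addition of a size_t is narrowed back, which MSVC reports once the /wd flags are gone. A short sketch (hypothetical container, same idea) of why the size_t-typed initial value keeps the whole fold in size_t:

    #include <cstddef>
    #include <numeric>
    #include <vector>

    std::size_t total_elements(const std::vector<std::vector<int>>& groups) {
        // With plain 0 the accumulator would be int and every step would narrow size_t -> int.
        // A size_t initial value keeps the accumulator type correct end to end.
        return std::accumulate(groups.begin(),
                               groups.end(),
                               static_cast<std::size_t>(0),
                               [](std::size_t acc, const std::vector<int>& g) {
                                   return acc + g.size();
                               });
    }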
@@ -420,10 +420,11 @@ void Graph::set_friendly_names(const Node& onnx_node, const OutputVector& ng_sub
 
     const auto common_node = detail::common_node_for_all_outputs(ng_subgraph_outputs);
 
-    for (size_t i = 0; i < ng_subgraph_outputs.size(); ++i) {
+    const auto ng_subgraph_output_size = static_cast<int>(ng_subgraph_outputs.size());
+    for (int i = 0; i < ng_subgraph_output_size; ++i) {
         // Trailing optional outputs may not be specified in the ONNX model.
         // Other optional outputs should have name set to an empty string.
-        if (i >= onnx_node.get_outputs_size()) {
+        if (i >= static_cast<int>(onnx_node.get_outputs_size())) {
             break;
         }
 
@@ -213,9 +213,10 @@ void graph_topological_sort(GraphProto* graph) {
     std::multimap<std::string, const NodeProto*> output_name_to_node;
     GraphProto result;
 
-    for (int i = 0; i < graph->node().size(); ++i) {
+    const auto nodes_number = static_cast<int>(graph->node().size());
+    for (int i = 0; i < nodes_number; ++i) {
         for (const auto& output_name : graph->node(i).output()) {
-            output_name_to_node.emplace(output_name, graph->mutable_node(static_cast<int>(i)));
+            output_name_to_node.emplace(output_name, graph->mutable_node(i));
         }
     }
     auto get_node_by_out_name = [&output_name_to_node](const std::string& out_name) -> const NodeProto* {
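Both hunks above replace size_t loop counters with an int bound computed once, because the containers being indexed (protobuf repeated fields such as node(i) / mutable_node(i), and the ONNX node's output count) are addressed with int indices in their APIs. A small sketch of the pattern, using a plain std::vector stand-in rather than the real GraphProto:

    #include <cstddef>
    #include <string>
    #include <vector>

    void rename_outputs(std::vector<std::string>& names) {
        // Hoist the casted bound out of the loop; iterate with the index type the callee expects.
        const auto count = static_cast<int>(names.size());
        for (int i = 0; i < count; ++i) {
            names[static_cast<std::size_t>(i)] += "/sink_port_0";  // suffix borrowed from the hunk above
        }
    }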
@@ -68,9 +68,9 @@ OutputVector roi_align(const Node& node) {
     return {std::make_shared<opset9::ROIAlign>(data,
                                                rois,
                                                num_rois,
-                                               pooled_h,
-                                               pooled_w,
-                                               sampling_ratio,
+                                               static_cast<int>(pooled_h),
+                                               static_cast<int>(pooled_w),
+                                               static_cast<int>(sampling_ratio),
                                                spatial_scale,
                                                pooling_mode,
                                                aligned_mode)};
@@ -317,8 +317,8 @@ ov::frontend::Place::Ptr PlaceOp::get_input_port(const std::string& input_name)
 
 std::vector<ov::frontend::Place::Ptr> PlaceOp::get_consuming_ports() const {
     std::vector<ov::frontend::Place::Ptr> consuming_ports;
-    const auto out_ports_number = m_editor->get_output_ports(m_node).size();
-    for (size_t out_idx = 0; out_idx < out_ports_number; ++out_idx) {
+    const auto out_ports_number = static_cast<int>(m_editor->get_output_ports(m_node).size());
+    for (int out_idx = 0; out_idx < out_ports_number; ++out_idx) {
         auto consuming_ops_out = get_output_port(out_idx)->get_consuming_ports();
         consuming_ports.insert(consuming_ports.end(), consuming_ops_out.begin(), consuming_ops_out.end());
     }
@@ -6,13 +6,6 @@ set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE OFF)
 
 ov_try_use_gold_linker()
 
-if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
-    ie_add_compiler_flags(/wd4244)
-    ie_add_compiler_flags(/wd4267)
-    ie_add_compiler_flags(/wd4305)
-    ie_add_compiler_flags(/wd4756)
-endif()
-
 message(STATUS "ONNX frontend test enabled")
 
 add_compile_definitions(
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -56,7 +56,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_const_folding_model_scatter_elements) {
         SERIALIZED_ZOO,
         "onnx/scatter_elements_opset11.onnx"));
 
-    test_constant_folding<float>(fn, {1.0, 1.1, 3.0, 2.1, 5.0}, Shape{1, 5});
+    test_constant_folding<float>(fn, {1.0f, 1.1f, 3.0f, 2.1f, 5.0f}, Shape{1, 5});
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, onnx_const_folding_model_non_zero_scalar) {
@@ -537,7 +537,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_if_branches_with_same_inputs) {
     auto test_case = test::TestCase(function, s_device);
     std::vector<float> x(40, 2);
     std::vector<float> y(40);
-    std::iota(y.begin(), y.end(), -20);
+    std::iota(y.begin(), y.end(), -20.f);
 
     // condition
     test_case.add_input<bool>({true});
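From here on the test changes are mostly mechanical: integer and double literals that feed std::vector<float> or std::iota over float ranges gain an explicit f suffix, so no implicit int-to-float or double-to-float conversion is left for MSVC to warn about (C4244/C4305). A tiny sketch of the two cases:

    #include <numeric>
    #include <vector>

    void float_literals() {
        std::vector<float> y(40);
        // std::iota(y.begin(), y.end(), -20);   // int start value converted to float
        std::iota(y.begin(), y.end(), -20.f);    // start value is a float literal

        // std::vector<float> v{1.0, 1.1};       // double literals truncated to float
        std::vector<float> v{1.0f, 1.1f};        // exact float literals, no conversion
        (void)v;
    }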
@@ -577,7 +577,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_if_branches_with_different_inputs) {
     auto test_case = test::TestCase(function, s_device);
     std::vector<float> x(40, 2);
     std::vector<float> y(40);
-    std::iota(y.begin(), y.end(), -20);
+    std::iota(y.begin(), y.end(), -20.f);
 
     // condition
     test_case.add_input<bool>({true});
@@ -649,7 +649,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_if_inside_if) {
     // expected value == x * y
     std::vector<float> x(40, 2);
     std::vector<float> y(40);
-    std::iota(y.begin(), y.end(), -20);
+    std::iota(y.begin(), y.end(), -20.f);
     std::vector<float> expected;
     std::transform(x.begin(), x.end(), y.begin(), std::back_inserter(expected), [](float i, float j) -> float {
         return i * j;
@@ -663,7 +663,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_if_inside_if) {
     // case when condition == true and all(x < y)
     // expected value == x + y
     std::iota(x.begin(), x.end(), -static_cast<float>(x.size()));
-    std::iota(y.begin(), y.end(), 1);
+    std::iota(y.begin(), y.end(), 1.f);
     std::transform(x.begin(), x.end(), y.begin(), expected.begin(), [](float i, float j) -> float {
         return i + j;
     });
@@ -703,13 +703,13 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_if_branches_with_multiple_outputs) {
 
     // case when condition == true so split is along axis 0
     std::vector<float> x(36);
-    std::iota(x.begin(), x.end(), 0);
+    std::iota(x.begin(), x.end(), 0.f);
     std::vector<float> expected1(12);
-    std::iota(expected1.begin(), expected1.end(), 0);
+    std::iota(expected1.begin(), expected1.end(), 0.f);
     std::vector<float> expected2(12);
-    std::iota(expected2.begin(), expected2.end(), 12);
+    std::iota(expected2.begin(), expected2.end(), 12.f);
     std::vector<float> expected3(12);
-    std::iota(expected3.begin(), expected3.end(), 24);
+    std::iota(expected3.begin(), expected3.end(), 24.f);
     test_case.add_input<bool>({true}); // condition
     test_case.add_input<float>(x);
     test_case.add_expected_output<float>(expected1);
@@ -768,9 +768,10 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_if_with_only_indentity_in_else_branch) {
     auto test_case = test::TestCase(function, s_device);
 
     std::vector<float> x(shape_size(Shape{1, 5, 2, 2}));
-    std::iota(x.begin(), x.end(), 0);
-    std::vector<float> expected{1.333333, 3, 4.666666, 6.333333, 8, 10, 12, 14, 16, 18,
-                                20, 22, 24, 26, 28, 30, 25.33333, 27, 28.666667, 30.33333};
+    std::iota(x.begin(), x.end(), 0.f);
+    std::vector<float> expected{1.333333f, 3.f, 4.666666f, 6.333333f, 8.f, 10.f, 12.f,
+                                14.f, 16.f, 18.f, 20.f, 22.f, 24.f, 26.f,
+                                28.f, 30.f, 25.33333f, 27.f, 28.666667f, 30.33333f};
     test_case.add_input<float>(x);
     test_case.add_expected_output<float>(expected);
     test_case.run();
@@ -820,7 +821,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_if_dynamic_inputs) {
     auto test_case = test::TestCase(function, s_device);
     std::vector<float> x(40, 2);
     std::vector<float> y(40);
-    std::iota(y.begin(), y.end(), -20);
+    std::iota(y.begin(), y.end(), -20.f);
     std::vector<float> expected;
     std::transform(x.begin(), x.end(), y.begin(), std::back_inserter(expected), [](float i, float j) -> float {
         return i + j;
@@ -220,7 +220,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_model_asinh_3_2) {
 
     auto test_case = test::TestCase(function, s_device);
     test_case.add_input<float>(Shape{3, 2}, {-1.5f, 0.0f, 1.5f, -1.5f, 0.0f, 1.5f});
-    test_case.add_expected_output<float>(Shape{3, 2}, {-1.1947632f, 0.0f, 1.1947632f, -1.1947632, 0.0f, 1.1947632f});
+    test_case.add_expected_output<float>(Shape{3, 2}, {-1.1947632f, 0.0f, 1.1947632f, -1.1947632f, 0.0f, 1.1947632f});
 
     test_case.run();
 }
@@ -713,7 +713,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_transpose) {
     const auto elems_in_tensor = shape_size(shape);
 
     std::vector<float> input_values(elems_in_tensor);
-    std::iota(std::begin(input_values), std::end(input_values), 1);
+    std::iota(std::begin(input_values), std::end(input_values), 1.f);
 
     test_case.add_input<float>(shape, input_values);
 
@@ -729,9 +729,12 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_transpose) {
 
 namespace {
 Shape get_flattened_shape(const Shape& in_shape, size_t axis) {
-    size_t first_dim_size =
-        std::accumulate(begin(in_shape), next(begin(in_shape), axis), 1UL, std::multiplies<size_t>());
-    size_t last_dim_size = std::accumulate(next(begin(in_shape), axis), end(in_shape), 1UL, std::multiplies<size_t>());
+    size_t first_dim_size = std::accumulate(begin(in_shape),
+                                            next(begin(in_shape), axis),
+                                            static_cast<size_t>(1),
+                                            std::multiplies<size_t>());
+    size_t last_dim_size =
+        std::accumulate(next(begin(in_shape), axis), end(in_shape), static_cast<size_t>(1), std::multiplies<size_t>());
     return Shape{first_dim_size, last_dim_size};
 }
 } // namespace
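The 1UL → static_cast<size_t>(1) change above is presumably about Windows being LLP64: there unsigned long is 32 bits while size_t is 64 bits, so a 1UL initial value makes std::accumulate fold in unsigned long and mix widths with the size_t operands. Spelling the initial value as size_t keeps the multiply in one type on every platform. Sketch under that assumption:

    #include <cstddef>
    #include <functional>
    #include <numeric>
    #include <vector>

    std::size_t volume(const std::vector<std::size_t>& dims) {
        // 1UL would give a 32-bit accumulator on LLP64 Windows; a size_t init is portable.
        return std::accumulate(dims.begin(),
                               dims.end(),
                               static_cast<std::size_t>(1),
                               std::multiplies<std::size_t>());
    }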
@@ -751,7 +754,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_flatten_axis_0) {
     const auto elems_in_tensor = shape_size(shape);
 
     std::vector<float> input_values(elems_in_tensor);
-    std::iota(input_values.begin(), input_values.end(), 1);
+    std::iota(input_values.begin(), input_values.end(), 1.f);
 
     test_case.add_input<float>(shape, input_values);
 
@@ -778,7 +781,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_flatten_axis) {
     const auto elems_in_tensor = shape_size(shape);
 
     std::vector<float> input_values(elems_in_tensor);
-    std::iota(input_values.begin(), input_values.end(), 1);
+    std::iota(input_values.begin(), input_values.end(), 1.f);
 
     test_case.add_input<float>(shape, input_values);
 
@@ -805,7 +808,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_flatten_neg_axis) {
     const auto elems_in_tensor = shape_size(shape);
 
     std::vector<float> input_values(elems_in_tensor);
-    std::iota(input_values.begin(), input_values.end(), 1);
+    std::iota(input_values.begin(), input_values.end(), 1.f);
 
     test_case.add_input<float>(shape, input_values);
 
@@ -907,7 +910,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_10_3d_input) {
 
     const Shape input_shape{3, 4, 1};
     std::vector<float> input_values(shape_size(input_shape));
-    std::iota(input_values.begin(), input_values.end(), 0);
+    std::iota(input_values.begin(), input_values.end(), 0.f);
     test_case.add_input<float>(input_values);
     test_case.add_input<int64_t>({0, 0});
     test_case.add_input<int64_t>({2, 3});
@@ -926,7 +929,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_10_3d_input_neg_axes) {
 
     const Shape input_shape{3, 4, 1};
     std::vector<float> input_values(shape_size(input_shape));
-    std::iota(input_values.begin(), input_values.end(), 0);
+    std::iota(input_values.begin(), input_values.end(), 0.f);
     test_case.add_input<float>(input_values);
     test_case.add_input<int64_t>({0, 0});
     test_case.add_input<int64_t>({2, 3});
@@ -945,7 +948,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_10_3d_input_12_axes) {
 
     const Shape input_shape{4, 3, 2};
     std::vector<float> input_values(shape_size(input_shape));
-    std::iota(input_values.begin(), input_values.end(), 0);
+    std::iota(input_values.begin(), input_values.end(), 0.f);
     test_case.add_input<float>(input_values);
     test_case.add_input<int64_t>({0, 0});
     test_case.add_input<int64_t>({2, 1});
@@ -963,7 +966,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_10_3d_input_20_axes) {
 
     const Shape input_shape{4, 3, 2};
     std::vector<float> input_values(shape_size(input_shape));
-    std::iota(input_values.begin(), input_values.end(), 0);
+    std::iota(input_values.begin(), input_values.end(), 0.f);
     test_case.add_input<float>(input_shape, input_values);
     test_case.add_input<int64_t>({0, 1});
     test_case.add_input<int64_t>({1, 3});
@@ -982,7 +985,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_10_4d_input_23_axes) {
 
     const Shape input_shape{2, 2, 2, 2};
     std::vector<float> input_values(shape_size(input_shape));
-    std::iota(input_values.begin(), input_values.end(), 0);
+    std::iota(input_values.begin(), input_values.end(), 0.f);
     test_case.add_input<float>(input_values);
     test_case.add_input<int64_t>({0, 0});
     test_case.add_input<int64_t>({1, 1});
@@ -1000,7 +1003,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_10_4d_input_0231_axes_ends_ma
 
     const Shape input_shape{2, 2, 2, 2};
     std::vector<float> input_values(shape_size(input_shape));
-    std::iota(input_values.begin(), input_values.end(), 0);
+    std::iota(input_values.begin(), input_values.end(), 0.f);
     test_case.add_input<float>(input_values);
     test_case.add_input<int64_t>({0, 1, 1, 0});
     test_case.add_input<int64_t>({std::numeric_limits<int64_t>::max(),
@@ -1021,7 +1024,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_10_4d_input_2103_axes_ends_ma
 
     const Shape input_shape{2, 2, 2, 5};
     std::vector<float> input_values(shape_size(input_shape));
-    std::iota(input_values.begin(), input_values.end(), 0);
+    std::iota(input_values.begin(), input_values.end(), 0.f);
     test_case.add_input<float>(input_values);
     test_case.add_input<int64_t>({1, 0, 0, 1});
     test_case.add_input<int64_t>({2,
@@ -1043,7 +1046,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_10_4d_input_23_axes_21_steps)
 
     const Shape input_shape{2, 2, 6, 2};
     std::vector<float> input_values(shape_size(input_shape));
-    std::iota(input_values.begin(), input_values.end(), 0);
+    std::iota(input_values.begin(), input_values.end(), 0.f);
     test_case.add_input<float>(input_values);
     test_case.add_input<int64_t>({0, 1});
     test_case.add_input<int64_t>({5, 2});
@@ -1060,7 +1063,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_10_default_axes) {
 
     const Shape input_shape{4, 3, 2};
     std::vector<float> input_values(shape_size(input_shape));
-    std::iota(input_values.begin(), input_values.end(), 0);
+    std::iota(input_values.begin(), input_values.end(), 0.f);
     test_case.add_input<float>(input_values);
     test_case.add_input<int64_t>({1, 1, 1});
     test_case.add_input<int64_t>({2, 2, 2});
@@ -1116,34 +1119,34 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_model_softmax_axis_2) {
         file_util::path_join(CommonTestUtils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/softmax_axis_2.onnx"));
 
     const std::vector<float> input = {
-        2.75793882, -0.50841322, 0.82013929, -0.62409912, -0.96136118, 0.21004745, 1.38337255,
-        1.19030397, 2.0940445, -0.03551657, -0.78686039, 1.992782, 0.04300319, -0.29230777,
-        -0.56797112, -1.26732165, -0.61935399, 0.57670432, 0.92844898, 2.82469233,
+        2.75793882f, -0.50841322f, 0.82013929f, -0.62409912f, -0.96136118f, 0.21004745f, 1.38337255f,
+        1.19030397f, 2.0940445f, -0.03551657f, -0.78686039f, 1.992782f, 0.04300319f, -0.29230777f,
+        -0.56797112f, -1.26732165f, -0.61935399f, 0.57670432f, 0.92844898f, 2.82469233f,
 
-        0.98721677, -0.05100663, -1.21178917, -0.17530157, 1.40051805, -0.13259761, -1.14313018,
-        0.2673723, -0.87996154, 1.29053106, 1.55, 0.8396538, 1.20729817, 0.23727845,
-        -0.89113606, -1.70909842, 0.26460363, -0.70566808, 2.383518, 1.07024615,
+        0.98721677f, -0.05100663f, -1.21178917f, -0.17530157f, 1.40051805f, -0.13259761f, -1.14313018f,
+        0.2673723f, -0.87996154f, 1.29053106f, 1.55f, 0.8396538f, 1.20729817f, 0.23727845f,
+        -0.89113606f, -1.70909842f, 0.26460363f, -0.70566808f, 2.383518f, 1.07024615f,
 
-        -1.21722605, 0.82919357, 0.55765697, 0.12657686, 0.63432172, 0.75425957, -2.43721014,
-        -1.24478184, 2.65316853, 1.19509542, -0.95523998, 0.5149006, -0.01151649, 0.68327026,
-        -0.4589638, -0.46554745, 0.21055324, 0.39266729, 2.05098086, 1.83207919};
+        -1.21722605f, 0.82919357f, 0.55765697f, 0.12657686f, 0.63432172f, 0.75425957f, -2.43721014f,
+        -1.24478184f, 2.65316853f, 1.19509542f, -0.95523998f, 0.5149006f, -0.01151649f, 0.68327026f,
+        -0.4589638f, -0.46554745f, 0.21055324f, 0.39266729f, 2.05098086f, 1.83207919f};
 
     auto test_case = test::TestCase(function, s_device);
     test_case.add_input<float>(input);
 
     test_case.add_expected_output<float>(
         Shape{3, 4, 5},
-        {0.80619486, 0.03075257, 0.1161086, 0.027393, 0.01955098, 0.07012682, 0.22670066,
-         0.18689779, 0.4614171, 0.05485763, 0.04486172, 0.72286838, 0.10286818, 0.07356265,
-         0.05583908, 0.01280724, 0.02448298, 0.08096658, 0.11509768, 0.76664552,
+        {0.80619486f, 0.03075257f, 0.1161086f, 0.027393f, 0.01955098f, 0.07012682f, 0.22670066f,
+         0.18689779f, 0.4614171f, 0.05485763f, 0.04486172f, 0.72286838f, 0.10286818f, 0.07356265f,
+         0.05583908f, 0.01280724f, 0.02448298f, 0.08096658f, 0.11509768f, 0.76664552f,
 
-         0.30399806, 0.1076406, 0.03371745, 0.0950595, 0.4595844, 0.13369873, 0.04866969,
-         0.19944906, 0.06332151, 0.55486101, 0.39101105, 0.19217177, 0.27755913, 0.10521588,
-         0.03404216, 0.01150354, 0.08279411, 0.03137732, 0.68902071, 0.18530432,
+         0.30399806f, 0.1076406f, 0.03371745f, 0.0950595f, 0.4595844f, 0.13369873f, 0.04866969f,
+         0.19944906f, 0.06332151f, 0.55486101f, 0.39101105f, 0.19217177f, 0.27755913f, 0.10521588f,
+         0.03404216f, 0.01150354f, 0.08279411f, 0.03137732f, 0.68902071f, 0.18530432f,
 
-         0.0402528, 0.31156222, 0.23747503, 0.1543129, 0.25639705, 0.10627912, 0.00436928,
-         0.01439711, 0.70979614, 0.16515835, 0.06798343, 0.2957175, 0.17468555, 0.34994439,
-         0.11166912, 0.03615172, 0.07108136, 0.08527994, 0.44775794, 0.35972905});
+         0.0402528f, 0.31156222f, 0.23747503f, 0.1543129f, 0.25639705f, 0.10627912f, 0.00436928f,
+         0.01439711f, 0.70979614f, 0.16515835f, 0.06798343f, 0.2957175f, 0.17468555f, 0.34994439f,
+         0.11166912f, 0.03615172f, 0.07108136f, 0.08527994f, 0.44775794f, 0.35972905f});
 
     test_case.run(3);
 }
@@ -1184,7 +1187,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_instance_normalization_dyn_shape) {
 
     Shape data_shape{1, 2, 3, 4};
     std::vector<float> data(shape_size(data_shape));
-    std::iota(std::begin(data), std::end(data), 1);
+    std::iota(std::begin(data), std::end(data), 1.f);
 
     auto test_case = test::TestCase(function, s_device);
 
@@ -1207,7 +1210,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_instance_normalization_dyn_shape2) {
 
     Shape data_shape{1, 2, 3, 4};
     std::vector<float> data(shape_size(data_shape));
-    std::iota(std::begin(data), std::end(data), 1);
+    std::iota(std::begin(data), std::end(data), 1.f);
 
     auto test_case = test::TestCase(function, s_device);
 
@@ -1275,7 +1278,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_1_3d_input_21_axes_ends_max)
 
     const Shape input_shape{1, 2, 3, 4};
     std::vector<float> input_values(shape_size(input_shape));
-    std::iota(input_values.begin(), input_values.end(), 0);
+    std::iota(input_values.begin(), input_values.end(), 0.f);
     test_case.add_input<float>(input_shape, input_values);
     test_case.add_expected_output<float>(Shape{1, 1, 3, 3}, {13, 14, 15, 17, 18, 19, 21, 22, 23});
     test_case.run();
@@ -1315,7 +1318,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_max_pool_dyn_rank_without_default_attrs)
 
     Shape input_shape{1, 1, 4, 4};
     std::vector<float> input(shape_size(input_shape));
-    std::iota(input.begin(), input.end(), 0);
+    std::iota(input.begin(), input.end(), 0.f);
     test_case.add_input<float>(input_shape, input);
     test_case.add_expected_output<float>(Shape{1, 1, 3, 3}, {5, 6, 7, 9, 10, 11, 13, 14, 15});
     test_case.run();
@@ -1327,7 +1330,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_depth_to_space_dynamic_input) {
         "onnx/dynamic_shapes/depth_to_space.onnx"));
 
     std::vector<float> input(32);
-    std::iota(input.begin(), input.end(), 0);
+    std::iota(input.begin(), input.end(), 0.f);
 
     std::vector<float> expected_output{0.f, 8.f, 1.f, 9.f, 16.f, 24.f, 17.f, 25.f, 2.f, 10.f, 3.f,
                                        11.f, 18.f, 26.f, 19.f, 27.f, 4.f, 12.f, 5.f, 13.f, 20.f, 28.f,
@@ -1345,7 +1348,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_space_to_depth_dynamic_input) {
         "onnx/dynamic_shapes/space_to_depth.onnx"));
 
     std::vector<float> input(32);
-    std::iota(input.begin(), input.end(), 0);
+    std::iota(input.begin(), input.end(), 0.f);
 
     std::vector<float> expected_output{
         0.f, 2.f, 8.f, 10.f, 16.f, 18.f, 24.f, 26.f, 1.f, 3.f, 9.f, 11.f, 17.f, 19.f, 25.f, 27.f,
@@ -57,14 +57,14 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_prior_box) {
     std::vector<float> A(3 * 2 * 2);
     std::vector<float> B(3 * 6 * 6);
     std::vector<float> output = {
-        -2.3200002, -2.3200002, 3.6533334, 3.6533334, -3.7053659, -3.7053659, 5.0386992, 5.0386992,
-        -0.98666668, -2.3200002, 4.9866667, 3.6533334, -2.3720326, -3.7053659, 6.3720322, 5.0386992,
-        -2.3200002, -0.98666668, 3.6533334, 4.9866667, -3.7053659, -2.3720326, 5.0386992, 6.3720322,
-        -0.98666668, -0.98666668, 4.9866667, 4.9866667, -2.3720326, -2.3720326, 6.3720322, 6.3720322,
-        0.1, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.2,
-        0.1, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.2,
-        0.1, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.2,
-        0.1, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.2,
+        -2.3200002f, -2.3200002f, 3.6533334f, 3.6533334f, -3.7053659f, -3.7053659f, 5.0386992f, 5.0386992f,
+        -0.98666668f, -2.3200002f, 4.9866667f, 3.6533334f, -2.3720326f, -3.7053659f, 6.3720322f, 5.0386992f,
+        -2.3200002f, -0.98666668f, 3.6533334f, 4.9866667f, -3.7053659f, -2.3720326f, 5.0386992f, 6.3720322f,
+        -0.98666668f, -0.98666668f, 4.9866667f, 4.9866667f, -2.3720326f, -2.3720326f, 6.3720322f, 6.3720322f,
+        0.1f, 0.1f, 0.2f, 0.2f, 0.1f, 0.1f, 0.2f, 0.2f,
+        0.1f, 0.1f, 0.2f, 0.2f, 0.1f, 0.1f, 0.2f, 0.2f,
+        0.1f, 0.1f, 0.2f, 0.2f, 0.1f, 0.1f, 0.2f, 0.2f,
+        0.1f, 0.1f, 0.2f, 0.2f, 0.1f, 0.1f, 0.2f, 0.2f,
     };
     test_case.add_input<float>(A);
     test_case.add_input<float>(B);
@@ -78,11 +78,11 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_priorbox_clustered) {
         "onnx/priorbox_clustered.onnx"));
 
     auto test_case = test::TestCase(function, s_device);
-    std::vector<float> A{15.0};
-    std::vector<float> B{10.0};
+    std::vector<float> A{15.0f};
+    std::vector<float> B{10.0f};
     std::vector<float> output = {
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        0.1, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.2,
+        1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
+        0.1f, 0.1f, 0.2f, 0.2f, 0.1f, 0.1f, 0.2f, 0.2f, 0.1f, 0.1f, 0.2f, 0.2f, 0.1f, 0.1f, 0.2f, 0.2f,
     };
     test_case.add_input<float>(A);
     test_case.add_input<float>(B);
@@ -101,22 +101,22 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_priorbox_clustered_most_attrs_default) {
     std::iota(std::begin(A), std::end(A), 0.0f);
     std::vector<float> B(1 * 1 * 3 * 3);
     std::iota(std::begin(B), std::end(B), 0.0f);
-    std::vector<float> output = {-0.1666666716337203979,
-                                 -0.1666666716337203979,
-                                 0.1666666716337203979,
-                                 0.1666666716337203979,
-                                 -0.1666666716337203979,
-                                 0.3333333432674407959,
-                                 0.1666666716337203979,
-                                 0.6666666865348815918,
-                                 0.1,
-                                 0.1,
-                                 0.2,
-                                 0.2,
-                                 0.1,
-                                 0.1,
-                                 0.2,
-                                 0.2};
+    std::vector<float> output = {-0.1666666716337203979f,
+                                 -0.1666666716337203979f,
+                                 0.1666666716337203979f,
+                                 0.1666666716337203979f,
+                                 -0.1666666716337203979f,
+                                 0.3333333432674407959f,
+                                 0.1666666716337203979f,
+                                 0.6666666865348815918f,
+                                 0.1f,
+                                 0.1f,
+                                 0.2f,
+                                 0.2f,
+                                 0.1f,
+                                 0.1f,
+                                 0.2f,
+                                 0.2f};
     test_case.add_input<float>(A);
     test_case.add_input<float>(B);
     test_case.add_expected_output<float>(Shape{1, 2, 8}, output);
@@ -170,11 +170,11 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_detection_output) {
     std::vector<float> logits = gen_vector(12, -2, 2);
     std::vector<float> class_preds = gen_vector(9, 0, 1);
    std::vector<float> proposals = gen_vector(12 * 2, 0, 1);
-    std::vector<float> output = {0, 1, 0.777778, 0.279849, 0.283779, 0.562743, 0.695387,
-                                 0, 1, 0.444444, 0.12963, 0.176075, 0.212963, 0.284573,
-                                 0, 2, 0.888889, 0.279849, 0.283779, 0.562743, 0.695387,
-                                 0, 2, 0.555556, 0.12963, 0.176075, 0.212963, 0.284573,
-                                 0, 2, 0.222222, -0.0608094, -0.0142007, -0.0225239, 0.0304044};
+    std::vector<float> output = {0, 1, 0.777778f, 0.279849f, 0.283779f, 0.562743f, 0.695387f,
+                                 0, 1, 0.444444f, 0.12963f, 0.176075f, 0.212963f, 0.284573f,
+                                 0, 2, 0.888889f, 0.279849f, 0.283779f, 0.562743f, 0.695387f,
+                                 0, 2, 0.555556f, 0.12963f, 0.176075f, 0.212963f, 0.284573f,
+                                 0, 2, 0.222222f, -0.0608094f, -0.0142007f, -0.0225239f, 0.0304044f};
     test_case.add_input<float>(logits);
     test_case.add_input<float>(class_preds);
     test_case.add_input<float>(proposals);
@@ -188,18 +188,18 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_group_norm) {
         file_util::path_join(CommonTestUtils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/group_norm.onnx"));
     auto test_case = test::TestCase(function, s_device);
     Shape shape{2, 8, 2, 2};
-    int size = shape_size(shape);
+    const auto size = shape_size(shape);
     std::vector<float> data(size);
-    std::iota(data.begin(), data.end(), 0);
+    std::iota(data.begin(), data.end(), 0.f);
     std::vector<float> output = {
-        -0.52752507, -0.09108937, 0.3453464, 0.78178215, 2.4364357, 3.309307, 4.1821785, 5.05505,
-        -1.5825753, -0.27326822, 1.0360391, 2.3453465, 4.8728714, 6.618614, 8.364357, 10.1101,
-        -2.6376252, -0.45544672, 1.726732, 3.9089108, 7.309307, 9.927921, 12.546536, 15.165151,
-        -3.6926756, -0.6376257, 2.4174247, 5.472475, 9.745743, 13.237228, 16.728714, 20.2202,
-        -0.52752507, -0.09108937, 0.3453464, 0.78178215, 2.4364357, 3.309307, 4.1821785, 5.05505,
-        -1.5825753, -0.27326822, 1.0360391, 2.3453465, 4.8728714, 6.618614, 8.364357, 10.1101,
-        -2.6376252, -0.45544672, 1.726732, 3.9089108, 7.309307, 9.927921, 12.546536, 15.165151,
-        -3.6926756, -0.6376257, 2.4174247, 5.472475, 9.745743, 13.237228, 16.728714, 20.2202,
+        -0.52752507f, -0.09108937f, 0.3453464f, 0.78178215f, 2.4364357f, 3.309307f, 4.1821785f, 5.05505f,
+        -1.5825753f, -0.27326822f, 1.0360391f, 2.3453465f, 4.8728714f, 6.618614f, 8.364357f, 10.1101f,
+        -2.6376252f, -0.45544672f, 1.726732f, 3.9089108f, 7.309307f, 9.927921f, 12.546536f, 15.165151f,
+        -3.6926756f, -0.6376257f, 2.4174247f, 5.472475f, 9.745743f, 13.237228f, 16.728714f, 20.2202f,
+        -0.52752507f, -0.09108937f, 0.3453464f, 0.78178215f, 2.4364357f, 3.309307f, 4.1821785f, 5.05505f,
+        -1.5825753f, -0.27326822f, 1.0360391f, 2.3453465f, 4.8728714f, 6.618614f, 8.364357f, 10.1101f,
+        -2.6376252f, -0.45544672f, 1.726732f, 3.9089108f, 7.309307f, 9.927921f, 12.546536f, 15.165151f,
+        -3.6926756f, -0.6376257f, 2.4174247f, 5.472475f, 9.745743f, 13.237228f, 16.728714f, 20.2202f,
     };
 
     test_case.add_input<float>(data);
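In the group-norm tests above the element count returned by shape_size is no longer squeezed into int; const auto keeps the unsigned type that std::vector's size constructor expects. A hypothetical stand-alone version of that pattern (shape_size_like is not the real helper, just a stand-in returning size_t):

    #include <cstddef>
    #include <numeric>
    #include <vector>

    std::size_t shape_size_like(const std::vector<std::size_t>& shape) {
        return std::accumulate(shape.begin(), shape.end(), static_cast<std::size_t>(1),
                               [](std::size_t a, std::size_t b) { return a * b; });
    }

    void make_test_buffer(const std::vector<std::size_t>& shape) {
        // int size = shape_size_like(shape);       // size_t -> int narrowing (C4267)
        const auto size = shape_size_like(shape);   // deduced as size_t
        std::vector<float> data(size);
        std::iota(data.begin(), data.end(), 0.f);   // float start value, as in the hunk above
    }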
@@ -212,15 +212,16 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_group_norm_5d) {
         file_util::path_join(CommonTestUtils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/group_norm_5d.onnx"));
     auto test_case = test::TestCase(function, s_device);
     Shape shape{2, 8, 1, 2, 1};
-    int size = shape_size(shape);
+    const auto size = shape_size(shape);
     std::vector<float> data(size);
-    std::iota(data.begin(), data.end(), 0);
-    std::vector<float> output = {
-        -0.34163546562, 0.55278813838, 2.89442372322, 4.68327093124, -1.02490639686, 1.65836453437, 5.78884744644,
-        9.36654186248, -1.70817732810, 2.76394081115, 8.68327140808, 14.04981231689, -2.39144825935, 3.86951708793,
-        11.57769489288, 18.73308372497, -0.34163546562, 0.55278813838, 2.89442372322, 4.68327093124, -1.02490639686,
-        1.65836453437, 5.78884744644, 9.36654186248, -1.70817732810, 2.76394081115, 8.68327140808, 14.04981231689,
-        -2.39144825935, 3.86951708793, 11.57769489288, 18.73308372497};
+    std::iota(data.begin(), data.end(), 0.f);
+    std::vector<float> output = {-0.34163546562f, 0.55278813838f, 2.89442372322f, 4.68327093124f, -1.02490639686f,
+                                 1.65836453437f, 5.78884744644f, 9.36654186248f, -1.70817732810f, 2.76394081115f,
+                                 8.68327140808f, 14.04981231689f, -2.39144825935f, 3.86951708793f, 11.57769489288f,
+                                 18.73308372497f, -0.34163546562f, 0.55278813838f, 2.89442372322f, 4.68327093124f,
+                                 -1.02490639686f, 1.65836453437f, 5.78884744644f, 9.36654186248f, -1.70817732810f,
+                                 2.76394081115f, 8.68327140808f, 14.04981231689f, -2.39144825935f, 3.86951708793f,
+                                 11.57769489288f, 18.73308372497f};
 
     test_case.add_input<float>(data);
     test_case.add_expected_output<float>(shape, output);
@@ -232,20 +233,20 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_normalize) {
         file_util::path_join(CommonTestUtils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/normalize.onnx"));
     auto test_case = test::TestCase(function, s_device);
     std::vector<float> data(12);
-    std::iota(data.begin(), data.end(), 1);
+    std::iota(data.begin(), data.end(), 1.f);
     std::vector<float> output = {
-        0.19334731,
-        0.33806169,
-        0.44846106,
-        0.53452247,
-        1.4501048,
-        1.5212777,
-        1.5696137,
-        1.6035674,
-        3.4802516,
-        3.3806169,
-        3.2887144,
-        3.2071347,
+        0.19334731f,
+        0.33806169f,
+        0.44846106f,
+        0.53452247f,
+        1.4501048f,
+        1.5212777f,
+        1.5696137f,
+        1.6035674f,
+        3.4802516f,
+        3.3806169f,
+        3.2887144f,
+        3.2071347f,
     };
     test_case.add_input<float>(data);
     test_case.add_expected_output<float>(Shape{1, 3, 2, 2}, output);
@@ -260,7 +261,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_swish_with_beta) {
     auto test_case = test::TestCase(function, s_device);
     std::vector<float> input_data{-0.5f, 0, 0.5f};
     test_case.add_input<float>(input_data);
-    test_case.add_expected_output<float>(expected_output_shape, {-0.2036667, 0.0, 0.2963333});
+    test_case.add_expected_output<float>(expected_output_shape, {-0.2036667f, 0.0f, 0.2963333f});
 
     test_case.run_with_tolerance_as_fp(2.0e-5f);
 }
@@ -274,7 +275,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_swish_without_beta) {
     auto test_case = test::TestCase(function, s_device);
     std::vector<float> input_data{-0.5f, 0, 0.5f};
     test_case.add_input<float>(input_data);
-    test_case.add_expected_output<float>(expected_output_shape, {-0.18877034, 0.0, 0.31122968});
+    test_case.add_expected_output<float>(expected_output_shape, {-0.18877034f, 0.0f, 0.31122968f});
 
     test_case.run_with_tolerance_as_fp(2.0e-5f);
 }
@@ -313,9 +314,9 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_experimental_detectron_detection_output)
     test_case.add_expected_output<float>(Shape{5, 4},
                                          {
                                              0.8929862f,
-                                             0.892986297607421875,
-                                             12.10701370239257812,
-                                             12.10701370239257812,
+                                             0.892986297607421875f,
+                                             12.10701370239257812f,
+                                             12.10701370239257812f,
                                              0,
                                              0.0f,
                                              0.0f,
@@ -440,18 +441,18 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_experimental_detectron_group_norm) {
 
     auto test_case = test::TestCase(function, s_device);
     Shape shape{2, 8, 2, 2};
-    int size = shape_size(shape);
+    const auto size = shape_size(shape);
     std::vector<float> data(size);
-    std::iota(data.begin(), data.end(), 0);
+    std::iota(data.begin(), data.end(), 0.f);
     std::vector<float> output = {
-        -0.52752507, -0.09108937, 0.3453464, 0.78178215, 2.4364357, 3.309307, 4.1821785, 5.05505,
-        -1.5825753, -0.27326822, 1.0360391, 2.3453465, 4.8728714, 6.618614, 8.364357, 10.1101,
-        -2.6376252, -0.45544672, 1.726732, 3.9089108, 7.309307, 9.927921, 12.546536, 15.165151,
-        -3.6926756, -0.6376257, 2.4174247, 5.472475, 9.745743, 13.237228, 16.728714, 20.2202,
-        -0.52752507, -0.09108937, 0.3453464, 0.78178215, 2.4364357, 3.309307, 4.1821785, 5.05505,
-        -1.5825753, -0.27326822, 1.0360391, 2.3453465, 4.8728714, 6.618614, 8.364357, 10.1101,
-        -2.6376252, -0.45544672, 1.726732, 3.9089108, 7.309307, 9.927921, 12.546536, 15.165151,
-        -3.6926756, -0.6376257, 2.4174247, 5.472475, 9.745743, 13.237228, 16.728714, 20.2202,
+        -0.52752507f, -0.09108937f, 0.3453464f, 0.78178215f, 2.4364357f, 3.309307f, 4.1821785f, 5.05505f,
+        -1.5825753f, -0.27326822f, 1.0360391f, 2.3453465f, 4.8728714f, 6.618614f, 8.364357f, 10.1101f,
+        -2.6376252f, -0.45544672f, 1.726732f, 3.9089108f, 7.309307f, 9.927921f, 12.546536f, 15.165151f,
+        -3.6926756f, -0.6376257f, 2.4174247f, 5.472475f, 9.745743f, 13.237228f, 16.728714f, 20.2202f,
+        -0.52752507f, -0.09108937f, 0.3453464f, 0.78178215f, 2.4364357f, 3.309307f, 4.1821785f, 5.05505f,
+        -1.5825753f, -0.27326822f, 1.0360391f, 2.3453465f, 4.8728714f, 6.618614f, 8.364357f, 10.1101f,
+        -2.6376252f, -0.45544672f, 1.726732f, 3.9089108f, 7.309307f, 9.927921f, 12.546536f, 15.165151f,
+        -3.6926756f, -0.6376257f, 2.4174247f, 5.472475f, 9.745743f, 13.237228f, 16.728714f, 20.2202f,
     };
 
     test_case.add_input<float>(data);
@@ -468,13 +469,13 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_experimental_detectron_prior_grid_genera
     auto test_case = test::TestCase(function, s_device);
 
     std::vector<float> priors(shape_size(Shape{3, 4}));
-    std::iota(priors.begin(), priors.end(), 0);
+    std::iota(priors.begin(), priors.end(), 0.f);
 
     std::vector<float> feature_map(shape_size(Shape{1, 1, 1, 3}));
-    std::iota(feature_map.begin(), feature_map.end(), 0);
+    std::iota(feature_map.begin(), feature_map.end(), 0.f);
 
     std::vector<float> im_data(shape_size(Shape{1, 3, 4, 7}));
-    std::iota(im_data.begin(), im_data.end(), 0);
+    std::iota(im_data.begin(), im_data.end(), 0.f);
 
     test_case.add_input<float>(priors);
     test_case.add_input<float>(feature_map);
@@ -495,51 +496,51 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_experimental_detectron_roi_feature_extra
     auto test_case = test::TestCase(function, s_device);
 
     std::vector<float> rois(shape_size(Shape{2, 4}));
-    std::iota(rois.begin(), rois.end(), 0);
+    std::iota(rois.begin(), rois.end(), 0.f);
 
     std::vector<float> pyramid_layer_0(shape_size(Shape{1, 2, 2, 3}));
-    std::iota(pyramid_layer_0.begin(), pyramid_layer_0.end(), 0);
+    std::iota(pyramid_layer_0.begin(), pyramid_layer_0.end(), 0.f);
 
     test_case.add_input<float>(rois);
     test_case.add_input<float>(pyramid_layer_0);
 
     test_case.add_expected_output<float>(Shape{2, 2, 3, 3},
-                                         {1.416666746139526367,
-                                          1.750000119209289551,
-                                          2.083333492279052734,
-                                          2.416666746139526367,
-                                          2.75,
-                                          3.083333492279052734,
-                                          3.166666507720947266,
-                                          3.5,
-                                          3.833333492279052734,
-                                          7.416666507720947266,
-                                          7.75,
-                                          8.083333015441894531,
-                                          8.416666984558105469,
-                                          8.75,
-                                          9.083333969116210938,
-                                          9.166666030883789062,
-                                          9.5,
-                                          9.833333969116210938,
-                                          4.166666984558105469,
-                                          4.5,
-                                          4.833333492279052734,
-                                          4.166666984558105469,
-                                          4.5,
-                                          4.833333492279052734,
-                                          2.083333492279052734,
-                                          2.25,
-                                          2.416666746139526367,
-                                          10.16666603088378906,
-                                          10.5,
-                                          10.83333206176757812,
-                                          10.16666603088378906,
-                                          10.5,
-                                          10.83333206176757812,
-                                          5.083333015441894531,
-                                          5.25,
-                                          5.416666507720947266});
+                                         {1.416666746139526367f,
+                                          1.750000119209289551f,
+                                          2.083333492279052734f,
+                                          2.416666746139526367f,
+                                          2.75f,
+                                          3.083333492279052734f,
+                                          3.166666507720947266f,
+                                          3.5f,
+                                          3.833333492279052734f,
+                                          7.416666507720947266f,
+                                          7.75f,
+                                          8.083333015441894531f,
+                                          8.416666984558105469f,
+                                          8.75f,
+                                          9.083333969116210938f,
+                                          9.166666030883789062f,
+                                          9.5f,
+                                          9.833333969116210938f,
+                                          4.166666984558105469f,
+                                          4.5f,
+                                          4.833333492279052734f,
+                                          4.166666984558105469f,
+                                          4.5f,
+                                          4.833333492279052734f,
+                                          2.083333492279052734f,
+                                          2.25f,
+                                          2.416666746139526367f,
+                                          10.16666603088378906f,
+                                          10.5f,
+                                          10.83333206176757812f,
+                                          10.16666603088378906f,
+                                          10.5f,
+                                          10.83333206176757812f,
+                                          5.083333015441894531f,
+                                          5.25f,
+                                          5.416666507720947266f});
 
     test_case.add_expected_output<float>(Shape{2, 4}, {0, 1, 2, 3, 4, 5, 6, 7});
     test_case.run();
@ -593,29 +594,32 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_generate_proposals) {
|
|||||||
// scores
|
// scores
|
||||||
test_case.add_input<float>(
|
test_case.add_input<float>(
|
||||||
Shape{1, 3, 2, 6},
|
Shape{1, 3, 2, 6},
|
||||||
{0.56637216, 0.90457034, 0.69827306, 0.4353543, 0.47985056, 0.42658508, 0.14516132, 0.08081771, 0.1799732,
|
{0.56637216f, 0.90457034f, 0.69827306f, 0.4353543f, 0.47985056f, 0.42658508f, 0.14516132f, 0.08081771f,
|
||||||
0.9229515, 0.42420176, 0.50857586, 0.82664067, 0.4972319, 0.3752427, 0.56731623, 0.18241242, 0.33252355,
|
0.1799732f, 0.9229515f, 0.42420176f, 0.50857586f, 0.82664067f, 0.4972319f, 0.3752427f, 0.56731623f,
|
||||||
0.30608943, 0.6572437, 0.69185436, 0.88646156, 0.36985755, 0.5590753, 0.5256446, 0.03342898, 0.1344396,
|
0.18241242f, 0.33252355f, 0.30608943f, 0.6572437f, 0.69185436f, 0.88646156f, 0.36985755f, 0.5590753f,
|
||||||
0.68642473, 0.37953874, 0.32575172, 0.21108444, 0.5661886, 0.45378175, 0.62126315, 0.26799858, 0.37272978});
|
0.5256446f, 0.03342898f, 0.1344396f, 0.68642473f, 0.37953874f, 0.32575172f, 0.21108444f, 0.5661886f,
|
||||||
|
0.45378175f, 0.62126315f, 0.26799858f, 0.37272978f});
|
||||||
// deltas
|
// deltas
|
||||||
test_case.add_input<float>(
|
test_case.add_input<float>(
|
||||||
Shape{1, 12, 2, 6},
|
Shape{1, 12, 2, 6},
|
||||||
{0.5337073, 0.86607957, 0.55151343, 0.21626699, 0.4462629, 0.03985678, 0.5157072, 0.9932138, 0.7565954,
|
{0.5337073f, 0.86607957f, 0.55151343f, 0.21626699f, 0.4462629f, 0.03985678f, 0.5157072f, 0.9932138f,
|
||||||
0.43803605, 0.802818, 0.14834064, 0.53932905, 0.14314, 0.3817048, 0.95075196, 0.05516243, 0.2567484,
|
0.7565954f, 0.43803605f, 0.802818f, 0.14834064f, 0.53932905f, 0.14314f, 0.3817048f, 0.95075196f,
|
||||||
0.25508744, 0.77438325, 0.43561, 0.2094628, 0.8299043, 0.44982538, 0.95615596, 0.5651084, 0.11801951,
|
0.05516243f, 0.2567484f, 0.25508744f, 0.77438325f, 0.43561f, 0.2094628f, 0.8299043f, 0.44982538f,
|
||||||
0.05352486, 0.9774733, 0.14439464, 0.62644225, 0.14370479, 0.54161614, 0.557915, 0.53102225, 0.0840179,
|
0.95615596f, 0.5651084f, 0.11801951f, 0.05352486f, 0.9774733f, 0.14439464f, 0.62644225f, 0.14370479f,
|
||||||
0.7249888, 0.9843559, 0.5490522, 0.53788143, 0.822474, 0.3278008, 0.39688024, 0.3286012, 0.5117038,
|
0.54161614f, 0.557915f, 0.53102225f, 0.0840179f, 0.7249888f, 0.9843559f, 0.5490522f, 0.53788143f,
|
||||||
0.04743988, 0.9408995, 0.29885054, 0.81039643, 0.85277915, 0.06807619, 0.86430097, 0.36225632, 0.16606331,
|
0.822474f, 0.3278008f, 0.39688024f, 0.3286012f, 0.5117038f, 0.04743988f, 0.9408995f, 0.29885054f,
|
||||||
0.5401001, 0.7541649, 0.11998601, 0.5131829, 0.40606487, 0.327888, 0.27721855, 0.6378373, 0.22795396,
|
0.81039643f, 0.85277915f, 0.06807619f, 0.86430097f, 0.36225632f, 0.16606331f, 0.5401001f, 0.7541649f,
|
||||||
0.4961256, 0.3215895, 0.15607187, 0.14782153, 0.8908137, 0.8835288, 0.834191, 0.29907143, 0.7983525,
|
0.11998601f, 0.5131829f, 0.40606487f, 0.327888f, 0.27721855f, 0.6378373f, 0.22795396f, 0.4961256f,
|
||||||
0.755875, 0.30837986, 0.0839176, 0.26624718, 0.04371626, 0.09472824, 0.20689541, 0.37622106, 0.1083321,
|
0.3215895f, 0.15607187f, 0.14782153f, 0.8908137f, 0.8835288f, 0.834191f, 0.29907143f, 0.7983525f,
|
||||||
0.1342548, 0.05815459, 0.7676379, 0.8105144, 0.92348766, 0.26761323, 0.7183306, 0.8947588, 0.19020908,
|
0.755875f, 0.30837986f, 0.0839176f, 0.26624718f, 0.04371626f, 0.09472824f, 0.20689541f, 0.37622106f,
|
||||||
0.42731014, 0.7473663, 0.85775334, 0.9340091, 0.3278848, 0.755993, 0.05307213, 0.39705503, 0.21003333,
|
0.1083321f, 0.1342548f, 0.05815459f, 0.7676379f, 0.8105144f, 0.92348766f, 0.26761323f, 0.7183306f,
|
||||||
0.5625373, 0.66188884, 0.80521655, 0.6125863, 0.44678232, 0.97802377, 0.0204936, 0.02686367, 0.7390654,
|
0.8947588f, 0.19020908f, 0.42731014f, 0.7473663f, 0.85775334f, 0.9340091f, 0.3278848f, 0.755993f,
|
||||||
0.74631, 0.58399844, 0.5988792, 0.37413648, 0.5946692, 0.6955776, 0.36377597, 0.7891322, 0.40900692,
|
0.05307213f, 0.39705503f, 0.21003333f, 0.5625373f, 0.66188884f, 0.80521655f, 0.6125863f, 0.44678232f,
|
||||||
0.99139464, 0.50169915, 0.41435778, 0.17142445, 0.26761186, 0.31591868, 0.14249913, 0.12919712, 0.5418711,
|
0.97802377f, 0.0204936f, 0.02686367f, 0.7390654f, 0.74631f, 0.58399844f, 0.5988792f, 0.37413648f,
|
||||||
0.6523203, 0.50259084, 0.7379765, 0.01171071, 0.94423133, 0.00841132, 0.97486794, 0.2921785, 0.7633071,
|
0.5946692f, 0.6955776f, 0.36377597f, 0.7891322f, 0.40900692f, 0.99139464f, 0.50169915f, 0.41435778f,
|
||||||
0.88477814, 0.03563205, 0.50833166, 0.01354555, 0.535081, 0.41366324, 0.0694767, 0.9944055, 0.9981207});
|
0.17142445f, 0.26761186f, 0.31591868f, 0.14249913f, 0.12919712f, 0.5418711f, 0.6523203f, 0.50259084f,
|
||||||
|
0.7379765f, 0.01171071f, 0.94423133f, 0.00841132f, 0.97486794f, 0.2921785f, 0.7633071f, 0.88477814f,
|
||||||
|
0.03563205f, 0.50833166f, 0.01354555f, 0.535081f, 0.41366324f, 0.0694767f, 0.9944055f, 0.9981207f});
|
||||||
// im_info
|
// im_info
|
||||||
test_case.add_input<float>(Shape{1, 3}, {200, 200, 0});
|
test_case.add_input<float>(Shape{1, 3}, {200, 200, 0});
|
||||||
// anchors
|
// anchors
|
||||||
@ -623,11 +627,11 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_generate_proposals) {
|
|||||||
|
|
||||||
test_case.add_expected_output<float>(
|
test_case.add_expected_output<float>(
|
||||||
Shape{6, 4},
|
Shape{6, 4},
|
||||||
{0.12904608, 1.3703424, 3.6230984, 3.4675088, 0.9725206, 0., 4.4917974, 4.9623675,
|
{0.12904608f, 1.3703424f, 3.6230984f, 3.4675088f, 0.9725206f, 0., 4.4917974f, 4.9623675f,
|
||||||
4.882682, 5.1236916, 7.1700497, 10.213073, 4.4913187, 4.305372, 8.750267, 8.803502,
|
4.882682f, 5.1236916f, 7.1700497f, 10.213073f, 4.4913187f, 4.305372f, 8.750267f, 8.803502f,
|
||||||
0.9777608, 1.0317986, 3.228293, 4.495021, 4.125554, 5.4091997, 6.35439, 10.124915});
|
0.9777608f, 1.0317986f, 3.228293f, 4.495021f, 4.125554f, 5.4091997f, 6.35439f, 10.124915f});
|
||||||
test_case.add_expected_output<float>(Shape{6},
|
test_case.add_expected_output<float>(Shape{6},
|
||||||
{0.9229515, 0.90457034, 0.88646156, 0.82664067, 0.69827306, 0.69185436});
|
{0.9229515f, 0.90457034f, 0.88646156f, 0.82664067f, 0.69827306f, 0.69185436f});
|
||||||
test_case.add_expected_output<int64_t>(Shape{1}, {6});
|
test_case.add_expected_output<int64_t>(Shape{1}, {6});
|
||||||
test_case.run();
|
test_case.run();
|
||||||
}
|
}
|
||||||
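The hunks in this test file all apply the same fix: floating-point literals that feed float containers gain an explicit 'f' suffix so the compiler never has to narrow a double. A minimal sketch of the MSVC behaviour being addressed, assuming /W4; the warning numbers (C4244/C4305, "conversion from 'double' to 'float'") and variable names below are illustrative, not quoted from this PR:

    #include <vector>

    int main() {
        // A double literal narrowed into a float is what MSVC flags
        // (typically C4244 or C4305, depending on the context).
        float before = 0.5488135;

        // The 'f' suffix makes the literal a float to begin with, so no
        // narrowing conversion happens and the stored value is the same.
        float after = 0.5488135f;

        // Same idea for the brace-initialized test vectors in this file.
        std::vector<float> values{after, before};
        return static_cast<int>(values.size());
    }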
|
@ -34,23 +34,23 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_adaptive_avg_pooling2d_nchw) {
|
|||||||
"onnx/org.pytorch/adaptive_avg_pooling2d_nchw.onnx"));
|
"onnx/org.pytorch/adaptive_avg_pooling2d_nchw.onnx"));
|
||||||
|
|
||||||
auto test_case = test::TestCase(function, s_device);
|
auto test_case = test::TestCase(function, s_device);
|
||||||
test_case.add_input<float>({0.9945,
|
test_case.add_input<float>({0.9945f,
|
||||||
0.3466,
|
0.3466f,
|
||||||
0.2894,
|
0.2894f,
|
||||||
0.9318,
|
0.9318f,
|
||||||
0.0115,
|
0.0115f,
|
||||||
0.4867,
|
0.4867f,
|
||||||
0.7608,
|
0.7608f,
|
||||||
0.1550,
|
0.1550f,
|
||||||
0.8485,
|
0.8485f,
|
||||||
0.4971,
|
0.4971f,
|
||||||
0.8833,
|
0.8833f,
|
||||||
0.4579,
|
0.4579f,
|
||||||
0.3673,
|
0.3673f,
|
||||||
0.5410,
|
0.5410f,
|
||||||
0.2004,
|
0.2004f,
|
||||||
0.1519});
|
0.1519f});
|
||||||
test_case.add_expected_output<float>(Shape{1, 1, 2, 2}, {0.4598249, 0.5342500, 0.5634750, 0.4233750});
|
test_case.add_expected_output<float>(Shape{1, 1, 2, 2}, {0.4598249f, 0.5342500f, 0.5634750f, 0.4233750f});
|
||||||
test_case.run();
|
test_case.run();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -61,8 +61,8 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_adaptive_avg_pooling2d_chw) {
|
|||||||
"onnx/org.pytorch/adaptive_avg_pooling2d_chw.onnx"));
|
"onnx/org.pytorch/adaptive_avg_pooling2d_chw.onnx"));
|
||||||
|
|
||||||
auto test_case = test::TestCase(function, s_device);
|
auto test_case = test::TestCase(function, s_device);
|
||||||
test_case.add_input<float>({12.0, -1.0, -56.0, 20.0, 1.0, -8.0, 7.0, 9.0});
|
test_case.add_input<float>({12.0f, -1.0f, -56.0f, 20.0f, 1.0f, -8.0f, 7.0f, 9.0f});
|
||||||
|
|
||||||
test_case.add_expected_output<float>(Shape{1, 2, 2}, {5.5, -18.0, -3.5, 8.0});
|
test_case.add_expected_output<float>(Shape{1, 2, 2}, {5.5f, -18.0f, -3.5f, 8.0f});
|
||||||
test_case.run();
|
test_case.run();
|
||||||
}
|
}
|
||||||
|
@ -1061,7 +1061,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_fake_quantize_const_inputs_infer) {
|
|||||||
const Shape data_shape{1, 2, 3, 4};
|
const Shape data_shape{1, 2, 3, 4};
|
||||||
const auto n_elements = shape_size(data_shape);
|
const auto n_elements = shape_size(data_shape);
|
||||||
std::vector<float> input_data(n_elements);
|
std::vector<float> input_data(n_elements);
|
||||||
std::iota(std::begin(input_data), std::end(input_data), 0);
|
std::iota(std::begin(input_data), std::end(input_data), 0.f);
|
||||||
|
|
||||||
auto test_case = test::TestCase(function, s_device);
|
auto test_case = test::TestCase(function, s_device);
|
||||||
test_case.add_input<float>(input_data);
|
test_case.add_input<float>(input_data);
|
||||||
@ -1081,7 +1081,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_fake_quantize_nonconst_inputs_infer) {
|
|||||||
const Shape data_shape{1, 2, 3, 4};
|
const Shape data_shape{1, 2, 3, 4};
|
||||||
const size_t n_elements = shape_size(data_shape);
|
const size_t n_elements = shape_size(data_shape);
|
||||||
std::vector<float> input_data(n_elements);
|
std::vector<float> input_data(n_elements);
|
||||||
std::iota(std::begin(input_data), std::end(input_data), 0);
|
std::iota(std::begin(input_data), std::end(input_data), 0.f);
|
||||||
|
|
||||||
auto test_case = test::TestCase(function, s_device);
|
auto test_case = test::TestCase(function, s_device);
|
||||||
test_case.add_input<float>(input_data);
|
test_case.add_input<float>(input_data);
|
||||||
|
@ -129,29 +129,29 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_reshape_negative_dim) {
|
|||||||
"onnx/reshape_negative_dim.onnx"));
|
"onnx/reshape_negative_dim.onnx"));
|
||||||
|
|
||||||
// 2x3x4
|
// 2x3x4
|
||||||
auto input = test::NDArray<float, 3>({{{0.5488135, 0.71518934, 0.60276335, 0.5448832},
|
auto input = test::NDArray<float, 3>({{{0.5488135f, 0.71518934f, 0.60276335f, 0.5448832f},
|
||||||
{0.4236548, 0.6458941, 0.4375872, 0.891773},
|
{0.4236548f, 0.6458941f, 0.4375872f, 0.891773f},
|
||||||
{0.96366274, 0.3834415, 0.79172504, 0.5288949}},
|
{0.96366274f, 0.3834415f, 0.79172504f, 0.5288949f}},
|
||||||
|
|
||||||
{{0.56804454, 0.92559665, 0.07103606, 0.0871293},
|
{{0.56804454f, 0.92559665f, 0.07103606f, 0.0871293f},
|
||||||
{0.0202184, 0.83261985, 0.77815676, 0.87001216},
|
{0.0202184f, 0.83261985f, 0.77815676f, 0.87001216f},
|
||||||
{0.9786183, 0.7991586, 0.46147937, 0.7805292}}})
|
{0.9786183f, 0.7991586f, 0.46147937f, 0.7805292f}}})
|
||||||
.get_vector();
|
.get_vector();
|
||||||
|
|
||||||
// 2x6x2
|
// 2x6x2
|
||||||
auto expected_output = test::NDArray<float, 3>({{{0.5488135, 0.71518934},
|
auto expected_output = test::NDArray<float, 3>({{{0.5488135f, 0.71518934f},
|
||||||
{0.60276335, 0.5448832},
|
{0.60276335f, 0.5448832f},
|
||||||
{0.4236548, 0.6458941},
|
{0.4236548f, 0.6458941f},
|
||||||
{0.4375872, 0.891773},
|
{0.4375872f, 0.891773f},
|
||||||
{0.96366274, 0.3834415},
|
{0.96366274f, 0.3834415f},
|
||||||
{0.79172504, 0.5288949}},
|
{0.79172504f, 0.5288949f}},
|
||||||
|
|
||||||
{{0.56804454, 0.92559665},
|
{{0.56804454f, 0.92559665f},
|
||||||
{0.07103606, 0.0871293},
|
{0.07103606f, 0.0871293f},
|
||||||
{0.0202184, 0.83261985},
|
{0.0202184f, 0.83261985f},
|
||||||
{0.77815676, 0.87001216},
|
{0.77815676f, 0.87001216f},
|
||||||
{0.9786183, 0.7991586},
|
{0.9786183f, 0.7991586f},
|
||||||
{0.46147937, 0.7805292}}})
|
{0.46147937f, 0.7805292f}}})
|
||||||
.get_vector();
|
.get_vector();
|
||||||
|
|
||||||
auto test_case = test::TestCase(function, s_device);
|
auto test_case = test::TestCase(function, s_device);
|
||||||
@ -207,7 +207,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_depth_to_space) {
|
|||||||
file_util::path_join(CommonTestUtils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/depth_to_space.onnx"));
|
file_util::path_join(CommonTestUtils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/depth_to_space.onnx"));
|
||||||
|
|
||||||
std::vector<float> input(32);
|
std::vector<float> input(32);
|
||||||
std::iota(input.begin(), input.end(), 0);
|
std::iota(input.begin(), input.end(), 0.f);
|
||||||
|
|
||||||
std::vector<float> expected_output{0.f, 8.f, 1.f, 9.f, 16.f, 24.f, 17.f, 25.f, 2.f, 10.f, 3.f,
|
std::vector<float> expected_output{0.f, 8.f, 1.f, 9.f, 16.f, 24.f, 17.f, 25.f, 2.f, 10.f, 3.f,
|
||||||
11.f, 18.f, 26.f, 19.f, 27.f, 4.f, 12.f, 5.f, 13.f, 20.f, 28.f,
|
11.f, 18.f, 26.f, 19.f, 27.f, 4.f, 12.f, 5.f, 13.f, 20.f, 28.f,
|
||||||
@ -224,7 +224,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_depth_to_space_v1) {
|
|||||||
file_util::path_join(CommonTestUtils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/depth_to_space_v1.onnx"));
|
file_util::path_join(CommonTestUtils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/depth_to_space_v1.onnx"));
|
||||||
|
|
||||||
std::vector<float> input(32);
|
std::vector<float> input(32);
|
||||||
std::iota(input.begin(), input.end(), 0);
|
std::iota(input.begin(), input.end(), 0.f);
|
||||||
|
|
||||||
std::vector<float> expected_output{0.f, 8.f, 1.f, 9.f, 16.f, 24.f, 17.f, 25.f, 2.f, 10.f, 3.f,
|
std::vector<float> expected_output{0.f, 8.f, 1.f, 9.f, 16.f, 24.f, 17.f, 25.f, 2.f, 10.f, 3.f,
|
||||||
11.f, 18.f, 26.f, 19.f, 27.f, 4.f, 12.f, 5.f, 13.f, 20.f, 28.f,
|
11.f, 18.f, 26.f, 19.f, 27.f, 4.f, 12.f, 5.f, 13.f, 20.f, 28.f,
|
||||||
@ -242,7 +242,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_depth_to_space_crd) {
|
|||||||
"onnx/depth_to_space_crd.onnx"));
|
"onnx/depth_to_space_crd.onnx"));
|
||||||
|
|
||||||
std::vector<float> input(32);
|
std::vector<float> input(32);
|
||||||
std::iota(input.begin(), input.end(), 0);
|
std::iota(input.begin(), input.end(), 0.f);
|
||||||
|
|
||||||
std::vector<float> expected_output{0.f, 4.f, 1.f, 5.f, 8.f, 12.f, 9.f, 13.f, 2.f, 6.f, 3.f,
|
std::vector<float> expected_output{0.f, 4.f, 1.f, 5.f, 8.f, 12.f, 9.f, 13.f, 2.f, 6.f, 3.f,
|
||||||
7.f, 10.f, 14.f, 11.f, 15.f, 16.f, 20.f, 17.f, 21.f, 24.f, 28.f,
|
7.f, 10.f, 14.f, 11.f, 15.f, 16.f, 20.f, 17.f, 21.f, 24.f, 28.f,
|
||||||
@ -304,7 +304,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_space_to_depth) {
|
|||||||
file_util::path_join(CommonTestUtils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/space_to_depth.onnx"));
|
file_util::path_join(CommonTestUtils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/space_to_depth.onnx"));
|
||||||
|
|
||||||
std::vector<float> input(32);
|
std::vector<float> input(32);
|
||||||
std::iota(input.begin(), input.end(), 0);
|
std::iota(input.begin(), input.end(), 0.f);
|
||||||
|
|
||||||
std::vector<float> expected_output{
|
std::vector<float> expected_output{
|
||||||
0.f, 2.f, 8.f, 10.f, 16.f, 18.f, 24.f, 26.f, 1.f, 3.f, 9.f, 11.f, 17.f, 19.f, 25.f, 27.f,
|
0.f, 2.f, 8.f, 10.f, 16.f, 18.f, 24.f, 26.f, 1.f, 3.f, 9.f, 11.f, 17.f, 19.f, 25.f, 27.f,
|
||||||
|
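The std::iota changes above follow from how iota deduces its counter type: the third argument drives the increments, so seeding a std::vector<float> with a plain 0 makes every element assignment an int-to-float conversion. A small sketch under the assumption that MSVC reports this as its usual possible-loss-of-data warning (C4244); the names are illustrative:

    #include <numeric>
    #include <vector>

    int main() {
        std::vector<float> input(32);

        // Counter type is int here, so each element assignment converts
        // int -> float, which MSVC flags as a possible loss of data.
        std::iota(input.begin(), input.end(), 0);

        // Counter type is float here, so no conversion takes place.
        std::iota(input.begin(), input.end(), 0.f);

        return static_cast<int>(input.back());  // 31.f -> 31
    }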
@ -39,12 +39,12 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_lstm_fwd_default_const) {
|
|||||||
"onnx/lstm_fwd_default_const.onnx"));
|
"onnx/lstm_fwd_default_const.onnx"));
|
||||||
|
|
||||||
auto test_case = test::TestCase(function, s_device);
|
auto test_case = test::TestCase(function, s_device);
|
||||||
test_case.add_input<float>({0.68172926, 1.1405563, -0.03931177, -0.03759607}); // X
|
test_case.add_input<float>({0.68172926f, 1.1405563f, -0.03931177f, -0.03759607f}); // X
|
||||||
|
|
||||||
test_case.add_expected_output<float>(Shape{2, 1, 1, 2},
|
test_case.add_expected_output<float>(Shape{2, 1, 1, 2},
|
||||||
{-0.063373, -0.20347191, -0.07230289, -0.13298286}); // Y_data
|
{-0.063373f, -0.20347191f, -0.07230289f, -0.13298286f}); // Y_data
|
||||||
test_case.add_expected_output<float>(Shape{1, 1, 2}, {-0.07230289, -0.13298286}); // Y_h_data
|
test_case.add_expected_output<float>(Shape{1, 1, 2}, {-0.07230289f, -0.13298286f}); // Y_h_data
|
||||||
test_case.add_expected_output<float>(Shape{1, 1, 2}, {-0.1557954, -0.24502525}); // Y_c_data
|
test_case.add_expected_output<float>(Shape{1, 1, 2}, {-0.1557954f, -0.24502525f}); // Y_c_data
|
||||||
|
|
||||||
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
||||||
}
|
}
|
||||||
@ -55,12 +55,12 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_lstm_reverse_const) {
|
|||||||
"onnx/lstm_reverse_const.onnx"));
|
"onnx/lstm_reverse_const.onnx"));
|
||||||
|
|
||||||
auto test_case = test::TestCase(function, s_device);
|
auto test_case = test::TestCase(function, s_device);
|
||||||
test_case.add_input<float>({0.68172926, 1.1405563, -0.03931177, -0.03759607}); // X
|
test_case.add_input<float>({0.68172926f, 1.1405563f, -0.03931177f, -0.03759607f}); // X
|
||||||
|
|
||||||
test_case.add_expected_output<float>(Shape{2, 1, 1, 2},
|
test_case.add_expected_output<float>(Shape{2, 1, 1, 2},
|
||||||
{-0.06082131, -0.19985214, 0.00860566, 0.00920492}); // Y_data
|
{-0.06082131f, -0.19985214f, 0.00860566f, 0.00920492f}); // Y_data
|
||||||
test_case.add_expected_output<float>(Shape{1, 1, 2}, {-0.06082131, -0.19985214}); // Y_h_data
|
test_case.add_expected_output<float>(Shape{1, 1, 2}, {-0.06082131f, -0.19985214f}); // Y_h_data
|
||||||
test_case.add_expected_output<float>(Shape{1, 1, 2}, {-0.25917438, -0.3832652}); // Y_c_data
|
test_case.add_expected_output<float>(Shape{1, 1, 2}, {-0.25917438f, -0.3832652f}); // Y_c_data
|
||||||
|
|
||||||
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
||||||
}
|
}
|
||||||
@ -70,21 +70,21 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_lstm_bidir_const) {
|
|||||||
file_util::path_join(CommonTestUtils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/lstm_bidir_const.onnx"));
|
file_util::path_join(CommonTestUtils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/lstm_bidir_const.onnx"));
|
||||||
|
|
||||||
auto test_case = test::TestCase(function, s_device);
|
auto test_case = test::TestCase(function, s_device);
|
||||||
test_case.add_input<float>({0.68172926, 1.1405563, -0.03931177, -0.03759607}); // X
|
test_case.add_input<float>({0.68172926f, 1.1405563f, -0.03931177f, -0.03759607f}); // X
|
||||||
|
|
||||||
test_case.add_expected_output<float>(Shape{2, 2, 1, 2},
|
test_case.add_expected_output<float>(Shape{2, 2, 1, 2},
|
||||||
{-0.063373,
|
{-0.063373f,
|
||||||
-0.20347191,
|
-0.20347191f,
|
||||||
-0.06082131,
|
-0.06082131f,
|
||||||
-0.19985214,
|
-0.19985214f,
|
||||||
-0.07230289,
|
-0.07230289f,
|
||||||
-0.13298286,
|
-0.13298286f,
|
||||||
0.00860566,
|
0.00860566f,
|
||||||
0.00920492}); // Y_data
|
0.00920492f}); // Y_data
|
||||||
test_case.add_expected_output<float>(Shape{2, 1, 2},
|
test_case.add_expected_output<float>(Shape{2, 1, 2},
|
||||||
{-0.07230289, -0.13298286, -0.06082131, -0.19985214}); // Y_h_data
|
{-0.07230289f, -0.13298286f, -0.06082131f, -0.19985214f}); // Y_h_data
|
||||||
test_case.add_expected_output<float>(Shape{2, 1, 2},
|
test_case.add_expected_output<float>(Shape{2, 1, 2},
|
||||||
{-0.1557954, -0.24502525, -0.25917438, -0.3832652}); // Y_c_data
|
{-0.1557954f, -0.24502525f, -0.25917438f, -0.3832652f}); // Y_c_data
|
||||||
|
|
||||||
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
||||||
}
|
}
|
||||||
@ -95,12 +95,12 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_lstm_fwd_with_clip_const) {
|
|||||||
"onnx/lstm_fwd_clip_const.onnx"));
|
"onnx/lstm_fwd_clip_const.onnx"));
|
||||||
|
|
||||||
auto test_case = test::TestCase(function, s_device);
|
auto test_case = test::TestCase(function, s_device);
|
||||||
test_case.add_input<float>({0.68172926, 1.1405563, -0.03931177, -0.03759607}); // X
|
test_case.add_input<float>({0.68172926f, 1.1405563f, -0.03931177f, -0.03759607f}); // X
|
||||||
|
|
||||||
test_case.add_expected_output<float>(Shape{2, 1, 1, 2},
|
test_case.add_expected_output<float>(Shape{2, 1, 1, 2},
|
||||||
{-0.02391884, -0.02744377, -0.01024176, -0.01188637}); // Y_data
|
{-0.02391884f, -0.02744377f, -0.01024176f, -0.01188637f}); // Y_data
|
||||||
test_case.add_expected_output<float>(Shape{1, 1, 2}, {-0.01024176, -0.01188637}); // Y_h_data
|
test_case.add_expected_output<float>(Shape{1, 1, 2}, {-0.01024176f, -0.01188637f}); // Y_h_data
|
||||||
test_case.add_expected_output<float>(Shape{1, 1, 2}, {-0.02039271, -0.02353566}); // Y_c_data
|
test_case.add_expected_output<float>(Shape{1, 1, 2}, {-0.02039271f, -0.02353566f}); // Y_c_data
|
||||||
|
|
||||||
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
||||||
}
|
}
|
||||||
@ -111,27 +111,27 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_lstm_fwd_mixed_seq_const) {
|
|||||||
"onnx/lstm_fwd_mixed_seq_const.onnx"));
|
"onnx/lstm_fwd_mixed_seq_const.onnx"));
|
||||||
|
|
||||||
auto test_case = test::TestCase(function, s_device);
|
auto test_case = test::TestCase(function, s_device);
|
||||||
test_case.add_input<float>({0.68172926, 1.1405563, -0.03931177, -0.03759607}); // X
|
test_case.add_input<float>({0.68172926f, 1.1405563f, -0.03931177f, -0.03759607f}); // X
|
||||||
|
|
||||||
test_case.add_expected_output<float>(Shape{2, 1, 2, 3},
|
test_case.add_expected_output<float>(Shape{2, 1, 2, 3},
|
||||||
{0.13528088,
|
{0.13528088f,
|
||||||
-0.1779867,
|
-0.1779867f,
|
||||||
-0.07448981,
|
-0.07448981f,
|
||||||
0.14769037,
|
0.14769037f,
|
||||||
-0.16327181,
|
-0.16327181f,
|
||||||
-0.10419653,
|
-0.10419653f,
|
||||||
0.,
|
0.,
|
||||||
0.,
|
0.,
|
||||||
0.,
|
0.,
|
||||||
0.08759661,
|
0.08759661f,
|
||||||
-0.04002844,
|
-0.04002844f,
|
||||||
-0.08617793}); // Y_data
|
-0.08617793f}); // Y_data
|
||||||
test_case.add_expected_output<float>(
|
test_case.add_expected_output<float>(
|
||||||
Shape{1, 2, 3},
|
Shape{1, 2, 3},
|
||||||
{0.13528088, -0.1779867, -0.07448981, 0.08759661, -0.04002844, -0.08617793}); // Y_h_data
|
{0.13528088f, -0.1779867f, -0.07448981f, 0.08759661f, -0.04002844f, -0.08617793f}); // Y_h_data
|
||||||
test_case.add_expected_output<float>(
|
test_case.add_expected_output<float>(
|
||||||
Shape{1, 2, 3},
|
Shape{1, 2, 3},
|
||||||
{0.367563, -0.43762812, -0.20435227, 0.17330585, -0.0732716, -0.18809439}); // Y_c_data
|
{0.367563f, -0.43762812f, -0.20435227f, 0.17330585f, -0.0732716f, -0.18809439f}); // Y_c_data
|
||||||
|
|
||||||
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
||||||
}
|
}
|
||||||
@ -142,27 +142,27 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_lstm_reverse_mixed_seq_const) {
|
|||||||
"onnx/lstm_reverse_mixed_seq_const.onnx"));
|
"onnx/lstm_reverse_mixed_seq_const.onnx"));
|
||||||
|
|
||||||
auto test_case = test::TestCase(function, s_device);
|
auto test_case = test::TestCase(function, s_device);
|
||||||
test_case.add_input<float>({0.68172926, 1.1405563, -0.03931177, -0.03759607}); // X
|
test_case.add_input<float>({0.68172926f, 1.1405563f, -0.03931177f, -0.03759607f}); // X
|
||||||
|
|
||||||
test_case.add_expected_output<float>(Shape{2, 1, 2, 3},
|
test_case.add_expected_output<float>(Shape{2, 1, 2, 3},
|
||||||
{0.13528088,
|
{0.13528088f,
|
||||||
-0.1779867,
|
-0.1779867f,
|
||||||
-0.07448981,
|
-0.07448981f,
|
||||||
0.14696799,
|
0.14696799f,
|
||||||
-0.15571019,
|
-0.15571019f,
|
||||||
-0.10270946,
|
-0.10270946f,
|
||||||
0.,
|
0.,
|
||||||
0.,
|
0.,
|
||||||
0.,
|
0.,
|
||||||
-0.01110403,
|
-0.01110403f,
|
||||||
0.0228607,
|
0.0228607f,
|
||||||
0.00397353}); // Y_data
|
0.00397353f}); // Y_data
|
||||||
test_case.add_expected_output<float>(
|
test_case.add_expected_output<float>(
|
||||||
Shape{1, 2, 3},
|
Shape{1, 2, 3},
|
||||||
{0.13528088, -0.1779867, -0.07448981, 0.14696799, -0.15571019, -0.10270946}); // Y_h_data
|
{0.13528088f, -0.1779867f, -0.07448981f, 0.14696799f, -0.15571019f, -0.10270946f}); // Y_h_data
|
||||||
test_case.add_expected_output<float>(
|
test_case.add_expected_output<float>(
|
||||||
Shape{1, 2, 3},
|
Shape{1, 2, 3},
|
||||||
{0.367563, -0.43762812, -0.20435227, 0.50598085, -0.42627674, -0.3641275}); // Y_c_data
|
{0.367563f, -0.43762812f, -0.20435227f, 0.50598085f, -0.42627674f, -0.3641275f}); // Y_c_data
|
||||||
|
|
||||||
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
||||||
}
|
}
|
||||||
@ -174,43 +174,43 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_lstm_bidir_mixed_seq_const) {
|
|||||||
|
|
||||||
auto test_case = test::TestCase(function, s_device);
|
auto test_case = test::TestCase(function, s_device);
|
||||||
test_case.add_input<float>(
|
test_case.add_input<float>(
|
||||||
{0.68172926, 1.1405563, -0.03931177, -0.03759607, 1.1397027, 0.60444903, 1.3246384, -0.28191715}); // X
|
{0.68172926f, 1.1405563f, -0.03931177f, -0.03759607f, 1.1397027f, 0.60444903f, 1.3246384f, -0.28191715f}); // X
|
||||||
|
|
||||||
test_case.add_expected_output<float>(Shape{2, 2, 2, 2},
|
test_case.add_expected_output<float>(Shape{2, 2, 2, 2},
|
||||||
{-0.063373,
|
{-0.063373f,
|
||||||
-0.20347191,
|
-0.20347191f,
|
||||||
0.00860566,
|
0.00860566f,
|
||||||
0.00920492,
|
0.00920492f,
|
||||||
-0.063373,
|
-0.063373f,
|
||||||
-0.20347191,
|
-0.20347191f,
|
||||||
-0.12004475,
|
-0.12004475f,
|
||||||
-0.12800421,
|
-0.12800421f,
|
||||||
0.,
|
0.,
|
||||||
0.,
|
0.,
|
||||||
-0.19095606,
|
-0.19095606f,
|
||||||
-0.12459831,
|
-0.12459831f,
|
||||||
0.,
|
0.,
|
||||||
0.,
|
0.,
|
||||||
-0.1911628,
|
-0.1911628f,
|
||||||
-0.12813942}); // Y_data
|
-0.12813942f}); // Y_data
|
||||||
test_case.add_expected_output<float>(Shape{2, 2, 2},
|
test_case.add_expected_output<float>(Shape{2, 2, 2},
|
||||||
{-0.063373,
|
{-0.063373f,
|
||||||
-0.20347191,
|
-0.20347191f,
|
||||||
-0.19095606,
|
-0.19095606f,
|
||||||
-0.12459831,
|
-0.12459831f,
|
||||||
-0.063373,
|
-0.063373f,
|
||||||
-0.20347191,
|
-0.20347191f,
|
||||||
-0.12004475,
|
-0.12004475f,
|
||||||
-0.12800421}); // Y_h_data
|
-0.12800421f}); // Y_h_data
|
||||||
test_case.add_expected_output<float>(Shape{2, 2, 2},
|
test_case.add_expected_output<float>(Shape{2, 2, 2},
|
||||||
{-0.2732999,
|
{-0.2732999f,
|
||||||
-0.38956356,
|
-0.38956356f,
|
||||||
-0.48170844,
|
-0.48170844f,
|
||||||
-0.34701264,
|
-0.34701264f,
|
||||||
-0.2732999,
|
-0.2732999f,
|
||||||
-0.38956356,
|
-0.38956356f,
|
||||||
-0.27130172,
|
-0.27130172f,
|
||||||
-0.253659}); // Y_c_data
|
-0.253659f}); // Y_c_data
|
||||||
|
|
||||||
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
||||||
}
|
}
|
||||||
@ -221,7 +221,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_lstm_fwd_with_clip_peepholes) {
|
|||||||
"onnx/lstm_fwd_with_clip_peepholes.onnx"));
|
"onnx/lstm_fwd_with_clip_peepholes.onnx"));
|
||||||
|
|
||||||
auto test_case = test::TestCase(function, s_device);
|
auto test_case = test::TestCase(function, s_device);
|
||||||
test_case.add_input<float>({-0.455351, -0.276391, -0.185934, -0.269585}); // X
|
test_case.add_input<float>({-0.455351f, -0.276391f, -0.185934f, -0.269585f}); // X
|
||||||
test_case.add_input<float>({-0.494659f, // W
|
test_case.add_input<float>({-0.494659f, // W
|
||||||
0.0453352f,
|
0.0453352f,
|
||||||
-0.487793f,
|
-0.487793f,
|
||||||
@ -560,9 +560,9 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_lstm_dynamic_batch_size_and_seq_len) {
|
|||||||
test_case.add_input<float>({1, 2, 3, 4, 5, 6});
|
test_case.add_input<float>({1, 2, 3, 4, 5, 6});
|
||||||
|
|
||||||
test_case.add_expected_output<float>(Shape{1, 1, 3, 2},
|
test_case.add_expected_output<float>(Shape{1, 1, 3, 2},
|
||||||
{0.761594, 0.761594, 0.761594, 0.761594, 0.761594, 0.761594}); // Y
|
{0.761594f, 0.761594f, 0.761594f, 0.761594f, 0.761594f, 0.761594f}); // Y
|
||||||
test_case.add_expected_output<float>(Shape{1, 3, 2},
|
test_case.add_expected_output<float>(Shape{1, 3, 2},
|
||||||
{0.761594, 0.761594, 0.761594, 0.761594, 0.761594, 0.761594}); // Y_c
|
{0.761594f, 0.761594f, 0.761594f, 0.761594f, 0.761594f, 0.761594f}); // Y_c
|
||||||
test_case.add_expected_output<float>(Shape{1, 3, 2}, {1, 1, 1, 1, 1, 1}); // Y_h
|
test_case.add_expected_output<float>(Shape{1, 3, 2}, {1, 1, 1, 1, 1, 1}); // Y_h
|
||||||
|
|
||||||
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
|
||||||
@ -769,32 +769,32 @@ NGRAPH_TEST_F(${BACKEND_NAME}, GRUSequenceOp, onnx_model_gru_fwd_activations_con
|
|||||||
// Y
|
// Y
|
||||||
test_case.add_expected_output<float>(
|
test_case.add_expected_output<float>(
|
||||||
Shape{4, 1, 3, 5},
|
Shape{4, 1, 3, 5},
|
||||||
std::vector<float>{0.30736187, 0.10271017, 0.91698503, 0.3471303, -0.0123809, 0.51264125, 0.51235366,
|
std::vector<float>{0.30736187f, 0.10271017f, 0.91698503f, 0.3471303f, -0.0123809f, 0.51264125f, 0.51235366f,
|
||||||
0.45471948, 0.50601995, 0.49260828, 0.4781971, 0.0668709, 0.89421916, 0.33762455,
|
0.45471948f, 0.50601995f, 0.49260828f, 0.4781971f, 0.0668709f, 0.89421916f, 0.33762455f,
|
||||||
-0.19021586, 0.6881336, 0.7331965, 0.8887774, 0.34048334, 0.38408905, 0.49962956,
|
-0.19021586f, 0.6881336f, 0.7331965f, 0.8887774f, 0.34048334f, 0.38408905f, 0.49962956f,
|
||||||
0.2948451, 0.3651103, 0.33406913, 0.57418096, 0.49882296, 0.4321446, 0.97142136,
|
0.2948451f, 0.3651103f, 0.33406913f, 0.57418096f, 0.49882296f, 0.4321446f, 0.97142136f,
|
||||||
0.20714557, 0.66270787, 0.53192705, 0.46424377, 0.9647801, 0.19583187, 0.7362316,
|
0.20714557f, 0.66270787f, 0.53192705f, 0.46424377f, 0.9647801f, 0.19583187f, 0.7362316f,
|
||||||
0.48205143, -0.04748845, 0.27395952, 0.35897565, 0.5801568, 0.5889811, 0.36110958,
|
0.48205143f, -0.04748845f, 0.27395952f, 0.35897565f, 0.5801568f, 0.5889811f, 0.36110958f,
|
||||||
1.3433081, 0.29702073, 0.5709667, 0.936689, 0.84129435, 1.1782551, 0.23925206,
|
1.3433081f, 0.29702073f, 0.5709667f, 0.936689f, 0.84129435f, 1.1782551f, 0.23925206f,
|
||||||
0.57521456, 0.43502977, -0.5664091, 0.6758457, 0.2958132, 0.70932186, 0.4411352,
|
0.57521456f, 0.43502977f, -0.5664091f, 0.6758457f, 0.2958132f, 0.70932186f, 0.4411352f,
|
||||||
-0.1717428, 1.7761463, 0.14413449, 0.73801273});
|
-0.1717428f, 1.7761463f, 0.14413449f, 0.73801273f});
|
||||||
// Y_h
|
// Y_h
|
||||||
test_case.add_expected_output<float>(Shape{1, 3, 5},
|
test_case.add_expected_output<float>(Shape{1, 3, 5},
|
||||||
std::vector<float>{0.936689,
|
std::vector<float>{0.936689f,
|
||||||
0.84129435,
|
0.84129435f,
|
||||||
1.1782551,
|
1.1782551f,
|
||||||
0.23925206,
|
0.23925206f,
|
||||||
0.57521456,
|
0.57521456f,
|
||||||
0.43502977,
|
0.43502977f,
|
||||||
-0.5664091,
|
-0.5664091f,
|
||||||
0.6758457,
|
0.6758457f,
|
||||||
0.2958132,
|
0.2958132f,
|
||||||
0.70932186,
|
0.70932186f,
|
||||||
0.4411352,
|
0.4411352f,
|
||||||
-0.1717428,
|
-0.1717428f,
|
||||||
1.7761463,
|
1.7761463f,
|
||||||
0.14413449,
|
0.14413449f,
|
||||||
0.73801273});
|
0.73801273f});
|
||||||
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 5);
|
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 5);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -908,32 +908,33 @@ NGRAPH_TEST_F(${BACKEND_NAME}, GRUSequenceOp, onnx_model_gru_fwd_mixed_seq_len_c
|
|||||||
// Y
|
// Y
|
||||||
test_case.add_expected_output<float>(
|
test_case.add_expected_output<float>(
|
||||||
Shape{4, 1, 3, 5},
|
Shape{4, 1, 3, 5},
|
||||||
std::vector<float>{-0.9559332, 0.4372494, 0.9967716, -0.9079381, -1.2538278, 1.9265908, -0.8437393,
|
std::vector<float>{-0.9559332f, 0.4372494f, 0.9967716f, -0.9079381f, -1.2538278f, 1.9265908f,
|
||||||
-1.2057271, -0.25887525, -0.52679026, -0.3619178, 0.67928517, 0.9486744, -0.12006134,
|
-0.8437393f, -1.2057271f, -0.25887525f, -0.52679026f, -0.3619178f, 0.67928517f,
|
||||||
-1.3862017, -0.98941356, 0.80389524, 0.97586197, -0.9343586, -0.74858856, 1.797039,
|
0.9486744f, -0.12006134f, -1.3862017f, -0.98941356f, 0.80389524f, 0.97586197f,
|
||||||
-0.7873732, -0.72469383, -0.5866635, -0.42103744, -0.8406298, 0.85877097, 0.6349921,
|
-0.9343586f, -0.74858856f, 1.797039f, -0.7873732f, -0.72469383f, -0.5866635f,
|
||||||
-0.55897295, -0.6168443, 0., 0., 0., 0., 0.,
|
-0.42103744f, -0.8406298f, 0.85877097f, 0.6349921f, -0.55897295f, -0.6168443f,
|
||||||
1.577129, -0.6935871, -0.304804, -0.75392795, -0.20703818, -0.93796504, 0.9220495,
|
0., 0., 0., 0., 0., 1.577129f,
|
||||||
0.36017662, -0.7007159, 0.06962098, 0., 0., 0., 0.,
|
-0.6935871f, -0.304804f, -0.75392795f, -0.20703818f, -0.93796504f, 0.9220495f,
|
||||||
0., 0., 0., 0., 0., 0., -0.96323603,
|
0.36017662f, -0.7007159f, 0.06962098f, 0., 0., 0.,
|
||||||
0.9265786, 0.54976916, -0.8037839, 0.73501444});
|
0., 0., 0., 0., 0., 0.,
|
||||||
|
0., -0.96323603f, 0.9265786f, 0.54976916f, -0.8037839f, 0.73501444f});
|
||||||
// Y_h
|
// Y_h
|
||||||
test_case.add_expected_output<float>(Shape{1, 3, 5},
|
test_case.add_expected_output<float>(Shape{1, 3, 5},
|
||||||
std::vector<float>{-0.98941356,
|
std::vector<float>{-0.98941356f,
|
||||||
0.80389524,
|
0.80389524f,
|
||||||
0.97586197,
|
0.97586197f,
|
||||||
-0.9343586,
|
-0.9343586f,
|
||||||
-0.74858856,
|
-0.74858856f,
|
||||||
1.577129,
|
1.577129f,
|
||||||
-0.6935871,
|
-0.6935871f,
|
||||||
-0.304804,
|
-0.304804f,
|
||||||
-0.75392795,
|
-0.75392795f,
|
||||||
-0.20703818,
|
-0.20703818f,
|
||||||
-0.96323603,
|
-0.96323603f,
|
||||||
0.9265786,
|
0.9265786f,
|
||||||
0.54976916,
|
0.54976916f,
|
||||||
-0.8037839,
|
-0.8037839f,
|
||||||
0.73501444});
|
0.73501444f});
|
||||||
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 3);
|
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 3);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -949,32 +950,33 @@ NGRAPH_TEST_F(${BACKEND_NAME}, GRUSequenceOp, onnx_model_gru_reverse_mixed_seq_l
|
|||||||
// Y
|
// Y
|
||||||
test_case.add_expected_output<float>(
|
test_case.add_expected_output<float>(
|
||||||
Shape{4, 1, 3, 5},
|
Shape{4, 1, 3, 5},
|
||||||
std::vector<float>{-0.9917215, 0.07583051, 0.997975, -0.9315585, -0.7483002, 1.536813, -0.59922504,
|
std::vector<float>{-0.9917215f, 0.07583051f, 0.997975f, -0.9315585f, -0.7483002f, 1.536813f,
|
||||||
-0.33637103, -0.7565539, -0.23930266, -0.7844553, 1.0393485, 0.73516595, -0.5616293,
|
-0.59922504f, -0.33637103f, -0.7565539f, -0.23930266f, -0.7844553f, 1.0393485f,
|
||||||
-0.09489207, -0.9501128, 0.7905356, 0.9928266, -0.9153729, -1.1781745, 1.7955453,
|
0.73516595f, -0.5616293f, -0.09489207f, -0.9501128f, 0.7905356f, 0.9928266f,
|
||||||
-0.77754307, -0.6831806, -0.6266324, -0.39791372, -0.8030517, 1.3107346, 0.3700709,
|
-0.9153729f, -1.1781745f, 1.7955453f, -0.77754307f, -0.6831806f, -0.6266324f,
|
||||||
-0.49808976, 0.52939236, 0., 0., 0., 0., 0.,
|
-0.39791372f, -0.8030517f, 1.3107346f, 0.3700709f, -0.49808976f, 0.52939236f,
|
||||||
1.9345565, -0.83817405, -1.1433047, -0.35640514, -0.5191339, -0.655544, 1.3520991,
|
0., 0., 0., 0., 0., 1.9345565f,
|
||||||
0.42289692, -0.3171452, -0.3922639, 0., 0., 0., 0.,
|
-0.83817405f, -1.1433047f, -0.35640514f, -0.5191339f, -0.655544f, 1.3520991f,
|
||||||
0., 0., 0., 0., 0., 0., -0.24612205,
|
0.42289692f, -0.3171452f, -0.3922639f, 0., 0., 0.,
|
||||||
1.6415757, 0.79883975, -0.18640287, -1.0134869});
|
0., 0., 0., 0., 0., 0.,
|
||||||
|
0., -0.24612205f, 1.6415757f, 0.79883975f, -0.18640287f, -1.0134869f});
|
||||||
// Y_h
|
// Y_h
|
||||||
test_case.add_expected_output<float>(Shape{1, 3, 5},
|
test_case.add_expected_output<float>(Shape{1, 3, 5},
|
||||||
std::vector<float>{-0.9917215,
|
std::vector<float>{-0.9917215f,
|
||||||
0.07583051,
|
0.07583051f,
|
||||||
0.997975,
|
0.997975f,
|
||||||
-0.9315585,
|
-0.9315585f,
|
||||||
-0.7483002,
|
-0.7483002f,
|
||||||
1.536813,
|
1.536813f,
|
||||||
-0.59922504,
|
-0.59922504f,
|
||||||
-0.33637103,
|
-0.33637103f,
|
||||||
-0.7565539,
|
-0.7565539f,
|
||||||
-0.23930266,
|
-0.23930266f,
|
||||||
-0.7844553,
|
-0.7844553f,
|
||||||
1.0393485,
|
1.0393485f,
|
||||||
0.73516595,
|
0.73516595f,
|
||||||
-0.5616293,
|
-0.5616293f,
|
||||||
-0.09489207});
|
-0.09489207f});
|
||||||
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 3);
|
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 3);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -991,29 +993,32 @@ NGRAPH_TEST_F(${BACKEND_NAME}, GRUSequenceOp, onnx_model_gru_bidir_mixed_seq_len
|
|||||||
test_case.add_expected_output<float>(
|
test_case.add_expected_output<float>(
|
||||||
Shape{4, 2, 3, 5},
|
Shape{4, 2, 3, 5},
|
||||||
std::vector<float>{
|
std::vector<float>{
|
||||||
-0.3224981, -0.44282594, 0.7499796, -0.12240417, 0.12079421, 0.02534254, 0.02504561, -0.0463777,
|
-0.3224981f, -0.44282594f, 0.7499796f, -0.12240417f, 0.12079421f, 0.02534254f, 0.02504561f,
|
||||||
0.01204535, -0.01497037, -0.04651929, -0.6264307, 0.7236632, 0.06250653, 0.02594197, 0.0595789,
|
-0.0463777f, 0.01204535f, -0.01497037f, -0.04651929f, -0.6264307f, 0.7236632f, 0.06250653f,
|
||||||
0.40258542, -0.40646964, 0.70320284, -0.02962421, 0.10372428, -0.38378227, -0.4331268, -0.15696645,
|
0.02594197f, 0.0595789f, 0.40258542f, -0.40646964f, 0.70320284f, -0.02962421f, 0.10372428f,
|
||||||
-0.3451503, 0.20918667, -0.59024405, -0.845524, 0.60705113, -0.6336088, -0.0833023, -0.40062034,
|
-0.38378227f, -0.4331268f, -0.15696645f, -0.3451503f, 0.20918667f, -0.59024405f, -0.845524f,
|
||||||
0.7579466, -0.12340625, 0.04415433, -0.24662054, 0.27420586, -0.09122991, -0.22768986, 0.19980887,
|
0.60705113f, -0.6336088f, -0.0833023f, -0.40062034f, 0.7579466f, -0.12340625f, 0.04415433f,
|
||||||
-0.218649, -0.5560231, 0.56177044, -0.25098884, 0.15462328, 0.0409361, 0.17866893, -0.2782218,
|
-0.24662054f, 0.27420586f, -0.09122991f, -0.22768986f, 0.19980887f, -0.218649f, -0.5560231f,
|
||||||
0.27396634, -0.04992082, 0.15353821, -0.4497267, -0.44631857, -0.478926, -0.23017275, 0.25369287,
|
0.56177044f, -0.25098884f, 0.15462328f, 0.0409361f, 0.17866893f, -0.2782218f, 0.27396634f,
|
||||||
-0.7369056, -0.73285, -0.5750758, -0.533177, 0., 0., 0., 0.,
|
-0.04992082f, 0.15353821f, -0.4497267f, -0.44631857f, -0.478926f, -0.23017275f, 0.25369287f,
|
||||||
0., -0.45753813, 0.5987347, -0.07046632, -0.35819566, 0.3916747, -0.18096107, -0.24415034,
|
-0.7369056f, -0.73285f, -0.5750758f, -0.533177f, 0., 0., 0.,
|
||||||
0.38435352, -0.29881003, 0.07738188, 0., 0., 0., 0., 0.,
|
0., 0., -0.45753813f, 0.5987347f, -0.07046632f, -0.35819566f, 0.3916747f,
|
||||||
0.10390212, -0.29646862, -0.20532897, -0.31521815, 0.01049522, 0.19370168, -0.6386781, -0.42919028,
|
-0.18096107f, -0.24415034f, 0.38435352f, -0.29881003f, 0.07738188f, 0., 0.,
|
||||||
-0.47081998, -0.2954276, 0., 0., 0., 0., 0., 0.,
|
0., 0., 0., 0.10390212f, -0.29646862f, -0.20532897f, -0.31521815f,
|
||||||
0., 0., 0., 0., -0.50112087, -0.11085765, 0.5155622, -0.5635352,
|
0.01049522f, 0.19370168f, -0.6386781f, -0.42919028f, -0.47081998f, -0.2954276f, 0.,
|
||||||
0.54762024, 0., 0., 0., 0., 0., 0., 0.,
|
0., 0., 0., 0., 0., 0., 0.,
|
||||||
0., 0., 0., 0.17058733, -0.6941011, -0.27862304, -0.27050856, -0.03864266});
|
0., 0., -0.50112087f, -0.11085765f, 0.5155622f, -0.5635352f, 0.54762024f,
|
||||||
|
0., 0., 0., 0., 0., 0., 0.,
|
||||||
|
0., 0., 0., 0.17058733f, -0.6941011f, -0.27862304f, -0.27050856f,
|
||||||
|
-0.03864266f});
|
||||||
// Y_h
|
// Y_h
|
||||||
test_case.add_expected_output<float>(
|
test_case.add_expected_output<float>(
|
||||||
Shape{2, 3, 5},
|
Shape{2, 3, 5},
|
||||||
std::vector<float>{-0.0833023, -0.40062034, 0.7579466, -0.12340625, 0.04415433, -0.45753813,
|
std::vector<float>{-0.0833023f, -0.40062034f, 0.7579466f, -0.12340625f, 0.04415433f, -0.45753813f,
|
||||||
0.5987347, -0.07046632, -0.35819566, 0.3916747, -0.50112087, -0.11085765,
|
0.5987347f, -0.07046632f, -0.35819566f, 0.3916747f, -0.50112087f, -0.11085765f,
|
||||||
0.5155622, -0.5635352, 0.54762024, 0.0595789, 0.40258542, -0.40646964,
|
0.5155622f, -0.5635352f, 0.54762024f, 0.0595789f, 0.40258542f, -0.40646964f,
|
||||||
0.70320284, -0.02962421, 0.10372428, -0.38378227, -0.4331268, -0.15696645,
|
0.70320284f, -0.02962421f, 0.10372428f, -0.38378227f, -0.4331268f, -0.15696645f,
|
||||||
-0.3451503, 0.20918667, -0.59024405, -0.845524, 0.60705113, -0.6336088});
|
-0.3451503f, 0.20918667f, -0.59024405f, -0.845524f, 0.60705113f, -0.6336088f});
|
||||||
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 4);
|
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 4);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1901,32 +1906,32 @@ NGRAPH_TEST_F(${BACKEND_NAME}, RNNSequenceOp, onnx_model_rnn_reverse_mixed_seq_l
|
|||||||
// Y
|
// Y
|
||||||
test_case.add_expected_output<float>(
|
test_case.add_expected_output<float>(
|
||||||
Shape{4, 1, 3, 5},
|
Shape{4, 1, 3, 5},
|
||||||
std::vector<float>{-0.27398264, 0.96948624, 0.26404798, 0.8068119, 0.99935544, 0.73694086, 0.44305325,
|
std::vector<float>{
|
||||||
-0.9964632, 0.7063714, 0.9999049, -0.7241098, 0.08538079, -0.785874, 0.60833323,
|
-0.27398264f, 0.96948624f, 0.26404798f, 0.8068119f, 0.99935544f, 0.73694086f, 0.44305325f, -0.9964632f,
|
||||||
0.99999666, 0.53703666, 0.0267657, 0.37151086, -0.68740594, 0.9992448, 0.3254757,
|
0.7063714f, 0.9999049f, -0.7241098f, 0.08538079f, -0.785874f, 0.60833323f, 0.99999666f, 0.53703666f,
|
||||||
0.7716811, -0.9996745, 0.9957807, 0.9995338, 0.9997339, 0.9888724, -0.8992324,
|
0.0267657f, 0.37151086f, -0.68740594f, 0.9992448f, 0.3254757f, 0.7716811f, -0.9996745f, 0.9957807f,
|
||||||
-0.797282, 0.98666525, 0., 0., 0., 0., 0.,
|
0.9995338f, 0.9997339f, 0.9888724f, -0.8992324f, -0.797282f, 0.98666525f, 0., 0.,
|
||||||
0.95711637, -0.8986079, -0.99998885, 0.96265936, 0.9380511, -0.86523867, 0.3528558,
|
0., 0., 0., 0.95711637f, -0.8986079f, -0.99998885f, 0.96265936f, 0.9380511f,
|
||||||
-0.99675506, 0.946875, 0.79539406, 0., 0., 0., 0.,
|
-0.86523867f, 0.3528558f, -0.99675506f, 0.946875f, 0.79539406f, 0., 0., 0.,
|
||||||
0., 0., 0., 0., 0., 0., 0.99903,
|
0., 0., 0., 0., 0., 0., 0., 0.99903f,
|
||||||
0.9998094, 0.9499353, 0.6077225, -0.9921822});
|
0.9998094f, 0.9499353f, 0.6077225f, -0.9921822f});
|
||||||
// Y_h
|
// Y_h
|
||||||
test_case.add_expected_output<float>(Shape{1, 3, 5},
|
test_case.add_expected_output<float>(Shape{1, 3, 5},
|
||||||
std::vector<float>{-0.27398264,
|
std::vector<float>{-0.27398264f,
|
||||||
0.96948624,
|
0.96948624f,
|
||||||
0.26404798,
|
0.26404798f,
|
||||||
0.8068119,
|
0.8068119f,
|
||||||
0.99935544,
|
0.99935544f,
|
||||||
0.73694086,
|
0.73694086f,
|
||||||
0.44305325,
|
0.44305325f,
|
||||||
-0.9964632,
|
-0.9964632f,
|
||||||
0.7063714,
|
0.7063714f,
|
||||||
0.9999049,
|
0.9999049f,
|
||||||
-0.7241098,
|
-0.7241098f,
|
||||||
0.08538079,
|
0.08538079f,
|
||||||
-0.785874,
|
-0.785874f,
|
||||||
0.60833323,
|
0.60833323f,
|
||||||
0.99999666});
|
0.99999666f});
|
||||||
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 4);
|
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 4);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1942,29 +1947,32 @@ NGRAPH_TEST_F(${BACKEND_NAME}, RNNSequenceOp, onnx_model_rnn_bidir_mixed_seq_len
|
|||||||
test_case.add_expected_output<float>(
|
test_case.add_expected_output<float>(
|
||||||
Shape{4, 2, 3, 5},
|
Shape{4, 2, 3, 5},
|
||||||
std::vector<float>{
|
std::vector<float>{
|
||||||
0.02254748, 0.15776646, -0.8229023, 0.19205809, 0.76984656, -0.00603169, -0.0286147, 0.04512155,
|
0.02254748f, 0.15776646f, -0.8229023f, 0.19205809f, 0.76984656f, -0.00603169f, -0.0286147f,
|
||||||
-0.0011912, -0.02572936, -0.13703543, -0.49651444, -0.78868157, 0.3566854, 0.8758509, -0.99602485,
|
0.04512155f, -0.0011912f, -0.02572936f, -0.13703543f, -0.49651444f, -0.78868157f, 0.3566854f,
|
||||||
-0.8151508, -0.5803147, 0.4985683, 0.30210292, 0.11550081, -0.30236644, 0.99622667, -0.8732492,
|
0.8758509f, -0.99602485f, -0.8151508f, -0.5803147f, 0.4985683f, 0.30210292f, 0.11550081f,
|
||||||
-0.43772405, -0.9284624, -0.5595875, 0.9986867, -0.18373811, 0.8451735, -0.43823165, -0.1904698,
|
-0.30236644f, 0.99622667f, -0.8732492f, -0.43772405f, -0.9284624f, -0.5595875f, 0.9986867f,
|
||||||
0.8320786, 0.9830735, 0.61861455, 0.19109797, 0.64407, 0.00962067, -0.32752877, -0.5050589,
|
-0.18373811f, 0.8451735f, -0.43823165f, -0.1904698f, 0.8320786f, 0.9830735f, 0.61861455f,
|
||||||
-0.23455954, 0.9517933, 0.9050665, 0.91091585, -0.77941567, -0.71390504, -0.24422187, -0.38115412,
|
0.19109797f, 0.64407f, 0.00962067f, -0.32752877f, -0.5050589f, -0.23455954f, 0.9517933f,
|
||||||
0.3462553, 0.44084883, -0.81455964, -0.23556596, 0.85043025, -0.7840209, -0.82087713, -0.8349008,
|
0.9050665f, 0.91091585f, -0.77941567f, -0.71390504f, -0.24422187f, -0.38115412f, 0.3462553f,
|
||||||
-0.7880142, 0.99017143, -0.9816452, -0.93827677, 0., 0., 0., 0.,
|
0.44084883f, -0.81455964f, -0.23556596f, 0.85043025f, -0.7840209f, -0.82087713f, -0.8349008f,
|
||||||
0., 0.28117967, 0.20685148, 0.01166701, -0.5441828, -0.5463747, -0.85301256, 0.52109087,
|
-0.7880142f, 0.99017143f, -0.9816452f, -0.93827677f, 0., 0., 0.,
|
||||||
-0.8317892, -0.9676957, -0.30258918, 0., 0., 0., 0., 0.,
|
0., 0., 0.28117967f, 0.20685148f, 0.01166701f, -0.5441828f, -0.5463747f,
|
||||||
-0.7010546, -0.3106169, -0.04788882, -0.21822351, -0.33518708, -0.9073148, 0.16276085, 0.9518349,
|
-0.85301256f, 0.52109087f, -0.8317892f, -0.9676957f, -0.30258918f, 0., 0.,
|
||||||
-0.8635942, -0.92539954, 0., 0., 0., 0., 0., 0.,
|
0., 0., 0., -0.7010546f, -0.3106169f, -0.04788882f, -0.21822351f,
|
||||||
0., 0., 0., 0., 0.9948462, -0.6242633, -0.19065344, -0.36072153,
|
-0.33518708f, -0.9073148f, 0.16276085f, 0.9518349f, -0.8635942f, -0.92539954f, 0.,
|
||||||
-0.99407107, 0., 0., 0., 0., 0., 0., 0.,
|
0., 0., 0., 0., 0., 0., 0.,
|
||||||
0., 0., 0., -0.9957684, -0.7924, -0.40261805, -0.34061068, -0.55580306});
|
0., 0., 0.9948462f, -0.6242633f, -0.19065344f, -0.36072153f, -0.99407107f,
|
||||||
|
0., 0., 0., 0., 0., 0., 0.,
|
||||||
|
0., 0., 0., -0.9957684f, -0.7924f, -0.40261805f, -0.34061068f,
|
||||||
|
-0.55580306f});
|
||||||
// Y_h
|
// Y_h
|
||||||
test_case.add_expected_output<float>(
|
test_case.add_expected_output<float>(
|
||||||
Shape{2, 3, 5},
|
Shape{2, 3, 5},
|
||||||
std::vector<float>{-0.43823165, -0.1904698, 0.8320786, 0.9830735, 0.61861455, 0.28117967,
|
std::vector<float>{-0.43823165f, -0.1904698f, 0.8320786f, 0.9830735f, 0.61861455f, 0.28117967f,
|
||||||
0.20685148, 0.01166701, -0.5441828, -0.5463747, 0.9948462, -0.6242633,
|
0.20685148f, 0.01166701f, -0.5441828f, -0.5463747f, 0.9948462f, -0.6242633f,
|
||||||
-0.19065344, -0.36072153, -0.99407107, -0.99602485, -0.8151508, -0.5803147,
|
-0.19065344f, -0.36072153f, -0.99407107f, -0.99602485f, -0.8151508f, -0.5803147f,
|
||||||
0.4985683, 0.30210292, 0.11550081, -0.30236644, 0.99622667, -0.8732492,
|
0.4985683f, 0.30210292f, 0.11550081f, -0.30236644f, 0.99622667f, -0.8732492f,
|
||||||
-0.43772405, -0.9284624, -0.5595875, 0.9986867, -0.18373811, 0.8451735});
|
-0.43772405f, -0.9284624f, -0.5595875f, 0.9986867f, -0.18373811f, 0.8451735f});
|
||||||
|
|
||||||
// loosest match @ mantissa bit:
|
// loosest match @ mantissa bit:
|
||||||
// 16 or next bit (0.01166688557714223862 vs 0.01166701037436723709)
|
// 16 or next bit (0.01166688557714223862 vs 0.01166701037436723709)
|
||||||
|
@ -42,7 +42,7 @@ bool after_func_expand_name_comp(std::string lhs, std::string rhs) {
|
|||||||
if (is_hex_symbol(name[i])) {
|
if (is_hex_symbol(name[i])) {
|
||||||
++founded_hex;
|
++founded_hex;
|
||||||
if (cut_begin == -1) {
|
if (cut_begin == -1) {
|
||||||
cut_begin = i;
|
cut_begin = static_cast<int>(i);
|
||||||
}
|
}
|
||||||
if (founded_hex >= min_address) {
|
if (founded_hex >= min_address) {
|
||||||
cut_length = founded_hex;
|
cut_length = founded_hex;
|
||||||
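The cast added in the hunk above targets the size_t-to-int case: the loop index is a std::size_t while cut_begin is an int, which MSVC on x64 reports as C4267 ("conversion from 'size_t' to 'int', possible loss of data"). A minimal sketch of the same fix under that assumption; the string content and names are made up for illustration:

    #include <cstddef>
    #include <string>

    int main() {
        const std::string name = "tensor_0x1f2e3d";
        int cut_begin = -1;

        for (std::size_t i = 0; i < name.size(); ++i) {
            if (name[i] == '0') {
                // Plain "cut_begin = i;" narrows size_t to int and warns on
                // 64-bit MSVC builds; the explicit cast states that the
                // index is known to fit and keeps the build warning-free.
                cut_begin = static_cast<int>(i);
                break;
            }
        }
        return cut_begin >= 0 ? 0 : 1;
    }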
|