[ONNX] Extend ONNX Frontend with BlackmanWindow, HammingWindow and HannWindow operators (#19428)

* ONNX BlackmanWindow enabled

* added a test periodic

* Add the license statement

* ONNX HammingWindow, HannWindow enabled

also added basic tests for each

* minor tests added

* made reviewed changes

* made reviewed changes

used output_datatype directly, returned y_values directly

* fixed clang-format

* add OPENVINO_SUPPRESS_DEPRECATED_START

* include math.h

* float fix

* fix

* fix namespace to set_1

* test fixes

* fix cast to output_datatype

* fix, replace cast with ov::convert

* fix, use element::f32

* major fixes

* fixes

* Update onnx_import.in.cpp

* Update onnx_import.in.cpp

---------

Co-authored-by: Przemyslaw Wysocki <przemyslaw.wysocki@intel.com>
This commit is contained in:
Siddhant Chauhan 2023-10-24 23:15:50 +05:30 committed by GitHub
parent 22184c32f4
commit eb55360f10
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 745 additions and 6 deletions

View File

@ -0,0 +1,86 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "op/blackmanwindow.hpp"
#include <memory>
#include "default_opset.hpp"
#include "utils/common.hpp"
#define _USE_MATH_DEFINES
#include <math.h>
OPENVINO_SUPPRESS_DEPRECATED_START
namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
OutputVector blackmanwindow(const Node& node) {
    // Builds an OV subgraph implementing ONNX BlackmanWindow-17:
    //   w[n] = a_0 + a_1*cos(2*pi*n/N) + a_2*cos(4*pi*n/N)
    // where N = size (periodic) or size - 1 (symmetric).
    const auto size = node.get_ng_inputs().at(0);
    // "output_datatype" carries an ONNX TensorProto type id; the default 1 maps to f32.
    const auto output_datatype =
        common::get_ngraph_element_type(node.get_attribute_value<int64_t>("output_datatype", 1));
    const bool periodic = node.get_attribute_value<int64_t>("periodic", 1) == 1;

    // Weights as described in ONNX BlackmanWindow docs
    // https://github.com/onnx/onnx/blob/main/docs/Operators.md#blackmanwindow
    const auto float_size = std::make_shared<default_opset::Convert>(size, ov::element::f32);
    const auto a_0 =
        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.42f});
    const auto a_1 =
        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{-0.50f});
    const auto a_2 =
        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.08f});

    const auto start =
        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.0f});
    const auto one_const =
        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f});
    const auto two_const =
        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{2.0f});
    const auto four_const =
        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{4.0f});
    // n = 0, 1, ..., size-1 generated directly in f32.
    const auto range = std::make_shared<default_opset::Range>(start, size, one_const, ov::element::f32);
    const auto pi =
        default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector<float>{static_cast<float>(M_PI)});

    // Periodic windows divide the phase by N, symmetric ones by N - 1;
    // only the denominator differs between the two modes.
    std::shared_ptr<ov::Node> denominator;
    if (periodic) {
        denominator = float_size;
    } else {
        denominator = std::make_shared<default_opset::Subtract>(float_size, one_const);
    }
    const auto factor_1 = std::make_shared<default_opset::Multiply>(
        range,
        std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
                                                denominator));
    const auto factor_2 = std::make_shared<default_opset::Multiply>(
        range,
        std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, four_const),
                                                denominator));

    const auto cos_1 = std::make_shared<default_opset::Cos>(factor_1);
    const auto cos_2 = std::make_shared<default_opset::Cos>(factor_2);
    const auto scaled_cos_1 = std::make_shared<default_opset::Multiply>(cos_1, a_1);
    const auto scaled_cos_2 = std::make_shared<default_opset::Multiply>(cos_2, a_2);
    const auto y_values =
        std::make_shared<default_opset::Add>(std::make_shared<default_opset::Add>(a_0, scaled_cos_1), scaled_cos_2);

    // The window is computed in f32; convert only when the requested type differs.
    if (output_datatype == element::f32) {
        return {y_values};
    } else {
        return {std::make_shared<default_opset::Convert>(y_values, output_datatype)};
    }
}
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph
OPENVINO_SUPPRESS_DEPRECATED_END

View File

@ -0,0 +1,23 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include "openvino/core/deprecated.hpp"
OPENVINO_SUPPRESS_DEPRECATED_START
#include "ngraph/node.hpp"
#include "onnx_import/core/node.hpp"
namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
/// \brief Importer entry point for the ONNX BlackmanWindow operator:
///        builds the window-generating subgraph from the node's `size` input.
OutputVector blackmanwindow(const Node& node);
}  // namespace set_1
}  // namespace op
}  // namespace onnx_import
}  // namespace ngraph
OPENVINO_SUPPRESS_DEPRECATED_END

View File

@ -0,0 +1,72 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "op/hammingwindow.hpp"
#include <memory>
#include "default_opset.hpp"
#include "utils/common.hpp"
#define _USE_MATH_DEFINES
#include <math.h>
OPENVINO_SUPPRESS_DEPRECATED_START
namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
OutputVector hammingwindow(const Node& node) {
    // Builds an OV subgraph implementing ONNX HammingWindow-17:
    //   w[n] = a_0 - a_1*cos(2*pi*n/N)
    // where N = size (periodic) or size - 1 (symmetric).
    const auto size = node.get_ng_inputs().at(0);
    // "output_datatype" carries an ONNX TensorProto type id; the default 1 maps to f32.
    const auto output_datatype =
        common::get_ngraph_element_type(node.get_attribute_value<int64_t>("output_datatype", 1));
    const bool periodic = node.get_attribute_value<int64_t>("periodic", 1) == 1;

    // Weights as described in ONNX HammingWindow docs
    // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hammingwindow
    // a_0 = 25/46 and a_1 = 1 - a_0 are folded to f32 constants at import time
    // instead of emitting runtime Divide/Subtract nodes over constants.
    const auto float_size = std::make_shared<default_opset::Convert>(size, ov::element::f32);
    const auto a_0 = std::make_shared<default_opset::Constant>(ov::element::f32,
                                                               ov::Shape(),
                                                               std::vector<float>{25.0f / 46.0f});
    const auto a_1 = std::make_shared<default_opset::Constant>(ov::element::f32,
                                                               ov::Shape(),
                                                               std::vector<float>{1.0f - 25.0f / 46.0f});
    const auto start =
        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.0f});
    const auto one_const =
        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f});
    const auto two_const =
        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{2.0f});
    // n = 0, 1, ..., size-1 generated directly in f32.
    const auto range = std::make_shared<default_opset::Range>(start, size, one_const, ov::element::f32);
    const auto pi =
        default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector<float>{static_cast<float>(M_PI)});

    // Periodic windows divide the phase by N, symmetric ones by N - 1.
    std::shared_ptr<ov::Node> denominator;
    if (periodic) {
        denominator = float_size;
    } else {
        denominator = std::make_shared<default_opset::Subtract>(float_size, one_const);
    }
    const auto factor = std::make_shared<default_opset::Multiply>(
        range,
        std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
                                                denominator));

    const auto cos = std::make_shared<default_opset::Cos>(factor);
    const auto scaled_cos = std::make_shared<default_opset::Multiply>(cos, a_1);
    const auto y_values = std::make_shared<default_opset::Subtract>(a_0, scaled_cos);

    // The window is computed in f32; convert only when the requested type differs.
    if (output_datatype == element::f32) {
        return {y_values};
    } else {
        return {std::make_shared<default_opset::Convert>(y_values, output_datatype)};
    }
}
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph
OPENVINO_SUPPRESS_DEPRECATED_END

View File

@ -0,0 +1,23 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include "openvino/core/deprecated.hpp"
OPENVINO_SUPPRESS_DEPRECATED_START
#include "ngraph/node.hpp"
#include "onnx_import/core/node.hpp"
namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
/// \brief Importer entry point for the ONNX HammingWindow operator:
///        builds the window-generating subgraph from the node's `size` input.
OutputVector hammingwindow(const Node& node);
}  // namespace set_1
}  // namespace op
}  // namespace onnx_import
}  // namespace ngraph
OPENVINO_SUPPRESS_DEPRECATED_END

View File

@ -0,0 +1,68 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "op/hannwindow.hpp"
#include <memory>
#include "default_opset.hpp"
#include "utils/common.hpp"
#define _USE_MATH_DEFINES
#include <math.h>
OPENVINO_SUPPRESS_DEPRECATED_START
namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
OutputVector hannwindow(const Node& node) {
    // Builds an OV subgraph implementing ONNX HannWindow-17:
    //   w[n] = a_0 - a_1*cos(2*pi*n/N), with a_0 = a_1 = 0.5
    // where N = size (periodic) or size - 1 (symmetric).
    const auto size = node.get_ng_inputs().at(0);
    // "output_datatype" carries an ONNX TensorProto type id; the default 1 maps to f32.
    const auto output_datatype =
        common::get_ngraph_element_type(node.get_attribute_value<int64_t>("output_datatype", 1));
    const bool periodic = node.get_attribute_value<int64_t>("periodic", 1) == 1;

    // Weights as described in ONNX HannWindow docs
    // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hannwindow
    const auto float_size = std::make_shared<default_opset::Convert>(size, ov::element::f32);
    const auto a_0 = std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.5f});
    // Hann shares the same coefficient for both terms.
    const auto a_1 = a_0;

    const auto start =
        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.0f});
    const auto one_const =
        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f});
    const auto two_const =
        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{2.0f});
    // n = 0, 1, ..., size-1 generated directly in f32.
    const auto range = std::make_shared<default_opset::Range>(start, size, one_const, ov::element::f32);
    const auto pi =
        default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector<float>{static_cast<float>(M_PI)});

    // Periodic windows divide the phase by N, symmetric ones by N - 1.
    std::shared_ptr<ov::Node> denominator;
    if (periodic) {
        denominator = float_size;
    } else {
        denominator = std::make_shared<default_opset::Subtract>(float_size, one_const);
    }
    const auto factor = std::make_shared<default_opset::Multiply>(
        range,
        std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
                                                denominator));

    const auto cos = std::make_shared<default_opset::Cos>(factor);
    const auto scaled_cos = std::make_shared<default_opset::Multiply>(cos, a_1);
    const auto y_values = std::make_shared<default_opset::Subtract>(a_0, scaled_cos);

    // The window is computed in f32; convert only when the requested type differs.
    if (output_datatype == element::f32) {
        return {y_values};
    } else {
        return {std::make_shared<default_opset::Convert>(y_values, output_datatype)};
    }
}
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph
OPENVINO_SUPPRESS_DEPRECATED_END

View File

@ -0,0 +1,23 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include "openvino/core/deprecated.hpp"
OPENVINO_SUPPRESS_DEPRECATED_START
#include "ngraph/node.hpp"
#include "onnx_import/core/node.hpp"
namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
/// \brief Importer entry point for the ONNX HannWindow operator:
///        builds the window-generating subgraph from the node's `size` input.
OutputVector hannwindow(const Node& node);
}  // namespace set_1
}  // namespace op
}  // namespace onnx_import
}  // namespace ngraph
OPENVINO_SUPPRESS_DEPRECATED_END

View File

@ -29,6 +29,7 @@
#include "op/average_pool.hpp"
#include "op/batch_norm.hpp"
#include "op/bitshift.hpp"
#include "op/blackmanwindow.hpp"
#include "op/cast.hpp"
#include "op/cast_like.hpp"
#include "op/ceil.hpp"
@ -75,6 +76,8 @@
#include "op/greater.hpp"
#include "op/grid_sample.hpp"
#include "op/gru.hpp"
#include "op/hammingwindow.hpp"
#include "op/hannwindow.hpp"
#include "op/hard_sigmoid.hpp"
#include "op/hard_swish.hpp"
#include "op/hardmax.hpp"
@ -345,6 +348,7 @@ OperatorsBridge::OperatorsBridge() {
REGISTER_OPERATOR("BatchNormalization", 1, batch_norm);
REGISTER_OPERATOR("BatchNormalization", 7, batch_norm);
REGISTER_OPERATOR("BitShift", 1, bitshift);
REGISTER_OPERATOR("BlackmanWindow", 1, blackmanwindow);
REGISTER_OPERATOR("Cast", 1, cast);
REGISTER_OPERATOR("CastLike", 1, cast_like);
REGISTER_OPERATOR("Ceil", 1, ceil);
@ -392,6 +396,8 @@ OperatorsBridge::OperatorsBridge() {
REGISTER_OPERATOR("Greater", 1, greater);
REGISTER_OPERATOR("GridSample", 1, grid_sample);
REGISTER_OPERATOR("GRU", 1, gru);
REGISTER_OPERATOR("HannWindow", 1, hannwindow);
REGISTER_OPERATOR("HammingWindow", 1, hammingwindow);
REGISTER_OPERATOR("Hardmax", 1, hardmax);
REGISTER_OPERATOR("Hardmax", 13, hardmax);
REGISTER_OPERATOR("HardSigmoid", 1, hard_sigmoid);

View File

@ -0,0 +1,46 @@
ir_version: 7
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "size"
output: "y"
op_type: "BlackmanWindow"
attribute {
name: "output_datatype"
i: 1 # Use 1 for f32
type: INT
}
attribute {
name: "periodic"
i: 1 # Set to 1 for periodic, 0 for non-periodic
type: INT
}
}
name: "test_blackmanwindow_periodic"
input {
name: "size"
type {
tensor_type {
elem_type: 7 # INT64
shape {
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1 # FLOAT
shape {
dim {
dim_value: 10 # Window length matching the test's size input of 10
}
}
}
}
}
}
opset_import {
version: 17
}

View File

@ -0,0 +1,46 @@
ir_version: 7
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "size"
output: "y"
op_type: "BlackmanWindow"
attribute {
name: "output_datatype"
i: 1 # Use 1 for f32
type: INT
}
attribute {
name: "periodic"
i: 0 # Set to 1 for periodic, 0 for non-periodic
type: INT
}
}
name: "test_blackmanwindow_symmetric"
input {
name: "size"
type {
tensor_type {
elem_type: 7 # INT64
shape {
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1 # FLOAT
shape {
dim {
dim_value: 10 # Window length matching the test's size input of 10
}
}
}
}
}
}
opset_import {
version: 17
}

View File

@ -0,0 +1,46 @@
ir_version: 7
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "size"
output: "y"
op_type: "HammingWindow"
attribute {
name: "output_datatype"
i: 1 # Use 1 for f32
type: INT
}
attribute {
name: "periodic"
i: 1 # Set to 1 for periodic, 0 for non-periodic
type: INT
}
}
name: "test_hammingwindow_periodic"
input {
name: "size"
type {
tensor_type {
elem_type: 7 # INT64
shape {
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1 # FLOAT
shape {
dim {
dim_value: 10 # Window length matching the test's size input of 10
}
}
}
}
}
}
opset_import {
version: 17
}

View File

@ -0,0 +1,46 @@
ir_version: 7
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "size"
output: "y"
op_type: "HammingWindow"
attribute {
name: "output_datatype"
i: 1 # Use 1 for f32
type: INT
}
attribute {
name: "periodic"
i: 0 # Set to 0 for symmetric, 1 for periodic
type: INT
}
}
name: "test_hammingwindow_symmetric"
input {
name: "size"
type {
tensor_type {
elem_type: 7 # INT64
shape {
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1 # FLOAT
shape {
dim {
dim_value: 10 # Window length matching the test's size input of 10
}
}
}
}
}
}
opset_import {
version: 17
}

View File

@ -0,0 +1,46 @@
ir_version: 7
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "size"
output: "y"
op_type: "HannWindow"
attribute {
name: "output_datatype"
i: 1 # Use 1 for f32
type: INT
}
attribute {
name: "periodic"
i: 1 # Set to 1 for periodic, 0 for non-periodic
type: INT
}
}
name: "test_hannwindow_periodic"
input {
name: "size"
type {
tensor_type {
elem_type: 7 # INT64
shape {
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1 # FLOAT
shape {
dim {
dim_value: 10 # Window length matching the test's size input of 10
}
}
}
}
}
}
opset_import {
version: 17
}

View File

@ -0,0 +1,46 @@
ir_version: 7
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "size"
output: "y"
op_type: "HannWindow"
attribute {
name: "output_datatype"
i: 1 # Use 1 for f32
type: INT
}
attribute {
name: "periodic"
i: 0 # Set to 0 for symmetric, 1 for periodic
type: INT
}
}
name: "test_hannwindow_symmetric"
input {
name: "size"
type {
tensor_type {
elem_type: 7 # INT64
shape {
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1 # FLOAT
shape {
dim {
dim_value: 10 # Window length matching the test's size input of 10
}
}
}
}
}
}
opset_import {
version: 17
}

View File

@ -6716,3 +6716,171 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_unique_3d_with_duplicates_and_axis_2)
    test_case.run();
}
// Imports blackmanwindow_periodic.onnx and checks a size-10 periodic Blackman
// window (0.42 - 0.5*cos(2*pi*n/N) + 0.08*cos(4*pi*n/N), N = size) in f32.
OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_periodic) {
    auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
                                                                        SERIALIZED_ZOO,
                                                                        "onnx/blackmanwindow_periodic.onnx"));
    auto test_case = ov::test::TestCase(function, s_device);
    test_case.add_input<int64_t>({10});
    test_case.add_expected_output<float>(Shape{10},
                                         {-0.000000014901161f,
                                          0.040212844f,
                                          0.20077012f,
                                          0.50978714f,
                                          0.8492299f,
                                          0.99999994f,
                                          0.84922975f,
                                          0.5097869f,
                                          0.20077008f,
                                          0.040212862f});
    // GPU has an accuracy drop, need to use different tolerance
    if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) {
        test_case.run_with_tolerance_as_fp();
    } else {
        test_case.run_with_tolerance_as_fp(0.01f);
    }
}
// Imports blackmanwindow_symmetric.onnx and checks a size-10 symmetric Blackman
// window (same formula as periodic but with N = size - 1) in f32.
OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_symmetric) {
    auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
                                                                        SERIALIZED_ZOO,
                                                                        "onnx/blackmanwindow_symmetric.onnx"));
    auto test_case = ov::test::TestCase(function, s_device);
    test_case.add_input<int64_t>({10});
    test_case.add_expected_output<float>(Shape{10},
                                         {-0.00000001f,
                                          0.05086961f,
                                          0.25800052f,
                                          0.63000000f,
                                          0.95112991f,
                                          0.95112979f,
                                          0.62999994f,
                                          0.25800028f,
                                          0.05086958f,
                                          -0.00000001f});
    // GPU has an accuracy drop, need to use different tolerance
    if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) {
        test_case.run_with_tolerance_as_fp();
    } else {
        test_case.run_with_tolerance_as_fp(0.01f);
    }
}
// Imports hammingwindow_periodic.onnx and checks a size-10 periodic Hamming
// window (25/46 - 21/46*cos(2*pi*n/N), N = size) in f32.
OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_periodic) {
    auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
                                                                        SERIALIZED_ZOO,
                                                                        "onnx/hammingwindow_periodic.onnx"));
    auto test_case = ov::test::TestCase(function, s_device);
    test_case.add_input<int64_t>({10});
    test_case.add_expected_output<float>(Shape{10},
                                         {0.08695650f,
                                          0.17414439f,
                                          0.40240526f,
                                          0.68455124f,
                                          0.91281211f,
                                          1.00000000f,
                                          0.91281211f,
                                          0.68455112f,
                                          0.40240520f,
                                          0.17414442f});
    // GPU has an accuracy drop, need to use different tolerance
    if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) {
        test_case.run_with_tolerance_as_fp();
    } else {
        test_case.run_with_tolerance_as_fp(0.01f);
    }
}
// Imports hammingwindow_symmetric.onnx and checks a size-10 symmetric Hamming
// window (same formula as periodic but with N = size - 1) in f32.
OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_symmetric) {
    auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
                                                                        SERIALIZED_ZOO,
                                                                        "onnx/hammingwindow_symmetric.onnx"));
    auto test_case = ov::test::TestCase(function, s_device);
    test_case.add_input<int64_t>({10});
    test_case.add_expected_output<float>(Shape{10},
                                         {0.08695650f,
                                          0.19376230f,
                                          0.46420413f,
                                          0.77173913f,
                                          0.97246838f,
                                          0.97246838f,
                                          0.77173907f,
                                          0.46420389f,
                                          0.19376221f,
                                          0.08695650f});
    // GPU has an accuracy drop, need to use different tolerance
    if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) {
        test_case.run_with_tolerance_as_fp();
    } else {
        test_case.run_with_tolerance_as_fp(0.01f);
    }
}
// Imports hannwindow_periodic.onnx and checks a size-10 periodic Hann
// window (0.5 - 0.5*cos(2*pi*n/N), N = size) in f32.
OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_periodic) {
    auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
                                                                        SERIALIZED_ZOO,
                                                                        "onnx/hannwindow_periodic.onnx"));
    auto test_case = ov::test::TestCase(function, s_device);
    test_case.add_input<int64_t>({10});
    test_case.add_expected_output<float>(Shape{10},
                                         {0.00000000f,
                                          0.09549150f,
                                          0.34549153f,
                                          0.65450853f,
                                          0.90450847f,
                                          1.00000000f,
                                          0.90450847f,
                                          0.65450835f,
                                          0.34549144f,
                                          0.09549153f});
    // GPU has an accuracy drop, need to use different tolerance
    if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) {
        test_case.run_with_tolerance_as_fp();
    } else {
        test_case.run_with_tolerance_as_fp(0.01f);
    }
}
// Imports hannwindow_symmetric.onnx and checks a size-10 symmetric Hann
// window (same formula as periodic but with N = size - 1) in f32.
OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_symmetric) {
    auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
                                                                        SERIALIZED_ZOO,
                                                                        "onnx/hannwindow_symmetric.onnx"));
    auto test_case = ov::test::TestCase(function, s_device);
    test_case.add_input<int64_t>({10});
    test_case.add_expected_output<float>(Shape{10},
                                         {0.00000000f,
                                          0.11697778f,
                                          0.41317594f,
                                          0.75000000f,
                                          0.96984637f,
                                          0.96984625f,
                                          0.74999994f,
                                          0.41317570f,
                                          0.11697769f,
                                          0.00000000f});
    // GPU has an accuracy drop, need to use different tolerance
    if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) {
        test_case.run_with_tolerance_as_fp();
    } else {
        test_case.run_with_tolerance_as_fp(0.01f);
    }
}

View File

@ -378,12 +378,6 @@ tests_expected_to_fail = [
    ),
    (
        xfail_issue_90649,
"OnnxBackendNodeModelTest.test_blackmanwindow_cpu",
"OnnxBackendNodeModelTest.test_blackmanwindow_symmetric_cpu",
"OnnxBackendNodeModelTest.test_hammingwindow_cpu",
"OnnxBackendNodeModelTest.test_hammingwindow_symmetric_cpu",
"OnnxBackendNodeModelTest.test_hannwindow_cpu",
"OnnxBackendNodeModelTest.test_hannwindow_symmetric_cpu",
        "OnnxBackendNodeModelTest.test_melweightmatrix_cpu",
        "OnnxBackendNodeModelTest.test_sequence_map_add_1_sequence_1_tensor_cpu",
        "OnnxBackendNodeModelTest.test_sequence_map_add_2_sequences_cpu",