From 358cd4b709c37d2cd6b15ffe0121be9f6b989ac7 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Tue, 12 Dec 2023 22:03:09 +0100 Subject: [PATCH] Refactor GPU single layer tests (#21527) * Refactor GPU single layer tests --- .../skip_tests_config.cpp | 6 +- .../dynamic/batch_to_space.cpp | 75 ++-- .../single_layer_tests/dynamic/broadcast.cpp | 72 ++-- .../dynamic/convolution.cpp | 194 +++++----- .../dynamic/convolution_backprop_data.cpp | 179 ++++------ .../single_layer_tests/dynamic/cum_sum.cpp | 71 ++-- .../dynamic/depth_to_space.cpp | 66 ++-- .../dynamic/detection_output.cpp | 59 +-- .../single_layer_tests/dynamic/gather.cpp | 72 ++-- .../dynamic/gather_elements.cpp | 58 ++- .../single_layer_tests/dynamic/gather_nd.cpp | 61 ++-- .../dynamic/gather_tree.cpp | 90 +++-- .../dynamic/grid_sample.cpp | 49 ++- .../group_convolution_backprop_data.cpp | 128 +++---- .../dynamic/groupconvolution.cpp | 145 ++++---- .../dynamic/interpolate.cpp | 336 +++++++++--------- .../single_layer_tests/dynamic/matmul.cpp | 300 ++++++++-------- .../single_layer_tests/dynamic/mvn.cpp | 49 ++- .../dynamic/non_max_suppression.cpp | 98 +++-- .../dynamic/normalize_l2.cpp | 42 ++- .../single_layer_tests/dynamic/pad.cpp | 98 +++-- .../single_layer_tests/dynamic/pooling.cpp | 191 +++++----- .../single_layer_tests/dynamic/prior_box.cpp | 90 +++-- .../dynamic/random_uniform.cpp | 92 +++-- .../single_layer_tests/dynamic/range.cpp | 133 ++++--- .../single_layer_tests/dynamic/reduce.cpp | 119 +++---- .../dynamic/region_yolo.cpp | 81 ++--- .../single_layer_tests/dynamic/reorg_yolo.cpp | 59 ++- .../dynamic/roi_pooling.cpp | 159 ++++----- .../dynamic/scatter_nd_update.cpp | 105 +++--- .../single_layer_tests/dynamic/select.cpp | 88 ++--- .../single_layer_tests/dynamic/shapeof.cpp | 142 ++++---- .../single_layer_tests/dynamic/softmax.cpp | 57 ++- .../dynamic/space_to_batch.cpp | 98 +++-- .../dynamic/space_to_depth.cpp | 83 ++--- .../single_layer_tests/dynamic/split.cpp | 227 ++++++------ 
.../dynamic/strided_slice.cpp | 148 ++++---- .../single_layer_tests/dynamic/tile.cpp | 93 +++-- .../single_layer_tests/dynamic/top_k.cpp | 166 +++++---- .../single_layer_tests/dynamic/unique.cpp | 89 +++-- 40 files changed, 2031 insertions(+), 2437 deletions(-) diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index d96723d3f47..24e9c674b7b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -86,10 +86,10 @@ std::vector disabledTestPatterns() { // unsupported metrics R"(.*nightly_HeteroAutoBatchOVGetMetricPropsTest.*OVGetMetricPropsTest.*(FULL_DEVICE_NAME_with_DEVICE_ID|AVAILABLE_DEVICES|DEVICE_UUID|OPTIMIZATION_CAPABILITIES|MAX_BATCH_SIZE|DEVICE_GOPS|DEVICE_TYPE|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", // Issue: 111437 - R"(.*smoke_Deconv_2D_Dynamic_.*FP32/DeconvolutionLayerGPUTest.CompareWithRefs.*)", - R"(.*smoke_GroupDeconv_2D_Dynamic_.*FP32/GroupDeconvolutionLayerGPUTest.CompareWithRefs.*)", + R"(.*smoke_Deconv_2D_Dynamic_.*FP32/DeconvolutionLayerGPUTest.Inference.*)", + R"(.*smoke_GroupDeconv_2D_Dynamic_.*FP32/GroupDeconvolutionLayerGPUTest.Inference.*)", // Issue: 111440 - R"(.*smoke_set1/GatherElementsGPUTest.CompareWithRefs.*)", + R"(.*smoke_set1/GatherElementsGPUTest.Inference.*)", // New plugin work with tensors, so it means that blob in old API can have different pointers R"(.*InferRequestIOBBlobTest.*secondCallGetInputDoNotReAllocateData.*)", R"(.*InferRequestIOBBlobTest.*secondCallGetOutputDoNotReAllocateData.*)", diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/batch_to_space.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/batch_to_space.cpp index 9c1b6c5796a..15ada4b92c3 100644 --- 
a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/batch_to_space.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/batch_to_space.cpp @@ -2,16 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/batch_to_space.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/test_constants.hpp" #include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/batch_to_space.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; struct BatchToSpaceParams { std::vector block; @@ -22,22 +23,21 @@ struct BatchToSpaceParams { typedef std::tuple< InputShape, // Input shapes BatchToSpaceParams, - ElementType, // Element type - ngraph::helpers::InputLayerType, // block/begin/end input type + ov::element::Type, // Element type + ov::test::utils::InputLayerType, // block/begin/end input type std::map // Additional network configuration > BatchToSpaceParamsLayerParamSet; class BatchToSpaceLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { InputShape shapes; BatchToSpaceParams params; - ElementType elementType; - ngraph::helpers::InputLayerType restInputType; - TargetDevice targetDevice; + ov::element::Type model_type; + ov::test::utils::InputLayerType restInputType; std::map additionalConfig; - std::tie(shapes, params, elementType, restInputType, additionalConfig) = obj.param; + std::tie(shapes, params, model_type, restInputType, additionalConfig) = obj.param; 
std::ostringstream results; results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_"; @@ -45,7 +45,7 @@ public: for (const auto& item : shapes.second) { results << ov::test::utils::vec2str(item) << "_"; } - results << "netPRC=" << elementType << "_"; + results << "netPRC=" << model_type << "_"; results << "block=" << ov::test::utils::vec2str(params.block) << "_"; results << "begin=" << ov::test::utils::vec2str(params.begin) << "_"; results << "end=" << ov::test::utils::vec2str(params.end) << "_"; @@ -59,7 +59,7 @@ public: return results.str(); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { @@ -100,7 +100,7 @@ protected: void SetUp() override { InputShape shapes; BatchToSpaceParams ssParams; - ngraph::helpers::InputLayerType restInputType; + ov::test::utils::InputLayerType restInputType; std::map additionalConfig; std::tie(shapes, ssParams, inType, restInputType, additionalConfig) = this->GetParam(); @@ -112,7 +112,7 @@ protected: std::vector inputShapes; inputShapes.push_back(shapes); - if (restInputType == ngraph::helpers::InputLayerType::PARAMETER) { + if (restInputType == ov::test::utils::InputLayerType::PARAMETER) { inputShapes.push_back(InputShape({static_cast(block.size())}, std::vector(shapes.second.size(), {block.size()}))); inputShapes.push_back(InputShape({static_cast(begin.size())}, std::vector(shapes.second.size(), {begin.size()}))); inputShapes.push_back(InputShape({static_cast(end.size())}, std::vector(shapes.second.size(), {end.size()}))); @@ -122,10 +122,10 @@ protected: ov::ParameterVector params{std::make_shared(inType, inputDynamicShapes.front())}; std::shared_ptr blockInput, beginInput, endInput; - if (restInputType == ngraph::helpers::InputLayerType::PARAMETER) { - auto blockNode = 
std::make_shared(ngraph::element::Type_t::i64, ov::Shape{block.size()}); - auto beginNode = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{begin.size()}); - auto endNode = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{end.size()}); + if (restInputType == ov::test::utils::InputLayerType::PARAMETER) { + auto blockNode = std::make_shared(ov::element::i64, ov::Shape{block.size()}); + auto beginNode = std::make_shared(ov::element::i64, ov::Shape{begin.size()}); + auto endNode = std::make_shared(ov::element::i64, ov::Shape{end.size()}); params.push_back(blockNode); params.push_back(beginNode); @@ -135,38 +135,34 @@ protected: beginInput = beginNode; endInput = endNode; } else { - blockInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{block.size()}, block); - beginInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{begin.size()}, begin); - endInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{end.size()}, end); + blockInput = std::make_shared(ov::element::i64, ov::Shape{block.size()}, block); + beginInput = std::make_shared(ov::element::i64, ov::Shape{begin.size()}, begin); + endInput = std::make_shared(ov::element::i64, ov::Shape{end.size()}, end); } - auto ss = std::make_shared(params[0], blockInput, beginInput, endInput); + auto ss = std::make_shared(params[0], blockInput, beginInput, endInput); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < ss->get_output_size(); i++) { - results.push_back(std::make_shared(ss->output(i))); + results.push_back(std::make_shared(ss->output(i))); } - function = std::make_shared(results, params, "BatchToSpaceFuncTest"); + function = std::make_shared(results, params, "BatchToSpaceFuncTest"); } }; -TEST_P(BatchToSpaceLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(BatchToSpaceLayerGPUTest, Inference) { run(); } -namespace { - std::map emptyAdditionalConfig; -const std::vector inputPrecisions = { - 
ElementType::f32 +const std::vector inputPrecisions = { + ov::element::f32 }; -const std::vector restInputTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER +const std::vector restInputTypes = { + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER }; const std::vector inputShapesDynamic3D = { @@ -224,4 +220,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Dynamic_5D, BatchToSpaceLay BatchToSpaceLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp index 8b7c750756b..f09491bed63 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp @@ -2,48 +2,46 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/broadcast.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -#include -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/broadcast.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< std::vector, // Shapes std::vector, // Target shapes std::vector, // Axes mapping ov::op::BroadcastType, // Broadcast mode - ov::element::Type_t, // Network precision + ov::element::Type, // Network precision std::vector, // Const inputs std::string // Device name > BroadcastLayerTestParamsSet; class BroadcastLayerGPUTest : public testing::WithParamInterface, - virtual public 
SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - std::vector inputShapes; + std::vector shapes; std::vector targetShapes, axesMapping; ov::op::BroadcastType mode; - ov::element::Type_t netPrecision; + ov::element::Type model_type; std::vector isConstInputs; std::string deviceName; - std::tie(inputShapes, targetShapes, axesMapping, mode, netPrecision, isConstInputs, deviceName) = obj.param; + std::tie(shapes, targetShapes, axesMapping, mode, model_type, isConstInputs, deviceName) = obj.param; std::ostringstream result; result << "IS=("; - for (const auto& shape : inputShapes) { + for (const auto& shape : shapes) { result << ov::test::utils::partialShape2str({shape.first}) << "_"; } result << ")_TS=("; - for (const auto& shape : inputShapes) { + for (const auto& shape : shapes) { for (const auto& item : shape.second) { result << ov::test::utils::vec2str(item) << "_"; } @@ -51,7 +49,7 @@ public: result << "targetShape=" << ov::test::utils::vec2str(targetShapes) << "_"; result << "axesMapping=" << ov::test::utils::vec2str(axesMapping) << "_"; result << "mode=" << mode << "_"; - result << "netPrec=" << netPrecision << "_"; + result << "netPrec=" << model_type << "_"; result << "constIn=(" << (isConstInputs[0] ? "True" : "False") << "." << (isConstInputs[1] ? 
"True" : "False") << ")_"; result << "trgDevice=" << deviceName; @@ -62,11 +60,11 @@ protected: std::vector targetShape, axesMapping; void SetUp() override { - std::vector inputShapes; + std::vector shapes; ov::op::BroadcastType mode; - ov::element::Type_t netPrecision; + ov::element::Type model_type; std::vector isConstInput; - std::tie(inputShapes, targetShape, axesMapping, mode, netPrecision, isConstInput, targetDevice) = this->GetParam(); + std::tie(shapes, targetShape, axesMapping, mode, model_type, isConstInput, targetDevice) = this->GetParam(); bool isTargetShapeConst = isConstInput[0]; bool isAxesMapConst = isConstInput[1]; @@ -74,8 +72,8 @@ protected: const auto targetShapeRank = targetShape.size(); const auto axesMappingRank = axesMapping.size(); - if (inputShapes.front().first.rank() != 0) { - inputDynamicShapes.push_back(inputShapes.front().first); + if (shapes.front().first.rank() != 0) { + inputDynamicShapes.push_back(shapes.front().first); if (!isTargetShapeConst) { inputDynamicShapes.push_back({ static_cast(targetShape.size()) }); } @@ -83,10 +81,10 @@ protected: inputDynamicShapes.push_back({ static_cast(axesMapping.size()) }); } } - const size_t targetStaticShapeSize = inputShapes.front().second.size(); + const size_t targetStaticShapeSize = shapes.front().second.size(); targetStaticShapes.resize(targetStaticShapeSize); for (size_t i = 0lu; i < targetStaticShapeSize; ++i) { - targetStaticShapes[i].push_back(inputShapes.front().second[i]); + targetStaticShapes[i].push_back(shapes.front().second[i]); if (!isTargetShapeConst) targetStaticShapes[i].push_back({ targetShape.size() }); if (!isAxesMapConst) @@ -95,9 +93,9 @@ protected: ov::ParameterVector functionParams; if (inputDynamicShapes.empty()) { - functionParams.push_back(std::make_shared(netPrecision, targetStaticShapes.front().front())); + functionParams.push_back(std::make_shared(model_type, targetStaticShapes.front().front())); } else { - 
functionParams.push_back(std::make_shared(netPrecision, inputDynamicShapes.front())); + functionParams.push_back(std::make_shared(model_type, inputDynamicShapes.front())); if (!isTargetShapeConst) { functionParams.push_back(std::make_shared(ov::element::i64, inputDynamicShapes[1])); functionParams.back()->set_friendly_name("targetShape"); @@ -140,19 +138,19 @@ protected: } } - auto makeFunction = [](ParameterVector ¶ms, const std::shared_ptr &lastNode) { - ResultVector results; + auto makeFunction = [](ov::ParameterVector ¶ms, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "BroadcastLayerGPUTest"); + return std::make_shared(results, params, "BroadcastLayerGPUTest"); }; function = makeFunction(functionParams, broadcastOp); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0lu; i < funcInputs.size(); i++) { @@ -183,19 +181,15 @@ protected: } }; -TEST_P(BroadcastLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(BroadcastLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector inputPrecisionsFloat = { +const std::vector inputPrecisionsFloat = { ov::element::f32, }; -const std::vector inputPrecisionsInt = { +const std::vector inputPrecisionsInt = { ov::element::i32, }; @@ -407,5 +401,3 @@ INSTANTIATE_TEST_CASE_P(smoke_broadcast_6d_numpy_compareWithRefs_dynamic, BroadcastLayerGPUTest::getTestCaseName); } // namespace - -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp 
b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp index c957a35d0e2..f3f97b4d325 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp @@ -1,49 +1,43 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/node_builders/activation.hpp" +#include "common_test_utils/node_builders/convolution.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/single_layer/convolution.hpp" -#include "common_test_utils/test_constants.hpp" +#include "shared_test_classes/single_op/convolution.hpp" -// using namespace LayerTestsDefinitions; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/convolution.hpp" -namespace GPULayerTestsDefinitions { - -using LayerTestsDefinitions::convSpecificParams; +namespace { +using ov::test::InputShape; +using ov::test::convSpecificParams; typedef std::tuple< convSpecificParams, - ElementType, // Net precision - ElementType, // Input precision - ElementType, // Output precision - InputShape, // Input shape - LayerTestsUtils::TargetDevice, // Device name - bool // activation fusing + ov::element::Type, // Model type + InputShape, // Input shape + std::string, // Device name + bool // activation fusing > convLayerTestParamsSet; class ConvolutionLayerGPUTestDynamic : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { convSpecificParams convParams; - 
ElementType netType; - ElementType inType, outType; + ov::element::Type model_type; InputShape inputShape; std::string targetDevice; bool activationFusing; - std::tie(convParams, netType, inType, outType, inputShape, targetDevice, activationFusing) = obj.param; + std::tie(convParams, model_type, inputShape, targetDevice, activationFusing) = obj.param; - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType) = convParams; @@ -63,9 +57,7 @@ public: result << "D=" << ov::test::utils::vec2str(dilation) << "_"; result << "O=" << convOutChannels << "_"; result << "AP=" << padType << "_"; - result << "netPRC=" << netType << "_"; - result << "inPRC=" << inType << "_"; - result << "outPRC=" << outType << "_"; + result << "netPRC=" << model_type << "_"; result << "trgDev=" << targetDevice << "_"; result << "activationFusing=" << activationFusing; @@ -76,49 +68,46 @@ protected: void SetUp() override { convSpecificParams convParams; InputShape inputShape; - auto netType = ElementType::undefined; + auto model_type = ov::element::undefined; bool activationFusing; - std::tie(convParams, netType, inType, outType, inputShape, targetDevice, activationFusing) = this->GetParam(); + std::tie(convParams, model_type, inputShape, targetDevice, activationFusing) = this->GetParam(); init_input_shapes({inputShape}); - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType) = convParams; ov::ParameterVector inputParams; for (auto&& shape : inputDynamicShapes) - inputParams.push_back(std::make_shared(inType, shape)); + 
inputParams.push_back(std::make_shared(model_type, shape)); - auto convolutionNode = ngraph::builder::makeConvolution(inputParams.front(), netType, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels); + auto convolutionNode = ov::test::utils::make_convolution(inputParams.front(), model_type, kernel, stride, padBegin, + padEnd, dilation, padType, convOutChannels); if (activationFusing) { - auto activationNode = ngraph::builder::makeActivation(convolutionNode, netType, ngraph::helpers::ActivationTypes::Relu); + auto activationNode = ov::test::utils::make_activation(convolutionNode, model_type, ov::test::utils::ActivationTypes::Relu); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < activationNode->get_output_size(); i++) - results.push_back(std::make_shared(activationNode->output(i))); + results.push_back(std::make_shared(activationNode->output(i))); - function = std::make_shared(results, inputParams, "Convolution"); + function = std::make_shared(results, inputParams, "Convolution"); } else { - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < convolutionNode->get_output_size(); i++) - results.push_back(std::make_shared(convolutionNode->output(i))); + results.push_back(std::make_shared(convolutionNode->output(i))); - function = std::make_shared(results, inputParams, "Convolution"); + function = std::make_shared(results, inputParams, "Convolution"); } } }; -TEST_P(ConvolutionLayerGPUTestDynamic, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(ConvolutionLayerGPUTestDynamic, Inference) { run(); } -namespace { - // ======== 1D convolutions const std::vector dynInputShapes1D = { { @@ -130,27 +119,25 @@ const std::vector dynInputShapes1D = { INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic1DSymPad, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3}), - ::testing::Values(SizeVector{1}), + 
::testing::Values(std::vector{3}), + ::testing::Values(std::vector{1}), ::testing::Values(std::vector{1}), ::testing::Values(std::vector{1}), - ::testing::Values(SizeVector{1}), + ::testing::Values(std::vector{1}), ::testing::Values(10), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes1D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), ConvolutionLayerGPUTestDynamic::getTestCaseName); -const std::vector kernels1D = { {3}, {1} }; -const std::vector strides1D = { {1} }; +const std::vector> kernels1D = { {3}, {1} }; +const std::vector> strides1D = { {1} }; const std::vector> padBegins1D = { {0}, {1} }; const std::vector> padEnds1D = { {0}, {1} }; -const std::vector dilations1D = { {1} }; -const SizeVector numOutChannels = { 64, 63 }; +const std::vector> dilations1D = { {1} }; +const std::vector numOutChannels = { 64, 63 }; const std::vector inputShapes1D = { {{}, {{ 2, 64, 7 }}}, {{}, {{ 1, 67, 7 }}}, @@ -181,10 +168,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_ExplicitPad1D, Convolutio ::testing::ValuesIn(padEnds1D), ::testing::ValuesIn(dilations1D), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT)), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::Values(ov::op::PadType::EXPLICIT)), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(inputShapes1D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), @@ -209,16 +194,14 @@ const std::vector dynInputShapes2D_static_output = { INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymPad, 
ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{1, 2}), ::testing::Values(std::vector{1, 2}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(10), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes2D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), @@ -228,16 +211,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymPad, Convolut INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymAutoPad, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{0, 0}), ::testing::Values(std::vector{0, 0}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(10), - ::testing::ValuesIn({ngraph::op::PadType::SAME_LOWER, ngraph::op::PadType::SAME_UPPER})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes2D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), @@ -247,16 +228,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymAutoPad, Conv 
INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2D_AsymPad, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{1, 2}), ::testing::Values(std::vector{2, 1}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(10), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes2D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), @@ -266,16 +245,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2D_AsymPad, Convol INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2D_static_output, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{2, 2}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{2, 2}), ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{1, 1}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(256), - ::testing::Values(ngraph::op::PadType::EXPLICIT)), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), + ::testing::Values(ov::op::PadType::EXPLICIT)), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(dynInputShapes2D_static_output), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(true)), @@ -293,16 +270,14 @@ const std::vector dynInputShapes3D = { 
INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DSymPad, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3, 3}), - ::testing::Values(SizeVector{1, 1, 1}), + ::testing::Values(std::vector{3, 3, 3}), + ::testing::Values(std::vector{1, 1, 1}), ::testing::Values(std::vector{1, 2, 1}), ::testing::Values(std::vector{1, 2, 1}), - ::testing::Values(SizeVector{1, 1, 1}), + ::testing::Values(std::vector{1, 1, 1}), ::testing::Values(3), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes3D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), @@ -312,16 +287,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DSymPad, Convolut INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DSymAutoPad, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3, 3}), - ::testing::Values(SizeVector{1, 1, 1}), + ::testing::Values(std::vector{3, 3, 3}), + ::testing::Values(std::vector{1, 1, 1}), ::testing::Values(std::vector{0, 0, 0}), ::testing::Values(std::vector{0, 0, 0}), - ::testing::Values(SizeVector{1, 1, 1}), + ::testing::Values(std::vector{1, 1, 1}), ::testing::Values(3), - ::testing::ValuesIn({ngraph::op::PadType::SAME_LOWER, ngraph::op::PadType::SAME_UPPER})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes3D), ::testing::Values(ov::test::utils::DEVICE_GPU), 
::testing::Values(false)), @@ -331,20 +304,17 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DSymAutoPad, Conv INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DAsymPad, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3, 3}), - ::testing::Values(SizeVector{1, 1, 1}), + ::testing::Values(std::vector{3, 3, 3}), + ::testing::Values(std::vector{1, 1, 1}), ::testing::Values(std::vector{1, 2, 1}), ::testing::Values(std::vector{2, 1, 1}), - ::testing::Values(SizeVector{1, 1, 1}), + ::testing::Values(std::vector{1, 1, 1}), ::testing::Values(3), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes3D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), ConvolutionLayerGPUTestDynamic::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp index a36426cd84c..f34e155a5e6 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp @@ -2,54 +2,50 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/single_layer/convolution_backprop_data.hpp" -#include "common_test_utils/test_constants.hpp" #include 
"common_test_utils/ov_tensor_utils.hpp" -#include "openvino/core/preprocess/pre_post_process.hpp" +#include "common_test_utils/test_enums.hpp" +#include "common_test_utils/node_builders/convolution_backprop_data.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "shared_test_classes/single_op/convolution_backprop_data.hpp" -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/convolution.hpp" -namespace GPULayerTestsDefinitions { - -using DeconvSpecParams = LayerTestsDefinitions::convBackpropDataSpecificParams; +namespace { +using ov::test::InputShape; +using ov::test::convBackpropDataSpecificParams; using DeconvInputData = std::tuple>>; // values for 'output_shape' -using DeconvLayerTestParamsSet = std::tuple>; class DeconvolutionLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - DeconvSpecParams basicParamsSet; + convBackpropDataSpecificParams basicParamsSet; DeconvInputData inputData; - ElementType prec; + ov::element::Type model_type; std::string targetDevice; std::map additionalConfig; - std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = obj.param; + std::tie(basicParamsSet, inputData, model_type, targetDevice, additionalConfig) = obj.param; - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd, outPadding; size_t convOutChannels; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = basicParamsSet; InputShape inputShape; - ngraph::helpers::InputLayerType outShapeType; + ov::test::utils::InputLayerType outShapeType; std::vector> outShapeData; std::tie(inputShape, 
outShapeType, outShapeData) = inputData; @@ -62,7 +58,7 @@ public: result << ov::test::utils::vec2str(shape); result << ")_"; } - result << "PRC=" << prec << "_"; + result << "PRC=" << model_type << "_"; result << "K=" << ov::test::utils::vec2str(kernel) << "_"; result << "S=" << ov::test::utils::vec2str(stride) << "_"; result << "PB=" << ov::test::utils::vec2str(padBegin) << "_"; @@ -88,13 +84,13 @@ public: return result.str(); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { if (function->get_parameters().size() != 1) { // WA: output_shape depends on 3rd deconvolution input data // but the reference implementation doesn't implement shape inference - // so we need to build a new ngraph function and replace the 3rd input parameter with a constant + // so we need to build a new ov function and replace the 3rd input parameter with a constant // to get valid output shapes - functionRefs = createGraph({targetInputStaticShapes[0]}, ngraph::helpers::InputLayerType::CONSTANT); + functionRefs = createGraph({targetInputStaticShapes[0]}, ov::test::utils::InputLayerType::CONSTANT); } inputs.clear(); const auto& funcInputs = function->inputs(); @@ -113,62 +109,17 @@ public: inferRequestNum++; } - void validate() override { - auto actualOutputs = get_plugin_outputs(); - if (function->get_parameters().size() == 2) { - auto pos = std::find_if(inputs.begin(), inputs.end(), - [](const std::pair, ov::Tensor> ¶ms) { - return params.first->get_friendly_name() == "param_1"; - }); - IE_ASSERT(pos != inputs.end()); - inputs.erase(pos); - } - auto expectedOutputs = calculate_refs(); - if (expectedOutputs.empty()) { - return; - } - ASSERT_EQ(actualOutputs.size(), expectedOutputs.size()) - << "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size(); - - abs_threshold = 1e-2f; - compare(expectedOutputs, actualOutputs); - } - - void 
configure_model() override { - ov::preprocess::PrePostProcessor p(function); - { - auto& params = function->get_parameters(); - for (size_t i = 0; i < params.size(); i++) { - if (i > 0) { - continue; - } - if (inType != ov::element::Type_t::undefined) { - p.input(i).tensor().set_element_type(inType); - } - } - } - { - auto results = function->get_results(); - for (size_t i = 0; i < results.size(); i++) { - if (outType != ov::element::Type_t::undefined) { - p.output(i).tensor().set_element_type(outType); - } - } - } - function = p.build(); - } - - std::shared_ptr createGraph(const std::vector& inShapes, ngraph::helpers::InputLayerType outShapeType) { - ov::ParameterVector params{std::make_shared(prec, inShapes.front())}; + std::shared_ptr createGraph(const std::vector& inShapes, ov::test::utils::InputLayerType outShapeType) { + ov::ParameterVector params{std::make_shared(model_type, inShapes.front())}; std::shared_ptr outShapeNode; if (!outShapeData.empty()) { - if (outShapeType == ngraph::helpers::InputLayerType::PARAMETER) { + if (outShapeType == ov::test::utils::InputLayerType::PARAMETER) { IE_ASSERT(inputDynamicShapes.size() == 2); - auto outShapeParam = std::make_shared(ngraph::element::i32, inputDynamicShapes.back()); + auto outShapeParam = std::make_shared(ov::element::i32, inputDynamicShapes.back()); params.push_back(outShapeParam); outShapeNode = outShapeParam; } else { - outShapeNode = ngraph::opset8::Constant::create(ngraph::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]); + outShapeNode = ov::op::v0::Constant::create(ov::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]); } } @@ -179,36 +130,36 @@ public: std::shared_ptr deconv; if (!outShapeData.empty()) { IE_ASSERT(outShapeNode != nullptr); - deconv = ngraph::builder::makeConvolutionBackpropData(params[0], outShapeNode, prec, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels); + deconv = 
ov::test::utils::make_convolution_backprop_data(params[0], outShapeNode, model_type, kernel, stride, padBegin, + padEnd, dilation, padType, convOutChannels); } else { - deconv = ngraph::builder::makeConvolutionBackpropData(params[0], prec, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, false, outPadding); + deconv = ov::test::utils::make_convolution_backprop_data(params[0], model_type, kernel, stride, padBegin, + padEnd, dilation, padType, convOutChannels, false, outPadding); } - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < deconv->get_output_size(); i++) - results.push_back(std::make_shared(deconv->output(i))); + results.push_back(std::make_shared(deconv->output(i))); - return std::make_shared(results, params, "Deconv"); + return std::make_shared(results, params, "Deconv"); } protected: void SetUp() override { - DeconvSpecParams basicParamsSet; + convBackpropDataSpecificParams basicParamsSet; DeconvInputData inputData; std::map additionalConfig; - std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = this->GetParam(); + std::tie(basicParamsSet, inputData, model_type, targetDevice, additionalConfig) = this->GetParam(); InputShape inputShape; - ngraph::helpers::InputLayerType outShapeType; + ov::test::utils::InputLayerType outShapeType; std::tie(inputShape, outShapeType, outShapeData) = inputData; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = basicParamsSet; std::vector paramsShapes; paramsShapes.push_back(inputShape); - if (!outShapeData.empty() && outShapeType == ngraph::helpers::InputLayerType::PARAMETER) { + if (!outShapeData.empty() && outShapeType == ov::test::utils::InputLayerType::PARAMETER) { const auto outShapeDims = ov::Shape{outShapeData.front().size()}; paramsShapes.push_back(InputShape{outShapeDims, std::vector(inputShape.second.size(), outShapeDims)}); } @@ -219,36 +170,32 @@ protected: } private: - ElementType prec; - 
ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::element::Type model_type; + ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd, outPadding; size_t convOutChannels; std::vector> outShapeData; size_t inferRequestNum = 0; }; -TEST_P(DeconvolutionLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(DeconvolutionLayerGPUTest, Inference) { run(); } -namespace { - std::map emptyAdditionalConfig; const std::vector> emptyOutputPadding = { {} }; /* ============= Deconvolution params ============= */ -const InferenceEngine::SizeVector numOutChannels = { 6 }; +const std::vector numOutChannels = { 6 }; /* ============= Deconvolution params (2D) ============= */ -const std::vector kernels2d = { {3, 3}, {1, 1} }; -const std::vector strides2d = { {1, 1}, {2, 2} }; +const std::vector> kernels2d = { {3, 3}, {1, 1} }; +const std::vector> strides2d = { {1, 1}, {2, 2} }; const std::vector> padBegins2d = { {0, 0} }; const std::vector> padEnds2d = { {0, 0} }; -const std::vector dilations2d = { {1, 1} }; +const std::vector> dilations2d = { {1, 1} }; /* ============= Deconvolution (2D) ============= */ const auto convParams_ExplicitPadding_2D = ::testing::Combine( @@ -258,29 +205,29 @@ const auto convParams_ExplicitPadding_2D = ::testing::Combine( ::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding) ); const std::vector dyn_2D_inputs_smoke = { DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} }, DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + 
ov::test::utils::InputLayerType::CONSTANT, {} }, DeconvInputData{ InputShape{{-1, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {1, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} }, DeconvInputData{ InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {3, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} }, }; @@ -289,7 +236,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Dynamic_FP32, DeconvolutionLayerGPUTest ::testing::Combine( convParams_ExplicitPadding_2D, ::testing::ValuesIn(dyn_2D_inputs_smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(emptyAdditionalConfig)), DeconvolutionLayerGPUTest::getTestCaseName); @@ -297,17 +244,17 @@ INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Dynamic_FP32, DeconvolutionLayerGPUTest const std::vector dyn_2D_inputs_with_output_shape = { DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{15, 15}, {9, 10}, {15, 15}} }, DeconvInputData{ InputShape{{-1, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {1, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{15, 15}} }, DeconvInputData{ InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {3, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{15, 15}} }, }; @@ -315,20 +262,18 @@ const std::vector dyn_2D_inputs_with_output_shape = { INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Dynamic_OutputShape_FP32, DeconvolutionLayerGPUTest, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), + ::testing::Values(std::vector{3, 3}), ::testing::ValuesIn(strides2d), ::testing::ValuesIn(padBegins2d), 
::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding)), ::testing::ValuesIn(dyn_2D_inputs_with_output_shape), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(emptyAdditionalConfig)), DeconvolutionLayerGPUTest::getTestCaseName); } // namespace - -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/cum_sum.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/cum_sum.cpp index db4bea2f89e..6664c8f5026 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/cum_sum.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/cum_sum.cpp @@ -2,22 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/cum_sum.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/cum_sum.hpp" -using ElementType = ov::element::Type_t; - -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< - ElementType, // data precision + ov::element::Type, // data type InputShape, // input shape std::int64_t, // axis bool, // exclusive @@ -25,15 +22,15 @@ typedef std::tuple< > CumSumLayerGPUParamSet; class CumSumLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string 
getTestCaseName(testing::TestParamInfo obj) { - ElementType inputPrecision; + ov::element::Type model_type; InputShape shapes; std::int64_t axis; bool exclusive; bool reverse; - std::tie(inputPrecision, shapes, axis, exclusive, reverse) = obj.param; + std::tie(model_type, shapes, axis, exclusive, reverse) = obj.param; std::ostringstream results; results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_"; @@ -41,7 +38,7 @@ public: for (const auto& item : shapes.second) { results << ov::test::utils::vec2str(item) << "_"; } - results << "Prc=" << inputPrecision << "_"; + results << "Prc=" << model_type << "_"; results << "Axis=" << axis << "_" << (exclusive ? "exclusive" : "") << "_" << (reverse ? "reverse" : ""); return results.str(); } @@ -50,44 +47,40 @@ protected: void SetUp() override { targetDevice = ov::test::utils::DEVICE_GPU; - ElementType inputPrecision; + ov::element::Type model_type; InputShape shapes; std::int64_t axis; bool exclusive; bool reverse; - std::tie(inputPrecision, shapes, axis, exclusive, reverse) = this->GetParam(); + std::tie(model_type, shapes, axis, exclusive, reverse) = this->GetParam(); init_input_shapes({shapes}); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) { - params.push_back(std::make_shared(inputPrecision, shape)); + params.push_back(std::make_shared(model_type, shape)); } - auto axisNode = ngraph::opset1::Constant::create(ngraph::element::i32, ngraph::Shape{}, std::vector{axis})->output(0); - auto cumSum = std::make_shared(params[0], axisNode, exclusive, reverse); + auto axisNode = std::make_shared(ov::element::i32, ov::Shape{}, std::vector{axis}); + auto cumSum = std::make_shared(params[0], axisNode, exclusive, reverse); - auto makeFunction = [](ParameterVector ¶ms, const std::shared_ptr &lastNode) { - ResultVector results; + auto makeFunction = [](ov::ParameterVector ¶ms, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); 
i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "CumSumLayerGPUTest"); + return std::make_shared(results, params, "CumSumLayerGPUTest"); }; function = makeFunction(params, cumSum); } }; -TEST_P(CumSumLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(CumSumLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector inputPrecision = { - ngraph::element::f32 +const std::vector model_type = { + ov::element::f32 }; const std::vector axes = { 0, 1, 2, 3, 4, 5 }; @@ -117,7 +110,7 @@ const std::vector inShapes = { }; const auto testCasesAxis_0 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(model_type), ::testing::ValuesIn(inShapes), ::testing::Values(axes[0]), ::testing::ValuesIn(exclusive), @@ -125,7 +118,7 @@ const auto testCasesAxis_0 = ::testing::Combine( ); const auto testCasesAxis_1 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(model_type), ::testing::ValuesIn(std::vector(inShapes.begin() + 1, inShapes.end())), ::testing::Values(axes[1]), ::testing::ValuesIn(exclusive), @@ -133,7 +126,7 @@ const auto testCasesAxis_1 = ::testing::Combine( ); const auto testCasesAxis_2 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(model_type), ::testing::ValuesIn(std::vector(inShapes.begin() + 2, inShapes.end())), ::testing::Values(axes[2]), ::testing::ValuesIn(exclusive), @@ -141,7 +134,7 @@ const auto testCasesAxis_2 = ::testing::Combine( ); const auto testCasesAxis_3 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(model_type), ::testing::ValuesIn(std::vector(inShapes.begin() + 3, inShapes.end())), ::testing::Values(axes[3]), ::testing::ValuesIn(exclusive), @@ -149,7 +142,7 @@ const auto testCasesAxis_3 = ::testing::Combine( ); const auto testCasesAxis_4 = ::testing::Combine( - 
::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(model_type), ::testing::ValuesIn(std::vector(inShapes.begin() + 4, inShapes.end())), ::testing::Values(axes[4]), ::testing::ValuesIn(exclusive), @@ -157,7 +150,7 @@ const auto testCasesAxis_4 = ::testing::Combine( ); const auto testCasesAxis_5 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(model_type), ::testing::ValuesIn(std::vector(inShapes.begin() + 5, inShapes.end())), ::testing::Values(axes[5]), ::testing::ValuesIn(exclusive), @@ -165,7 +158,7 @@ const auto testCasesAxis_5 = ::testing::Combine( ); const auto testCasesAxis_negative = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(model_type), ::testing::ValuesIn(std::vector(inShapes.begin() + 5, inShapes.end())), ::testing::ValuesIn(negativeAxes), ::testing::ValuesIn(exclusive), @@ -181,5 +174,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_cum_sum_axis_5_CompareWithRefs_dynamic, CumSumLay INSTANTIATE_TEST_SUITE_P(smoke_cum_sum_neg_axes_CompareWithRefs_dynamic, CumSumLayerGPUTest, testCasesAxis_negative, CumSumLayerGPUTest::getTestCaseName); } // namespace - -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/depth_to_space.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/depth_to_space.cpp index 3f0ea75534e..cd271734d81 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/depth_to_space.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/depth_to_space.cpp @@ -2,22 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/depth_to_space.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph::opset3; -using namespace 
InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/depth_to_space.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; +using ov::op::v0::DepthToSpace; typedef std::tuple< InputShape, // Input shape - ElementType, // Input element type + ov::element::Type, // Input element type DepthToSpace::DepthToSpaceMode, // Mode std::size_t // Block size > DepthToSpaceLayerGPUTestParams; @@ -27,7 +26,7 @@ class DepthToSpaceLayerGPUTest : public testing::WithParamInterface obj) { InputShape shapes; - ElementType inType; + ov::element::Type inType; DepthToSpace::DepthToSpaceMode mode; std::size_t blockSize; std::tie(shapes, inType, mode, blockSize) = obj.param; @@ -70,25 +69,21 @@ protected: auto d2s = std::make_shared(params[0], mode, blockSize); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < d2s->get_output_size(); i++) - results.push_back(std::make_shared(d2s->output(i))); - function = std::make_shared(results, params, "DepthToSpace"); + results.push_back(std::make_shared(d2s->output(i))); + function = std::make_shared(results, params, "DepthToSpace"); } }; -TEST_P(DepthToSpaceLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(DepthToSpaceLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector inputElementType = { - ElementType::f32, - ElementType::f16, - ElementType::i8 +const std::vector input_types = { + ov::element::f32, + ov::element::f16, + ov::element::i8 }; const std::vector depthToSpaceModes = { @@ -120,16 +115,16 @@ const std::vector inputShapesBS3_4D = { INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS2_4D, DepthToSpaceLayerGPUTest, testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_4D)), - testing::ValuesIn(inputElementType), + 
testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS2_4D)), + testing::ValuesIn(input_types), testing::ValuesIn(depthToSpaceModes), testing::Values(1, 2)), DepthToSpaceLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS3_4D, DepthToSpaceLayerGPUTest, testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_4D)), - testing::ValuesIn(inputElementType), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS3_4D)), + testing::ValuesIn(input_types), testing::ValuesIn(depthToSpaceModes), testing::Values(1, 3)), DepthToSpaceLayerGPUTest::getTestCaseName); @@ -153,16 +148,16 @@ const std::vector inputShapesBS3_5D = { INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS2_5D, DepthToSpaceLayerGPUTest, testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_5D)), - testing::ValuesIn(inputElementType), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS2_5D)), + testing::ValuesIn(input_types), testing::ValuesIn(depthToSpaceModes), testing::Values(1, 2)), DepthToSpaceLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS3_5D, DepthToSpaceLayerGPUTest, testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_5D)), - testing::ValuesIn(inputElementType), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS3_5D)), + testing::ValuesIn(input_types), testing::ValuesIn(depthToSpaceModes), testing::Values(1, 3)), DepthToSpaceLayerGPUTest::getTestCaseName); @@ -171,8 +166,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS3_5D, DepthToSpaceLayerGPU //======================== Dynamic Shapes Tests ======================== -namespace dynamic_shapes { - const std::vector inputShapes4D = { {{-1, -1, -1, -1}, // dynamic {{2, 36, 1, 1}, {1, 36, 3, 1}, {2, 36, 1, 1}, {1, 36, 3, 1}}}, // target @@ -198,7 +191,7 @@ 
const std::vector inputShapes5D = { INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceDynamic4D, DepthToSpaceLayerGPUTest, testing::Combine( testing::ValuesIn(inputShapes4D), - testing::ValuesIn(inputElementType), + testing::ValuesIn(input_types), testing::ValuesIn(depthToSpaceModes), testing::Values(1, 2, 3)), DepthToSpaceLayerGPUTest::getTestCaseName); @@ -206,12 +199,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceDynamic4D, DepthToSpaceLayerGPUTes INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceDynamic5D, DepthToSpaceLayerGPUTest, testing::Combine( testing::ValuesIn(inputShapes5D), - testing::ValuesIn(inputElementType), + testing::ValuesIn(input_types), testing::ValuesIn(depthToSpaceModes), testing::Values(1, 2, 3)), DepthToSpaceLayerGPUTest::getTestCaseName); -} // namespace dynamic_shapes - } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp index ab76700486d..2b60747dbfa 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp @@ -2,18 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/detection_output.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/detection_output.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; enum { idxLocation, @@ -65,7 +63,7 @@ public: 
static std::string getTestCaseName(testing::TestParamInfo obj) { DetectionOutputAttributes commonAttrs; ParamsWhichSizeDependsDynamic specificAttrs; - ngraph::op::DetectionOutputAttrs attrs; + ov::op::v0::DetectionOutput::Attributes attrs; size_t batch; bool replaceDynamicShapesToIntervals; std::string targetDevice; @@ -101,14 +99,30 @@ public: result << " }_"; } - using LayerTestsDefinitions::operator<<; - result << attrs; + result << "attributes={"; + result << "Classes=" << attrs.num_classes << "_"; + result << "backgrId=" << attrs.background_label_id << "_"; + result << "topK=" << attrs.top_k << "_"; + result << "varEnc=" << attrs.variance_encoded_in_target << "_"; + result << "keepTopK=" << ov::test::utils::vec2str(attrs.keep_top_k) << "_"; + result << "codeType=" << attrs.code_type << "_"; + result << "shareLoc=" << attrs.share_location << "_"; + result << "nmsThr=" << attrs.nms_threshold << "_"; + result << "confThr=" << attrs.confidence_threshold << "_"; + result << "clipAfterNms=" << attrs.clip_after_nms << "_"; + result << "clipBeforeNms=" << attrs.clip_before_nms << "_"; + result << "decrId=" << attrs.decrease_label_id << "_"; + result << "norm=" << attrs.normalized << "_"; + result << "inH=" << attrs.input_height << "_"; + result << "inW=" << attrs.input_width << "_"; + result << "OS=" << attrs.objectness_score; + result << "}_"; result << "RDS=" << (replaceDynamicShapesToIntervals ? 
"true" : "false") << "_"; result << "TargetDevice=" << targetDevice; return result.str(); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (auto i = 0ul; i < funcInputs.size(); ++i) { @@ -197,7 +211,7 @@ protected: ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) - params.push_back(std::make_shared(ngraph::element::f32, shape)); + params.push_back(std::make_shared(ov::element::f32, shape)); if (attrs.num_classes == -1) { std::shared_ptr detOut; @@ -209,8 +223,8 @@ protected: else throw std::runtime_error("DetectionOutput layer supports only 3 or 5 inputs"); - ngraph::ResultVector results{std::make_shared(detOut)}; - function = std::make_shared(results, params, "DetectionOutputDynamic"); + ov::ResultVector results{std::make_shared(detOut)}; + function = std::make_shared(results, params, "DetectionOutputDynamic"); } else { std::shared_ptr detOut; if (params.size() == 3) @@ -220,8 +234,8 @@ protected: else OPENVINO_THROW("DetectionOutput layer supports only 3 or 5 inputs"); - ngraph::ResultVector results{std::make_shared(detOut)}; - function = std::make_shared(results, params, "DetectionOutputDynamic"); + ov::ResultVector results{std::make_shared(detOut)}; + function = std::make_shared(results, params, "DetectionOutputDynamic"); } } @@ -253,18 +267,14 @@ private: } } } - ngraph::op::DetectionOutputAttrs attrs; + ov::op::v0::DetectionOutput::Attributes attrs; std::vector inShapes; }; -TEST_P(DetectionOutputLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(DetectionOutputLayerGPUTest, Inference) { run(); } -namespace { - const std::vector numClasses = {11, -1}; const int backgroundLabelId = 0; const std::vector topK = {75}; @@ -438,4 +448,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPUDetectionOutputV8Dynamic3In, DetectionOutputLa params3InputsDynamic_v8, 
DetectionOutputLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather.cpp index bdda5ccbe2a..1f32a7356a4 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather.cpp @@ -2,18 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/gather.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/gather.hpp" + +namespace { +using ov::test::InputShape; -namespace GPULayerTestsDefinitions { struct GatherShapeParams { InputShape inputShapes; InputShape targetShapes; @@ -23,22 +23,21 @@ struct GatherShapeParams { typedef std::tuple< GatherShapeParams, - ElementType, // Network precision + ov::element::Type, // Network precision bool, // Is const Indices bool // Is const Axis > GatherGPUTestParams; - class GatherGPUTest : public testing::WithParamInterface, - virtual public ov::test::SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { GatherShapeParams Shapes; - ElementType netPrecision; + ov::element::Type model_type; bool isIndicesConstant; bool isAxisConstant; - std::tie(Shapes, netPrecision, isIndicesConstant, isAxisConstant) = obj.param; + std::tie(Shapes, model_type, isIndicesConstant, isAxisConstant) = 
obj.param; std::ostringstream result; result << "IS=("; @@ -57,7 +56,7 @@ public: } result << "axis=" << Shapes.axis << "_"; result << "batchDims=" << Shapes.batch_dims << "_"; - result << "netPrc=" << netPrecision << "_"; + result << "netPrc=" << model_type << "_"; result << "constIdx=" << (isIndicesConstant ? "True" : "False") << "_"; result << "constAx=" << (isAxisConstant ? "True" : "False") << "_"; @@ -67,12 +66,12 @@ public: protected: void SetUp() override { GatherShapeParams Shapes; - ElementType netPrecision; + ov::element::Type model_type; bool isAxisConstant; bool isIndicesConstant; - const ElementType intInputsPrecision = ElementType::i32; + const auto int_model_type = ov::element::i32; - std::tie(Shapes, netPrecision, isIndicesConstant, isAxisConstant) = this->GetParam(); + std::tie(Shapes, model_type, isIndicesConstant, isAxisConstant) = this->GetParam(); const int axis = Shapes.axis; const int batchDims = Shapes.batch_dims; targetDevice = ov::test::utils::DEVICE_GPU; @@ -86,7 +85,7 @@ protected: init_input_shapes({Shapes.inputShapes, Shapes.targetShapes}); } - ngraph::ParameterVector params{std::make_shared(netPrecision, inputDynamicShapes[0])}; + ov::ParameterVector params{std::make_shared(model_type, inputDynamicShapes[0])}; params.back()->set_friendly_name("data"); if (isIndicesConstant) { @@ -96,26 +95,22 @@ protected: for (size_t i = 0; i < Shapes.inputShapes.second.size(); ++i) { idx_range = std::min(static_cast(Shapes.inputShapes.second[i][axis_norm]), idx_range); } - indicesNode = ngraph::builder::makeConstant( - ngraph::element::i64, - Shapes.targetShapes.second[0], - {}, - true, - idx_range - 1, - 0); + + auto indices_tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, Shapes.targetShapes.second[0], idx_range - 1, 0); + indicesNode = std::make_shared(indices_tensor); } else { - params.push_back(std::make_shared(intInputsPrecision, inputDynamicShapes[1])); + params.push_back(std::make_shared(int_model_type, 
inputDynamicShapes[1])); params.back()->set_friendly_name("indices"); } if (isAxisConstant) { - axisNode = ngraph::builder::makeConstant(intInputsPrecision, ov::Shape({1}), {axis}); + axisNode = std::make_shared(int_model_type, ov::Shape({1}), std::vector{axis}); } else { inputDynamicShapes.push_back({1}); for (size_t i = 0lu; i < targetStaticShapes.size(); i++) { targetStaticShapes[i].push_back({1}); } - params.push_back(std::make_shared(intInputsPrecision, inputDynamicShapes[2])); + params.push_back(std::make_shared(int_model_type, inputDynamicShapes[2])); params.back()->set_friendly_name("axis"); } @@ -125,20 +120,16 @@ protected: : isIndicesConstant ? params[1] : params[2], batchDims); - ngraph::ResultVector results{std::make_shared(gatherNode)}; - function = std::make_shared(results, params, "Gather"); + ov::ResultVector results{std::make_shared(gatherNode)}; + function = std::make_shared(results, params, "Gather"); } }; -TEST_P(GatherGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(GatherGPUTest, Inference) { run(); } -namespace { - -const std::vector netPrecisions = { +const std::vector model_types = { ov::element::f32, ov::element::i32, ov::element::i64, @@ -201,9 +192,8 @@ const std::vector dynamicInputShapeConstTargetShape = { INSTANTIATE_TEST_SUITE_P(smoke_dynamic_input_shapes_const_target_shapes, GatherGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapeConstTargetShape), // input shapes - ::testing::ValuesIn(netPrecisions), // network precision - ::testing::Values(true), // is const indices - ::testing::Values(true)), // is const axis + ::testing::ValuesIn(model_types), // network precision + ::testing::Values(true), // is const indices + ::testing::Values(true)), // is const axis GatherGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_elements.cpp 
b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_elements.cpp index 68047495311..6738aab04c5 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_elements.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_elements.cpp @@ -2,36 +2,33 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ov::test; -using namespace ngraph; -using namespace InferenceEngine; -using namespace ngraph::helpers; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/gather_elements.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; using GatherElementsParams = std::tuple< std::vector, // Dynamic shape + Target static shapes int, // Axis - ElementType, // Data precision - ElementType, // Indices precision - TargetDevice // Device name ->; + ov::element::Type, // Data type + ov::element::Type, // Indices type + std::string>; // Device name class GatherElementsGPUTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { std::vector shapes; - ElementType dPrecision, iPrecision; + ov::element::Type data_type, indices_type; int axis; std::string device; - std::tie(shapes, axis, dPrecision, iPrecision, device) = obj.param; + std::tie(shapes, axis, data_type, indices_type, device) = obj.param; std::ostringstream result; result << "IS=("; @@ -45,14 +42,14 @@ public: } } result << "Ax=" << axis << "_"; - result << "DP=" << dPrecision << "_"; - result << "IP=" << iPrecision << "_"; + result << "DP=" << data_type << "_"; + result << "IP=" << indices_type << "_"; 
result << "device=" << device; return result.str(); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { @@ -68,29 +65,27 @@ public: protected: void SetUp() override { std::vector shapes; - ElementType dPrecision, iPrecision; + ov::element::Type data_type, indices_type; int axis; - std::tie(shapes, axis, dPrecision, iPrecision, targetDevice) = this->GetParam(); + std::tie(shapes, axis, data_type, indices_type, targetDevice) = this->GetParam(); init_input_shapes(shapes); - ngraph::ParameterVector params = { - std::make_shared(dPrecision, inputDynamicShapes[0]), - std::make_shared(iPrecision, inputDynamicShapes[1]), + ov::ParameterVector params = { + std::make_shared(data_type, inputDynamicShapes[0]), + std::make_shared(indices_type, inputDynamicShapes[1]), }; - auto gather = std::make_shared(params[0], params[1], axis); + auto gather = std::make_shared(params[0], params[1], axis); - ngraph::ResultVector results{std::make_shared(gather)}; - function = std::make_shared(results, params, "GatherElements"); + ov::ResultVector results{std::make_shared(gather)}; + function = std::make_shared(results, params, "GatherElements"); } }; -TEST_P(GatherElementsGPUTest, CompareWithRefs) { +TEST_P(GatherElementsGPUTest, Inference) { run(); } -namespace { - const std::vector> inDynamicShapeParams = { {{{-1, -1, -1, -1}, {{2, 3, 5, 7}, {3, 4, 6, 8}}}, {{-1, -1, -1, -1}, {{2, 3, 9, 7}, {3, 4, 4, 8}}}}, @@ -102,10 +97,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_set1, GatherElementsGPUTest, ::testing::Combine( ::testing::ValuesIn(inDynamicShapeParams), // shape ::testing::ValuesIn(std::vector({2, -2})), // Axis - ::testing::ValuesIn(std::vector({ElementType::f16, ElementType::f32})), - ::testing::Values(ElementType::i32), + ::testing::ValuesIn(std::vector({ov::element::f16, 
ov::element::f32})), + ::testing::Values(ov::element::i32), ::testing::Values(ov::test::utils::DEVICE_GPU)), GatherElementsGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_nd.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_nd.cpp index f3d98ff82de..09fa75d5a54 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_nd.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_nd.cpp @@ -3,17 +3,18 @@ // #include "shared_test_classes/single_layer/gather_nd.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/gather_nd.hpp" + +namespace { +using ov::test::InputShape; -namespace GPULayerTestsDefinitions { struct GatherNDShapeParams { InputShape inputShapes; InputShape targetShapes; @@ -22,20 +23,20 @@ struct GatherNDShapeParams { typedef std::tuple< GatherNDShapeParams, - ElementType, // Network precision - bool // Is const Indices + ov::element::Type, // Model type + bool // Is const Indices > GatherNDGPUTestParams; class GatherNDGPUTest : public testing::WithParamInterface, - virtual public ov::test::SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { GatherNDShapeParams Shapes; - ElementType netPrecision; + ov::element::Type model_type; bool isIndicesConstant; - std::tie(Shapes, netPrecision, isIndicesConstant) = obj.param; + std::tie(Shapes, 
model_type, isIndicesConstant) = obj.param; std::ostringstream result; result << "IS=("; @@ -53,7 +54,7 @@ public: result << "}_"; } result << "batchDims=" << Shapes.batch_dims << "_"; - result << "netPrc=" << netPrecision << "_"; + result << "netPrc=" << model_type << "_"; result << "constIdx=" << (isIndicesConstant ? "True" : "False") << "_"; return result.str(); @@ -62,11 +63,11 @@ public: protected: void SetUp() override { GatherNDShapeParams Shapes; - ElementType netPrecision; + ov::element::Type model_type; bool isIndicesConstant; - const ElementType intInputsPrecision = ElementType::i32; + const auto intInputsPrecision = ov::element::i32; - std::tie(Shapes, netPrecision, isIndicesConstant) = this->GetParam(); + std::tie(Shapes, model_type, isIndicesConstant) = this->GetParam(); const int batchDims = Shapes.batch_dims; targetDevice = ov::test::utils::DEVICE_GPU; std::shared_ptr indicesNode; @@ -78,7 +79,7 @@ protected: init_input_shapes({Shapes.inputShapes, Shapes.targetShapes}); } - ngraph::ParameterVector params{std::make_shared(netPrecision, inputDynamicShapes[0])}; + ov::ParameterVector params{std::make_shared(model_type, inputDynamicShapes[0])}; params.back()->set_friendly_name("data"); if (isIndicesConstant) { @@ -88,13 +89,8 @@ protected: idx_range = std::min(static_cast(Shapes.inputShapes.second[i][j]), idx_range); } } - indicesNode = ngraph::builder::makeConstant( - ngraph::element::i64, - Shapes.targetShapes.second[0], - {}, - true, - idx_range - 1, - 0); + auto indices_tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, Shapes.targetShapes.second[0], idx_range - 1, 0); + indicesNode = std::make_shared(indices_tensor); } else { params.push_back(std::make_shared(intInputsPrecision, inputDynamicShapes[1])); params.back()->set_friendly_name("indices"); @@ -103,20 +99,16 @@ protected: gather_ndNode = std::make_shared(params[0], isIndicesConstant ? 
indicesNode : params[1], batchDims); - ngraph::ResultVector results{std::make_shared(gather_ndNode)}; - function = std::make_shared(results, params, "GatherND"); + ov::ResultVector results{std::make_shared(gather_ndNode)}; + function = std::make_shared(results, params, "GatherND"); } }; -TEST_P(GatherNDGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(GatherNDGPUTest, Inference) { run(); } -namespace { - -const std::vector netPrecisions = { +const std::vector model_types = { ov::element::f32, ov::element::f16, ov::element::i32 @@ -158,8 +150,7 @@ const std::vector dynamicInputShapeConstTargetShape = { INSTANTIATE_TEST_SUITE_P(smoke_dynamic_input_shapes_const_target_shapes, GatherNDGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapeConstTargetShape), // input shapes - ::testing::ValuesIn(netPrecisions), // network precision + ::testing::ValuesIn(model_types), // network precision ::testing::Values(true)), // is const indices GatherNDGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_tree.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_tree.cpp index 2b48e5f4a5c..bc17b8775a7 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_tree.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_tree.cpp @@ -2,36 +2,35 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/gather_tree.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include 
"openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/gather_tree.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< InputShape, // Input tensors shape - ngraph::helpers::InputLayerType, // Secondary input type - ov::element::Type_t, // Network precision + ov::test::utils::InputLayerType, // Secondary input type + ov::element::Type, // Model type std::string // Device name > GatherTreeGPUTestParams; class GatherTreeLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo &obj) { InputShape inputShape; - ov::element::Type_t netPrecision; - ngraph::helpers::InputLayerType secondaryInputType; + ov::element::Type_t model_type; + ov::test::utils::InputLayerType secondaryInputType; std::string targetName; - std::tie(inputShape, secondaryInputType, netPrecision, targetName) = obj.param; + std::tie(inputShape, secondaryInputType, model_type, targetName) = obj.param; std::ostringstream result; result << "IS=" << ov::test::utils::partialShape2str({inputShape.first}) << "_"; @@ -40,7 +39,7 @@ public: result << ov::test::utils::vec2str(item) << "_"; } result << "secondaryInputType=" << secondaryInputType << "_"; - result << "netPRC=" << netPrecision << "_"; + result << "netPRC=" << model_type << "_"; result << "trgDev=" << targetName; return result.str(); @@ -49,10 +48,10 @@ public: protected: void SetUp() override { InputShape inputShape; - ov::element::Type netPrecision; - ngraph::helpers::InputLayerType secondaryInputType; + ov::element::Type model_type; + ov::test::utils::InputLayerType secondaryInputType; - std::tie(inputShape, secondaryInputType, netPrecision, targetDevice) = this->GetParam(); + std::tie(inputShape, secondaryInputType, model_type, targetDevice) = this->GetParam(); InputShape parentShape{inputShape}; InputShape::first_type 
maxSeqLenFirst; if (inputShape.first.is_dynamic()) { @@ -73,15 +72,15 @@ protected: shape.push_back({}); } - std::shared_ptr inp2; - std::shared_ptr inp3; - std::shared_ptr inp4; + std::shared_ptr inp2; + std::shared_ptr inp3; + std::shared_ptr inp4; - ov::ParameterVector paramsIn{std::make_shared(netPrecision, inputDynamicShapes[0])}; - if (ngraph::helpers::InputLayerType::PARAMETER == secondaryInputType) { - auto param2 = std::make_shared(netPrecision, inputDynamicShapes[1]); - auto param3 = std::make_shared(netPrecision, inputDynamicShapes[2]); - auto param4 = std::make_shared(netPrecision, inputDynamicShapes[3]); + ov::ParameterVector paramsIn{std::make_shared(model_type, inputDynamicShapes[0])}; + if (ov::test::utils::InputLayerType::PARAMETER == secondaryInputType) { + auto param2 = std::make_shared(model_type, inputDynamicShapes[1]); + auto param3 = std::make_shared(model_type, inputDynamicShapes[2]); + auto param4 = std::make_shared(model_type, inputDynamicShapes[3]); inp2 = param2; inp3 = param3; inp4 = param4; @@ -89,23 +88,26 @@ protected: paramsIn.push_back(param2); paramsIn.push_back(param3); paramsIn.push_back(param4); - } else if (ngraph::helpers::InputLayerType::CONSTANT == secondaryInputType) { + } else if (ov::test::utils::InputLayerType::CONSTANT == secondaryInputType) { auto maxBeamIndex = inputShape.second.front().at(2) - 1; - inp2 = ngraph::builder::makeConstant(netPrecision, inputShape.second.front(), {}, true, maxBeamIndex); - inp3 = ngraph::builder::makeConstant(netPrecision, {inputShape.second.front().at(1)}, {}, true, maxBeamIndex); - inp4 = ngraph::builder::makeConstant(netPrecision, {}, {}, true, maxBeamIndex); + auto inp2_tensor = ov::test::utils::create_and_fill_tensor(model_type, inputShape.second.front(), maxBeamIndex); + inp2 = std::make_shared(inp2_tensor); + auto inp3_tensor = ov::test::utils::create_and_fill_tensor(model_type, ov::Shape{inputShape.second.front().at(1)}, maxBeamIndex); + inp3 = std::make_shared(inp3_tensor); + 
auto inp4_tensor = ov::test::utils::create_and_fill_tensor(model_type, ov::Shape{}, maxBeamIndex); + inp4 = std::make_shared(inp4_tensor); } else { throw std::runtime_error("Unsupported inputType"); } - auto operationResult = std::make_shared(paramsIn.front(), inp2, inp3, inp4); + auto operationResult = std::make_shared(paramsIn.front(), inp2, inp3, inp4); - ngraph::ResultVector results{std::make_shared(operationResult)}; - function = std::make_shared(results, paramsIn, "GatherTree"); + ov::ResultVector results{std::make_shared(operationResult)}; + function = std::make_shared(results, paramsIn, "GatherTree"); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto maxBeamIndex = targetInputStaticShapes.front().at(2) - 1; const auto& funcInputs = function->inputs(); @@ -121,15 +123,11 @@ protected: } }; -TEST_P(GatherTreeLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(GatherTreeLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector netPrecisions = { +const std::vector model_types = { ov::element::f32, ov::element::i32 }; @@ -167,19 +165,17 @@ const std::vector inputDynamicShapesConstant = { INSTANTIATE_TEST_SUITE_P(smoke_gathertree_parameter_compareWithRefs_dynamic, GatherTreeLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(inputDynamicShapesParameter), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), - ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU)), GatherTreeLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_gathertree_constant_compareWithRefs_dynamic, GatherTreeLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(inputDynamicShapesConstant), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - 
::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU)), GatherTreeLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions - diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/grid_sample.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/grid_sample.cpp index 11862e3d42c..a115d2bc069 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/grid_sample.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/grid_sample.cpp @@ -2,38 +2,37 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/select.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_enums.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -#include -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/grid_sample.hpp" + +namespace { +using ov::test::InputShape; using ov::op::v9::GridSample; -namespace GPULayerTestsDefinitions { - typedef std::tuple< std::vector, // Input shapes GridSample::InterpolationMode, // Interpolation mode GridSample::PaddingMode, // Padding mode bool, // Align corners - ElementType, // Data precision - ElementType // Grid precision + ov::element::Type, // Data precision + ov::element::Type // Grid precision > GridSampleLayerTestGPUParams; class GridSampleLayerTestGPU : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::vector inputShapes; GridSample::InterpolationMode 
interpolateMode; GridSample::PaddingMode paddingMode; bool alignCorners; - ElementType dataPrecision, gridPrecision; + ov::element::Type dataPrecision, gridPrecision; std::tie(inputShapes, interpolateMode, paddingMode, alignCorners, dataPrecision, gridPrecision) = obj.param; @@ -69,7 +68,7 @@ protected: GridSample::InterpolationMode interpolateMode; GridSample::PaddingMode paddingMode; bool alignCorners; - ElementType dataPrecision, gridPrecision; + ov::element::Type dataPrecision, gridPrecision; std::tie(inputShapes, interpolateMode, paddingMode, alignCorners, dataPrecision, gridPrecision) = this->GetParam(); targetDevice = ov::test::utils::DEVICE_GPU; @@ -86,12 +85,12 @@ protected: GridSample::Attributes attributes = {alignCorners, interpolateMode, paddingMode}; auto gridSampleNode = std::make_shared(params[0], params[1], attributes); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < gridSampleNode->get_output_size(); i++) { - results.push_back(std::make_shared(gridSampleNode->output(i))); + results.push_back(std::make_shared(gridSampleNode->output(i))); } - function = std::make_shared(results, params, "GridSampleGPU"); + function = std::make_shared(results, params, "GridSampleGPU"); } void generate_inputs(const std::vector& targetInputStaticShapes) override { @@ -104,12 +103,12 @@ protected: if (funcInput.get_node()->get_friendly_name() == "data") { int32_t range = std::accumulate(targetInputStaticShapes[0].begin(), targetInputStaticShapes[0].end(), 1u, std::multiplies()); - tensor = utils::create_and_fill_tensor( + tensor = ov::test::utils::create_and_fill_tensor( funcInput.get_element_type(), targetInputStaticShapes[0], range, -range / 2, 1); } else if (funcInput.get_node()->get_friendly_name() == "grid") { int32_t range = std::max(targetInputStaticShapes[0][2], targetInputStaticShapes[0][3]) + 2; int32_t resolution = range / 2; - tensor = utils::create_and_fill_tensor( + tensor = ov::test::utils::create_and_fill_tensor( 
funcInput.get_element_type(), targetInputStaticShapes[1], range, -1, resolution == 0 ? 1 : resolution); } inputs.insert({funcInput.get_node_shared_ptr(), tensor}); @@ -117,8 +116,7 @@ protected: } }; -TEST_P(GridSampleLayerTestGPU, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(GridSampleLayerTestGPU, Inference) { run(); } @@ -152,8 +150,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic, GridSampleLayerTestGPU, ::testing::ValuesIn(interpolateMode), ::testing::ValuesIn(paddingMode), ::testing::ValuesIn(alignCorners), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32)), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::f32)), GridSampleLayerTestGPU::getTestCaseName); - -} // namespace GPULayerTestsDefinitions +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp index 531c97fa218..2141f5a935b 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp @@ -2,54 +2,50 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/single_layer/group_convolution_backprop_data.hpp" -#include "common_test_utils/test_constants.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include "openvino/core/preprocess/pre_post_process.hpp" +#include "common_test_utils/test_enums.hpp" +#include "common_test_utils/node_builders/group_convolution_backprop_data.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "shared_test_classes/single_op/group_convolution_backprop_data.hpp" -using 
namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/group_conv.hpp" -namespace GPULayerTestsDefinitions { - -using GroupDeconvSpecParams = LayerTestsDefinitions::groupConvBackpropSpecificParams; +namespace { +using ov::test::InputShape; +using ov::test::groupConvBackpropSpecificParams; using DeconvInputData = std::tuple>>; // values for 'output_shape' -using GroupDeconvLayerTestParamsSet = std::tuple>; class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - GroupDeconvSpecParams basicParamsSet; + groupConvBackpropSpecificParams basicParamsSet; DeconvInputData inputData; - ElementType prec; + ov::element::Type prec; std::string targetDevice; std::map additionalConfig; std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = obj.param; - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd, outPadding; size_t convOutChannels, groupNum; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, groupNum, padType, outPadding) = basicParamsSet; InputShape inputShape; - ngraph::helpers::InputLayerType outShapeType; + ov::test::utils::InputLayerType outShapeType; std::vector> outShapeData; std::tie(inputShape, outShapeType, outShapeData) = inputData; @@ -89,13 +85,13 @@ public: return result.str(); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { if (function->get_parameters().size() != 1) { // WA: output_shape depends on 3rd deconvolution input data // but the reference implementation doesn't implement 
shape inference - // so we need to build a new ngraph function and replace the 3rd input parameter with a constant + // so we need to build a new ov function and replace the 3rd input parameter with a constant // to get valid output shapes - functionRefs = createGraph({targetInputStaticShapes[0]}, ngraph::helpers::InputLayerType::CONSTANT); + functionRefs = createGraph({targetInputStaticShapes[0]}, ov::test::utils::InputLayerType::CONSTANT); } inputs.clear(); const auto& funcInputs = function->inputs(); @@ -159,17 +155,17 @@ public: function = p.build(); } - std::shared_ptr createGraph(const std::vector& inShapes, ngraph::helpers::InputLayerType outShapeType) { + std::shared_ptr createGraph(const std::vector& inShapes, ov::test::utils::InputLayerType outShapeType) { ov::ParameterVector params{std::make_shared(prec, inShapes.front())}; std::shared_ptr outShapeNode; if (!outShapeData.empty()) { - if (outShapeType == ngraph::helpers::InputLayerType::PARAMETER) { + if (outShapeType == ov::test::utils::InputLayerType::PARAMETER) { IE_ASSERT(inputDynamicShapes.size() == 2); - auto outShapeParam = std::make_shared(ngraph::element::i32, inputDynamicShapes.back()); + auto outShapeParam = std::make_shared(ov::element::i32, inputDynamicShapes.back()); params.push_back(outShapeParam); outShapeNode = outShapeParam; } else { - outShapeNode = ngraph::opset8::Constant::create(ngraph::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]); + outShapeNode = ov::op::v0::Constant::create(ov::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]); } } @@ -180,36 +176,36 @@ public: std::shared_ptr deconv; if (!outShapeData.empty()) { IE_ASSERT(outShapeNode != nullptr); - deconv = ngraph::builder::makeGroupConvolutionBackpropData(params[0], outShapeNode, prec, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, groupNum); + deconv = ov::test::utils::make_group_convolution_backprop_data(params[0], 
outShapeNode, prec, kernel, stride, padBegin, + padEnd, dilation, padType, convOutChannels, groupNum); } else { - deconv = ngraph::builder::makeGroupConvolutionBackpropData(params[0], prec, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, groupNum, false, outPadding); + deconv = ov::test::utils::make_group_convolution_backprop_data(params[0], prec, kernel, stride, padBegin, + padEnd, dilation, padType, convOutChannels, groupNum, false, outPadding); } - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < deconv->get_output_size(); i++) - results.push_back(std::make_shared(deconv->output(i))); + results.push_back(std::make_shared(deconv->output(i))); - return std::make_shared(results, params, "GroupDeconv"); + return std::make_shared(results, params, "GroupDeconv"); } protected: void SetUp() override { - GroupDeconvSpecParams basicParamsSet; + groupConvBackpropSpecificParams basicParamsSet; DeconvInputData inputData; std::map additionalConfig; std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = this->GetParam(); InputShape inputShape; - ngraph::helpers::InputLayerType outShapeType; + ov::test::utils::InputLayerType outShapeType; std::tie(inputShape, outShapeType, outShapeData) = inputData; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, groupNum, padType, outPadding) = basicParamsSet; std::vector paramsShapes; paramsShapes.push_back(inputShape); - if (!outShapeData.empty() && outShapeType == ngraph::helpers::InputLayerType::PARAMETER) { + if (!outShapeData.empty() && outShapeType == ov::test::utils::InputLayerType::PARAMETER) { const auto outShapeDims = ov::Shape{outShapeData.front().size()}; paramsShapes.push_back(InputShape{outShapeDims, std::vector(inputShape.second.size(), outShapeDims)}); } @@ -220,38 +216,34 @@ protected: } private: - ElementType prec; - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::element::Type prec; + 
ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd, outPadding; size_t convOutChannels, groupNum; std::vector> outShapeData; size_t inferRequestNum = 0; }; -TEST_P(GroupDeconvolutionLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(GroupDeconvolutionLayerGPUTest, Inference) { run(); } -namespace { - std::map emptyAdditionalConfig; const std::vector> emptyOutputShape = {{}}; const std::vector> emptyOutputPadding = {{}}; /* ============= GroupConvolution params ============= */ -const InferenceEngine::SizeVector numOutChannels = {6}; -const InferenceEngine::SizeVector numGroups = {2, 3}; +const std::vector numOutChannels = {6}; +const std::vector numGroups = {2, 3}; /* ============= GroupConvolution params (2D) ============= */ -const std::vector kernels2d = {{3, 3}, {1, 1}}; -const std::vector strides2d = {{1, 1}, {2, 2}}; +const std::vector> kernels2d = {{3, 3}, {1, 1}}; +const std::vector> strides2d = {{1, 1}, {2, 2}}; const std::vector> padBegins2d = {{0, 0}}; const std::vector> padEnds2d = {{0, 0}}; -const std::vector dilations2d = {{1, 1}}; +const std::vector> dilations2d = {{1, 1}}; /* ============= GroupConvolution (2D) ============= */ const auto groupConvParams_ExplicitPadding_2D = ::testing::Combine( @@ -262,29 +254,29 @@ const auto groupConvParams_ExplicitPadding_2D = ::testing::Combine( ::testing::ValuesIn(dilations2d), ::testing::ValuesIn(numOutChannels), ::testing::ValuesIn(numGroups), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding) ); const std::vector dyn_2D_inputs_smoke = { DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} }, DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}}}, - 
ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} }, DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}, {2, 12, 5, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} }, DeconvInputData{ InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {3, 12, 7, 7}, {2, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} } }; @@ -293,7 +285,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Dynamic_FP32, GroupDeconvolutionLa ::testing::Combine( groupConvParams_ExplicitPadding_2D, ::testing::ValuesIn(dyn_2D_inputs_smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(emptyAdditionalConfig)), GroupDeconvolutionLayerGPUTest::getTestCaseName); @@ -301,17 +293,17 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Dynamic_FP32, GroupDeconvolutionLa const std::vector dyn_2D_inputs_with_output_shape = { DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{15, 15}, {9, 10}, {15, 15}} }, DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}, {2, 12, 5, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{15, 15}} }, DeconvInputData{ InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {3, 12, 7, 7}, {2, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{15, 15}} } }; @@ -319,21 +311,19 @@ const std::vector dyn_2D_inputs_with_output_shape = { INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Dynamic_OutputShape_FP32, GroupDeconvolutionLayerGPUTest, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), + ::testing::Values(std::vector{3, 
3}), ::testing::ValuesIn(strides2d), ::testing::ValuesIn(padBegins2d), ::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d), ::testing::ValuesIn(numOutChannels), ::testing::ValuesIn(numGroups), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding)), ::testing::ValuesIn(dyn_2D_inputs_with_output_shape), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(emptyAdditionalConfig)), GroupDeconvolutionLayerGPUTest::getTestCaseName); } // namespace - -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp index 01fad8fc9f6..892be327e07 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp @@ -1,46 +1,39 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/node_builders/group_convolution.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/single_layer/group_convolution.hpp" -#include "common_test_utils/test_constants.hpp" +#include "shared_test_classes/single_op/group_convolution.hpp" -// using namespace LayerTestsDefinitions; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/group_conv.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; 
+using ov::test::groupConvSpecificParams; -using LayerTestsDefinitions::groupConvSpecificParams; typedef std::tuple< groupConvSpecificParams, - ElementType, // Net precision - ElementType, // Input precision - ElementType, // Output precision - InputShape, // Input shape - LayerTestsUtils::TargetDevice // Device name + ov::element::Type, // Model type + InputShape, // Input shape + std::string // Device name > groupConvLayerTestParamsSet; - class GroupConvolutionLayerGPUTestDynamic : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { groupConvSpecificParams groupConvParams; - ElementType netType; - ElementType inType, outType; + ov::element::Type model_type; InputShape inputShape; std::string targetDevice; - std::tie(groupConvParams, netType, inType, outType, inputShape, targetDevice) = obj.param; + std::tie(groupConvParams, model_type, inputShape, targetDevice) = obj.param; - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels; size_t numGroups; @@ -62,9 +55,7 @@ public: result << "O=" << convOutChannels << "_"; result << "G=" << numGroups << "_"; result << "AP=" << padType << "_"; - result << "netPRC=" << netType << "_"; - result << "inPRC=" << inType << "_"; - result << "outPRC=" << outType << "_"; + result << "netPRC=" << model_type << "_"; result << "trgDev=" << targetDevice; return result.str(); @@ -74,13 +65,13 @@ protected: void SetUp() override { groupConvSpecificParams groupConvParams; InputShape inputShape; - auto netType = ElementType::undefined; - std::tie(groupConvParams, netType, inType, outType, inputShape, targetDevice) = this->GetParam(); + auto model_type = ov::element::undefined; + std::tie(groupConvParams, model_type, inputShape, targetDevice) = 
this->GetParam(); init_input_shapes({inputShape}); - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels; size_t numGroups; @@ -88,25 +79,23 @@ protected: ov::ParameterVector inputParams; for (auto&& shape : inputDynamicShapes) - inputParams.push_back(std::make_shared(inType, shape)); + inputParams.push_back(std::make_shared(model_type, shape)); - auto groupConvolutionNode = ngraph::builder::makeGroupConvolution(inputParams.front(), netType, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, numGroups); + auto groupConvolutionNode = ov::test::utils::make_group_convolution(inputParams.front(), model_type, kernel, stride, padBegin, + padEnd, dilation, padType, convOutChannels, numGroups); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < groupConvolutionNode->get_output_size(); i++) - results.push_back(std::make_shared(groupConvolutionNode->output(i))); + results.push_back(std::make_shared(groupConvolutionNode->output(i))); - function = std::make_shared(results, inputParams, "GroupConvolution"); + function = std::make_shared(results, inputParams, "GroupConvolution"); } }; -TEST_P(GroupConvolutionLayerGPUTestDynamic, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(GroupConvolutionLayerGPUTestDynamic, Inference) { run(); } -namespace { const std::vector dynInputShapes1D = { { {1, 12, ov::Dimension::dynamic()}, @@ -116,17 +105,15 @@ const std::vector dynInputShapes1D = { INSTANTIATE_TEST_SUITE_P(smoke_DwGroupConvolutionLayerGPUTest_dynamic1DSymPad, GroupConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3}), - ::testing::Values(SizeVector{1}), + ::testing::Values(std::vector{3}), + ::testing::Values(std::vector{1}), ::testing::Values(std::vector{0}), ::testing::Values(std::vector{0}), - 
::testing::Values(SizeVector{1}), + ::testing::Values(std::vector{1}), ::testing::Values(12), ::testing::Values(12), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes1D), ::testing::Values(ov::test::utils::DEVICE_GPU)), GroupConvolutionLayerGPUTestDynamic::getTestCaseName); @@ -135,17 +122,15 @@ INSTANTIATE_TEST_SUITE_P(smoke_DwGroupConvolutionLayerGPUTest_dynamic1DSymPad, G INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic1DSymPad_Disabled, GroupConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3}), - ::testing::Values(SizeVector{1}), + ::testing::Values(std::vector{3}), + ::testing::Values(std::vector{1}), ::testing::Values(std::vector{0}), ::testing::Values(std::vector{0}), - ::testing::Values(SizeVector{1}), + ::testing::Values(std::vector{1}), ::testing::Values(4), ::testing::Values(4), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes1D), ::testing::Values(ov::test::utils::DEVICE_GPU)), GroupConvolutionLayerGPUTestDynamic::getTestCaseName); @@ -161,17 +146,15 @@ const std::vector dynInputShapes2D = { INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2DSymPad, GroupConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{3, 3}), + 
::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{1, 2}), ::testing::Values(std::vector{1, 2}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(4), ::testing::Values(4), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes2D), ::testing::Values(ov::test::utils::DEVICE_GPU)), GroupConvolutionLayerGPUTestDynamic::getTestCaseName); @@ -179,17 +162,15 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2DSymPad, Gro INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_AsymPad, GroupConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{1, 2}), ::testing::Values(std::vector{2, 1}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(4), ::testing::Values(4), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes2D), ::testing::Values(ov::test::utils::DEVICE_GPU)), GroupConvolutionLayerGPUTestDynamic::getTestCaseName); @@ -197,17 +178,15 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_AsymPad, G INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_SymAutoPad, 
GroupConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{1, 2}), ::testing::Values(std::vector{1, 2}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(4), ::testing::Values(4), - ::testing::ValuesIn({ngraph::op::PadType::SAME_LOWER, ngraph::op::PadType::SAME_UPPER})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes2D), ::testing::Values(ov::test::utils::DEVICE_GPU)), GroupConvolutionLayerGPUTestDynamic::getTestCaseName); @@ -215,20 +194,16 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_SymAutoPad INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_AsymAutoPad, GroupConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{1, 2}), ::testing::Values(std::vector{2, 1}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(4), ::testing::Values(4), - ::testing::ValuesIn({ngraph::op::PadType::SAME_LOWER, ngraph::op::PadType::SAME_UPPER})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes2D), ::testing::Values(ov::test::utils::DEVICE_GPU)), 
GroupConvolutionLayerGPUTestDynamic::getTestCaseName); } // namespace - -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp index 5bfe9bb5612..5593fc22b45 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp @@ -2,67 +2,69 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/interpolate.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_enums.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" -#include -#include "openvino/core/preprocess/pre_post_process.hpp" -using namespace ov::test; -using ngraph::helpers::operator<<; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/interpolate.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; -using InterpolateSpecificParams = std::tuple, // PadBegin std::vector, // PadEnd double>; // Cube coef -using ShapeParams = std::tuple>, // scales or sizes values std::vector>; // axes using InterpolateLayerGPUTestParamsSet = std::tuple; // use Interpolate_v11 class InterpolateLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { InterpolateSpecificParams specificParams; ShapeParams shapeParams; - ElementType prec; + ov::element::Type prec; bool useInterpolateV11; std::map additionalConfig; std::tie(specificParams, shapeParams, prec, useInterpolateV11) = obj.param; - ngraph::op::v4::Interpolate::InterpolateMode mode; - 
ngraph::op::v4::Interpolate::CoordinateTransformMode transfMode; - ngraph::op::v4::Interpolate::NearestMode nearMode; + ov::op::v4::Interpolate::InterpolateMode mode; + ov::op::v4::Interpolate::CoordinateTransformMode transfMode; + ov::op::v4::Interpolate::NearestMode nearMode; bool antiAlias; std::vector padBegin; std::vector padEnd; double cubeCoef; std::tie(mode, transfMode, nearMode, antiAlias, padBegin, padEnd, cubeCoef) = specificParams; - ngraph::op::v4::Interpolate::ShapeCalcMode shapeCalcMode; + ov::op::v4::Interpolate::ShapeCalcMode shapeCalcMode; InputShape inputShapes; - ngraph::helpers::InputLayerType sizesInputType; - ngraph::helpers::InputLayerType scalesInputType; + ov::test::utils::InputLayerType sizesInputType; + ov::test::utils::InputLayerType scalesInputType; std::vector> shapeDataForInput; std::vector axes; std::tie(shapeCalcMode, inputShapes, sizesInputType, scalesInputType, shapeDataForInput, axes) = shapeParams; std::ostringstream result; + using ov::operator<<; result << "ShapeCalcMode=" << shapeCalcMode << "_"; result << "IS="; result << ov::test::utils::partialShape2str({inputShapes.first}) << "_"; @@ -70,7 +72,7 @@ public: for (const auto& shape : inputShapes.second) { result << ov::test::utils::vec2str(shape) << "_"; } - if (shapeCalcMode == ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES) { + if (shapeCalcMode == ov::op::v4::Interpolate::ShapeCalcMode::SCALES) { result << "Scales="; } else { result << "Sizes="; @@ -101,7 +103,7 @@ public: return result.str(); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { @@ -111,7 +113,7 @@ public: if (i == 0) { tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 2560, 0, 256); } else if (i == 1) { - if (shapeCalcMode == 
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES || funcInputs.size() == 3) { + if (shapeCalcMode == ov::op::v4::Interpolate::ShapeCalcMode::SIZES || funcInputs.size() == 3) { tensor = ov::Tensor(funcInput.get_element_type(), targetInputStaticShapes[i], sizes[inferRequestNum].data()); } else { tensor = ov::Tensor(funcInput.get_element_type(), targetInputStaticShapes[i], scales[inferRequestNum].data()); @@ -152,7 +154,7 @@ public: protected: std::vector> scales; std::vector> sizes; - ngraph::op::v4::Interpolate::ShapeCalcMode shapeCalcMode; + ov::op::v4::Interpolate::ShapeCalcMode shapeCalcMode; size_t inferRequestNum = 0; void SetUp() override { @@ -160,13 +162,13 @@ protected: InterpolateSpecificParams specificParams; ShapeParams shapeParams; - ElementType ngPrc; + ov::element::Type ngPrc; bool useInterpolateV11; std::tie(specificParams, shapeParams, ngPrc, useInterpolateV11) = this->GetParam(); - ngraph::op::v4::Interpolate::InterpolateMode mode; - ngraph::op::v4::Interpolate::CoordinateTransformMode transfMode; - ngraph::op::v4::Interpolate::NearestMode nearMode; + ov::op::v4::Interpolate::InterpolateMode mode; + ov::op::v4::Interpolate::CoordinateTransformMode transfMode; + ov::op::v4::Interpolate::NearestMode nearMode; bool antiAlias; std::vector padBegin; std::vector padEnd; @@ -174,13 +176,13 @@ protected: std::tie(mode, transfMode, nearMode, antiAlias, padBegin, padEnd, cubeCoef) = specificParams; InputShape dataShape; - ngraph::helpers::InputLayerType sizesInputType; - ngraph::helpers::InputLayerType scalesInputType; + ov::test::utils::InputLayerType sizesInputType; + ov::test::utils::InputLayerType scalesInputType; std::vector> shapeDataForInput; std::vector axes; std::tie(shapeCalcMode, dataShape, sizesInputType, scalesInputType, shapeDataForInput, axes) = shapeParams; - if (shapeCalcMode == ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES) { + if (shapeCalcMode == ov::op::v4::Interpolate::ShapeCalcMode::SCALES) { scales = shapeDataForInput; 
sizes.resize(scales.size(), std::vector(scales.front().size(), 0)); } else { @@ -195,10 +197,10 @@ protected: std::vector inputShapes; inputShapes.push_back(dataShape); - if (sizesInputType == ngraph::helpers::InputLayerType::PARAMETER) { + if (sizesInputType == ov::test::utils::InputLayerType::PARAMETER) { inputShapes.push_back(InputShape({static_cast(axes.size())}, std::vector(dataShape.second.size(), {axes.size()}))); } - if (scalesInputType == ngraph::helpers::InputLayerType::PARAMETER) { + if (scalesInputType == ov::test::utils::InputLayerType::PARAMETER) { inputShapes.push_back(InputShape({static_cast(axes.size())}, std::vector(dataShape.second.size(), {axes.size()}))); } @@ -207,111 +209,108 @@ protected: ov::ParameterVector params{std::make_shared(ngPrc, inputDynamicShapes.front())}; std::shared_ptr sizesInput, scalesInput; - if (shapeCalcMode == ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES) { - if (scalesInputType == ngraph::helpers::InputLayerType::PARAMETER) { - auto paramNode = std::make_shared(ngraph::element::Type_t::f32, ov::Shape{scales.front().size()}); + if (shapeCalcMode == ov::op::v4::Interpolate::ShapeCalcMode::SCALES) { + if (scalesInputType == ov::test::utils::InputLayerType::PARAMETER) { + auto paramNode = std::make_shared(ov::element::f32, ov::Shape{scales.front().size()}); params.push_back(paramNode); scalesInput = paramNode; } else { - scalesInput = std::make_shared(ngraph::element::Type_t::f32, ov::Shape{scales.front().size()}, scales.front()); + scalesInput = std::make_shared(ov::element::f32, ov::Shape{scales.front().size()}, scales.front()); } - if (sizesInputType == ngraph::helpers::InputLayerType::PARAMETER) { - auto paramNode = std::make_shared(ngraph::element::Type_t::i32, ov::Shape{sizes.front().size()}); + if (sizesInputType == ov::test::utils::InputLayerType::PARAMETER) { + auto paramNode = std::make_shared(ov::element::i32, ov::Shape{sizes.front().size()}); params.push_back(paramNode); sizesInput = paramNode; } else { - 
sizesInput = std::make_shared(ngraph::element::Type_t::i32, ov::Shape{sizes.front().size()}, sizes.front()); + sizesInput = std::make_shared(ov::element::i32, ov::Shape{sizes.front().size()}, sizes.front()); } } else { - if (sizesInputType == ngraph::helpers::InputLayerType::PARAMETER) { - auto paramNode = std::make_shared(ngraph::element::Type_t::i32, ov::Shape{sizes.front().size()}); + if (sizesInputType == ov::test::utils::InputLayerType::PARAMETER) { + auto paramNode = std::make_shared(ov::element::i32, ov::Shape{sizes.front().size()}); params.push_back(paramNode); sizesInput = paramNode; } else { - sizesInput = std::make_shared(ngraph::element::Type_t::i32, ov::Shape{sizes.front().size()}, sizes.front()); + sizesInput = std::make_shared(ov::element::i32, ov::Shape{sizes.front().size()}, sizes.front()); } - if (scalesInputType == ngraph::helpers::InputLayerType::PARAMETER) { - auto paramNode = std::make_shared(ngraph::element::Type_t::f32, ov::Shape{scales.front().size()}); + if (scalesInputType == ov::test::utils::InputLayerType::PARAMETER) { + auto paramNode = std::make_shared(ov::element::f32, ov::Shape{scales.front().size()}); params.push_back(paramNode); scalesInput = paramNode; } else { - scalesInput = std::make_shared(ngraph::element::Type_t::f32, ov::Shape{scales.front().size()}, scales.front()); + scalesInput = std::make_shared(ov::element::f32, ov::Shape{scales.front().size()}, scales.front()); } } - auto axesInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{axes.size()}, axes); + auto axesInput = std::make_shared(ov::element::i64, ov::Shape{axes.size()}, axes); for (size_t i = 0; i < params.size(); i++) { params[i]->set_friendly_name(std::string("param_") + std::to_string(i)); } - ngraph::op::v4::Interpolate::InterpolateAttrs interpAttr{mode, shapeCalcMode, padBegin, padEnd, transfMode, nearMode, + ov::op::v4::Interpolate::InterpolateAttrs interpAttr{mode, shapeCalcMode, padBegin, padEnd, transfMode, nearMode, antiAlias, cubeCoef}; - 
std::shared_ptr interpolate; - bool scalesMode = shapeCalcMode == ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES; + std::shared_ptr interpolate; + bool scalesMode = shapeCalcMode == ov::op::v4::Interpolate::ShapeCalcMode::SCALES; if (useInterpolateV11) { if (axes.size() != dataShape.first.size()) { - interpolate = std::make_shared(params[0], + interpolate = std::make_shared(params[0], scalesMode ? scalesInput : sizesInput, axesInput, interpAttr); } else { - interpolate = std::make_shared(params[0], + interpolate = std::make_shared(params[0], scalesMode ? scalesInput : sizesInput, interpAttr); } } else { - interpolate = std::make_shared(params[0], + interpolate = std::make_shared(params[0], sizesInput, scalesInput, axesInput, interpAttr); } - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < interpolate->get_output_size(); ++i) { - results.push_back(std::make_shared(interpolate->output(i))); + results.push_back(std::make_shared(interpolate->output(i))); } - function = std::make_shared(results, params, "InterpolateGPU"); + function = std::make_shared(results, params, "InterpolateGPU"); } }; -TEST_P(InterpolateLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(InterpolateLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector coordinateTransformModes_Smoke = { - ngraph::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL, - ngraph::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC, +const std::vector coordinateTransformModes_Smoke = { + ov::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL, + ov::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC, }; -const std::vector coordinateTransformModes_Full = { - ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN, - ngraph::op::v4::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL, - ngraph::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL, - 
ngraph::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC, - ngraph::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS, +const std::vector coordinateTransformModes_Full = { + ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN, + ov::op::v4::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL, + ov::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL, + ov::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC, + ov::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS, }; -const std::vector nearestModes_Smoke = { - ngraph::op::v4::Interpolate::NearestMode::SIMPLE, - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, - ngraph::op::v4::Interpolate::NearestMode::FLOOR, +const std::vector nearestModes_Smoke = { + ov::op::v4::Interpolate::NearestMode::SIMPLE, + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, + ov::op::v4::Interpolate::NearestMode::FLOOR, }; -const std::vector nearestModes_Full = { - ngraph::op::v4::Interpolate::NearestMode::SIMPLE, - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, - ngraph::op::v4::Interpolate::NearestMode::FLOOR, - ngraph::op::v4::Interpolate::NearestMode::CEIL, - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_CEIL, +const std::vector nearestModes_Full = { + ov::op::v4::Interpolate::NearestMode::SIMPLE, + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, + ov::op::v4::Interpolate::NearestMode::FLOOR, + ov::op::v4::Interpolate::NearestMode::CEIL, + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_CEIL, }; -const std::vector defNearestModes = { - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, +const std::vector defNearestModes = { + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, }; const std::vector antialias = { @@ -337,50 +336,50 @@ const std::vector> reducedAxes4D = { const std::vector shapeParams4D_Smoke = { ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + 
ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 1.25f, 1.5f}, {1.f, 1.f, 1.25f, 1.25f}, {1.f, 1.f, 1.25f, 1.5f}}, defaultAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {1, 10}, -1, -1}, {{1, 2, 12, 20}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 0.5f, 2.0f}}, defaultAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {1, 10}, -1, -1}, {{1, 2, 12, 20}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{0.5f, 2.0f}}, reducedAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6}, {2, 7, 8, 7}, {1, 11, 5, 6}}, defaultAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {1, 10}, -1, -1}, {{1, 2, 12, 20}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, + 
ov::test::utils::InputLayerType::PARAMETER, {{1, 2, 24, 10}}, defaultAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {1, 10}, -1, -1}, {{1, 2, 12, 20}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{24, 10}}, reducedAxes4D.front() } @@ -388,18 +387,18 @@ const std::vector shapeParams4D_Smoke = { const std::vector shapeParams4D_Full = { ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f}}, defaultAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6}}, defaultAxes4D.front() } @@ -407,41 +406,41 @@ const std::vector shapeParams4D_Full = { const std::vector shapeParams4DReducedAxis_Full = { ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f}}, defaultAxes4D.front() }, ShapeParams{ - 
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6}}, defaultAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.5f}}, reducedAxes4D.back() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{6}}, reducedAxes4D.back() } }; const auto interpolateCasesNN_Smoke = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::NEAREST), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::NEAREST), ::testing::ValuesIn(coordinateTransformModes_Smoke), ::testing::ValuesIn(nearestModes_Smoke), ::testing::ValuesIn(antialias), @@ -450,7 +449,7 @@ const auto interpolateCasesNN_Smoke = ::testing::Combine( ::testing::ValuesIn(cubeCoefs)); const auto interpolateCasesNN_Full = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::NEAREST), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::NEAREST), ::testing::ValuesIn(coordinateTransformModes_Full), ::testing::ValuesIn(nearestModes_Full), ::testing::ValuesIn(antialias), @@ -462,7 +461,7 @@ 
INSTANTIATE_TEST_SUITE_P(smoke_InterpolateNN_Layout_Test, InterpolateLayerGPUTes ::testing::Combine( interpolateCasesNN_Smoke, ::testing::ValuesIn(shapeParams4D_Smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(true, false)), InterpolateLayerGPUTest::getTestCaseName); @@ -470,12 +469,12 @@ INSTANTIATE_TEST_SUITE_P(InterpolateNN_Layout_Test, InterpolateLayerGPUTest, ::testing::Combine( interpolateCasesNN_Full, ::testing::ValuesIn(shapeParams4DReducedAxis_Full), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(true, false)), InterpolateLayerGPUTest::getTestCaseName); const auto interpolateCasesLinearOnnx_Smoke = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), ::testing::ValuesIn(coordinateTransformModes_Smoke), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ -484,7 +483,7 @@ const auto interpolateCasesLinearOnnx_Smoke = ::testing::Combine( ::testing::ValuesIn(cubeCoefs)); const auto interpolateCasesLinearOnnx_Full = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), ::testing::ValuesIn(coordinateTransformModes_Full), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ -496,7 +495,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateLinearOnnx_Layout_Test, InterpolateLay ::testing::Combine( interpolateCasesLinearOnnx_Smoke, ::testing::ValuesIn(shapeParams4D_Smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(false)), InterpolateLayerGPUTest::getTestCaseName); @@ -504,12 +503,12 @@ INSTANTIATE_TEST_SUITE_P(InterpolateLinearOnnx_Layout_Test, InterpolateLayerGPUT ::testing::Combine( interpolateCasesLinearOnnx_Full, 
::testing::ValuesIn(shapeParams4D_Full), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(true, false)), InterpolateLayerGPUTest::getTestCaseName); const auto interpolateCasesLinear_Smoke = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR), ::testing::ValuesIn(coordinateTransformModes_Smoke), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ -518,7 +517,7 @@ const auto interpolateCasesLinear_Smoke = ::testing::Combine( ::testing::ValuesIn(cubeCoefs)); const auto interpolateCasesLinear_Full = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR), ::testing::ValuesIn(coordinateTransformModes_Full), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ -530,7 +529,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateLinear_Layout_Test, InterpolateLayerGP ::testing::Combine( interpolateCasesLinear_Smoke, ::testing::ValuesIn(shapeParams4D_Smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(false)), InterpolateLayerGPUTest::getTestCaseName); @@ -538,12 +537,12 @@ INSTANTIATE_TEST_SUITE_P(InterpolateLinear_Layout_Test, InterpolateLayerGPUTest, ::testing::Combine( interpolateCasesLinear_Full, ::testing::ValuesIn(shapeParams4DReducedAxis_Full), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(true, false)), InterpolateLayerGPUTest::getTestCaseName); const auto interpolateCasesCubic_Smoke = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::CUBIC), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::CUBIC), ::testing::ValuesIn(coordinateTransformModes_Smoke), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ 
-552,7 +551,7 @@ const auto interpolateCasesCubic_Smoke = ::testing::Combine( ::testing::ValuesIn(cubeCoefs)); const auto interpolateCasesCubic_Full = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::CUBIC), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::CUBIC), ::testing::ValuesIn(coordinateTransformModes_Full), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ -564,7 +563,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateCubic_Layout_Test, InterpolateLayerGPU ::testing::Combine( interpolateCasesCubic_Smoke, ::testing::ValuesIn(shapeParams4D_Smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(false)), InterpolateLayerGPUTest::getTestCaseName); @@ -572,7 +571,7 @@ INSTANTIATE_TEST_SUITE_P(InterpolateCubic_Layout_Test, InterpolateLayerGPUTest, ::testing::Combine( interpolateCasesCubic_Full, ::testing::ValuesIn(shapeParams4DReducedAxis_Full), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(true, false)), InterpolateLayerGPUTest::getTestCaseName); @@ -592,42 +591,42 @@ const std::vector> reducedAxes5D = { const std::vector shapeParams5D_Smoke = { ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 1.25f, 1.5f, 0.5f}, {1.f, 1.f, 1.25f, 1.25f, 1.25f}, {1.f, 1.f, 1.25f, 1.5f, 0.5f}}, defaultAxes5D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 10}, -1, -1, -1}, {{1, 4, 2, 3, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, - 
ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 1.5f, 2.f, 0.5f}}, defaultAxes5D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6, 2}, {2, 7, 8, 7, 4}, {1, 11, 5, 6, 2}}, defaultAxes5D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 10}, -1, -1, -1}, {{1, 4, 2, 3, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1, 4, 4, 1, 6}}, defaultAxes5D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 10}, -1, -1, -1}, {{1, 4, 2, 3, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{4, 1, 6}}, reducedAxes5D.front() }, @@ -635,33 +634,33 @@ const std::vector shapeParams5D_Smoke = { const std::vector shapeParams5D_Full = { ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f, 0.5f}}, 
defaultAxes5D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {1, 11, 5, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6, 4}}, defaultAxes5D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {1, 11, 5, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 6, 4}}, reducedAxes5D.front() } }; const auto interpolateCasesLinearOnnx5D_Smoke = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), ::testing::ValuesIn(coordinateTransformModes_Smoke), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ -670,7 +669,7 @@ const auto interpolateCasesLinearOnnx5D_Smoke = ::testing::Combine( ::testing::ValuesIn(cubeCoefs)); const auto interpolateCasesLinearOnnx5D_Full = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), ::testing::ValuesIn(coordinateTransformModes_Full), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ -682,7 +681,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateLinearOnnx5D_Layout_Test, InterpolateL ::testing::Combine( interpolateCasesLinearOnnx5D_Smoke, ::testing::ValuesIn(shapeParams5D_Smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), 
::testing::Values(false)), InterpolateLayerGPUTest::getTestCaseName); @@ -690,12 +689,12 @@ INSTANTIATE_TEST_SUITE_P(InterpolateLinearOnnx5D_Layout_Test, InterpolateLayerGP ::testing::Combine( interpolateCasesLinearOnnx5D_Full, ::testing::ValuesIn(shapeParams5D_Full), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(true, false)), InterpolateLayerGPUTest::getTestCaseName); const auto interpolateCasesNN5D_Smoke = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::NEAREST), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::NEAREST), ::testing::ValuesIn(coordinateTransformModes_Smoke), ::testing::ValuesIn(nearestModes_Smoke), ::testing::ValuesIn(antialias), @@ -704,7 +703,7 @@ const auto interpolateCasesNN5D_Smoke = ::testing::Combine( ::testing::ValuesIn(cubeCoefs)); const auto interpolateCasesNN5D_Full = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::NEAREST), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::NEAREST), ::testing::ValuesIn(coordinateTransformModes_Full), ::testing::ValuesIn(nearestModes_Full), ::testing::ValuesIn(antialias), @@ -716,7 +715,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateNN5D_Layout_Test, InterpolateLayerGPUT ::testing::Combine( interpolateCasesNN5D_Smoke, ::testing::ValuesIn(shapeParams5D_Smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(true, false)), InterpolateLayerGPUTest::getTestCaseName); @@ -724,10 +723,7 @@ INSTANTIATE_TEST_SUITE_P(InterpolateNN5D_Layout_Test, InterpolateLayerGPUTest, ::testing::Combine( interpolateCasesNN5D_Full, ::testing::ValuesIn(shapeParams5D_Full), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(true, false)), InterpolateLayerGPUTest::getTestCaseName); - } // namespace - -} // namespace GPULayerTestsDefinitions diff --git 
a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp index 4f0f69ab172..2b3d2dccf2c 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp @@ -2,18 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/mat_mul.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include #include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/matmul.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; struct ShapeRelatedParams { std::vector inputShapes; @@ -22,36 +21,36 @@ struct ShapeRelatedParams { typedef std::tuple< ShapeRelatedParams, - ElementType, // Network precision - ElementType, // Input precision - ElementType, // Output precision - ngraph::helpers::InputLayerType, // Secondary input type - TargetDevice, // Device name + ov::element::Type, // Network precision + ov::element::Type, // Input precision + ov::element::Type, // Output precision + ov::test::utils::InputLayerType, // Secondary input type + std::string, // Device name std::map // Additional network configuration > MatMulLayerTestParamsSet; class MatMulLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { MatMulLayerTestParamsSet basicParamsSet = obj.param; - ElementType netType; - ElementType 
inType, outType; - ShapeRelatedParams shapeRelatedParams; - ngraph::helpers::InputLayerType secondaryInputType; - TargetDevice targetDevice; - std::map additionalConfig; - std::tie(shapeRelatedParams, netType, inType, outType, secondaryInputType, targetDevice, additionalConfig) = + ov::element::Type model_type; + ov::element::Type inType, outType; + ShapeRelatedParams shape_related_params; + ov::test::utils::InputLayerType secondary_input_type; + std::string targetDevice; + std::map additional_config; + std::tie(shape_related_params, model_type, inType, outType, secondary_input_type, targetDevice, additional_config) = basicParamsSet; std::ostringstream result; result << "IS="; - for (const auto& shape : shapeRelatedParams.inputShapes) { + for (const auto& shape : shape_related_params.inputShapes) { result << ov::test::utils::partialShape2str({shape.first}) << "_"; } result << "TS="; - for (const auto& shape : shapeRelatedParams.inputShapes) { + for (const auto& shape : shape_related_params.inputShapes) { result << "("; if (!shape.second.empty()) { auto itr = shape.second.begin(); @@ -61,15 +60,15 @@ public: } result << ")_"; } - result << "transpose_a=" << shapeRelatedParams.transpose.first << "_"; - result << "transpose_b=" << shapeRelatedParams.transpose.second << "_"; - result << "secondaryInputType=" << secondaryInputType << "_"; - result << "netPRC=" << netType << "_"; + result << "transpose_a=" << shape_related_params.transpose.first << "_"; + result << "transpose_b=" << shape_related_params.transpose.second << "_"; + result << "secondary_input_type=" << secondary_input_type << "_"; + result << "netPRC=" << model_type << "_"; result << "inPRC=" << inType << "_"; result << "outPRC=" << outType << "_"; result << "trgDev=" << targetDevice; result << "config=("; - for (const auto& configEntry : additionalConfig) { + for (const auto& configEntry : additional_config) { result << configEntry.first << ", " << configEntry.second << ":"; } result << ")"; @@ -87,17 
+86,17 @@ protected: void SetUp() override { MatMulLayerTestParamsSet basicParamsSet = this->GetParam(); - ShapeRelatedParams shapeRelatedParams; - ElementType netType; - helpers::InputLayerType secondaryInputType; - std::map additionalConfig; + ShapeRelatedParams shape_related_params; + ov::element::Type model_type; + ov::test::utils::InputLayerType secondary_input_type; + std::map additional_config; - std::tie(shapeRelatedParams, netType, inType, outType, secondaryInputType, targetDevice, additionalConfig) = basicParamsSet; + std::tie(shape_related_params, model_type, inType, outType, secondary_input_type, targetDevice, additional_config) = basicParamsSet; - init_input_shapes(shapeRelatedParams.inputShapes); + init_input_shapes(shape_related_params.inputShapes); - bool transpA = shapeRelatedParams.transpose.first; - bool transpB = shapeRelatedParams.transpose.second; + bool transpA = shape_related_params.transpose.first; + bool transpB = shape_related_params.transpose.second; if (transpA) { transpose(inputDynamicShapes[0]); @@ -115,69 +114,64 @@ protected: const auto& inShapeA = inputDynamicShapes[0]; const auto& inShapeB = inputDynamicShapes[1]; - configuration.insert(additionalConfig.begin(), additionalConfig.end()); + configuration.insert(additional_config.begin(), additional_config.end()); - ov::ParameterVector params{std::make_shared(netType, inShapeA)}; + ov::ParameterVector params{std::make_shared(model_type, inShapeA)}; std::shared_ptr matrixB; - if (secondaryInputType == helpers::InputLayerType::PARAMETER) { - auto param = std::make_shared(netType, inShapeB); + if (secondary_input_type == ov::test::utils::InputLayerType::PARAMETER) { + auto param = std::make_shared(model_type, inShapeB); matrixB = param; params.push_back(param); } else { ASSERT_TRUE(inShapeB.is_static()); - auto tensor = ov::test::utils::create_and_fill_tensor(netType, inShapeB.to_shape()); + auto tensor = ov::test::utils::create_and_fill_tensor(model_type, inShapeB.to_shape()); matrixB 
= std::make_shared(tensor); } auto matMul = std::make_shared(params[0], matrixB, transpA, transpB); - auto makeFunction = [](const ngraph::element::Type &ngPrc, ngraph::ParameterVector ¶ms, const std::shared_ptr &lastNode) { - ngraph::ResultVector results; + auto makeFunction = [](const ov::element::Type &ngPrc, ov::ParameterVector ¶ms, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "MatMul"); + return std::make_shared(results, params, "MatMul"); }; - function = makeFunction(netType, params, matMul); + function = makeFunction(model_type, params, matMul); } }; -TEST_P(MatMulLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(MatMulLayerGPUTest, Inference) { run(); } -namespace { - /* ============= Common params ============= */ std::map emptyAdditionalConfig; -std::vector> additionalConfig { +std::vector> additional_config { std::map{/* empty config */}, }; -const std::vector netPRCs { - ElementType::f32, +const std::vector netPRCs { + ov::element::f32, }; /* ============= FullyConnected ============= */ -namespace fullyConnected { const std::vector IS2D_smoke = { - {static_shapes_to_test_representation({{59, 1}, {1, 120}}), {false, true}}, - {static_shapes_to_test_representation({{59, 1}, {1, 120}}), {true, true}}, + {ov::test::static_shapes_to_test_representation({{59, 1}, {1, 120}}), {false, true}}, + {ov::test::static_shapes_to_test_representation({{59, 1}, {1, 120}}), {true, true}}, - {static_shapes_to_test_representation({{59, 120}, {120, 1}}), {false, false}}, - {static_shapes_to_test_representation({{59, 120}, {120, 1}}), {true, true}}, + {ov::test::static_shapes_to_test_representation({{59, 120}, {120, 1}}), {false, false}}, + {ov::test::static_shapes_to_test_representation({{59, 120}, {120, 1}}), 
{true, true}}, - {static_shapes_to_test_representation({{1, 120}, {120, 59}}), {false, false}}, - {static_shapes_to_test_representation({{1, 120}, {120, 59}}), {true, false}}, + {ov::test::static_shapes_to_test_representation({{1, 120}, {120, 59}}), {false, false}}, + {ov::test::static_shapes_to_test_representation({{1, 120}, {120, 59}}), {true, false}}, - {static_shapes_to_test_representation({{71, 128}, {128, 20}}), {true, false}}, - {static_shapes_to_test_representation({{71, 128}, {128, 20}}), {false, true}}, + {ov::test::static_shapes_to_test_representation({{71, 128}, {128, 20}}), {true, false}}, + {ov::test::static_shapes_to_test_representation({{71, 128}, {128, 20}}), {false, true}}, { { @@ -196,17 +190,17 @@ const std::vector IS2D_smoke = { }; const std::vector IS2D_nightly = { - {static_shapes_to_test_representation({{59, 1}, {1, 120}}), {false, false}}, - {static_shapes_to_test_representation({{59, 1}, {1, 120}}), {true, false}}, + {ov::test::static_shapes_to_test_representation({{59, 1}, {1, 120}}), {false, false}}, + {ov::test::static_shapes_to_test_representation({{59, 1}, {1, 120}}), {true, false}}, - {static_shapes_to_test_representation({{59, 120}, {120, 1}}), {true, false}}, - {static_shapes_to_test_representation({{59, 120}, {120, 1}}), {false, true}}, + {ov::test::static_shapes_to_test_representation({{59, 120}, {120, 1}}), {true, false}}, + {ov::test::static_shapes_to_test_representation({{59, 120}, {120, 1}}), {false, true}}, - {static_shapes_to_test_representation({{1, 120}, {120, 59}}), {true, true}}, - {static_shapes_to_test_representation({{1, 120}, {120, 59}}), {false, true}}, + {ov::test::static_shapes_to_test_representation({{1, 120}, {120, 59}}), {true, true}}, + {ov::test::static_shapes_to_test_representation({{1, 120}, {120, 59}}), {false, true}}, - {static_shapes_to_test_representation({{71, 128}, {128, 20}}), {true, true}}, - {static_shapes_to_test_representation({{71, 128}, {128, 20}}), {false, false}}, + 
{ov::test::static_shapes_to_test_representation({{71, 128}, {128, 20}}), {true, true}}, + {ov::test::static_shapes_to_test_representation({{71, 128}, {128, 20}}), {false, false}}, { { @@ -232,31 +226,31 @@ const std::vector IS2D_nightly = { }; const auto testParams2D_smoke = ::testing::Combine(::testing::ValuesIn(IS2D_smoke), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(emptyAdditionalConfig)); INSTANTIATE_TEST_SUITE_P(smoke_FC_2D, MatMulLayerGPUTest, testParams2D_smoke, MatMulLayerGPUTest::getTestCaseName); const auto testParams2D_nightly = ::testing::Combine(::testing::ValuesIn(IS2D_nightly), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(emptyAdditionalConfig)); INSTANTIATE_TEST_SUITE_P(nightly_FC_2D, MatMulLayerGPUTest, testParams2D_nightly, MatMulLayerGPUTest::getTestCaseName); const std::vector IS3D_smoke = { - {static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {false, false}}, - {static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {false, true}}, + {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {false, false}}, + {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {false, true}}, - 
{static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {true, false}}, - {static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {false, true}}, + {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {true, false}}, + {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {false, true}}, { { @@ -266,7 +260,7 @@ const std::vector IS3D_smoke = { {false, true} }, - {static_shapes_to_test_representation({{1, 429}, {1, 429, 1}}), {true, true}}, + {ov::test::static_shapes_to_test_representation({{1, 429}, {1, 429, 1}}), {true, true}}, { { {{-1, -1}, {{1, 129}, {2, 129}, {1, 129}, {2, 129}}}, @@ -285,11 +279,11 @@ const std::vector IS3D_smoke = { }; const std::vector IS3D_nightly = { - {static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {true, false}}, - {static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {true, true}}, + {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {true, false}}, + {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {true, true}}, - {static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {false, false}}, - {static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {true, true}}, + {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {false, false}}, + {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {true, true}}, { { @@ -315,20 +309,20 @@ const std::vector IS3D_nightly = { }; const auto fullyConnectedParams3D_smoke = ::testing::Combine(::testing::ValuesIn(IS3D_smoke), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), 
::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(emptyAdditionalConfig)); INSTANTIATE_TEST_SUITE_P(smoke_FC_3D, MatMulLayerGPUTest, fullyConnectedParams3D_smoke, MatMulLayerGPUTest::getTestCaseName); const auto fullyConnectedParams3D_nightly = ::testing::Combine(::testing::ValuesIn(IS3D_nightly), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(emptyAdditionalConfig)); @@ -366,62 +360,59 @@ const std::vector IS4D_smoke = { }; const auto fullyConnectedParams4D_smoke = ::testing::Combine(::testing::ValuesIn(IS4D_smoke), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(emptyAdditionalConfig)); INSTANTIATE_TEST_SUITE_P(smoke_FC_4D, MatMulLayerGPUTest, fullyConnectedParams4D_smoke, MatMulLayerGPUTest::getTestCaseName); -} // namespace fullyConnected - /* ============= MatMul ============= */ -namespace matmul { const std::vector IS = { - {static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {false, false}}, - {static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {true, false}}, - {static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {false, true}}, - {static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {true, true}}, + 
{ov::test::static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {false, false}}, + {ov::test::static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {true, false}}, + {ov::test::static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {false, true}}, + {ov::test::static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {true, true}}, - {static_shapes_to_test_representation({{1, 2, 100010, 120}, {120, 5}}), {true, true}}, - {static_shapes_to_test_representation({{1, 2, 200010, 120}, {120, 5}}), {false, true}}, - {static_shapes_to_test_representation({{1, 2, 30, 120}, {120, 100010}}), {true, true}}, - {static_shapes_to_test_representation({{1, 2, 30, 120}, {120, 100010}}), {true, false}}, + {ov::test::static_shapes_to_test_representation({{1, 2, 100010, 120}, {120, 5}}), {true, true}}, + {ov::test::static_shapes_to_test_representation({{1, 2, 200010, 120}, {120, 5}}), {false, true}}, + {ov::test::static_shapes_to_test_representation({{1, 2, 30, 120}, {120, 100010}}), {true, true}}, + {ov::test::static_shapes_to_test_representation({{1, 2, 30, 120}, {120, 100010}}), {true, false}}, - {static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {false, false}}, - {static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {true, false}}, - {static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {false, true}}, - {static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {true, true}}, + {ov::test::static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {false, false}}, + {ov::test::static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {true, false}}, + {ov::test::static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {false, true}}, + {ov::test::static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {true, true}}, - {static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {false, false}}, - 
{static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {true, false}}, - {static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {false, true}}, - {static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {true, true}}, + {ov::test::static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {false, false}}, + {ov::test::static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {true, false}}, + {ov::test::static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {false, true}}, + {ov::test::static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {true, true}}, - {static_shapes_to_test_representation({{55, 12}, {12, 55}}), {false, false}}, - {static_shapes_to_test_representation({{55, 12}, {12, 55}}), {true, false}}, - {static_shapes_to_test_representation({{55, 12}, {12, 55}}), {false, true}}, - {static_shapes_to_test_representation({{55, 12}, {12, 55}}), {true, true}} + {ov::test::static_shapes_to_test_representation({{55, 12}, {12, 55}}), {false, false}}, + {ov::test::static_shapes_to_test_representation({{55, 12}, {12, 55}}), {true, false}}, + {ov::test::static_shapes_to_test_representation({{55, 12}, {12, 55}}), {false, true}}, + {ov::test::static_shapes_to_test_representation({{55, 12}, {12, 55}}), {true, true}} }; const std::vector IS_OneDNN = { - {static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {false, false}}, - {static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {true, false}}, - {static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {false, true}}, - {static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {true, true}}, + {ov::test::static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {false, false}}, + {ov::test::static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {true, false}}, + {ov::test::static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 
120, 5}}), {false, true}}, + {ov::test::static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {true, true}}, - {static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {false, false}}, - {static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {true, false}}, - {static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {false, true}}, - {static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {true, true}}, + {ov::test::static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {false, false}}, + {ov::test::static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {true, false}}, + {ov::test::static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {false, true}}, + {ov::test::static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {true, true}}, - {static_shapes_to_test_representation({{12, 12}, {12, 12}}), {false, false}}, - {static_shapes_to_test_representation({{12, 12}, {12, 12}}), {true, false}}, - {static_shapes_to_test_representation({{12, 12}, {12, 12}}), {false, true}}, - {static_shapes_to_test_representation({{12, 12}, {12, 12}}), {true, true}} + {ov::test::static_shapes_to_test_representation({{12, 12}, {12, 12}}), {false, false}}, + {ov::test::static_shapes_to_test_representation({{12, 12}, {12, 12}}), {true, false}}, + {ov::test::static_shapes_to_test_representation({{12, 12}, {12, 12}}), {false, true}}, + {ov::test::static_shapes_to_test_representation({{12, 12}, {12, 12}}), {true, true}} }; const std::vector IS_Dynamic = { @@ -678,44 +669,41 @@ const std::vector IS_Dynamic_nightly = { const auto testParams = ::testing::Combine(::testing::ValuesIn(IS), ::testing::ValuesIn(netPRCs), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::element::undefined), + 
::testing::Values(ov::element::undefined), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(additionalConfig)); + ::testing::ValuesIn(additional_config)); INSTANTIATE_TEST_SUITE_P(smoke_MM_Static, MatMulLayerGPUTest, testParams, MatMulLayerGPUTest::getTestCaseName); const auto testParamsOneDNN = ::testing::Combine(::testing::ValuesIn(IS_OneDNN), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::element::f16), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(additionalConfig)); + ::testing::ValuesIn(additional_config)); INSTANTIATE_TEST_SUITE_P(smoke_MM_Static_OneDNN, MatMulLayerGPUTest, testParamsOneDNN, MatMulLayerGPUTest::getTestCaseName); const auto testParamsDynamic = ::testing::Combine(::testing::ValuesIn(IS_Dynamic), ::testing::ValuesIn(netPRCs), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(additionalConfig)); + ::testing::ValuesIn(additional_config)); INSTANTIATE_TEST_SUITE_P(smoke_MM_Dynamic, MatMulLayerGPUTest, testParamsDynamic, MatMulLayerGPUTest::getTestCaseName); const auto testParamsDynamic_nightly = ::testing::Combine(::testing::ValuesIn(IS_Dynamic_nightly), ::testing::ValuesIn(netPRCs), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - 
::testing::Values(helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(additionalConfig)); + ::testing::ValuesIn(additional_config)); INSTANTIATE_TEST_SUITE_P(nightly_MM_Dynamic, MatMulLayerGPUTest, testParamsDynamic_nightly, MatMulLayerGPUTest::getTestCaseName); - -} // namespace matmul } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/mvn.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/mvn.cpp index d6f438bfa2f..1ff9fe2378c 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/mvn.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/mvn.cpp @@ -2,36 +2,38 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/mvn.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; using basicGPUMvnParams = std::tuple< InputShape, // Input shapes - ElementType, // Input precision + ov::element::Type, // Input precision std::vector, // Reduction axes bool, // Normalize variance double>; // Epsilon using MvnLayerGPUTestParamSet = std::tuple< basicGPUMvnParams, - ElementType>; // CNNNetwork input precision + ov::element::Type>; // CNNNetwork input precision class MvnLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { 
basicGPUMvnParams basicParamsSet; - ElementType inputPrecision; + ov::element::Type inputPrecision; std::tie(basicParamsSet, inputPrecision) = obj.param; InputShape inputShapes; - ElementType netPrecision; + ov::element::Type netPrecision; std::vector axes; bool normalizeVariance; double eps; @@ -56,11 +58,11 @@ protected: targetDevice = ov::test::utils::DEVICE_GPU; basicGPUMvnParams basicParamsSet; - ElementType inPrc; + ov::element::Type inPrc; std::tie(basicParamsSet, inPrc) = this->GetParam(); InputShape inputShapes; - ElementType netPrecision; + ov::element::Type netPrecision; std::vector axes; bool normalizeVariance; double eps; @@ -75,7 +77,7 @@ protected: for (auto&& shape : inputDynamicShapes) params.push_back(std::make_shared(netPrecision, shape)); - auto axesNode = ngraph::builder::makeConstant(axesType, ngraph::Shape{axes.size()}, axes); + auto axesNode = std::make_shared(axesType, ov::Shape{axes.size()}, axes); ov::op::MVNEpsMode nEpsMode = ov::op::MVNEpsMode::INSIDE_SQRT; if (eps_mode == "outside_sqrt") nEpsMode = ov::op::MVNEpsMode::OUTSIDE_SQRT; @@ -83,21 +85,18 @@ protected: rel_threshold = 0.015f; - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < mvn->get_output_size(); ++i) { - results.push_back(std::make_shared(mvn->output(i))); + results.push_back(std::make_shared(mvn->output(i))); } - function = std::make_shared(results, params, "MVN"); + function = std::make_shared(results, params, "MVN"); } }; -TEST_P(MvnLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(MvnLayerGPUTest, Inference) { run(); } -namespace { - const std::vector inputShapes_1D = { { // dynamic @@ -205,12 +204,12 @@ const std::vector reduction_axes_12 = {1, 2}; const std::vector reduction_axes_3 = {3}; const std::vector reduction_axes_2 = {2}; -std::vector inpPrc = {ElementType::i8, ElementType::f16, ElementType::f32}; +std::vector inpPrc = {ov::element::i8, ov::element::f16, ov::element::f32}; const auto Mvn3D = 
::testing::Combine( ::testing::Combine( ::testing::ValuesIn(inputShapes_3D), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn({reduction_axes_12, reduction_axes_2}), ::testing::ValuesIn(normalizeVariance), ::testing::ValuesIn(epsilon)), @@ -221,7 +220,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn3D, MvnLayerGPUTest, Mvn3D, Mv const auto Mvn4D = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(inputShapes_4D), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn({reduction_axes_2, reduction_axes_3, reduction_axes_12, reduction_axes_23, reduction_axes_123}), ::testing::ValuesIn(normalizeVariance), ::testing::ValuesIn(epsilon)), @@ -232,13 +231,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn4D, MvnLayerGPUTest, Mvn4D, Mv const auto Mvn5D = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(inputShapes_5D), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn({reduction_axes_3, reduction_axes_23, reduction_axes_123, reduction_axes_1234}), ::testing::ValuesIn(normalizeVariance), ::testing::ValuesIn(epsilon)), ::testing::ValuesIn(inpPrc)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn5D, MvnLayerGPUTest, Mvn5D, MvnLayerGPUTest::getTestCaseName); - } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp index 4708b2f0ffb..2e798d76395 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp @@ -2,22 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include 
"ov_models/builders.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/single_layer/non_max_suppression.hpp" -#include "common_test_utils/test_constants.hpp" #include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; -using namespace ov::test; -using namespace ngraph; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/non_max_suppression.hpp" -namespace GPULayerTestsDefinitions { +namespace { enum { BATCHES, @@ -32,9 +26,9 @@ using TargetShapeParams = std::tuple, // bounds for input dynamic shape std::vector>; // target input dimensions -using InputPrecisions = std::tuple; // iou_threshold, score_threshold, soft_nms_sigma precisions +using InputPrecisions = std::tuple; // iou_threshold, score_threshold, soft_nms_sigma precisions using ThresholdValues = std::tuple>; // Additional network configuration -class NmsLayerGPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest { +class NmsLayerGPUTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { InputShapeParams inShapeParams; @@ -58,17 +53,17 @@ public: int32_t maxOutBoxesPerClass; ThresholdValues thrValues; float iouThr, scoreThr, softNmsSigma; - op::v9::NonMaxSuppression::BoxEncodingType boxEncoding; + ov::op::v9::NonMaxSuppression::BoxEncodingType boxEncoding; bool sortResDescend; - element::Type outType; - TargetDevice targetDevice; + ov::element::Type outType; + std::string targetDevice; std::map additionalConfig; std::tie(inShapeParams, inPrecisions, maxOutBoxesPerClass, thrValues, boxEncoding, sortResDescend, outType, targetDevice, additionalConfig) = obj.param; std::tie(iouThr, scoreThr, softNmsSigma) = thrValues; - ElementType 
paramsPrec, maxBoxPrec, thrPrec; + ov::element::Type paramsPrec, maxBoxPrec, thrPrec; std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions; std::vector bounds; @@ -88,6 +83,7 @@ public: result << "paramsPrec=" << paramsPrec << "_maxBoxPrec=" << maxBoxPrec << "_thrPrec=" << thrPrec << "_"; result << "maxOutBoxesPerClass=" << maxOutBoxesPerClass << "_"; result << "iouThr=" << iouThr << "_scoreThr=" << scoreThr << "_softNmsSigma=" << softNmsSigma << "_"; + using ov::operator<<; result << "boxEncoding=" << boxEncoding << "_sortResDescend=" << sortResDescend << "_outType=" << outType << "_"; result << "config=("; for (const auto& configEntry : additionalConfig) { @@ -99,7 +95,7 @@ public: return result.str(); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { SubgraphBaseTest::generate_inputs(targetInputStaticShapes); // w/a to fill valid data for port 2 const auto& funcInputs = function->inputs(); @@ -122,13 +118,13 @@ protected: InputPrecisions inPrecisions; ThresholdValues thrValues; float iouThr, scoreThr, softNmsSigma; - op::v9::NonMaxSuppression::BoxEncodingType boxEncoding; + ov::op::v9::NonMaxSuppression::BoxEncodingType boxEncoding; bool sortResDescend; - element::Type outType; + ov::element::Type outType; std::map additionalConfig; std::tie(inShapeParams, inPrecisions, maxOutBoxesPerClass, thrValues, boxEncoding, sortResDescend, outType, targetDevice, additionalConfig) = this->GetParam(); - element::Type paramsPrec, maxBoxPrec, thrPrec; + ov::element::Type paramsPrec, maxBoxPrec, thrPrec; std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions; std::tie(iouThr, scoreThr, softNmsSigma) = thrValues; @@ -137,18 +133,18 @@ protected: std::tie(bounds, targetInDims) = inShapeParams; if (!bounds.empty()) { - inputDynamicShapes = std::vector{{bounds[BATCHES], bounds[BOXES], 4}, {bounds[BATCHES], bounds[CLASSES], bounds[BOXES]}}; + inputDynamicShapes = 
std::vector{{bounds[BATCHES], bounds[BOXES], 4}, {bounds[BATCHES], bounds[CLASSES], bounds[BOXES]}}; } else { size_t batches, boxes, classes; std::tie(batches, boxes, classes) = targetInDims.front(); ov::Dimension numBatches(batches), numBoxes(boxes), numClasses(classes); - inputDynamicShapes = std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}}; + inputDynamicShapes = std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}}; } for (const auto &ts : targetInDims) { size_t numBatches, numBoxes, numClasses; std::tie(numBatches, numBoxes, numClasses) = ts; - targetStaticShapes.push_back(std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}}); + targetStaticShapes.push_back(std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}}); } ov::ParameterVector params; @@ -158,17 +154,18 @@ protected: params[0]->set_friendly_name("param_1"); params[1]->set_friendly_name("param_2"); - auto maxOutBoxesPerClassNode = builder::makeConstant(maxBoxPrec, ngraph::Shape{}, std::vector{maxOutBoxesPerClass})->output(0); - auto iouThrNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector{iouThr})->output(0); - auto scoreThrNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector{scoreThr})->output(0); - auto softNmsSigmaNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector{softNmsSigma})->output(0); - auto nms = std::make_shared(params[0], params[1], maxOutBoxesPerClassNode, iouThrNode, scoreThrNode, - softNmsSigmaNode, boxEncoding, sortResDescend, outType); - ngraph::ResultVector results; + auto maxOutBoxesPerClassNode = std::make_shared(maxBoxPrec, ov::Shape{}, std::vector{maxOutBoxesPerClass}); + auto iouThrNode = std::make_shared(thrPrec, ov::Shape{}, std::vector{iouThr}); + auto scoreThrNode = std::make_shared(thrPrec, ov::Shape{}, std::vector{scoreThr}); + auto softNmsSigmaNode = std::make_shared(thrPrec, ov::Shape{}, std::vector{softNmsSigma}); + + auto nms = 
std::make_shared(params[0], params[1], maxOutBoxesPerClassNode, iouThrNode, scoreThrNode, + softNmsSigmaNode, boxEncoding, sortResDescend, outType); + ov::ResultVector results; for (size_t i = 0; i < nms->get_output_size(); i++) { - results.push_back(std::make_shared(nms->output(i))); + results.push_back(std::make_shared(nms->output(i))); } - function = std::make_shared(results, params, "Nms"); + function = std::make_shared(results, params, "Nms"); } private: @@ -397,14 +394,10 @@ private: int32_t maxOutBoxesPerClass; }; -TEST_P(NmsLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(NmsLayerGPUTest, Inference) { run(); } -namespace { - std::map emptyAdditionalConfig; const std::vector inShapeParams = { @@ -419,18 +412,20 @@ const std::vector inShapeParams = { const std::vector maxOutBoxPerClass = {5, 20}; const std::vector threshold = {0.3f, 0.7f}; const std::vector sigmaThreshold = {0.0f, 0.5f}; -const std::vector encodType = {op::v9::NonMaxSuppression::BoxEncodingType::CENTER, - op::v9::NonMaxSuppression::BoxEncodingType::CORNER}; +const std::vector encodType = + {ov::op::v9::NonMaxSuppression::BoxEncodingType::CENTER, + ov::op::v9::NonMaxSuppression::BoxEncodingType::CORNER}; + const std::vector sortResDesc = {true, false}; -const std::vector outType = {element::i32}; +const std::vector outType = {ov::element::i32}; INSTANTIATE_TEST_SUITE_P(smoke_Nms_dynamic, NmsLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(inShapeParams), ::testing::Combine( - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i32), - ::testing::Values(ElementType::f32)), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::i32), + ::testing::Values(ov::element::f32)), ::testing::ValuesIn(maxOutBoxPerClass), ::testing::Combine( ::testing::ValuesIn(threshold), @@ -444,4 +439,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_Nms_dynamic, NmsLayerGPUTest, NmsLayerGPUTest::getTestCaseName); } // namespace -} // namespace 
GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/normalize_l2.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/normalize_l2.cpp index a7e5f9a8dad..14dcb81dd04 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/normalize_l2.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/normalize_l2.cpp @@ -2,30 +2,32 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/normalize_l2.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; using NormalizeL2LayerGPUTestParams = std::tuple< InputShape, // Input shapes - ElementType, // Input precision + ov::element::Type, // Input precision std::vector, // Reduction axes - ngraph::op::EpsMode, // EpsMode + ov::op::EpsMode, // EpsMode float>; // Epsilon class NormalizeL2LayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { InputShape inputShapes; - ElementType netPrecision; + ov::element::Type netPrecision; std::vector axes; - ngraph::op::EpsMode epsMode; + ov::op::EpsMode epsMode; float eps; std::tie(inputShapes, netPrecision, axes, epsMode, eps) = obj.param; @@ -47,9 +49,9 @@ protected: targetDevice = ov::test::utils::DEVICE_GPU; InputShape inputShapes; - ElementType netPrecision; + ov::element::Type netPrecision; std::vector axes; - ngraph::op::EpsMode epsMode; + ov::op::EpsMode epsMode; float eps; std::tie(inputShapes, netPrecision, axes, epsMode, eps) = this->GetParam(); @@ -62,18 +64,15 @@ 
protected: auto normAxes = std::make_shared(ov::element::i64, ov::Shape{axes.size()}, axes); auto normalize = std::make_shared(params[0], normAxes, eps, epsMode); - ngraph::ResultVector results{std::make_shared(normalize)}; - function = std::make_shared(results, params, "NormalizeL2"); + ov::ResultVector results{std::make_shared(normalize)}; + function = std::make_shared(results, params, "NormalizeL2"); } }; -TEST_P(NormalizeL2LayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(NormalizeL2LayerGPUTest, Inference) { run(); } -namespace { - const std::vector inputShapes_1D = { { // dynamic @@ -163,8 +162,8 @@ const std::vector inputShapes_5D = { } }; -const std::vector epsMode = { - ngraph::op::EpsMode::ADD, ngraph::op::EpsMode::MAX +const std::vector epsMode = { + ov::op::EpsMode::ADD, ov::op::EpsMode::MAX }; const std::vector epsilon = { @@ -179,7 +178,7 @@ const std::vector reduction_axes_12 = {1, 2}; const std::vector reduction_axes_3 = {3}; const std::vector reduction_axes_2 = {2}; -std::vector nrtPrecision = {ElementType::f16, ElementType::f32}; +std::vector nrtPrecision = {ov::element::f16, ov::element::f32}; const auto NormalizeL2_3D = ::testing::Combine( ::testing::ValuesIn(inputShapes_3D), @@ -209,4 +208,3 @@ const auto NormalizeL2_5D = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_NormalizeL2_5D, NormalizeL2LayerGPUTest, NormalizeL2_5D, NormalizeL2LayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pad.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pad.cpp index 4a30f042df0..da642f7f8ed 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pad.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pad.cpp @@ -2,41 +2,38 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/pad.hpp" +#include 
"common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_enums.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -#include -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov; -using namespace test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/pad.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; using PadLayerGPUTestParamSet = std::tuple< InputShape, // Input shape - ElementType, // Input element type + ov::element::Type, // Input element type std::vector, // padsBegin std::vector, // padsEnd float, // argPadValue - std::vector, // for {begin, end, padValue} - ov::op::PadMode // padMode ->; + std::vector, // for {begin, end, padValue} + ov::op::PadMode>; // padMode class PadLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { InputShape shapes; - ElementType elementType; + ov::element::Type model_type; std::vector padsBegin, padsEnd; ov::op::PadMode padMode; float argPadValue; - std::vector inputLayerTypes; - std::tie(shapes, elementType, padsBegin, padsEnd, argPadValue, inputLayerTypes, padMode) = obj.param; + std::vector inputLayerTypes; + std::tie(shapes, model_type, padsBegin, padsEnd, argPadValue, inputLayerTypes, padMode) = obj.param; std::ostringstream results; results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_"; @@ -44,7 +41,7 @@ public: for (const auto& item : shapes.second) { results << ov::test::utils::vec2str(item) << "_"; } - results << "Prc=" << elementType << "_"; + results << "Prc=" << model_type << "_"; results << "padsBegin=" << ov::test::utils::vec2str(padsBegin) << "_"; results << "padsEnd=" << 
ov::test::utils::vec2str(padsEnd) << "_"; if (padMode == ov::op::PadMode::CONSTANT) { @@ -63,24 +60,24 @@ protected: void SetUp() override { InputShape shapes; ov::op::PadMode padMode; - std::vector inputLayerTypes; + std::vector inputLayerTypes; std::tie(shapes, inType, padsBegin, padsEnd, argPadValue, inputLayerTypes, padMode) = this->GetParam(); targetDevice = ov::test::utils::DEVICE_GPU; std::vector inputShapes; inputShapes.push_back(shapes); - if (inputLayerTypes[0] == helpers::InputLayerType::PARAMETER) { + if (inputLayerTypes[0] == ov::test::utils::InputLayerType::PARAMETER) { inputShapes.push_back(InputShape({static_cast(padsBegin.size())}, std::vector(shapes.second.size(), {padsBegin.size()}))); } - if (inputLayerTypes[1] == helpers::InputLayerType::PARAMETER) { + if (inputLayerTypes[1] == ov::test::utils::InputLayerType::PARAMETER) { inputShapes.push_back(InputShape({static_cast(padsEnd.size())}, std::vector(shapes.second.size(), {padsEnd.size()}))); } init_input_shapes(inputShapes); // Add empty shape for parameter input of scalar 'pad_value' - if (inputLayerTypes[2] == helpers::InputLayerType::PARAMETER) { + if (inputLayerTypes[2] == ov::test::utils::InputLayerType::PARAMETER) { inputDynamicShapes.push_back(ov::PartialShape({})); for (size_t i = 0; i < shapes.second.size(); ++i) { for (size_t k = 0; k < targetStaticShapes.size(); ++k) { @@ -95,43 +92,43 @@ protected: std::shared_ptr pads_begin, pads_end, arg_pad_value; // padsBegin - if (inputLayerTypes[0] == helpers::InputLayerType::PARAMETER) { - functionParams.push_back(std::make_shared(ngraph::element::i64, ov::Shape{padsBegin.size()})); + if (inputLayerTypes[0] == ov::test::utils::InputLayerType::PARAMETER) { + functionParams.push_back(std::make_shared(ov::element::i64, ov::Shape{padsBegin.size()})); functionParams.back()->set_friendly_name("padsBegin"); pads_begin = functionParams.back(); } else { - pads_begin = std::make_shared(ngraph::element::i64, ngraph::Shape{padsBegin.size()}, 
padsBegin.data()); + pads_begin = std::make_shared(ov::element::i64, ov::Shape{padsBegin.size()}, padsBegin.data()); } // padsEnd - if (inputLayerTypes[1] == helpers::InputLayerType::PARAMETER) { - functionParams.push_back(std::make_shared(ngraph::element::i64, ov::Shape{padsEnd.size()})); + if (inputLayerTypes[1] == ov::test::utils::InputLayerType::PARAMETER) { + functionParams.push_back(std::make_shared(ov::element::i64, ov::Shape{padsEnd.size()})); functionParams.back()->set_friendly_name("padsEnd"); pads_end = functionParams.back(); } else { - pads_end = std::make_shared(ngraph::element::i64, ngraph::Shape{padsEnd.size()}, padsEnd.data()); + pads_end = std::make_shared(ov::element::i64, ov::Shape{padsEnd.size()}, padsEnd.data()); } // argPadValue - if (inputLayerTypes[2] == helpers::InputLayerType::PARAMETER) { - functionParams.push_back(std::make_shared(inType, ov::PartialShape({}))); + if (inputLayerTypes[2] == ov::test::utils::InputLayerType::PARAMETER) { + functionParams.push_back(std::make_shared(inType, ov::PartialShape({}))); functionParams.back()->set_friendly_name("padValue"); arg_pad_value = functionParams.back(); } else { - arg_pad_value = std::make_shared(inType, ngraph::Shape{}, &argPadValue); + arg_pad_value = std::make_shared(inType, ov::Shape{}, &argPadValue); } - auto pad = std::make_shared(functionParams[0], pads_begin, pads_end, arg_pad_value, padMode); + auto pad = std::make_shared(functionParams[0], pads_begin, pads_end, arg_pad_value, padMode); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < pad->get_output_size(); ++i) { - results.push_back(std::make_shared(pad->output(i))); + results.push_back(std::make_shared(pad->output(i))); } - function = std::make_shared(results, functionParams, "PadLayerGPUTest"); + function = std::make_shared(results, functionParams, "PadLayerGPUTest"); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& 
targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0lu; i < funcInputs.size(); i++) { @@ -166,15 +163,12 @@ protected: } }; -TEST_P(PadLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(PadLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector inputPrecisions = { - ElementType::f32 +const std::vector inputPrecisions = { + ov::element::f32 }; const std::vector argPadValue = {0.f, -1.f}; @@ -185,11 +179,11 @@ const std::vector padMode = { ov::op::PadMode::SYMMETRIC }; -const std::vector> isConstantInput = { - {helpers::InputLayerType::CONSTANT, helpers::InputLayerType::CONSTANT, helpers::InputLayerType::CONSTANT}, - {helpers::InputLayerType::CONSTANT, helpers::InputLayerType::PARAMETER, helpers::InputLayerType::CONSTANT}, - {helpers::InputLayerType::CONSTANT, helpers::InputLayerType::PARAMETER, helpers::InputLayerType::PARAMETER}, - {helpers::InputLayerType::PARAMETER, helpers::InputLayerType::PARAMETER, helpers::InputLayerType::PARAMETER} +const std::vector> isConstantInput = { + {ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT}, + {ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT}, + {ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER}, + {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER} }; //====================== Dynamic Shapes Tests 2D ====================== @@ -210,7 +204,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(padsEnd2D_Smoke), ::testing::ValuesIn(argPadValue), ::testing::ValuesIn(isConstantInput), - ::testing::Values(ngraph::helpers::PadMode::CONSTANT)), + ::testing::Values(ov::op::PadMode::CONSTANT)), 
PadLayerGPUTest::getTestCaseName ); @@ -246,7 +240,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(padsEnd4D_Smoke), ::testing::ValuesIn(argPadValue), ::testing::ValuesIn(isConstantInput), - ::testing::Values(ngraph::helpers::PadMode::CONSTANT)), + ::testing::Values(ov::op::PadMode::CONSTANT)), PadLayerGPUTest::getTestCaseName ); @@ -282,7 +276,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(padsEnd5D_Smoke), ::testing::ValuesIn(argPadValue), ::testing::ValuesIn(isConstantInput), - ::testing::Values(ngraph::helpers::PadMode::CONSTANT)), + ::testing::Values(ov::op::PadMode::CONSTANT)), PadLayerGPUTest::getTestCaseName ); @@ -299,6 +293,4 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(padMode)), PadLayerGPUTest::getTestCaseName ); - } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pooling.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pooling.cpp index 7b5c7f9ae62..41db2ac8b28 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pooling.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pooling.cpp @@ -2,33 +2,36 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/builders.hpp" - -#include "shared_test_classes/single_layer/pooling.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" +#include "shared_test_classes/single_op/pooling.hpp" -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; -using poolLayerGpuTestParamsSet = std::tuple; +using poolLayerGpuTestParamsSet = + std::tuple; class PoolingLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const 
testing::TestParamInfo& obj) { - LayerTestsDefinitions::poolSpecificParams basicParamsSet; + ov::test::poolSpecificParams basicParamsSet; InputShape inputShapes; - ElementType inPrc; + ov::element::Type inPrc; std::tie(basicParamsSet, inputShapes, inPrc) = obj.param; - ngraph::helpers::PoolingTypes poolType; + ov::test::utils::PoolingTypes poolType; std::vector kernel, stride; std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; + ov::op::PadType padType; + ov::op::RoundingType roundingType; bool excludePad; std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet; @@ -41,10 +44,10 @@ public: } results << "Prc=" << inPrc << "_"; switch (poolType) { - case ngraph::helpers::PoolingTypes::MAX: + case ov::test::utils::PoolingTypes::MAX: results << "MaxPool_"; break; - case ngraph::helpers::PoolingTypes::AVG: + case ov::test::utils::PoolingTypes::AVG: results << "AvgPool_"; results << "ExcludePad=" << excludePad << "_"; break; @@ -63,16 +66,16 @@ protected: void SetUp() override { targetDevice = ov::test::utils::DEVICE_GPU; - LayerTestsDefinitions::poolSpecificParams basicParamsSet; + ov::test::poolSpecificParams basicParamsSet; InputShape inputShapes; - ElementType inPrc; + ov::element::Type inPrc; std::tie(basicParamsSet, inputShapes, inPrc) = this->GetParam(); - ngraph::helpers::PoolingTypes poolType; + ov::test::utils::PoolingTypes poolType; std::vector kernel, stride; std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; + ov::op::PadType padType; + ov::op::RoundingType roundingType; bool excludePad; std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet; @@ -82,7 +85,7 @@ protected: for (auto&& shape : inputDynamicShapes) { params.push_back(std::make_shared(inPrc, shape)); } - std::shared_ptr poolInput = params[0]; + std::shared_ptr poolInput = params[0]; std::shared_ptr 
pooling; if (ov::test::utils::PoolingTypes::MAX == poolType) { @@ -91,27 +94,23 @@ protected: pooling = std::make_shared(poolInput, stride, padBegin, padEnd, kernel, excludePad, roundingType, padType); } - auto makeFunction = [](const ngraph::element::Type &ngPrc, ngraph::ParameterVector ¶ms, const std::shared_ptr &lastNode) { - ngraph::ResultVector results; + auto makeFunction = [](const ov::element::Type &ngPrc, ov::ParameterVector ¶ms, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "PoolingGPU"); + return std::make_shared(results, params, "PoolingGPU"); }; function = makeFunction(inPrc, params, pooling); } }; -TEST_P(PoolingLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(PoolingLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector inpOutPrecision = { ElementType::f32 }; +const std::vector inpOutPrecision = { ov::element::f32 }; const std::vector inputShapes3D = { { {}, {{3, 4, 64}} }, @@ -220,22 +219,22 @@ const std::vector inputShapes5D = { }; /* ============= Pooling (1D) ============= */ -const std::vector paramsMax3D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {2}, {0}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4}, {2}, {0}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {1}, {0}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, +const std::vector paramsMax3D = { + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2}, {2}, {0}, {0}, + 
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {4}, {2}, {0}, {0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2}, {1}, {0}, {0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, }; -const std::vector paramsAvg3D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4}, {4}, {2}, {2}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, +const std::vector paramsAvg3D = { + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {3}, {1}, {1}, {0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {3}, {1}, {1}, {0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {4}, {4}, {2}, {2}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true }, }; INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_GPU_3D, PoolingLayerGPUTest, @@ -253,30 +252,30 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_3D, PoolingLayerGPUTest, PoolingLayerGPUTest::getTestCaseName); /* ============= Pooling (2D) ============= */ -const std::vector paramsMax4D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, 
{0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 1}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, +const std::vector paramsMax4D = { + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {4, 2}, {2, 2}, {0, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {4, 2}, {2, 1}, {0, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, }; -const std::vector paramsAvg4D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - 
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4}, {4, 4}, {2, 2}, {2, 2}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true } +const std::vector paramsAvg4D = { + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {0, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {4, 4}, {4, 4}, {2, 2}, {2, 2}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true } }; INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_GPU_4D, PoolingLayerGPUTest, @@ -293,9 +292,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_4D, PoolingLayerGPUTest, ::testing::ValuesIn(inpOutPrecision)), PoolingLayerGPUTest::getTestCaseName); -const std::vector paramsAvg4D_Large = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {65, 65}, {65, 65}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::FLOOR, ngraph::op::PadType::VALID, true }, +const std::vector paramsAvg4D_Large = { + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {65, 65}, {65, 65}, 
{0, 0}, {0, 0}, + ov::op::RoundingType::FLOOR, ov::op::PadType::VALID, true }, }; INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_Large, PoolingLayerGPUTest, @@ -306,32 +305,32 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_Large, PoolingLayerGPUTest, PoolingLayerGPUTest::getTestCaseName); /* ============= Pooling (3D) ============= */ -const std::vector paramsMax5D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, +const std::vector paramsMax5D = { + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, }; -const std::vector paramsAvg5D = { - 
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true } +const std::vector paramsAvg5D = { + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, false }, + ov::test::poolSpecificParams{ 
ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true } }; INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_GPU_5D, PoolingLayerGPUTest, @@ -348,5 +347,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_5D, PoolingLayerGPUTest, ::testing::ValuesIn(inpOutPrecision)), PoolingLayerGPUTest::getTestCaseName); } // namespace - -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp index 49393f39d71..f8e73f9f058 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp @@ -2,23 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/shape_of.hpp" -#include "shared_test_classes/single_layer/strided_slice.hpp" -#include "shared_test_classes/single_layer/prior_box.hpp" -#include "shared_test_classes/single_layer/prior_box_clustered.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -#include +#include "openvino/core/type/element_type_traits.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; 
+#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/random_uniform.hpp" -using ElementType = ov::element::Type_t; +namespace { +using ov::test::InputShape; -namespace GPULayerTestsDefinitions { enum class priorbox_type { V0, V8, @@ -27,20 +22,21 @@ enum class priorbox_type { typedef std::tuple< InputShape, InputShape, - ElementType, // Net precision + ov::element::Type, std::vector, priorbox_type > PriorBoxLayerGPUTestParamsSet; + class PriorBoxLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { InputShape input1Shape; InputShape input2Shape; - ElementType netPrecision; + ov::element::Type model_type; std::vector max_size; priorbox_type priorboxType; - std::tie(input1Shape, input2Shape, netPrecision, max_size, priorboxType) = obj.param; + std::tie(input1Shape, input2Shape, model_type, max_size, priorboxType) = obj.param; std::ostringstream result; switch (priorboxType) { @@ -55,7 +51,7 @@ public: result << "PriorBoxV8Test_"; } result << std::to_string(obj.index) << "_"; - result << "netPrec=" << netPrecision << "_"; + result << "netPrec=" << model_type << "_"; result << "I1S="; result << ov::test::utils::partialShape2str({input1Shape.first}) << "_"; result << "TS=("; @@ -77,29 +73,29 @@ protected: void SetUp() override { targetDevice = ov::test::utils::DEVICE_GPU; - auto netPrecision = ElementType::undefined; + auto model_type = ov::element::undefined; InputShape input1Shape; InputShape input2Shape; std::vector max_size; priorbox_type priorboxType; - std::tie(input1Shape, input2Shape, netPrecision, max_size, priorboxType) = this->GetParam(); + std::tie(input1Shape, input2Shape, model_type, max_size, priorboxType) = this->GetParam(); init_input_shapes({input1Shape, input2Shape}); - inType = ov::element::Type(netPrecision); - outType = 
ElementType::f32; + inType = ov::element::Type(model_type); + outType = ov::element::f32; - auto beginInput = ov::op::v0::Constant::create(ngraph::element::i32, ngraph::Shape{1}, {2}); - auto endInput = ov::op::v0::Constant::create(ngraph::element::i32, ngraph::Shape{1}, {4}); - auto strideInput = ov::op::v0::Constant::create(ngraph::element::i32, ngraph::Shape{1}, {1}); + auto beginInput = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {2}); + auto endInput = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {4}); + auto strideInput = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {1}); ov::ParameterVector functionParams; for (auto&& shape : inputDynamicShapes) functionParams.push_back(std::make_shared(inType, shape)); - auto shapeOfOp1 = std::make_shared(functionParams[0], element::i32); - auto shapeOfOp2 = std::make_shared(functionParams[1], element::i32); + auto shapeOfOp1 = std::make_shared(functionParams[0], ov::element::i32); + auto shapeOfOp2 = std::make_shared(functionParams[1], ov::element::i32); auto stridedSliceOp1 = std::make_shared(shapeOfOp1, beginInput, @@ -123,7 +119,7 @@ protected: switch (priorboxType) { case priorbox_type::Clustered: { - ngraph::op::v0::PriorBoxClustered::Attributes attributes_clustered; + ov::op::v0::PriorBoxClustered::Attributes attributes_clustered; attributes_clustered.widths = {86, 13, 57, 39, 68, 34, 142, 50, 23}; attributes_clustered.heights = {44, 10, 30, 19, 94, 32, 61, 53, 17}; @@ -134,14 +130,14 @@ protected: attributes_clustered.offset = 0.5; attributes_clustered.clip = false; - auto priorBoxOp = std::make_shared(stridedSliceOp1, stridedSliceOp2, attributes_clustered); + auto priorBoxOp = std::make_shared(stridedSliceOp1, stridedSliceOp2, attributes_clustered); - ngraph::ResultVector results{std::make_shared(priorBoxOp)}; - function = std::make_shared (results, functionParams, "PriorBoxV0Function"); + ov::ResultVector results{std::make_shared(priorBoxOp)}; + function = 
std::make_shared (results, functionParams, "PriorBoxV0Function"); break; } case priorbox_type::V0: { - ngraph::op::v0::PriorBox::Attributes attributes_v0; + ov::op::v0::PriorBox::Attributes attributes_v0; attributes_v0.min_size = {64}; attributes_v0.max_size = max_size; @@ -153,15 +149,15 @@ protected: attributes_v0.flip = true; attributes_v0.scale_all_sizes = true; - auto priorBoxOp = std::make_shared(stridedSliceOp1, stridedSliceOp2, attributes_v0); + auto priorBoxOp = std::make_shared(stridedSliceOp1, stridedSliceOp2, attributes_v0); - ngraph::ResultVector results{std::make_shared(priorBoxOp)}; - function = std::make_shared (results, functionParams, "PriorBoxV0Function"); + ov::ResultVector results{std::make_shared(priorBoxOp)}; + function = std::make_shared (results, functionParams, "PriorBoxV0Function"); break; } case priorbox_type::V8: default: { - ngraph::op::v8::PriorBox::Attributes attributes_v8; + ov::op::v8::PriorBox::Attributes attributes_v8; attributes_v8.min_size = {64}; attributes_v8.max_size = max_size; @@ -174,25 +170,21 @@ protected: attributes_v8.scale_all_sizes = true; attributes_v8.min_max_aspect_ratios_order = true; - auto priorBoxOp = std::make_shared(stridedSliceOp1, stridedSliceOp2, attributes_v8); + auto priorBoxOp = std::make_shared(stridedSliceOp1, stridedSliceOp2, attributes_v8); - ngraph::ResultVector results{std::make_shared(priorBoxOp)}; - function = std::make_shared (results, functionParams, "PriorBoxV8Function"); + ov::ResultVector results{std::make_shared(priorBoxOp)}; + function = std::make_shared (results, functionParams, "PriorBoxV8Function"); } } } }; -TEST_P(PriorBoxLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(PriorBoxLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector netPrecisions = { - ElementType::f32, +const std::vector model_types = { + ov::element::f32, }; const std::vector mode = { @@ -230,10 +222,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_prior_box_full_dynamic, 
::testing::Combine( ::testing::ValuesIn(inShapesDynamic), ::testing::ValuesIn(imgShapesDynamic), - ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(model_types), ::testing::ValuesIn(max_size), ::testing::ValuesIn(mode)), PriorBoxLayerGPUTest::getTestCaseName); -} // namespace - -} // namespace GPULayerTestsDefinitions +} // namespace \ No newline at end of file diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/random_uniform.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/random_uniform.cpp index 755371e1b0a..d3f324a72ff 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/random_uniform.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/random_uniform.cpp @@ -2,26 +2,28 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" +#include "openvino/core/type/element_type_traits.hpp" -using namespace ngraph; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/random_uniform.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< std::vector, // Input shapes std::pair, // Min value, Max value std::pair, // Global seed, operation seed - ElementType, // Network precision - TargetDevice, // Device name - std::map // Additional network configuration + ov::element::Type, // Network precision + std::string // Device name > RandomUnifromDynamicGPUTestParamsSet; class RandomUnifromDynamicGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { RandomUnifromDynamicGPUTestParamsSet 
basicParamsSet = obj.param; @@ -29,10 +31,9 @@ public: std::vector input_shapes; std::pair min_max_values; std::pair seeds; - ElementType precision; - TargetDevice target_device; - std::map additionalConfig; - std::tie(input_shapes, min_max_values, seeds, precision, target_device, additionalConfig) = basicParamsSet; + ov::element::Type precision; + std::string target_device; + std::tie(input_shapes, min_max_values, seeds, precision, target_device) = basicParamsSet; result << "shape="; for (const auto& shape : input_shapes) { @@ -75,37 +76,37 @@ protected: void set_tensor_value(T scalar, ov::Tensor& tensor) { #define CASE(X) \ case X: { \ - auto *dataPtr = tensor.data::value_type>(); \ - dataPtr[0] = static_cast::value_type>(scalar); \ + auto *dataPtr = tensor.data::value_type>(); \ + dataPtr[0] = static_cast::value_type>(scalar); \ break; \ } switch (tensor.get_element_type()) { - CASE(ElementType::boolean) - CASE(ElementType::i8) - CASE(ElementType::i16) - CASE(ElementType::i32) - CASE(ElementType::i64) - CASE(ElementType::u8) - CASE(ElementType::u16) - CASE(ElementType::u32) - CASE(ElementType::u64) - CASE(ElementType::bf16) - CASE(ElementType::f16) - CASE(ElementType::f32) - CASE(ElementType::f64) - CASE(ElementType::u1) - CASE(ElementType::i4) - CASE(ElementType::u4) + CASE(ov::element::boolean) + CASE(ov::element::i8) + CASE(ov::element::i16) + CASE(ov::element::i32) + CASE(ov::element::i64) + CASE(ov::element::u8) + CASE(ov::element::u16) + CASE(ov::element::u32) + CASE(ov::element::u64) + CASE(ov::element::bf16) + CASE(ov::element::f16) + CASE(ov::element::f32) + CASE(ov::element::f64) + CASE(ov::element::u1) + CASE(ov::element::i4) + CASE(ov::element::u4) default: OPENVINO_THROW("Unsupported element type: ", tensor.get_element_type()); } } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); - 
auto generate_input = [&](size_t index, ElementType element_type) { + auto generate_input = [&](size_t index, ov::element::Type element_type) { ov::Tensor tensor(element_type, targetInputStaticShapes[index]); if (index != 0) { auto scalar_val = index == 1 ? min_max_values.first : min_max_values.second; @@ -121,11 +122,9 @@ protected: void SetUp() override { RandomUnifromDynamicGPUTestParamsSet basicParamsSet = this->GetParam(); std::vector shapes; - ElementType netType; - std::map additionalConfig; + ov::element::Type netType; std::pair seeds; - - std::tie(shapes, min_max_values, seeds, netType, targetDevice, additionalConfig) = basicParamsSet; + std::tie(shapes, min_max_values, seeds, netType, targetDevice) = basicParamsSet; init_input_shapes(shapes); @@ -144,14 +143,10 @@ private: std::pair min_max_values; }; - -TEST_P(RandomUnifromDynamicGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(RandomUnifromDynamicGPUTest, Inference) { run(); } -namespace { -std::map emptyAdditionalConfig; const std::vector> dynInputShapes = { { {{ov::PartialShape::dynamic(4)}, {{1, 2, 3, 4}, {1, 1, 5, 5}, {2, 3, 4, 5}}}, @@ -183,21 +178,18 @@ const std::vector> seeds = { {100, 10}, }; -const std::vector netPrecisions = { - ElementType::i32, - ElementType::f32, - ElementType::f16, +const std::vector netPrecisions = { + ov::element::i32, + ov::element::f32, + ov::element::f16, }; const auto testParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes), ::testing::ValuesIn(min_max_values), ::testing::ValuesIn(seeds), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); + ::testing::Values(ov::test::utils::DEVICE_GPU)); INSTANTIATE_TEST_SUITE_P(smoke_dynamic_random_uniform, RandomUnifromDynamicGPUTest, testParams_smoke, RandomUnifromDynamicGPUTest::getTestCaseName); - } // namespace -} // namespace GPULayerTestsDefinitions diff --git 
a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp index f171bfebb48..6db87fb65af 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp @@ -2,35 +2,36 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" +#include "openvino/core/type/element_type_traits.hpp" -using namespace ngraph; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/range.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< std::vector, // input shapes std::vector, // input values - ElementType, // Network precision - TargetDevice, // Device name - std::map // Additional network configuration + ov::element::Type, // Model type + std::string // Device name > RangeDynamicGPUTestParamsSet; class RangeDynamicGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { RangeDynamicGPUTestParamsSet basicParamsSet = obj.param; std::ostringstream result; std::vector inputShapes; std::vector inputValues; - ElementType netType; - TargetDevice targetDevice; - std::map additionalConfig; - std::tie(inputShapes, inputValues, netType, targetDevice, additionalConfig) = basicParamsSet; + ov::element::Type model_type; + std::string targetDevice; + std::tie(inputShapes, inputValues, model_type, targetDevice) = basicParamsSet; result << "IS="; for (const auto& shape : inputShapes) { @@ -43,7 +44,7 @@ public: for 
(const auto& v : inputValues) { result << v << "_"; } - result << "NetType=" << netType << "_"; + result << "model_type=" << model_type << "_"; result << "targetDevice=" << targetDevice; return result.str(); } @@ -75,47 +76,47 @@ protected: void add_scalar_to_tensor(T scalar, ov::Tensor& tensor) { #define CASE(X) \ case X: { \ - auto *dataPtr = tensor.data::value_type>(); \ - dataPtr[0] = static_cast::value_type>(scalar); \ + auto *dataPtr = tensor.data::value_type>(); \ + dataPtr[0] = static_cast::value_type>(scalar); \ break; \ } switch (tensor.get_element_type()) { - CASE(ElementType::boolean) - CASE(ElementType::i8) - CASE(ElementType::i16) - CASE(ElementType::i32) - CASE(ElementType::i64) - CASE(ElementType::u8) - CASE(ElementType::u16) - CASE(ElementType::u32) - CASE(ElementType::u64) - CASE(ElementType::bf16) - CASE(ElementType::f16) - CASE(ElementType::f32) - CASE(ElementType::f64) - CASE(ElementType::u1) - CASE(ElementType::i4) - CASE(ElementType::u4) + CASE(ov::element::boolean) + CASE(ov::element::i8) + CASE(ov::element::i16) + CASE(ov::element::i32) + CASE(ov::element::i64) + CASE(ov::element::u8) + CASE(ov::element::u16) + CASE(ov::element::u32) + CASE(ov::element::u64) + CASE(ov::element::bf16) + CASE(ov::element::f16) + CASE(ov::element::f32) + CASE(ov::element::f64) + CASE(ov::element::u1) + CASE(ov::element::i4) + CASE(ov::element::u4) default: OPENVINO_THROW("Unsupported element type: ", tensor.get_element_type()); } } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); - auto generate_input = [&](size_t index, ElementType element_type) { + auto generate_input = [&](size_t index, ov::element::Type element_type) { ov::Tensor tensor(element_type, targetInputStaticShapes[index]); add_scalar_to_tensor(input_values[index], tensor); inputs.insert({funcInputs[index].get_node_shared_ptr(), 
tensor}); }; // net_type=undifined means mixed type test - if (net_type == ElementType::undefined) { - generate_input(0, ElementType::f32); - generate_input(1, ElementType::i32); - generate_input(2, ElementType::f32); + if (net_type == ov::element::undefined) { + generate_input(0, ov::element::f32); + generate_input(1, ov::element::i32); + generate_input(2, ov::element::f32); } else { for (size_t i = 0; i < funcInputs.size(); ++i) { generate_input(i, funcInputs[i].get_element_type()); @@ -127,47 +128,43 @@ protected: RangeDynamicGPUTestParamsSet basicParamsSet = this->GetParam(); std::vector inputShapes; std::vector inputValues; - ElementType netType; - std::map additionalConfig; + ov::element::Type model_type; ov::ParameterVector params; - std::tie(inputShapes, inputValues, netType, targetDevice, additionalConfig) = basicParamsSet; + std::tie(inputShapes, inputValues, model_type, targetDevice) = basicParamsSet; input_values = inputValues; - net_type = netType; + net_type = model_type; init_input_shapes(inputShapes); - if (netType == ElementType::undefined) { - std::vector types = { ElementType::f32, ElementType::i32, ElementType::f32 }; + if (model_type == ov::element::undefined) { + std::vector types = { ov::element::f32, ov::element::i32, ov::element::f32 }; for (size_t i = 0; i < types.size(); i++) { auto paramNode = std::make_shared(types[i], inputDynamicShapes[i]); params.push_back(paramNode); } - netType = ElementType::f32; + model_type = ov::element::f32; } else { for (auto&& shape : inputDynamicShapes) { - params.push_back(std::make_shared(netType, shape)); + params.push_back(std::make_shared(model_type, shape)); } } - const auto range = std::make_shared(params[0], params[1], params[2], netType); + const auto range = std::make_shared(params[0], params[1], params[2], model_type); - ngraph::ResultVector results = {std::make_shared(range)}; - function = std::make_shared(results, params, "shapeof_out"); + ov::ResultVector results = {std::make_shared(range)}; + 
function = std::make_shared(results, params, "shapeof_out"); } private: std::vector input_values; - ElementType net_type; + ov::element::Type net_type; }; -TEST_P(RangeDynamicGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(RangeDynamicGPUTest, Inference) { run(); } -namespace { -std::map emptyAdditionalConfig; const std::vector> dynInputShapes = { { // Inputs for Range @@ -187,17 +184,16 @@ const std::vector> inputValues = { } }; -const std::vector netPrecisions = { - ElementType::i8, - ElementType::i32, - ElementType::i64, +const std::vector netPrecisions = { + ov::element::i8, + ov::element::i32, + ov::element::i64, }; const auto testParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes), ::testing::ValuesIn(inputValues), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); + ::testing::Values(ov::test::utils::DEVICE_GPU)); INSTANTIATE_TEST_SUITE_P(smoke_dynamic_range_01, RangeDynamicGPUTest, testParams_smoke, RangeDynamicGPUTest::getTestCaseName); @@ -211,16 +207,15 @@ const std::vector> inputFloatValues = { } }; -const std::vector netFloatPrecisions = { - ElementType::f16, - ElementType::f32, +const std::vector netFloatPrecisions = { + ov::element::f16, + ov::element::f32, }; const auto testFloatParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes), ::testing::ValuesIn(inputFloatValues), ::testing::ValuesIn(netFloatPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); + ::testing::Values(ov::test::utils::DEVICE_GPU)); INSTANTIATE_TEST_SUITE_P(smoke_dynamic_range_02, RangeDynamicGPUTest, testFloatParams_smoke, RangeDynamicGPUTest::getTestCaseName); @@ -233,19 +228,17 @@ const std::vector> inputMixedValues = { } }; -const std::vector netMixedPrecisions = { +const std::vector netMixedPrecisions = { // Mixed type test(start/step:fp32, end:i32) - ElementType::undefined + 
ov::element::undefined }; const auto testMixedParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes), ::testing::ValuesIn(inputMixedValues), ::testing::ValuesIn(netMixedPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); + ::testing::Values(ov::test::utils::DEVICE_GPU)); INSTANTIATE_TEST_SUITE_P(smoke_dynamic_diff_types, RangeDynamicGPUTest, testMixedParams_smoke, RangeDynamicGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reduce.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reduce.cpp index e4ae7b23381..1e19eb0f1d2 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reduce.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reduce.cpp @@ -2,17 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/reduce_ops.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/node_builders/reduce.hpp" +#include "common_test_utils/test_enums.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef struct { std::vector data_shape; @@ -20,22 +20,22 @@ typedef struct { } ReduceInput; typedef std::tuple< - ReduceInput, // input data (data shape, axes shape, axes values) - ElementType, // presion of inputs - helpers::ReductionType, // reduction type - bool, // keepDims - TargetDevice // device name + ReduceInput, // input data (data shape, axes shape, axes values) + ov::element::Type, // presion of inputs + 
ov::test::utils::ReductionType, // reduction type + bool, // keepDims + std::string // device name > ReduceLayerTestParamSet; class ReduceLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { ReduceInput input_data; - ElementType netType; - helpers::ReductionType reductionType; + ov::element::Type netType; + ov::test::utils::ReductionType reductionType; bool keepDims; - TargetDevice targetDevice; + std::string targetDevice; std::tie(input_data, netType, reductionType, keepDims, targetDevice) = obj.param; std::vector inshapes = input_data.data_shape; @@ -67,8 +67,8 @@ public: protected: void SetUp() override { ReduceInput input_data; - ElementType netPrecision; - helpers::ReductionType reductionType; + ov::element::Type netPrecision; + ov::test::utils::ReductionType reductionType; bool keepDims; std::tie(input_data, netPrecision, reductionType, keepDims, targetDevice) = this->GetParam(); @@ -84,52 +84,43 @@ protected: std::vector shapeAxes; shapeAxes.push_back(axes.size()); - auto reductionAxesNode = std::dynamic_pointer_cast( - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes)); + auto reductionAxesNode = std::make_shared(ov::element::i64, ov::Shape(shapeAxes), axes); - const auto reduce = ngraph::builder::makeReduce(params[0], reductionAxesNode, keepDims, reductionType); + const auto reduce = ov::test::utils::make_reduce(params[0], reductionAxesNode, keepDims, reductionType); - auto makeFunction = [](ParameterVector ¶ms, const std::shared_ptr &lastNode) { - ResultVector results; + auto makeFunction = [](ov::ParameterVector ¶ms, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return 
std::make_shared(results, params, "ReduceLayerGPUTest"); + return std::make_shared(results, params, "ReduceLayerGPUTest"); }; function = makeFunction(params, reduce); } }; -TEST_P(ReduceLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(ReduceLayerGPUTest, Inference) { run(); } -namespace { - const std::vector keepDims = { true, false, }; -const std::vector floatPrecisions = { - ElementType::f32, - ElementType::f16, +const std::vector float_types = { + ov::element::f32, + ov::element::f16, }; -const std::vector floatIntPrecisions = { - ElementType::f32, - ElementType::f16, - ElementType::i32, +const std::vector float_int_types = { + ov::element::f32, + ov::element::f16, + ov::element::i32, }; - - -namespace Reduce { - const ReduceInput dyn1d = { { { {-1}, {{4}, {5}} } @@ -176,8 +167,8 @@ const ReduceInput dyn6d = { // ================== Reduction int32/float types (Sum, Min, Max, L1) ================== const auto reduceSum = ::testing::Combine( ::testing::ValuesIn({dyn1d, dyn5d}), - ::testing::ValuesIn(floatIntPrecisions), - ::testing::Values(helpers::ReductionType::Sum), + ::testing::ValuesIn(float_int_types), + ::testing::Values(ov::test::utils::ReductionType::Sum), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -185,8 +176,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_sum_compareWithRefs_dynamic, ReduceLayerGP const auto reduceMin = ::testing::Combine( ::testing::ValuesIn({dyn2d, dyn6d}), - ::testing::ValuesIn(floatIntPrecisions), - ::testing::Values(helpers::ReductionType::Min), + ::testing::ValuesIn(float_int_types), + ::testing::Values(ov::test::utils::ReductionType::Min), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -194,8 +185,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_min_compareWithRefs_dynamic, ReduceLayerGP const auto reduceMax = ::testing::Combine( ::testing::ValuesIn({dyn3d, dyn5d}), - ::testing::ValuesIn(floatIntPrecisions), - 
::testing::Values(helpers::ReductionType::Max), + ::testing::ValuesIn(float_int_types), + ::testing::Values(ov::test::utils::ReductionType::Max), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -203,8 +194,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_max_compareWithRefs_dynamic, ReduceLayerGP const auto reduceL1 = ::testing::Combine( ::testing::ValuesIn({dyn4d, dyn6d}), - ::testing::ValuesIn(floatIntPrecisions), - ::testing::Values(helpers::ReductionType::L1), + ::testing::ValuesIn(float_int_types), + ::testing::Values(ov::test::utils::ReductionType::L1), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -214,8 +205,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_l1_compareWithRefs_dynamic, ReduceLayerGPU // ================== Reduction float types (Mean, Prod, L2) ================== const auto reduceMean = ::testing::Combine( ::testing::ValuesIn({dyn1d, dyn6d}), - ::testing::ValuesIn(floatPrecisions), - ::testing::Values(helpers::ReductionType::Mean), + ::testing::ValuesIn(float_types), + ::testing::Values(ov::test::utils::ReductionType::Mean), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -223,8 +214,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_mean_compareWithRefs_dynamic, ReduceLayerG const auto reduceProd = ::testing::Combine( ::testing::ValuesIn({dyn2d, dyn4d}), - ::testing::ValuesIn({ElementType::f32}), - ::testing::Values(helpers::ReductionType::Prod), + ::testing::ValuesIn({ov::element::f32}), + ::testing::Values(ov::test::utils::ReductionType::Prod), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -232,8 +223,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_prod_compareWithRefs_dynamic, ReduceLayerG const auto reduceL2 = ::testing::Combine( ::testing::ValuesIn({dyn4d, dyn5d}), - ::testing::ValuesIn(floatPrecisions), - ::testing::Values(helpers::ReductionType::L2), + ::testing::ValuesIn(float_types), + 
::testing::Values(ov::test::utils::ReductionType::L2), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -243,8 +234,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_l2_compareWithRefs_dynamic, ReduceLayerGPU // ================== Reduction logical types (LogicalOr, LogicalAnd) ================== const auto reduceLogicalOr = ::testing::Combine( ::testing::ValuesIn({dyn1d, dyn6d}), - ::testing::Values(ElementType::boolean), - ::testing::Values(helpers::ReductionType::LogicalOr), + ::testing::Values(ov::element::boolean), + ::testing::Values(ov::test::utils::ReductionType::LogicalOr), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -252,8 +243,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_logicalor_compareWithRefs_dynamic, ReduceL const auto reduceLogicalAnd = ::testing::Combine( ::testing::ValuesIn({dyn3d, dyn5d}), - ::testing::Values(ElementType::boolean), - ::testing::Values(helpers::ReductionType::LogicalAnd), + ::testing::Values(ov::element::boolean), + ::testing::Values(ov::test::utils::ReductionType::LogicalAnd), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -347,15 +338,11 @@ const std::vector dynVariousAxisInputs = { const auto reduceMaxWithVariousAxis = ::testing::Combine( ::testing::ValuesIn(dynVariousAxisInputs), - ::testing::Values(ElementType::f32), - ::testing::Values(helpers::ReductionType::Max), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::test::utils::ReductionType::Max), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); INSTANTIATE_TEST_SUITE_P(smoke_reduce_max_withVariousAxis_compareWithRefs_dynamic, ReduceLayerGPUTest, reduceMaxWithVariousAxis, ReduceLayerGPUTest::getTestCaseName); - - -} // namespace Reduce } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/region_yolo.cpp 
b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/region_yolo.cpp index 9ced7435d6b..eda97b5aacb 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/region_yolo.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/region_yolo.cpp @@ -2,18 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/region_yolo.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/region_yolo.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; struct regionYoloAttributes { size_t classes; @@ -28,9 +26,7 @@ typedef std::tuple< InputShape, // Input Shape regionYoloAttributes, // Params std::vector, // mask - ov::test::ElementType, // Network input precision - ov::test::ElementType, // Network output precision - std::map, // Additional network configuration + ov::element::Type, // Model type std::string // Device name > RegionYoloGPUTestParam; @@ -38,70 +34,60 @@ class RegionYoloLayerGPUTest : public testing::WithParamInterface obj) { - InputShape inputShape; + InputShape shapes; regionYoloAttributes attributes; std::vector mask; - ov::test::ElementType inpPrecision; - ov::test::ElementType outPrecision; + ov::element::Type model_type; std::string targetName; - std::map additionalConfig; - - std::tie(inputShape, attributes, mask, inpPrecision, outPrecision, additionalConfig, targetName) = obj.param; + std::tie(shapes, attributes, mask, model_type, targetName) = obj.param; std::ostringstream result; - result << "IS=" << inputShape << "_"; + result << "IS=" << 
ov::test::utils::partialShape2str({shapes.first}) << "_"; + for (const auto& item : shapes.second) { + result << ov::test::utils::vec2str(item) << "_"; + } result << "classes=" << attributes.classes << "_"; result << "coords=" << attributes.coordinates << "_"; result << "num=" << attributes.num_regions << "_"; result << "doSoftmax=" << attributes.do_softmax << "_"; result << "axis=" << attributes.start_axis << "_"; result << "endAxis=" << attributes.end_axis << "_"; - result << "inpPRC=" << inpPrecision << "_"; - result << "outPRC=" << outPrecision << "_"; + result << "inpPRC=" << model_type << "_"; result << "targetDevice=" << targetName << "_"; return result.str(); } protected: void SetUp() override { - InputShape inputShape; + InputShape shapes; regionYoloAttributes attributes; std::vector mask; - ov::test::ElementType inPrc; - ov::test::ElementType outPrc; - std::map additionalConfig; + ov::element::Type model_type; + std::tie(shapes, attributes, mask, model_type, targetDevice) = this->GetParam(); - std::tie(inputShape, attributes, mask, inPrc, outPrc, additionalConfig, targetDevice) = this->GetParam(); - - init_input_shapes({ inputShape }); + init_input_shapes({ shapes }); ov::ParameterVector paramRegionYolo; for (auto&& shape : inputDynamicShapes) { - paramRegionYolo.push_back(std::make_shared(inPrc, shape)); + paramRegionYolo.push_back(std::make_shared(model_type, shape)); } - const auto region_yolo = std::make_shared(paramRegionYolo[0], + const auto region_yolo = std::make_shared(paramRegionYolo[0], attributes.coordinates, attributes.classes, attributes.num_regions, attributes.do_softmax, mask, attributes.start_axis, attributes.end_axis); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < region_yolo->get_output_size(); i++) - results.push_back(std::make_shared(region_yolo->output(i))); - function = std::make_shared(results, paramRegionYolo, "RegionYolo"); + results.push_back(std::make_shared(region_yolo->output(i))); + 
function = std::make_shared(results, paramRegionYolo, "RegionYolo"); } }; -TEST_P(RegionYoloLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(RegionYoloLayerGPUTest, Inference) { run(); } -namespace { - -std::map emptyAdditionalConfig; - -const std::vector inpOutPrc = {ov::test::ElementType::f16, ov::test::ElementType::f32}; +const std::vector model_types = {ov::element::f16, ov::element::f32}; const std::vector inShapes_caffe_dynamic = { {{-1, -1, -1, -1}, {{1, 125, 13, 13}, {1, 125, 26, 26}}}, @@ -134,9 +120,7 @@ const auto testCase_yolov3_dynamic = ::testing::Combine( ::testing::ValuesIn(inShapes_v3_dynamic), ::testing::Values(yoloV3attr), ::testing::Values(masks[2]), - ::testing::ValuesIn(inpOutPrc), - ::testing::ValuesIn(inpOutPrc), - ::testing::Values(emptyAdditionalConfig), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -146,9 +130,7 @@ const auto testCase_yolov3_mxnet_dynamic = ::testing::Combine( ::testing::ValuesIn(inShapes_mxnet_dynamic), ::testing::Values(yoloV3mxnetAttr), ::testing::Values(masks[1]), - ::testing::ValuesIn(inpOutPrc), - ::testing::ValuesIn(inpOutPrc), - ::testing::Values(emptyAdditionalConfig), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -158,9 +140,7 @@ const auto testCase_yolov2_caffe_dynamic = ::testing::Combine( ::testing::ValuesIn(inShapes_caffe_dynamic), ::testing::Values(yoloV2caffeAttr), ::testing::Values(masks[0]), - ::testing::ValuesIn(inpOutPrc), - ::testing::ValuesIn(inpOutPrc), - ::testing::Values(emptyAdditionalConfig), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -177,4 +157,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPURegionYoloCaffeDynamic, RegionYoloLayerGPUTest RegionYoloLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reorg_yolo.cpp 
b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reorg_yolo.cpp index b27d9fbf423..8f3f4dbd96e 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reorg_yolo.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reorg_yolo.cpp @@ -2,69 +2,65 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/reorg_yolo.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/reorg_yolo.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< - InputShape, // Input Shape - size_t, // Stride - ElementType, // Network precision - TargetDevice // Device + InputShape, // Input Shape + size_t, // Stride + ov::element::Type, // Model type + std::string // Device > ReorgYoloGPUTestParams; class ReorgYoloLayerGPUTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - InputShape inputShape; + InputShape shapes; size_t stride; - ElementType netPrecision; - TargetDevice targetDev; - std::tie(inputShape, stride, netPrecision, targetDev) = obj.param; + ov::element::Type model_type; + std::string targetDev; + std::tie(shapes, stride, model_type, targetDev) = obj.param; std::ostringstream result; - result << "IS=" << ov::test::utils::partialShape2str({inputShape.first}) << "_"; - for (const auto& item : inputShape.second) { + result << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_"; + for (const auto& item : shapes.second) { result << ov::test::utils::vec2str(item) << "_"; } result << "stride=" 
<< stride << "_"; - result << "netPRC=" << netPrecision << "_"; + result << "modelPRC=" << model_type << "_"; result << "targetDevice=" << targetDev << "_"; return result.str(); } protected: void SetUp() override { - InputShape inputShape; + InputShape shapes; size_t stride; - ElementType netPrecision; - std::tie(inputShape, stride, netPrecision, targetDevice) = this->GetParam(); + ov::element::Type model_type; + std::tie(shapes, stride, model_type, targetDevice) = this->GetParam(); - init_input_shapes({inputShape}); + init_input_shapes({shapes}); - auto param = std::make_shared(ngraph::element::f32, inputDynamicShapes[0]); - auto reorg_yolo = std::make_shared(param, stride); - function = std::make_shared(std::make_shared(reorg_yolo), - ngraph::ParameterVector{param}, + auto param = std::make_shared(ov::element::f32, inputDynamicShapes[0]); + auto reorg_yolo = std::make_shared(param, stride); + function = std::make_shared(std::make_shared(reorg_yolo), + ov::ParameterVector{param}, "ReorgYolo"); } }; -TEST_P(ReorgYoloLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(ReorgYoloLayerGPUTest, Inference) { run(); }; -namespace { - const std::vector inShapesDynamic1 = { {{{1, 2}, -1, -1, -1}, {{1, 4, 4, 4}, {1, 8, 4, 4}, {2, 8, 4, 4}}} }; @@ -94,4 +90,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_TestsReorgYolo_stride2_DynamicShape, ReorgYoloLay ReorgYoloLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp index 90d2f1b5f63..2739442da2a 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp @@ -2,61 +2,57 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/roi_pooling.hpp" 
-#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "common_test_utils/data_utils.hpp" +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; - -namespace GPULayerTestsDefinitions { +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/tile.hpp" +namespace { enum ProposalGenerationMode { RANDOM, ULTIMATE_RIGHT_BORDER }; -using ROIPoolingShapes = std::vector; +using ROIPoolingShapes = std::vector; typedef std::tuple< - ROIPoolingShapes, // Input shapes - std::vector, // Pooled shape {pooled_h, pooled_w} - float, // Spatial scale - ngraph::helpers::ROIPoolingTypes, // ROIPooling method - InferenceEngine::Precision // Net precision + ROIPoolingShapes, // Input shapes + std::vector, // Pooled shape {pooled_h, pooled_w} + float, // Spatial scale + ov::test::utils::ROIPoolingTypes, // ROIPooling method + ov::element::Type // Model type > ROIPoolingParams; typedef std::tuple< ROIPoolingParams, - ProposalGenerationMode, - std::map + ProposalGenerationMode > ROIPoolingGPUTestParams; class ROIPoolingLayerGPUTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - ROIPoolingParams basicParamsSet; - ProposalGenerationMode propMode; - std::map additionalConfig; + ROIPoolingParams basic_params_set; + ProposalGenerationMode prop_mode; - std::tie(basicParamsSet, propMode, additionalConfig) = obj.param; + std::tie(basic_params_set, prop_mode) = obj.param; - ROIPoolingShapes inputShapes; - std::vector poolShape; + ROIPoolingShapes shapes; + std::vector pool_shape; float spatial_scale; - ngraph::helpers::ROIPoolingTypes pool_method; - 
InferenceEngine::Precision netPrecision; - std::tie(inputShapes, poolShape, spatial_scale, pool_method, netPrecision) = basicParamsSet; + ov::test::utils::ROIPoolingTypes pool_method; + ov::element::Type model_type; + std::tie(shapes, pool_shape, spatial_scale, pool_method, model_type) = basic_params_set; std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - for (const auto& shape : inputShapes) { + result << "netPRC=" << model_type << "_"; + for (const auto& shape : shapes) { result << ov::test::utils::partialShape2str({ shape.first }) << "_"; } result << "TS="; - for (const auto& shape : inputShapes) { + for (const auto& shape : shapes) { result << "("; if (!shape.second.empty()) { auto itr = shape.second.begin(); @@ -67,22 +63,17 @@ public: result << ")_"; } - result << "PS=" << ov::test::utils::vec2str(poolShape) << "_"; + result << "PS=" << ov::test::utils::vec2str(pool_shape) << "_"; result << "Scale=" << spatial_scale << "_"; switch (pool_method) { - case ngraph::helpers::ROIPoolingTypes::ROI_MAX: + case ov::test::utils::ROIPoolingTypes::ROI_MAX: result << "Max_"; break; - case ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR: + case ov::test::utils::ROIPoolingTypes::ROI_BILINEAR: result << "Bilinear_"; break; } - result << "config=("; - for (const auto& configEntry : additionalConfig) { - result << configEntry.first << ", " << configEntry.second << ":"; - } - result << ")"; - switch (propMode) { + switch (prop_mode) { case ProposalGenerationMode::ULTIMATE_RIGHT_BORDER: result << "_UltimateRightBorderProposal"; break; @@ -96,16 +87,16 @@ public: } protected: - void generate_inputs(const std::vector& targetInputStaticShapes) override { - const ProposalGenerationMode propMode = std::get<1>(this->GetParam()); + void generate_inputs(const std::vector& targetInputStaticShapes) override { + const ProposalGenerationMode prop_mode = std::get<1>(this->GetParam()); const float spatial_scale = std::get<2>(std::get<0>(this->GetParam())); - 
const ngraph::helpers::ROIPoolingTypes pool_method = std::get<3>(std::get<0>(this->GetParam())); + const ov::test::utils::ROIPoolingTypes pool_method = std::get<3>(std::get<0>(this->GetParam())); inputs.clear(); const auto& funcInputs = function->inputs(); auto feat_map_shape = targetInputStaticShapes[0]; - const auto is_roi_max_mode = (pool_method == ngraph::helpers::ROIPoolingTypes::ROI_MAX); + const auto is_roi_max_mode = (pool_method == ov::test::utils::ROIPoolingTypes::ROI_MAX); const int height = is_roi_max_mode ? feat_map_shape[2] / spatial_scale : 1; const int width = is_roi_max_mode ? feat_map_shape[3] / spatial_scale : 1; @@ -115,13 +106,13 @@ protected: if (i == 1) { tensor = ov::Tensor(funcInput.get_element_type(), targetInputStaticShapes[i]); - if (propMode == ULTIMATE_RIGHT_BORDER) { + if (prop_mode == ULTIMATE_RIGHT_BORDER) { // because of nonalgebraic character of floating point operation, the following values causes inequity: // ((end_h - start_h) * (input_h - 1) / (pooled_h - 1)) * (pooled_h - 1) > (end_h - start_h) * (input_h - 1) // and as result excess of right limit for proposal value if the border case (current_h == pooled_h - 1) // will not be handled explicitly switch (funcInput.get_element_type()) { - case ngraph::element::f32: { + case ov::element::f32: { auto* dataPtr = tensor.data(); for (size_t i = 0; i < tensor.get_size(); i += 5) { dataPtr[i] = 0; @@ -132,14 +123,14 @@ protected: } break; } - case ngraph::element::bf16: { + case ov::element::bf16: { auto* dataPtr = tensor.data(); for (size_t i = 0; i < tensor.get_size(); i += 5) { - dataPtr[i] = static_cast(ngraph::float16(0.f).to_bits()); - dataPtr[i + 1] = static_cast(ngraph::float16(0.f).to_bits()); - dataPtr[i + 2] = static_cast(ngraph::float16(0.248046786f).to_bits()); - dataPtr[i + 3] = static_cast(ngraph::float16(0.471333951f).to_bits()); - dataPtr[i + 4] = static_cast(ngraph::float16(1.f).to_bits()); + dataPtr[i] = static_cast(ov::float16(0.f).to_bits()); + dataPtr[i + 1] = 
static_cast(ov::float16(0.f).to_bits()); + dataPtr[i + 2] = static_cast(ov::float16(0.248046786f).to_bits()); + dataPtr[i + 3] = static_cast(ov::float16(0.471333951f).to_bits()); + dataPtr[i + 4] = static_cast(ov::float16(1.f).to_bits()); } break; } @@ -166,50 +157,42 @@ protected: } void SetUp() override { - ROIPoolingParams basicParamsSet; - ProposalGenerationMode propMode; - std::map additionalConfig; + ROIPoolingParams basic_params_set; + ProposalGenerationMode prop_mode; - std::tie(basicParamsSet, propMode, additionalConfig) = this->GetParam(); - ROIPoolingShapes inputShapes; - std::vector poolShape; + std::tie(basic_params_set, prop_mode) = this->GetParam(); + ROIPoolingShapes shapes; + std::vector pool_shape; float spatial_scale; - ngraph::helpers::ROIPoolingTypes pool_method; - InferenceEngine::Precision netPrecision; - std::tie(inputShapes, poolShape, spatial_scale, pool_method, netPrecision) = basicParamsSet; + ov::test::utils::ROIPoolingTypes pool_method; + ov::element::Type model_type; + std::tie(shapes, pool_shape, spatial_scale, pool_method, model_type) = basic_params_set; targetDevice = ov::test::utils::DEVICE_GPU; - init_input_shapes(inputShapes); + init_input_shapes(shapes); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) - params.push_back(std::make_shared(ngPrc, shape)); + params.push_back(std::make_shared(model_type, shape)); std::shared_ptr roi_pooling; if (ov::test::utils::ROIPoolingTypes::ROI_MAX == pool_method) { - roi_pooling = std::make_shared(params[0], params[1], poolShape, spatial_scale, "max"); + roi_pooling = std::make_shared(params[0], params[1], pool_shape, spatial_scale, "max"); } else { - roi_pooling = std::make_shared(params[0], params[1], poolShape, spatial_scale, "bilinear"); + roi_pooling = std::make_shared(params[0], params[1], pool_shape, spatial_scale, "bilinear"); } - ngraph::ResultVector results; + ov::ResultVector 
results; for (size_t i = 0; i < roi_pooling->get_output_size(); i++) - results.push_back(std::make_shared(roi_pooling->output(i))); - function = std::make_shared(results, params, "ROIPooling"); - functionRefs = ngraph::clone_function(*function); + results.push_back(std::make_shared(roi_pooling->output(i))); + function = std::make_shared(results, params, "ROIPooling"); + functionRefs = function->clone(); } }; -TEST_P(ROIPoolingLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(ROIPoolingLayerGPUTest, Inference) { run(); } -namespace { - -std::map emptyAdditionalConfig; - const std::vector inShapes = { ROIPoolingShapes{{{}, {{1, 3, 8, 8}}}, {{}, {{1, 5}}}}, ROIPoolingShapes{{{}, {{1, 3, 8, 8}}}, {{}, {{3, 5}}}}, @@ -291,43 +274,39 @@ const std::vector> pooledShapes_bilinear = { {6, 6} }; -const std::vector netPRCs = {InferenceEngine::Precision::FP32}; +const std::vector model_types = {ov::element::f32}; const std::vector spatial_scales = {0.625f, 1.f}; const auto test_ROIPooling_max = ::testing::Combine(::testing::ValuesIn(inShapes), ::testing::ValuesIn(pooledShapes_max), ::testing::ValuesIn(spatial_scales), - ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_MAX), - ::testing::ValuesIn(netPRCs)); + ::testing::Values(ov::test::utils::ROIPoolingTypes::ROI_MAX), + ::testing::ValuesIn(model_types)); const auto test_ROIPooling_bilinear = ::testing::Combine(::testing::ValuesIn(inShapes), ::testing::ValuesIn(pooledShapes_bilinear), ::testing::Values(spatial_scales[1]), - ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR), - ::testing::ValuesIn(netPRCs)); + ::testing::Values(ov::test::utils::ROIPoolingTypes::ROI_BILINEAR), + ::testing::ValuesIn(model_types)); INSTANTIATE_TEST_SUITE_P(smoke_ROIPoolingGPU_max, ROIPoolingLayerGPUTest, ::testing::Combine(test_ROIPooling_max, - ::testing::Values(ProposalGenerationMode::RANDOM), - ::testing::Values(emptyAdditionalConfig)), + ::testing::Values(ProposalGenerationMode::RANDOM)), 
ROIPoolingLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_ROIPoolingGPU_bilinear, ROIPoolingLayerGPUTest, ::testing::Combine(test_ROIPooling_bilinear, - ::testing::Values(ProposalGenerationMode::RANDOM), - ::testing::Values(emptyAdditionalConfig)), + ::testing::Values(ProposalGenerationMode::RANDOM)), ROIPoolingLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_ROIPoolingGPU_bilinear_ultimateRightBorderProposal, ROIPoolingLayerGPUTest, ::testing::Combine(::testing::Combine(::testing::Values(ROIPoolingShapes{{{}, {{1, 1, 50, 50}}}, {{}, {{1, 5}}}}), ::testing::Values(std::vector { 4, 4 }), ::testing::Values(spatial_scales[1]), - ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR), - ::testing::Values(InferenceEngine::Precision::FP32)), - ::testing::Values(ProposalGenerationMode::ULTIMATE_RIGHT_BORDER), - ::testing::Values(emptyAdditionalConfig)), + ::testing::Values(ov::test::utils::ROIPoolingTypes::ROI_BILINEAR), + ::testing::Values(ov::element::f32)), + ::testing::Values(ProposalGenerationMode::ULTIMATE_RIGHT_BORDER)), ROIPoolingLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/scatter_nd_update.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/scatter_nd_update.cpp index b1c010d80f4..fe0445cd291 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/scatter_nd_update.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/scatter_nd_update.cpp @@ -2,19 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/scatter_ND_update.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -#include -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; 
+#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/scatter_nd_update.hpp" +#include "openvino/op/scatter_update.hpp" +#include "openvino/op/scatter_elements_update.hpp" -namespace GPULayerTestsDefinitions { -using ScatterUpdateShapes = std::vector; +namespace { +using ScatterUpdateShapes = std::vector; using IndicesValues = std::vector; enum class Scatterupdate_type { @@ -31,24 +30,24 @@ struct ScatterUpdateLayerParams { typedef std::tuple< ScatterUpdateLayerParams, - ElementType, // input precision - ElementType // indices precision + ov::element::Type, // input precision + ov::element::Type // indices precision > ScatterUpdateParams; class ScatterUpdateLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { ScatterUpdateLayerParams scatterParams; - ElementType inputPrecision; - ElementType idxPrecision; - std::tie(scatterParams, inputPrecision, idxPrecision) = obj.param; + ov::element::Type model_type; + ov::element::Type idx_type; + std::tie(scatterParams, model_type, idx_type) = obj.param; const auto inputShapes = scatterParams.inputShapes; const auto indicesValues = scatterParams.indicesValues; const auto scType = scatterParams.scType; std::ostringstream result; - result << inputPrecision << "_IS="; + result << model_type << "_IS="; for (const auto& shape : inputShapes) { result << ov::test::utils::partialShape2str({ shape.first }) << "_"; } @@ -61,7 +60,7 @@ public: result << ")_"; } result << "indices_values=" << ov::test::utils::vec2str(indicesValues); - result << "_idx_precision=" << idxPrecision; + result << "_idx_precision=" << idx_type; result << "_scatter_mode="; switch (scType) { case Scatterupdate_type::ND: @@ -84,30 +83,30 @@ protected: const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); 
++i) { const auto& funcInput = funcInputs[i]; - const auto& inputPrecision = funcInput.get_element_type(); + const auto& model_type = funcInput.get_element_type(); const auto& targetShape = targetInputStaticShapes[i]; ov::Tensor tensor; if (i == 1) { - tensor = ov::Tensor{ inputPrecision, targetShape }; + tensor = ov::Tensor{ model_type, targetShape }; const auto indicesVals = std::get<0>(this->GetParam()).indicesValues; - if (inputPrecision == ElementType::i32) { + if (model_type == ov::element::i32) { auto data = tensor.data(); for (size_t i = 0; i < tensor.get_size(); ++i) { data[i] = static_cast(indicesVals[i]); } - } else if (inputPrecision == ElementType::i64) { + } else if (model_type == ov::element::i64) { auto data = tensor.data(); for (size_t i = 0; i < tensor.get_size(); ++i) { data[i] = indicesVals[i]; } } else { - OPENVINO_THROW("GatherNDUpdate. Unsupported indices precision: ", inputPrecision); + OPENVINO_THROW("GatherNDUpdate. Unsupported indices precision: ", model_type); } } else { - if (inputPrecision.is_real()) { - tensor = ov::test::utils::create_and_fill_tensor(inputPrecision, targetShape, 10, 0, 1000); + if (model_type.is_real()) { + tensor = ov::test::utils::create_and_fill_tensor(model_type, targetShape, 10, 0, 1000); } else { - tensor = ov::test::utils::create_and_fill_tensor(inputPrecision, targetShape); + tensor = ov::test::utils::create_and_fill_tensor(model_type, targetShape); } } inputs.insert({ funcInput.get_node_shared_ptr(), tensor }); @@ -117,19 +116,19 @@ protected: void SetUp() override { targetDevice = ov::test::utils::DEVICE_GPU; ScatterUpdateLayerParams scatterParams; - ElementType inputPrecision; - ElementType idxPrecision; - std::tie(scatterParams, inputPrecision, idxPrecision) = this->GetParam(); + ov::element::Type model_type; + ov::element::Type idx_type; + std::tie(scatterParams, model_type, idx_type) = this->GetParam(); const auto inputShapes = scatterParams.inputShapes; const auto scType = scatterParams.scType; 
init_input_shapes({inputShapes[0], inputShapes[1], inputShapes[2]}); - ov::ParameterVector dataParams{std::make_shared(inputPrecision, inputDynamicShapes[0]), - std::make_shared(inputPrecision, inputDynamicShapes[2])}; + ov::ParameterVector dataParams{std::make_shared(model_type, inputDynamicShapes[0]), + std::make_shared(model_type, inputDynamicShapes[2])}; - auto indicesParam = std::make_shared(idxPrecision, inputDynamicShapes[1]); + auto indicesParam = std::make_shared(idx_type, inputDynamicShapes[1]); dataParams[0]->set_friendly_name("Param_1"); indicesParam->set_friendly_name("Param_2"); dataParams[1]->set_friendly_name("Param_3"); @@ -137,42 +136,39 @@ protected: std::shared_ptr scatter; switch (scType) { case Scatterupdate_type::ND: { - scatter = std::make_shared(dataParams[0], indicesParam, dataParams[1]); + scatter = std::make_shared(dataParams[0], indicesParam, dataParams[1]); break; } case Scatterupdate_type::Elements: { auto axis = ov::op::v0::Constant::create(ov::element::i32, inputShapes[3].first.get_shape(), inputShapes[3].second[0]); - scatter = std::make_shared(dataParams[0], indicesParam, dataParams[1], axis); + scatter = std::make_shared(dataParams[0], indicesParam, dataParams[1], axis); break; } case Scatterupdate_type::Basic: default: { auto axis = ov::op::v0::Constant::create(ov::element::i32, inputShapes[3].first.get_shape(), inputShapes[3].second[0]); - scatter = std::make_shared(dataParams[0], indicesParam, dataParams[1], axis); + scatter = std::make_shared(dataParams[0], indicesParam, dataParams[1], axis); } } - ngraph::ParameterVector allParams{ dataParams[0], indicesParam, dataParams[1] }; + ov::ParameterVector allParams{ dataParams[0], indicesParam, dataParams[1] }; - auto makeFunction = [](ParameterVector ¶ms, const std::shared_ptr &lastNode) { - ResultVector results; + auto makeFunction = [](ov::ParameterVector ¶ms, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) 
- results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "ScatterUpdateLayerGPUTest"); + return std::make_shared(results, params, "ScatterUpdateLayerGPUTest"); }; function = makeFunction(allParams, scatter); } }; -TEST_P(ScatterUpdateLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(ScatterUpdateLayerGPUTest, Inference) { run(); } -namespace ScatterNDUpdate { - const std::vector scatterNDParams = { ScatterUpdateLayerParams{ ScatterUpdateShapes{ @@ -245,12 +241,12 @@ const std::vector scatterElementsParams = { }, }; -const std::vector inputPrecisions = { - ElementType::f32, +const std::vector model_types = { + ov::element::f32, }; -const std::vector constantPrecisions = { - ElementType::i32, +const std::vector constantPrecisions = { + ov::element::i32, }; const std::vector scatterUpdate_EmptyInput1_2Params = { @@ -294,28 +290,28 @@ const std::vector scatterElementsUpdate_EmptyInput1_2P INSTANTIATE_TEST_SUITE_P(smoke_ScatterNDUpdate_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(scatterNDParams), - ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(model_types), ::testing::ValuesIn(constantPrecisions)), ScatterUpdateLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_ScatterElementsUpdate_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(scatterElementsParams), - ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(model_types), ::testing::ValuesIn(constantPrecisions)), ScatterUpdateLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_ScatterUpdate_EmptyInput1_2_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(scatterUpdate_EmptyInput1_2Params), - ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(model_types), ::testing::ValuesIn(constantPrecisions)), 
ScatterUpdateLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_ScatterNDUpdate_EmptyInput1_2_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(scatterNDUpdate_EmptyInput1_2Params), - ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(model_types), ::testing::ValuesIn(constantPrecisions)), ScatterUpdateLayerGPUTest::getTestCaseName); @@ -323,8 +319,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_ScatterNDUpdate_EmptyInput1_2_CompareWithRefs_dyn INSTANTIATE_TEST_SUITE_P(smoke_ScatterElementsUpdate_EmptyInput1_2_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(scatterElementsUpdate_EmptyInput1_2Params), - ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(model_types), ::testing::ValuesIn(constantPrecisions)), ScatterUpdateLayerGPUTest::getTestCaseName); -} // namespace ScatterNDUpdate -} // namespace GPULayerTestsDefinitions +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/select.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/select.cpp index c8503052f31..73c24ff347f 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/select.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/select.cpp @@ -2,34 +2,33 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/select.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/select.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< - std::vector, // input shapes - ElementType, // 
presion of 'then' and 'else' of inputs - op::AutoBroadcastSpec, // broadcast spec - TargetDevice // device name + std::vector, // input shapes + ov::element::Type, // presion of 'then' and 'else' of inputs + ov::op::AutoBroadcastSpec, // broadcast spec + std::string // device name > SelectLayerTestParamSet; class SelectLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { std::vector inshapes; - ElementType netType; - op::AutoBroadcastSpec broadcast; - TargetDevice targetDevice; - std::tie(inshapes, netType, broadcast, targetDevice) = obj.param; + ov::element::Type model_type; + ov::op::AutoBroadcastSpec broadcast; + std::string targetDevice; + std::tie(inshapes, model_type, broadcast, targetDevice) = obj.param; std::ostringstream result; @@ -43,7 +42,7 @@ public: result << ov::test::utils::vec2str(item) << "_"; } } - result << "Precision=" << netType << "_"; + result << "Precision=" << model_type << "_"; result << "Broadcast=" << broadcast.m_type << "_"; result << "trgDev=" << targetDevice; @@ -53,48 +52,42 @@ public: protected: void SetUp() override { std::vector inshapes; - ElementType netType; - op::AutoBroadcastSpec broadcast; - std::tie(inshapes, netType, broadcast, targetDevice) = this->GetParam(); + ov::element::Type model_type; + ov::op::AutoBroadcastSpec broadcast; + std::tie(inshapes, model_type, broadcast, targetDevice) = this->GetParam(); init_input_shapes(inshapes); - ParameterVector params = { - std::make_shared(ElementType::boolean, inputDynamicShapes[0]), - std::make_shared(netType, inputDynamicShapes[1]), - std::make_shared(netType, inputDynamicShapes[2]), + ov::ParameterVector params = { + std::make_shared(ov::element::boolean, inputDynamicShapes[0]), + std::make_shared(model_type, inputDynamicShapes[1]), + std::make_shared(model_type, inputDynamicShapes[2]), }; auto select = 
std::make_shared(params[0], params[1], params[2], broadcast); - auto makeFunction = [](ParameterVector ¶ms, const std::shared_ptr &lastNode) { - ResultVector results; + auto makeFunction = [](ov::ParameterVector ¶ms, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "SelectLayerGPUTest"); + return std::make_shared(results, params, "SelectLayerGPUTest"); }; function = makeFunction(params, select); } }; -TEST_P(SelectLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(SelectLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector netPrecisions = { - ElementType::f32, - ElementType::f16, - ElementType::i32, +const std::vector model_types = { + ov::element::f32, + ov::element::f16, + ov::element::i32, }; -namespace Select { - // AutoBroadcastType: NUMPY const std::vector> inShapesDynamicNumpy = { { @@ -131,8 +124,8 @@ const std::vector> inShapesDynamicNumpy = { const auto numpyCases = ::testing::Combine( ::testing::ValuesIn(inShapesDynamicNumpy), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(op::AutoBroadcastType::NUMPY), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::op::AutoBroadcastType::NUMPY), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -148,8 +141,8 @@ const std::vector> inShapesDynamicRangeNumpy = { const auto rangeNumpyCases = ::testing::Combine( ::testing::ValuesIn(inShapesDynamicRangeNumpy), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(op::AutoBroadcastType::NUMPY), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::op::AutoBroadcastType::NUMPY), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -171,13 +164,10 @@ const std::vector> inShapesDynamicNone = { const auto noneCases = ::testing::Combine( 
::testing::ValuesIn(inShapesDynamicNone), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(op::AutoBroadcastType::NONE), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::op::AutoBroadcastType::NONE), ::testing::Values(ov::test::utils::DEVICE_GPU) ); INSTANTIATE_TEST_SUITE_P(smoke_select_CompareWithRefsNone_dynamic, SelectLayerGPUTest, noneCases, SelectLayerGPUTest::getTestCaseName); - -} // namespace Select } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/shapeof.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/shapeof.cpp index d231567a6a3..fd9f2912f6b 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/shapeof.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/shapeof.cpp @@ -2,36 +2,34 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/shape_of.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/shape_of.hpp" -using ElementType = ov::element::Type_t; +namespace { +using ov::test::InputShape; -namespace GPULayerTestsDefinitions { typedef std::tuple< InputShape, - ElementType // Net precision + ov::element::Type > ShapeOfLayerGPUTestParamsSet; class ShapeOfLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { InputShape inputShape; - ElementType netPrecision; - std::tie(inputShape, netPrecision) = obj.param; + ov::element::Type model_type; + 
std::tie(inputShape, model_type) = obj.param; std::ostringstream result; result << "ShapeOfTest_"; result << std::to_string(obj.index) << "_"; - result << "netPrec=" << netPrecision << "_"; + result << "netPrec=" << model_type << "_"; result << "IS="; result << ov::test::utils::partialShape2str({inputShape.first}) << "_"; result << "TS=("; @@ -45,43 +43,39 @@ protected: void SetUp() override { targetDevice = ov::test::utils::DEVICE_GPU; - auto netPrecision = ElementType::undefined; + ov::element::Type model_type; InputShape inputShape; - std::tie(inputShape, netPrecision) = this->GetParam(); + std::tie(inputShape, model_type) = this->GetParam(); init_input_shapes({inputShape}); - outType = ElementType::i32; + outType = ov::element::i32; ov::ParameterVector functionParams; for (auto&& shape : inputDynamicShapes) - functionParams.push_back(std::make_shared(netPrecision, shape)); + functionParams.push_back(std::make_shared(model_type, shape)); - auto shapeOfOp = std::make_shared(functionParams[0], element::i32); + auto shapeOfOp = std::make_shared(functionParams[0], ov::element::i32); - auto makeFunction = [](ParameterVector ¶ms, const std::shared_ptr &lastNode) { - ResultVector results; + auto makeFunction = [](ov::ParameterVector ¶ms, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "ShapeOfLayerGPUTest"); + return std::make_shared(results, params, "ShapeOfLayerGPUTest"); }; function = makeFunction(functionParams, shapeOfOp); } }; -TEST_P(ShapeOfLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(ShapeOfLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector netPrecisions = { - ElementType::i32, +const std::vector model_types = { + ov::element::i32, }; // We don't check static case, because of 
constant folding @@ -110,10 +104,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_3d_compareWithRefs_dynamic, ShapeOfLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(inShapesDynamic3d), - ::testing::ValuesIn(netPrecisions)), + ::testing::ValuesIn(model_types)), ShapeOfLayerGPUTest::getTestCaseName); -std::vector inShapesStatic3d = { +std::vector inShapesStatic3d = { { 8, 5, 4 }, { 8, 5, 3 }, { 8, 5, 2 }, @@ -124,8 +118,8 @@ std::vector inShapesStatic3d = { INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_3d_compareWithRefs_static, ShapeOfLayerGPUTest, ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapesStatic3d)), - ::testing::ValuesIn(netPrecisions)), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapesStatic3d)), + ::testing::Values(ov::element::i32)), ShapeOfLayerGPUTest::getTestCaseName); // ============================================================================== @@ -152,10 +146,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_4d_compareWithRefs_dynamic, ShapeOfLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(inShapesDynamic4d), - ::testing::ValuesIn(netPrecisions)), + ::testing::ValuesIn(model_types)), ShapeOfLayerGPUTest::getTestCaseName); -std::vector inShapesStatic4d = { +std::vector inShapesStatic4d = { { 8, 5, 3, 4 }, { 8, 5, 3, 3 }, { 8, 5, 3, 2 }, @@ -166,8 +160,8 @@ std::vector inShapesStatic4d = { INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_4d_compareWithRefs_static, ShapeOfLayerGPUTest, ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapesStatic4d)), - ::testing::ValuesIn(netPrecisions)), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapesStatic4d)), + ::testing::ValuesIn(model_types)), ShapeOfLayerGPUTest::getTestCaseName); // ============================================================================== @@ -194,10 +188,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_5d_compareWithRefs_dynamic, ShapeOfLayerGPUTest, ::testing::Combine( 
::testing::ValuesIn(inShapesDynamic5d), - ::testing::ValuesIn(netPrecisions)), + ::testing::ValuesIn(model_types)), ShapeOfLayerGPUTest::getTestCaseName); -std::vector inShapesStatic5d = { +std::vector inShapesStatic5d = { { 8, 5, 3, 2, 4 }, { 8, 5, 3, 2, 3 }, { 8, 5, 3, 2, 2 }, @@ -208,37 +202,35 @@ std::vector inShapesStatic5d = { INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_5d_compareWithRefs_static, ShapeOfLayerGPUTest, ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapesStatic5d)), - ::testing::ValuesIn(netPrecisions)), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapesStatic5d)), + ::testing::ValuesIn(model_types)), ShapeOfLayerGPUTest::getTestCaseName); -} // namespace - using ShapeOfParams = typename std::tuple< - InputShape, // Shape - InferenceEngine::Precision, // Precision - LayerTestsUtils::TargetDevice // Device name + InputShape, // Shape + ov::element::Type, // Model type + std::string // Device name >; class ShapeOfDynamicInputGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { - InputShape inputShapes; - InferenceEngine::Precision dataPrc; + InputShape shapes; + ov::element::Type model_type; std::string targetDevice; - std::tie(inputShapes, dataPrc, targetDevice) = obj.param; + std::tie(shapes, model_type, targetDevice) = obj.param; std::ostringstream result; result << "IS=("; - result << ov::test::utils::partialShape2str({inputShapes.first}) << "_"; - for (size_t i = 0lu; i < inputShapes.second.size(); i++) { + result << ov::test::utils::partialShape2str({shapes.first}) << "_"; + for (size_t i = 0lu; i < shapes.second.size(); i++) { result << "{"; - result << ov::test::utils::vec2str(inputShapes.second[i]) << "_"; + result << ov::test::utils::vec2str(shapes.second[i]) << "_"; result << "}_"; } result << ")_"; - result << "netPRC=" << 
dataPrc << "_"; + result << "netPRC=" << model_type << "_"; result << "targetDevice=" << targetDevice << "_"; auto res_str = result.str(); std::replace(res_str.begin(), res_str.end(), '-', '_'); @@ -247,56 +239,44 @@ public: protected: void SetUp() override { - InputShape inputShapes; - InferenceEngine::Precision dataPrc; + InputShape shapes; + ov::element::Type model_type; targetDevice = ov::test::utils::DEVICE_GPU; - std::tie(inputShapes, dataPrc, targetDevice) = GetParam(); + std::tie(shapes, model_type, targetDevice) = GetParam(); - init_input_shapes({inputShapes}); + init_input_shapes({shapes}); - InferenceEngine::PreProcessInfo pre_process_info; - pre_process_info.setVariant(InferenceEngine::MeanVariant::MEAN_VALUE); - - const auto prc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(dataPrc); - - auto input = std::make_shared(prc, inputShapes.first); - input->get_output_tensor(0).get_rt_info()["ie_legacy_preproc"] = pre_process_info; + auto input = std::make_shared(model_type, shapes.first); input->set_friendly_name("input_data"); - auto shape_of_01 = std::make_shared(input); + auto shape_of_01 = std::make_shared(input); shape_of_01->set_friendly_name("shape_of_01"); - auto shape_of_02 = std::make_shared(shape_of_01); + auto shape_of_02 = std::make_shared(shape_of_01); shape_of_02->set_friendly_name("shape_of_02"); - auto result = std::make_shared(shape_of_02); + auto result = std::make_shared(shape_of_02); result->set_friendly_name("outer_result"); - function = std::make_shared(ngraph::OutputVector{result}, ngraph::ParameterVector{input}); + function = std::make_shared(ov::OutputVector{result}, ov::ParameterVector{input}); function->set_friendly_name("shape_of_test"); } }; -TEST_P(ShapeOfDynamicInputGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(ShapeOfDynamicInputGPUTest, Inference) { run(); } -const std::vector dynamicInputShapes = { +const std::vector dynamicshapes = { ov::test::InputShape(ov::PartialShape({-1, -1, -1, -1, 
-1}), {{4, 1, 1, 64, 32}, {6, 1, 1, 8, 4}, {8, 1, 1, 24, 16}}), }; -const std::vector dynamicInputPrec = { - InferenceEngine::Precision::FP16, -}; - INSTANTIATE_TEST_SUITE_P(smoke_Check, ShapeOfDynamicInputGPUTest, testing::Combine( - testing::ValuesIn(dynamicInputShapes), // input shapes - testing::ValuesIn(dynamicInputPrec), // network precision + testing::ValuesIn(dynamicshapes), // input shapes + testing::Values(ov::element::f16), // network precision testing::Values(ov::test::utils::DEVICE_GPU)), // device type ShapeOfDynamicInputGPUTest::getTestCaseName); -} // namespace GPULayerTestsDefinitions +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/softmax.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/softmax.cpp index 5de070d5fab..abdd317138a 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/softmax.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/softmax.cpp @@ -2,33 +2,33 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "shared_test_classes/single_layer/shape_of.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/softmax.hpp" -namespace GPULayerTestsDefinitions { +namespace { -typedef std::tuple - softmaxGPUTestParamsSet; +typedef std::tuple< + ov::element::Type, // model type + ov::test::InputShape, // inputShape + int64_t> // axis +softmaxGPUTestParamsSet; class SoftMaxLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { - ElementType inType; + ov::element::Type model_type; 
ov::test::InputShape inShape; int64_t axis; - std::tie(inType, inShape, axis) = obj.param; + std::tie(model_type, inShape, axis) = obj.param; std::ostringstream result; - result << "netPRC=" << inType << "_"; + result << "netPRC=" << model_type << "_"; result << "IS=" << ov::test::utils::partialShape2str({inShape.first}) << "_"; result << "TS="; for (const auto& shape : inShape.second) { @@ -42,40 +42,39 @@ public: protected: void SetUp() override { targetDevice = ov::test::utils::DEVICE_GPU; - ElementType inType; + ov::element::Type model_type; ov::test::InputShape inShape; int64_t axis; - std::tie(inType, inShape, axis) = this->GetParam(); + std::tie(model_type, inShape, axis) = this->GetParam(); - if (inType == element::Type_t::f16) { + if (model_type == ov::element::f16) { abs_threshold = 0.005; } init_input_shapes({inShape}); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) - params.push_back(std::make_shared(inType, shape)); + params.push_back(std::make_shared(model_type, shape)); - const auto softMax = std::make_shared(params.at(0), axis); - auto makeFunction = [](ParameterVector ¶ms, const std::shared_ptr &lastNode) { - ResultVector results; + const auto softMax = std::make_shared(params.at(0), axis); + auto makeFunction = [](ov::ParameterVector ¶ms, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "ShapeOfLayerGPUTest"); + return std::make_shared(results, params, "ShapeOfLayerGPUTest"); }; function = makeFunction(params, softMax); } }; -TEST_P(SoftMaxLayerGPUTest, CompareWithRefs) { +TEST_P(SoftMaxLayerGPUTest, Inference) { run(); } -namespace { -const std::vector netPrecisions = { - ElementType::f32, ElementType::f16 +const std::vector netPrecisions = { + ov::element::f32, ov::element::f16 }; const 
std::vector axis2D = {0, 1}; @@ -137,6 +136,4 @@ INSTANTIATE_TEST_SUITE_P(softMaxGPUDynamicTest5D, testing::ValuesIn(inputShapes5D), testing::ValuesIn(axis5D)), SoftMaxLayerGPUTest::getTestCaseName); - } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_batch.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_batch.cpp index e66f099bf3e..5de49d120c0 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_batch.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_batch.cpp @@ -2,16 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/space_to_batch.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/test_constants.hpp" #include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/space_to_batch.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; struct SpaceToBatchParams { std::vector block; @@ -22,22 +23,19 @@ struct SpaceToBatchParams { typedef std::tuple< InputShape, // Input shapes SpaceToBatchParams, - ElementType, // Element type - ngraph::helpers::InputLayerType, // block/begin/end input type - std::map // Additional network configuration -> SpaceToBatchParamsLayerParamSet; + ov::element::Type, // Element type + ov::test::utils::InputLayerType> // block/begin/end input type +SpaceToBatchParamsLayerParamSet; class SpaceToBatchLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { 
public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { InputShape shapes; SpaceToBatchParams params; - ElementType elementType; - ngraph::helpers::InputLayerType restInputType; - TargetDevice targetDevice; - std::map additionalConfig; - std::tie(shapes, params, elementType, restInputType, additionalConfig) = obj.param; + ov::element::Type elementType; + ov::test::utils::InputLayerType restInputType; + std::tie(shapes, params, elementType, restInputType) = obj.param; std::ostringstream results; results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_"; @@ -49,17 +47,12 @@ public: results << "block=" << ov::test::utils::vec2str(params.block) << "_"; results << "begin=" << ov::test::utils::vec2str(params.begin) << "_"; results << "end=" << ov::test::utils::vec2str(params.end) << "_"; - results << "restInputType=" << restInputType << "_"; - results << "config=("; - for (const auto& configEntry : additionalConfig) { - results << configEntry.first << ", " << configEntry.second << ":"; - } - results << ")"; + results << "restInputType=" << restInputType; return results.str(); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { @@ -100,9 +93,8 @@ protected: void SetUp() override { InputShape shapes; SpaceToBatchParams ssParams; - ngraph::helpers::InputLayerType restInputType; - std::map additionalConfig; - std::tie(shapes, ssParams, inType, restInputType, additionalConfig) = this->GetParam(); + ov::test::utils::InputLayerType restInputType; + std::tie(shapes, ssParams, inType, restInputType) = this->GetParam(); block = ssParams.block; begin = ssParams.begin; @@ -112,7 +104,7 @@ protected: std::vector inputShapes; inputShapes.push_back(shapes); - if (restInputType == ngraph::helpers::InputLayerType::PARAMETER) { + if 
(restInputType == ov::test::utils::InputLayerType::PARAMETER) { inputShapes.push_back(InputShape({static_cast(block.size())}, std::vector(shapes.second.size(), {block.size()}))); inputShapes.push_back(InputShape({static_cast(begin.size())}, std::vector(shapes.second.size(), {begin.size()}))); inputShapes.push_back(InputShape({static_cast(end.size())}, std::vector(shapes.second.size(), {end.size()}))); @@ -122,10 +114,10 @@ protected: ov::ParameterVector params{std::make_shared(inType, inputDynamicShapes.front())}; std::shared_ptr blockInput, beginInput, endInput; - if (restInputType == ngraph::helpers::InputLayerType::PARAMETER) { - auto blockNode = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{block.size()}); - auto beginNode = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{begin.size()}); - auto endNode = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{end.size()}); + if (restInputType == ov::test::utils::InputLayerType::PARAMETER) { + auto blockNode = std::make_shared(ov::element::i64, ov::Shape{block.size()}); + auto beginNode = std::make_shared(ov::element::i64, ov::Shape{begin.size()}); + auto endNode = std::make_shared(ov::element::i64, ov::Shape{end.size()}); params.push_back(blockNode); params.push_back(beginNode); @@ -135,38 +127,32 @@ protected: beginInput = beginNode; endInput = endNode; } else { - blockInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{block.size()}, block); - beginInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{begin.size()}, begin); - endInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{end.size()}, end); + blockInput = std::make_shared(ov::element::i64, ov::Shape{block.size()}, block); + beginInput = std::make_shared(ov::element::i64, ov::Shape{begin.size()}, begin); + endInput = std::make_shared(ov::element::i64, ov::Shape{end.size()}, end); } - auto ss = std::make_shared(params[0], blockInput, beginInput, endInput); + auto ss = std::make_shared(params[0], 
blockInput, beginInput, endInput); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < ss->get_output_size(); i++) { - results.push_back(std::make_shared(ss->output(i))); + results.push_back(std::make_shared(ss->output(i))); } - function = std::make_shared(results, params, "SpaceToBatchFuncTest"); + function = std::make_shared(results, params, "SpaceToBatchFuncTest"); } }; -TEST_P(SpaceToBatchLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(SpaceToBatchLayerGPUTest, Inference) { run(); } -namespace { - -std::map emptyAdditionalConfig; - -const std::vector inputPrecisions = { - ElementType::f32 +const std::vector inputPrecisions = { + ov::element::f32 }; -const std::vector restInputTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER +const std::vector restInputTypes = { + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER }; const std::vector inputShapesDynamic3D = { @@ -183,8 +169,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Dynamic3D, SpaceToBatchLayerGPUTe ::testing::ValuesIn(inputShapesDynamic3D), ::testing::ValuesIn(paramsPlain3D), ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(restInputTypes), - ::testing::Values(emptyAdditionalConfig)), + ::testing::ValuesIn(restInputTypes)), SpaceToBatchLayerGPUTest::getTestCaseName); @@ -202,8 +187,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Dynamic4D, SpaceToBatchLayerGPUTe ::testing::ValuesIn(inputShapesDynamic4D), ::testing::ValuesIn(paramsPlain4D), ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(restInputTypes), - ::testing::Values(emptyAdditionalConfig)), + ::testing::ValuesIn(restInputTypes)), SpaceToBatchLayerGPUTest::getTestCaseName); const std::vector inputShapesDynamic5D = { @@ -220,9 +204,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Dynamic5D, SpaceToBatchLayerGPUTe ::testing::ValuesIn(inputShapesDynamic5D),
::testing::ValuesIn(paramsPlain5D), ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(restInputTypes), - ::testing::Values(emptyAdditionalConfig)), + ::testing::ValuesIn(restInputTypes)), SpaceToBatchLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_depth.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_depth.cpp index 2976a845e2a..db63149353e 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_depth.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_depth.cpp @@ -2,23 +2,23 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/space_to_depth.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ov::op::v0; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/space_to_depth.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; +using ov::op::v0::SpaceToDepth; typedef std::tuple< - InputShape, // Input shape - ElementType, // Input element type - SpaceToDepth::SpaceToDepthMode, // Mode - std::size_t // Block size + InputShape, // Input shape + ov::element::Type, // Input element type + SpaceToDepth::SpaceToDepthMode, // Mode + std::size_t // Block size > SpaceToDepthLayerGPUTestParams; class SpaceToDepthLayerGPUTest : public testing::WithParamInterface, @@ -26,10 +26,10 @@ class SpaceToDepthLayerGPUTest : public testing::WithParamInterface obj) { InputShape shapes; - ElementType inType; + ov::element::Type model_type; SpaceToDepth::SpaceToDepthMode mode; - std::size_t 
blockSize; - std::tie(shapes, inType, mode, blockSize) = obj.param; + std::size_t block_size; + std::tie(shapes, model_type, mode, block_size) = obj.param; std::ostringstream results; results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_"; @@ -37,7 +37,7 @@ public: for (const auto& item : shapes.second) { results << ov::test::utils::vec2str(item) << "_"; } - results << "Prc=" << inType << "_"; + results << "Prc=" << model_type << "_"; switch (mode) { case SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST: results << "BLOCKS_FIRST_"; @@ -48,7 +48,7 @@ public: default: throw std::runtime_error("Unsupported SpaceToDepthMode"); } - results << "BS=" << blockSize; + results << "BS=" << block_size; return results.str(); } @@ -57,17 +57,18 @@ protected: void SetUp() override { InputShape shapes; SpaceToDepth::SpaceToDepthMode mode; - std::size_t blockSize; - std::tie(shapes, inType, mode, blockSize) = this->GetParam(); + std::size_t block_size; + ov::element::Type model_type; + std::tie(shapes, model_type, mode, block_size) = this->GetParam(); targetDevice = ov::test::utils::DEVICE_GPU; init_input_shapes({shapes}); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) - params.push_back(std::make_shared(inType, shape)); + params.push_back(std::make_shared(model_type, shape)); - auto d2s = std::make_shared(params[0], mode, blockSize); + auto d2s = std::make_shared(params[0], mode, block_size); ov::ResultVector results; for (size_t i = 0; i < d2s->get_output_size(); i++) @@ -76,18 +77,14 @@ protected: } }; -TEST_P(SpaceToDepthLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(SpaceToDepthLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector inputElementType = { - ElementType::f32, - ElementType::f16, - ElementType::i8 +const std::vector model_types = { + ov::element::f32, + ov::element::f16, + ov::element::i8 }; const std::vector SpaceToDepthModes = { @@ -96,7 +93,6 @@ const std::vector 
SpaceToDepthModes = { }; // ======================== Static Shapes Tests ======================== -namespace static_shapes { const std::vector inputShapesBS2_4D = { {1, 16, 8, 8}, @@ -115,16 +111,16 @@ const std::vector inputShapesBS3_4D = { INSTANTIATE_TEST_SUITE_P(smoke_GPUSpaceToDepthStaticBS2_4D, SpaceToDepthLayerGPUTest, testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_4D)), - testing::ValuesIn(inputElementType), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS2_4D)), + testing::ValuesIn(model_types), testing::ValuesIn(SpaceToDepthModes), testing::Values(1, 4)), SpaceToDepthLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_GPUSpaceToDepthStaticBS3_4D, SpaceToDepthLayerGPUTest, testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_4D)), - testing::ValuesIn(inputElementType), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS3_4D)), + testing::ValuesIn(model_types), testing::ValuesIn(SpaceToDepthModes), testing::Values(1, 3)), SpaceToDepthLayerGPUTest::getTestCaseName); @@ -146,24 +142,22 @@ const std::vector inputShapesBS3_5D = { INSTANTIATE_TEST_SUITE_P(smoke_GPUSpaceToDepthStaticBS2_5D, SpaceToDepthLayerGPUTest, testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_5D)), - testing::ValuesIn(inputElementType), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS2_5D)), + testing::ValuesIn(model_types), testing::ValuesIn(SpaceToDepthModes), testing::Values(1, 4)), SpaceToDepthLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_GPUSpaceToDepthStaticBS3_5D, SpaceToDepthLayerGPUTest, testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_5D)), - testing::ValuesIn(inputElementType), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS3_5D)), + testing::ValuesIn(model_types), 
testing::ValuesIn(SpaceToDepthModes), testing::Values(1, 3)), SpaceToDepthLayerGPUTest::getTestCaseName); -} // namespace static_shapes //======================== Dynamic Shapes Tests ======================== -namespace dynamic_shapes { const std::vector inputShapes4D = { {{-1, -1, -1, -1}, {{2, 3, 12, 24}}}, @@ -176,7 +170,7 @@ const std::vector inputShapes5D = { INSTANTIATE_TEST_SUITE_P(smoke_GPUSpaceToDepthDynamic4D, SpaceToDepthLayerGPUTest, testing::Combine( testing::ValuesIn(inputShapes4D), - testing::ValuesIn(inputElementType), + testing::ValuesIn(model_types), testing::ValuesIn(SpaceToDepthModes), testing::Values(1, 2, 3)), SpaceToDepthLayerGPUTest::getTestCaseName); @@ -184,12 +178,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPUSpaceToDepthDynamic4D, SpaceToDepthLayerGPUTes INSTANTIATE_TEST_SUITE_P(smoke_GPUSpaceToDepthDynamic5D, SpaceToDepthLayerGPUTest, testing::Combine( testing::ValuesIn(inputShapes5D), - testing::ValuesIn(inputElementType), + testing::ValuesIn(model_types), testing::ValuesIn(SpaceToDepthModes), testing::Values(1, 2)), SpaceToDepthLayerGPUTest::getTestCaseName); -} // namespace dynamic_shapes - } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/split.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/split.cpp index 7c27e755a1f..57c1c9f948a 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/split.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/split.cpp @@ -2,51 +2,50 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/select.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace 
InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/variadic_split.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< size_t, // Num splits int64_t, // Axis - ElementType, // Net precision + ov::element::Type, // Model type InputShape, // Input shapes std::vector // Used outputs indices > splitDynamicGPUTestParams; class SplitLayerGPUDynamicTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::ostringstream result; - size_t numSplits; + size_t num_splits; int64_t axis; - ElementType netPrecision; - InputShape inputShape; - std::vector outIndices; - std::tie(numSplits, axis, netPrecision, inputShape, outIndices) = obj.param; + ov::element::Type model_type; + InputShape input_shape; + std::vector out_indices; + std::tie(num_splits, axis, model_type, input_shape, out_indices) = obj.param; result << "IS="; - result << ov::test::utils::partialShape2str({inputShape.first}) << "_"; + result << ov::test::utils::partialShape2str({input_shape.first}) << "_"; result << "TS="; - for (const auto& shape : inputShape.second) { + for (const auto& shape : input_shape.second) { result << ov::test::utils::vec2str(shape) << "_"; } - result << "numSplits=" << numSplits << "_"; + result << "num_splits=" << num_splits << "_"; result << "axis=" << axis << "_"; - if (!outIndices.empty()) { - result << "outIndices" << ov::test::utils::vec2str(outIndices) << "_"; + if (!out_indices.empty()) { + result << "out_indices" << ov::test::utils::vec2str(out_indices) << "_"; } - result << "netPRC=" << netPrecision << "_"; + result << "netPRC=" << model_type << "_"; return result.str(); } @@ -54,47 +53,46 @@ protected: void SetUp() override { targetDevice = 
ov::test::utils::DEVICE_GPU; int64_t axis; - size_t numSplits; - InputShape inputShape; - std::vector outIndices; - ElementType netPrecision; - std::tie(numSplits, axis, netPrecision, inputShape, outIndices) = this->GetParam(); - if (outIndices.empty()) { - for (size_t i = 0; i < numSplits; ++i) { - outIndices.push_back(i); + size_t num_splits; + InputShape input_shape; + std::vector out_indices; + ov::element::Type model_type; + std::tie(num_splits, axis, model_type, input_shape, out_indices) = this->GetParam(); + if (out_indices.empty()) { + for (size_t i = 0; i < num_splits; ++i) { + out_indices.push_back(i); } } - init_input_shapes({inputShape}); - ov::ParameterVector dyn_params{std::make_shared(netPrecision, inputDynamicShapes[0])}; + init_input_shapes({input_shape}); + ov::ParameterVector dyn_params{std::make_shared(model_type, inputDynamicShapes[0])}; auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{axis}); - auto split = std::make_shared(dyn_params[0], split_axis_op, numSplits); + auto split = std::make_shared(dyn_params[0], split_axis_op, num_splits); - ngraph::ResultVector results; - for (size_t i = 0; i < outIndices.size(); i++) { - results.push_back(std::make_shared(split->output(outIndices[i]))); + ov::ResultVector results; + for (size_t i = 0; i < out_indices.size(); i++) { + results.push_back(std::make_shared(split->output(out_indices[i]))); } - function = std::make_shared(results, dyn_params, "split"); + function = std::make_shared(results, dyn_params, "split"); } }; -TEST_P(SplitLayerGPUDynamicTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(SplitLayerGPUDynamicTest, Inference) { run(); } -const std::vector inputShapes4d = { +const std::vector input_shapes4d = { { {-1, -1, -1, -1}, {{1, 4, 5, 7}, {3, 8, 5, 9}, {5, 16, 1, 8}} } }; -const std::vector inputShapes5d = { +const std::vector input_shapes5d = { { {-1, -1, -1, -1, -1}, {{10, 20, 30, 40, 10}, {5, 18, 3, 10, 10}, {3, 10, 6, 2, 4}} } 
}; -const std::vector inputShapes6d = { +const std::vector input_shapes6d = { { {-1, -1, -1, -1, -1, -1}, {{10, 32, 3, 4, 12, 24}, {5, 2, 3, 1, 32, 12}, {3, 1, 6, 2, 4, 18}} } @@ -104,63 +102,63 @@ INSTANTIATE_TEST_SUITE_P(smoke_SplitsCheck4Dr, SplitLayerGPUDynamicTest, ::testing::Combine( ::testing::Values(2), // nSplits ::testing::Values(1), // axes - ::testing::Values(ElementType::f16), // netPrec - ::testing::ValuesIn(inputShapes4d), // inShapes - ::testing::Values(std::vector({}))), // outIndices + ::testing::Values(ov::element::f16), // netPrec + ::testing::ValuesIn(input_shapes4d), // inShapes + ::testing::Values(std::vector({}))), // out_indices SplitLayerGPUDynamicTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_SplitsCheck5D, SplitLayerGPUDynamicTest, ::testing::Combine( ::testing::Values(3), // nSplits ::testing::Values(2), // axes - ::testing::Values(ElementType::f32), // netPrec - ::testing::ValuesIn(inputShapes5d), // inShapes - ::testing::Values(std::vector({}))), // outIndices + ::testing::Values(ov::element::f32), // netPrec + ::testing::ValuesIn(input_shapes5d), // inShapes + ::testing::Values(std::vector({}))), // out_indices SplitLayerGPUDynamicTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_SplitsCheck6D, SplitLayerGPUDynamicTest, ::testing::Combine( ::testing::Values(4), // nSplits ::testing::Values(4), // axes - ::testing::Values(ElementType::i8), // netPrec - ::testing::ValuesIn(inputShapes6d), // inShapes - ::testing::Values(std::vector({}))), // outIndices + ::testing::Values(ov::element::i8), // netPrec + ::testing::ValuesIn(input_shapes6d), // inShapes + ::testing::Values(std::vector({}))), // out_indices SplitLayerGPUDynamicTest::getTestCaseName); typedef std::tuple< int64_t, // Axis std::vector, // SplitLength - ElementType, // Net precision + ov::element::Type, // Model type InputShape, // Input shapes - ngraph::helpers::InputLayerType // input type of splitLength + ov::test::utils::InputLayerType // input type of 
split_length > varSplitDynamicGPUTestParams; class VariadicSplitLayerGPUDynamicTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::ostringstream result; int64_t axis; - std::vector splitLength; - ElementType netPrecision; - InputShape inputShape; - ngraph::helpers::InputLayerType inputType; - std::tie(axis, splitLength, netPrecision, inputShape, inputType) = obj.param; + std::vector split_length; + ov::element::Type model_type; + InputShape input_shape; + ov::test::utils::InputLayerType inputType; + std::tie(axis, split_length, model_type, input_shape, inputType) = obj.param; result << "IS="; - result << ov::test::utils::partialShape2str({inputShape.first}) << "_"; + result << ov::test::utils::partialShape2str({input_shape.first}) << "_"; result << "TS="; - for (const auto& shape : inputShape.second) { + for (const auto& shape : input_shape.second) { result << ov::test::utils::vec2str(shape) << "_"; } - result << "SplitLen=" << ov::test::utils::vec2str(splitLength) << "_"; + result << "SplitLen=" << ov::test::utils::vec2str(split_length) << "_"; result << "axis=" << axis << "_"; - result << "netPRC=" << netPrecision << "_"; + result << "netPRC=" << model_type << "_"; result << "restInputType=" << inputType << "_"; return result.str(); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { @@ -169,8 +167,8 @@ public: if (i == 1) { tensor = ov::Tensor(ov::element::i64, targetInputStaticShapes[i]); auto *dataPtr = tensor.data::value_type>(); - for (size_t i = 0; i < splitLength_vec.size(); i++) { - dataPtr[i] = splitLength_vec[i]; + for (size_t i = 0; i < split_length_vec.size(); i++) { + dataPtr[i] = 
split_length_vec[i]; } } else { tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]); @@ -181,89 +179,88 @@ public: } protected: - std::vector splitLength_vec; + std::vector split_length_vec; size_t inferRequestNum = 0; - ElementType netPrecision; + ov::element::Type model_type; void SetUp() override { targetDevice = ov::test::utils::DEVICE_GPU; int64_t axis; - InputShape inputShape; - std::vector splitLength; - ngraph::helpers::InputLayerType inputType; - std::tie(axis, splitLength, netPrecision, inputShape, inputType) = this->GetParam(); + InputShape input_shape; + std::vector split_length; + ov::test::utils::InputLayerType inputType; + std::tie(axis, split_length, model_type, input_shape, inputType) = this->GetParam(); - splitLength_vec = splitLength; + split_length_vec = split_length; - std::vector inputShapes; - inputShapes.push_back(inputShape); - if (inputType == ngraph::helpers::InputLayerType::PARAMETER) { - inputShapes.push_back(InputShape({static_cast(splitLength.size())}, - std::vector(inputShape.second.size(), {splitLength.size()}))); + std::vector input_shapes; + input_shapes.push_back(input_shape); + if (inputType == ov::test::utils::InputLayerType::PARAMETER) { + input_shapes.push_back(InputShape({static_cast(split_length.size())}, + std::vector(input_shape.second.size(), {split_length.size()}))); } - init_input_shapes(inputShapes); + init_input_shapes(input_shapes); - ov::ParameterVector dyn_params{std::make_shared(netPrecision, inputDynamicShapes[0])}; + ov::ParameterVector dyn_params{std::make_shared(model_type, inputDynamicShapes[0])}; - auto splitAxisOp = std::make_shared(ngraph::element::i64, ngraph::Shape{}, std::vector{static_cast(axis)}); + auto splitAxisOp = std::make_shared(ov::element::i64, ov::Shape{}, std::vector{static_cast(axis)}); - std::shared_ptr splitLengthOp; - if (inputType == ngraph::helpers::InputLayerType::PARAMETER) { - auto splitLengthNode = 
std::make_shared(ngraph::element::Type_t::i64, ov::Shape{splitLength.size()}); - dyn_params.push_back(splitLengthNode); - splitLengthOp = splitLengthNode; + std::shared_ptr split_lengthOp; + if (inputType == ov::test::utils::InputLayerType::PARAMETER) { + auto split_lengthNode = std::make_shared(ov::element::i64, ov::Shape{split_length.size()}); + dyn_params.push_back(split_lengthNode); + split_lengthOp = split_lengthNode; } else { - splitLengthOp = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{splitLength.size()}, splitLength); + split_lengthOp = std::make_shared(ov::element::i64, ov::Shape{split_length.size()}, split_length); } - auto varSplit = std::make_shared(dyn_params[0], splitAxisOp, splitLengthOp); - ngraph::ResultVector results; - for (size_t i = 0; i < splitLength.size(); i++) { - results.push_back(std::make_shared(varSplit->output(i))); + auto varSplit = std::make_shared(dyn_params[0], splitAxisOp, split_lengthOp); + ov::ResultVector results; + for (size_t i = 0; i < split_length.size(); i++) { + results.push_back(std::make_shared(varSplit->output(i))); } - function = std::make_shared(results, dyn_params, "varSplit"); + function = std::make_shared(results, dyn_params, "varSplit"); } }; -TEST_P(VariadicSplitLayerGPUDynamicTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(VariadicSplitLayerGPUDynamicTest, Inference) { run(); } -const std::vector restInputTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER +const std::vector restInputTypes = { + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER }; INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplitsCheck4D, VariadicSplitLayerGPUDynamicTest, ::testing::Combine( ::testing::Values(1), // axes - ::testing::Values(std::vector{2, 1, -1}), // splitLength - ::testing::Values(ElementType::f16), // netPrec - ::testing::ValuesIn(inputShapes4d), // inShapes - ::testing::ValuesIn(restInputTypes)), // input 
type of splitLength + ::testing::Values(std::vector{2, 1, -1}), // split_length + ::testing::Values(ov::element::f16), // netPrec + ::testing::ValuesIn(input_shapes4d), // inShapes + ::testing::ValuesIn(restInputTypes)), // input type of split_length VariadicSplitLayerGPUDynamicTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplitsCheck5D, VariadicSplitLayerGPUDynamicTest, ::testing::Combine( ::testing::Values(2), // axes - ::testing::Values(std::vector{2, -1}), // splitLength - ::testing::Values(ElementType::f32), // netPrec - ::testing::ValuesIn(inputShapes5d), // inShapes - ::testing::ValuesIn(restInputTypes)), // input type of splitLength + ::testing::Values(std::vector{2, -1}), // split_length + ::testing::Values(ov::element::f32), // netPrec + ::testing::ValuesIn(input_shapes5d), // inShapes + ::testing::ValuesIn(restInputTypes)), // input type of split_length VariadicSplitLayerGPUDynamicTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplitsCheck6D, VariadicSplitLayerGPUDynamicTest, ::testing::Combine( ::testing::Values(5), // nSplits - ::testing::Values(std::vector{2, 3, 2, -1}), // splitLength - ::testing::Values(ElementType::i8), // netPrec - ::testing::ValuesIn(inputShapes6d), // inShapes - ::testing::ValuesIn(restInputTypes)), // input type of splitLength + ::testing::Values(std::vector{2, 3, 2, -1}), // split_length + ::testing::Values(ov::element::i8), // netPrec + ::testing::ValuesIn(input_shapes6d), // inShapes + ::testing::ValuesIn(restInputTypes)), // input type of split_length VariadicSplitLayerGPUDynamicTest::getTestCaseName); -const std::vector inputShapes4d_static = { +const std::vector input_shapes4d_static = { { {5, 16, 10, 8}, {{5, 16, 10, 8}, } } @@ -272,10 +269,10 @@ const std::vector inputShapes4d_static = { INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplitsCheck4D_static_input_dyn_output, VariadicSplitLayerGPUDynamicTest, ::testing::Combine( ::testing::Values(1), // axes - ::testing::Values(std::vector{2, 1, 
-1}), // splitLength - ::testing::Values(ElementType::f16), // netPrec - ::testing::ValuesIn(inputShapes4d_static), // inShapes - ::testing::ValuesIn(restInputTypes)), // input type of splitLength + ::testing::Values(std::vector{2, 1, -1}), // split_length + ::testing::Values(ov::element::f16), // netPrec + ::testing::ValuesIn(input_shapes4d_static), // inShapes + ::testing::ValuesIn(restInputTypes)), // input type of split_length VariadicSplitLayerGPUDynamicTest::getTestCaseName); -} // namespace GPULayerTestsDefinitions +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/strided_slice.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/strided_slice.cpp index c6cd7e65258..954db256c9a 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/strided_slice.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/strided_slice.cpp @@ -2,16 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/strided_slice.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/test_constants.hpp" #include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/strided_slice.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; struct StridedSliceParams { std::vector begin; @@ -27,22 +28,20 @@ struct StridedSliceParams { typedef std::tuple< InputShape, // Input shapes StridedSliceParams, - ElementType, // Element type - std::vector, // begin/end/stride input type - std::map // Additional network configuration + ov::element::Type, // Element type + std::vector // 
begin/end/stride input type > StridedSliceLayerParamSet; class StridedSliceLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { InputShape shapes; StridedSliceParams params; - ElementType elementType; - std::vector restInputType; - TargetDevice targetDevice; - std::map additionalConfig; - std::tie(shapes, params, elementType, restInputType, additionalConfig) = obj.param; + ov::element::Type model_type; + std::vector rest_input_type; + std::string targetDevice; + std::tie(shapes, params, model_type, rest_input_type) = obj.param; std::ostringstream results; results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_"; @@ -50,7 +49,7 @@ public: for (const auto& item : shapes.second) { results << ov::test::utils::vec2str(item) << "_"; } - results << "netPRC=" << elementType << "_"; + results << "netPRC=" << model_type << "_"; results << "begin=" << ov::test::utils::vec2str(params.begin) << "_"; results << "end=" << ov::test::utils::vec2str(params.end) << "_"; results << "stride=" << ov::test::utils::vec2str(params.stride) << "_"; @@ -59,19 +58,14 @@ public: results << "new_axis_m=" << (params.newAxisMask.empty() ? "def" : ov::test::utils::vec2str(params.newAxisMask)) << "_"; results << "shrink_m=" << (params.shrinkAxisMask.empty() ? "def" : ov::test::utils::vec2str(params.shrinkAxisMask)) << "_"; results << "ellipsis_m=" << (params.ellipsisAxisMask.empty() ? 
"def" : ov::test::utils::vec2str(params.ellipsisAxisMask)) << "_"; - results << "beginType=" << restInputType[0] << "_"; - results << "endType=" << restInputType[1] << "_"; - results << "strideType=" << restInputType[2] << "_"; - results << "config=("; - for (const auto& configEntry : additionalConfig) { - results << configEntry.first << ", " << configEntry.second << ":"; - } - results << ")"; + results << "beginType=" << rest_input_type[0] << "_"; + results << "endType=" << rest_input_type[1] << "_"; + results << "strideType=" << rest_input_type[2]; return results.str(); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); ov::Tensor tensor; @@ -82,7 +76,7 @@ public: inputs.insert({funcInputs[idx].get_node_shared_ptr(), tensor}); // input1: begin - if (restInputType[0] == ngraph::helpers::InputLayerType::PARAMETER) { + if (rest_input_type[0] == ov::test::utils::InputLayerType::PARAMETER) { idx += 1; tensor = ov::Tensor(funcInputs[idx].get_element_type(), targetInputStaticShapes[idx]); auto *dataPtr = tensor.data(); @@ -93,7 +87,7 @@ public: } // input2: end - if (restInputType[1] == ngraph::helpers::InputLayerType::PARAMETER) { + if (rest_input_type[1] == ov::test::utils::InputLayerType::PARAMETER) { idx += 1; tensor = ov::Tensor(funcInputs[idx].get_element_type(), targetInputStaticShapes[idx]); auto *dataPtr = tensor.data(); @@ -104,7 +98,7 @@ public: } // input3: stride - if (restInputType[2] == ngraph::helpers::InputLayerType::PARAMETER) { + if (rest_input_type[2] == ov::test::utils::InputLayerType::PARAMETER) { idx += 1; tensor = ov::Tensor(funcInputs[idx].get_element_type(), targetInputStaticShapes[idx]); auto *dataPtr = tensor.data(); @@ -121,14 +115,13 @@ protected: std::vector begin; std::vector end; std::vector stride; - std::vector restInputType; + std::vector rest_input_type; size_t 
inferRequestNum = 0; void SetUp() override { InputShape shapes; StridedSliceParams ssParams; - std::map additionalConfig; - std::tie(shapes, ssParams, inType, restInputType, additionalConfig) = this->GetParam(); + std::tie(shapes, ssParams, inType, rest_input_type) = this->GetParam(); begin = ssParams.begin; end = ssParams.end; @@ -138,11 +131,11 @@ protected: std::vector inputShapes; inputShapes.push_back(shapes); - if (restInputType[0] == ngraph::helpers::InputLayerType::PARAMETER) + if (rest_input_type[0] == ov::test::utils::InputLayerType::PARAMETER) inputShapes.push_back(InputShape({static_cast(begin.size())}, std::vector(shapes.second.size(), {begin.size()}))); - if (restInputType[1] == ngraph::helpers::InputLayerType::PARAMETER) + if (rest_input_type[1] == ov::test::utils::InputLayerType::PARAMETER) inputShapes.push_back(InputShape({static_cast(end.size())}, std::vector(shapes.second.size(), {end.size()}))); - if (restInputType[2] == ngraph::helpers::InputLayerType::PARAMETER) + if (rest_input_type[2] == ov::test::utils::InputLayerType::PARAMETER) inputShapes.push_back(InputShape({static_cast(stride.size())}, std::vector(shapes.second.size(), {stride.size()}))); init_input_shapes(inputShapes); @@ -150,65 +143,59 @@ protected: ov::ParameterVector params{std::make_shared(inType, inputDynamicShapes.front())}; std::shared_ptr beginInput, endInput, strideInput; - if (restInputType[0] == ngraph::helpers::InputLayerType::PARAMETER) { - auto beginNode = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{begin.size()}); + if (rest_input_type[0] == ov::test::utils::InputLayerType::PARAMETER) { + auto beginNode = std::make_shared(ov::element::i64, ov::Shape{begin.size()}); params.push_back(beginNode); beginInput = beginNode; } else { - beginInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{begin.size()}, begin); + beginInput = std::make_shared(ov::element::i64, ov::Shape{begin.size()}, begin); } - if (restInputType[1] == 
ngraph::helpers::InputLayerType::PARAMETER) { - auto endNode = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{end.size()}); + if (rest_input_type[1] == ov::test::utils::InputLayerType::PARAMETER) { + auto endNode = std::make_shared(ov::element::i64, ov::Shape{end.size()}); params.push_back(endNode); endInput = endNode; } else { - endInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{end.size()}, end); + endInput = std::make_shared(ov::element::i64, ov::Shape{end.size()}, end); } - if (restInputType[2] == ngraph::helpers::InputLayerType::PARAMETER) { - auto strideNode = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{stride.size()}); + if (rest_input_type[2] == ov::test::utils::InputLayerType::PARAMETER) { + auto strideNode = std::make_shared(ov::element::i64, ov::Shape{stride.size()}); params.push_back(strideNode); strideInput = strideNode; } else { - strideInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{stride.size()}, stride); + strideInput = std::make_shared(ov::element::i64, ov::Shape{stride.size()}, stride); } - auto ss = std::make_shared(params[0], beginInput, endInput, strideInput, ssParams.beginMask, ssParams.endMask, + auto ss = std::make_shared(params[0], beginInput, endInput, strideInput, ssParams.beginMask, ssParams.endMask, ssParams.newAxisMask, ssParams.shrinkAxisMask, ssParams.ellipsisAxisMask); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < ss->get_output_size(); i++) { - results.push_back(std::make_shared(ss->output(i))); + results.push_back(std::make_shared(ss->output(i))); } - function = std::make_shared(results, params, "StridedSlice"); + function = std::make_shared(results, params, "StridedSlice"); } }; -TEST_P(StridedSliceLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(StridedSliceLayerGPUTest, Inference) { run(); } -namespace { - -std::map emptyAdditionalConfig; - -const std::vector inputPrecisions = { - ElementType::f32 
+const std::vector model_types = { + ov::element::f32 }; -const std::vector> restInputTypes = { - {ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT}, - {ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER}, - {ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT}, - {ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT}, - {ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER}, - {ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER}, - {ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER}, - {ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT}, +const std::vector> rest_input_types = { + {ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT}, + {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER}, + {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT}, + {ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT}, + {ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER}, + {ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER, 
ov::test::utils::InputLayerType::PARAMETER}, + {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER}, + {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT}, }; const std::vector inputShapesDynamic2D = { @@ -227,20 +214,18 @@ const std::vector paramsPlain2D = { INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Static_2D, StridedSliceLayerGPUTest, ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation({{32, 20}})), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation({{32, 20}})), ::testing::ValuesIn(paramsPlain2D), - ::testing::ValuesIn(inputPrecisions), - ::testing::Values(restInputTypes[0]), - ::testing::Values(emptyAdditionalConfig)), + ::testing::ValuesIn(model_types), + ::testing::Values(rest_input_types[0])), StridedSliceLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Dynamic_2D, StridedSliceLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(inputShapesDynamic2D), ::testing::ValuesIn(paramsPlain2D), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(restInputTypes), - ::testing::Values(emptyAdditionalConfig)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(rest_input_types)), StridedSliceLayerGPUTest::getTestCaseName); const std::vector testCasesCommon4D = { @@ -266,9 +251,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D, StridedSliceLa ::testing::Combine( ::testing::ValuesIn(inputShapesDynamic4D), ::testing::ValuesIn(testCasesCommon4D), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(restInputTypes), - ::testing::Values(emptyAdditionalConfig)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(rest_input_types)), StridedSliceLayerGPUTest::getTestCaseName); @@ -295,9 +279,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D, StridedSliceLa 
::testing::Combine( ::testing::ValuesIn(inputShapesDynamic5D), ::testing::ValuesIn(testCasesCommon5D), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(restInputTypes), - ::testing::Values(emptyAdditionalConfig)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(rest_input_types)), StridedSliceLayerGPUTest::getTestCaseName); @@ -318,10 +301,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_6D, StridedSliceLa ::testing::Combine( ::testing::ValuesIn(inputShapesDynamic6D), ::testing::ValuesIn(testCasesCommon6D), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(restInputTypes), - ::testing::Values(emptyAdditionalConfig)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(rest_input_types)), StridedSliceLayerGPUTest::getTestCaseName); - } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp index 2c1268d76db..9cf7d528eda 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp @@ -2,26 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/single_layer/tile.hpp" -#include "common_test_utils/test_constants.hpp" #include "common_test_utils/ov_tensor_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; -using namespace ov::test; - -namespace GPULayerTestsDefinitions { +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/tile.hpp" +namespace { using TileLayerTestParamsSet = typename std::tuple< std::vector, // Input shapes 
std::vector, // Repeats - ov::element::Type_t, // Network precision + ov::element::Type, // Model type bool, // Is Repeats input constant std::string>; // Device name @@ -32,27 +25,27 @@ public: static std::string getTestCaseName(testing::TestParamInfo obj) { TileLayerTestParamsSet basicParamsSet = obj.param; - std::vector inputShapes; + std::vector input_shapes; std::vector repeats; - ov::element::Type_t netPrecision; - bool isRepeatsConst; + ov::element::Type_t model_type; + bool is_repeats_const; std::string deviceName; - std::tie(inputShapes, repeats, netPrecision, isRepeatsConst, deviceName) = basicParamsSet; + std::tie(input_shapes, repeats, model_type, is_repeats_const, deviceName) = basicParamsSet; std::ostringstream result; result << "IS=("; - for (const auto& shape : inputShapes) { + for (const auto& shape : input_shapes) { result << ov::test::utils::partialShape2str({shape.first}) << "_"; } result << ")_TS=("; - for (const auto& shape : inputShapes) { + for (const auto& shape : input_shapes) { for (const auto& item : shape.second) { result << ov::test::utils::vec2str(item) << "_"; } } result << "Repeats=" << ov::test::utils::vec2str(repeats) << "_"; - result << "netPrec=" << netPrecision << "_"; - result << "constRepeats=" << (isRepeatsConst ? "True" : "False") << "_"; + result << "netPrec=" << model_type << "_"; + result << "constRepeats=" << (is_repeats_const ? 
"True" : "False") << "_"; result << "trgDev=" << deviceName; return result.str(); @@ -62,31 +55,31 @@ protected: void SetUp() override { TileLayerTestParamsSet basicParamsSet = this->GetParam(); - std::vector inputShapes; - ov::element::Type_t netPrecision; - bool isRepeatsConst; - std::tie(inputShapes, repeatsData, netPrecision, isRepeatsConst, targetDevice) = basicParamsSet; + std::vector input_shapes; + ov::element::Type_t model_type; + bool is_repeats_const; + std::tie(input_shapes, repeatsData, model_type, is_repeats_const, targetDevice) = basicParamsSet; - if (inputShapes.front().first.rank() != 0) { - inputDynamicShapes.push_back(inputShapes.front().first); - if (!isRepeatsConst) { + if (input_shapes.front().first.rank() != 0) { + inputDynamicShapes.push_back(input_shapes.front().first); + if (!is_repeats_const) { inputDynamicShapes.push_back({ static_cast(repeatsData.size()) }); } } - const size_t targetStaticShapeSize = inputShapes.front().second.size(); + const size_t targetStaticShapeSize = input_shapes.front().second.size(); targetStaticShapes.resize(targetStaticShapeSize); for (size_t i = 0lu; i < targetStaticShapeSize; ++i) { - targetStaticShapes[i].push_back(inputShapes.front().second[i]); - if (!isRepeatsConst) + targetStaticShapes[i].push_back(input_shapes.front().second[i]); + if (!is_repeats_const) targetStaticShapes[i].push_back({ repeatsData.size() }); } ov::ParameterVector functionParams; if (inputDynamicShapes.empty()) { - functionParams.push_back(std::make_shared(netPrecision, targetStaticShapes.front().front())); + functionParams.push_back(std::make_shared(model_type, targetStaticShapes.front().front())); } else { - functionParams.push_back(std::make_shared(netPrecision, inputDynamicShapes.front())); - if (!isRepeatsConst) { + functionParams.push_back(std::make_shared(model_type, inputDynamicShapes.front())); + if (!is_repeats_const) { functionParams.push_back(std::make_shared(ov::element::i64, inputDynamicShapes[1])); 
functionParams.back()->set_friendly_name("repeats"); } @@ -94,22 +87,22 @@ protected: functionParams.front()->set_friendly_name("data"); std::shared_ptr tileNode; - if (isRepeatsConst) { + if (is_repeats_const) { tileNode = std::make_shared(functionParams[0], ov::op::v0::Constant::create(ov::element::i64, { repeatsData.size() }, repeatsData)); } else { tileNode = std::make_shared(functionParams[0], functionParams[1]); } - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < tileNode->get_output_size(); i++) { - results.push_back(std::make_shared(tileNode->output(i))); + results.push_back(std::make_shared(tileNode->output(i))); } - function = std::make_shared(results, functionParams, "Tile"); + function = std::make_shared(results, functionParams, "Tile"); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0lu; i < funcInputs.size(); i++) { @@ -136,18 +129,16 @@ protected: std::vector repeatsData; }; -TEST_P(TileLayerGPUTest, CompareWithRefs) { +TEST_P(TileLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector netPrecisions = { +const std::vector model_types = { ov::element::f32, ov::element::f16, }; -const std::vector> dynamicInputShapes4D = { +const std::vector> dynamic_input_shapes4D = { { { // Origin dynamic shapes {ov::Dimension(1, 20), ov::Dimension(10, 20), ov::Dimension(1, 20), ov::Dimension(1, 20)}, @@ -169,7 +160,7 @@ const std::vector> dynamicInputShapes4D = { } }; -const std::vector> dynamicInputShapes5D = { +const std::vector> dynamic_input_shapes5D = { { { // Origin dynamic shapes {ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 70)}, @@ -212,22 +203,20 @@ const std::vector> repeats5D = { INSTANTIATE_TEST_CASE_P(DynamicShape4D, TileLayerGPUTest, ::testing::Combine( - 
::testing::ValuesIn(dynamicInputShapes4D), + ::testing::ValuesIn(dynamic_input_shapes4D), ::testing::ValuesIn(repeats4D), - ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(model_types), ::testing::Values(true, false), ::testing::Values(ov::test::utils::DEVICE_GPU)), TileLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_CASE_P(DynamicShape5D, TileLayerGPUTest, ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes5D), + ::testing::ValuesIn(dynamic_input_shapes5D), ::testing::ValuesIn(repeats5D), - ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(model_types), ::testing::Values(true, false), ::testing::Values(ov::test::utils::DEVICE_GPU)), TileLayerGPUTest::getTestCaseName); } // namespace - -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp index 7ff69b3db00..bc4bcb4bd72 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp @@ -2,64 +2,62 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/single_layer/topk.hpp" -#include "common_test_utils/test_constants.hpp" +#include + #include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/topk.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< - int64_t, // keepK - int64_t, // axis - ngraph::opset4::TopK::Mode, 
// mode - ngraph::opset4::TopK::SortType, // sort - ElementType, // Net precision - ElementType, // Input precision - ElementType, // Output precision - InputShape, // inputShape - TargetDevice, // Device name - ngraph::helpers::InputLayerType // Input type + int64_t, // keepK + int64_t, // axis + ov::op::v1::TopK::Mode, // mode + ov::op::v1::TopK::SortType, // sort + ov::element::Type, // Model type + ov::element::Type, // Input precision + ov::element::Type, // Output precision + InputShape, // input_shape + std::string, // Device name + ov::test::utils::InputLayerType // Input type > TopKLayerTestParamsSet; class TopKLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { TopKLayerTestParamsSet basicParamsSet = obj.param; int64_t keepK, axis; - ngraph::opset4::TopK::Mode mode; - ngraph::opset4::TopK::SortType sort; - ElementType netPrecision, inPrc, outPrc; - InputShape inputShape; - TargetDevice targetDevice; - ngraph::helpers::InputLayerType inputType; - std::tie(keepK, axis, mode, sort, netPrecision, inPrc, outPrc, inputShape, targetDevice, inputType) = basicParamsSet; + ov::op::v1::TopK::Mode mode; + ov::op::v1::TopK::SortType sort; + ov::element::Type model_type, inPrc, outPrc; + InputShape input_shape; + std::string targetDevice; + ov::test::utils::InputLayerType input_type; + std::tie(keepK, axis, mode, sort, model_type, inPrc, outPrc, input_shape, targetDevice, input_type) = basicParamsSet; std::ostringstream result; result << "k=" << keepK << "_"; result << "axis=" << axis << "_"; result << "mode=" << mode << "_"; result << "sort=" << sort << "_"; - result << "netPRC=" << netPrecision << "_"; + result << "netPRC=" << model_type << "_"; result << "inPRC=" << inPrc << "_"; result << "outPRC=" << outPrc << "_"; - result << "IS=" << ov::test::utils::partialShape2str({inputShape.first}) << "_" << "TS=("; 
- for (const auto& shape : inputShape.second) { + result << "IS=" << ov::test::utils::partialShape2str({input_shape.first}) << "_" << "TS=("; + for (const auto& shape : input_shape.second) { result << ov::test::utils::vec2str(shape) << "_"; } result << ")_"; - result << "inputType=" << inputType; + result << "input_type=" << input_type; result << "TargetDevice=" << targetDevice; return result.str(); @@ -70,43 +68,43 @@ protected: TopKLayerTestParamsSet basicParamsSet = this->GetParam(); int64_t keepK; - ngraph::opset4::TopK::Mode mode; - ngraph::opset4::TopK::SortType sort; - ElementType inPrc, outPrc; - InputShape inputShape; - std::tie(keepK, axis, mode, sort, netPrecision, inPrc, outPrc, inputShape, targetDevice, inputType) = basicParamsSet; + ov::op::v1::TopK::Mode mode; + ov::op::v1::TopK::SortType sort; + ov::element::Type inPrc, outPrc; + InputShape input_shape; + std::tie(keepK, axis, mode, sort, model_type, inPrc, outPrc, input_shape, targetDevice, input_type) = basicParamsSet; - if (inputType == ngraph::helpers::InputLayerType::CONSTANT) { - init_input_shapes({inputShape}); + if (input_type == ov::test::utils::InputLayerType::CONSTANT) { + init_input_shapes({input_shape}); } else { - inputDynamicShapes = {inputShape.first, {}}; - for (size_t i = 0; i < inputShape.second.size(); ++i) { - targetStaticShapes.push_back({inputShape.second[i], {}}); + inputDynamicShapes = {input_shape.first, {}}; + for (size_t i = 0; i < input_shape.second.size(); ++i) { + targetStaticShapes.push_back({input_shape.second[i], {}}); } } - ov::ParameterVector params{std::make_shared(netPrecision, inputDynamicShapes[0])}; + ov::ParameterVector params{std::make_shared(model_type, inputDynamicShapes[0])}; - std::shared_ptr topk; - if (inputType == ngraph::helpers::InputLayerType::CONSTANT) { - auto k = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, &keepK); - topk = std::dynamic_pointer_cast(std::make_shared(params[0], k, axis, mode, sort)); + std::shared_ptr topk; + 
if (input_type == ov::test::utils::InputLayerType::CONSTANT) { + auto k = std::make_shared(ov::element::i64, ov::Shape{}, &keepK); + topk = std::dynamic_pointer_cast(std::make_shared(params[0], k, axis, mode, sort)); } else { - auto k = std::make_shared(ngraph::element::Type_t::i64, inputDynamicShapes[1]); + auto k = std::make_shared(ov::element::i64, inputDynamicShapes[1]); params.push_back(k); - topk = std::dynamic_pointer_cast( - std::make_shared(params[0], k, axis, mode, sort)); + topk = std::dynamic_pointer_cast( + std::make_shared(params[0], k, axis, mode, sort)); } - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < topk->get_output_size(); i++) { - results.push_back(std::make_shared(topk->output(i))); + results.push_back(std::make_shared(topk->output(i))); } - function = std::make_shared(results, params, "TopK"); + function = std::make_shared(results, params, "TopK"); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); auto shape = targetInputStaticShapes.front(); @@ -114,7 +112,7 @@ protected: tensor = ov::test::utils::create_and_fill_tensor(funcInputs[0].get_element_type(), shape); size_t size = tensor.get_size(); - if (netPrecision == ElementType::f32) { + if (model_type == ov::element::f32) { std::vector data(size); int start = - static_cast(size / 2); @@ -127,11 +125,11 @@ protected: rawBlobDataPtr[i] = static_cast(data[i]); } } else { - FAIL() << "generate_inputs for " << netPrecision << " precision isn't supported"; + FAIL() << "generate_inputs for " << model_type << " precision isn't supported"; } inputs.insert({funcInputs[0].get_node_shared_ptr(), tensor}); - if (inputType == ngraph::helpers::InputLayerType::PARAMETER) { + if (input_type == ov::test::utils::InputLayerType::PARAMETER) { const auto& kPrecision = funcInputs[1].get_element_type(); const 
auto& kShape = targetInputStaticShapes[1]; @@ -147,36 +145,32 @@ protected: private: int64_t axis; size_t inferRequestNum = 0; - ElementType netPrecision; - ngraph::helpers::InputLayerType inputType; + ov::element::Type model_type; + ov::test::utils::InputLayerType input_type; }; -TEST_P(TopKLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(TopKLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector netPrecisions = { - ElementType::f32, +const std::vector model_types = { + ov::element::f32, }; const std::vector axes = {0, 3}; const std::vector k = {3, 5, 7}; -const std::vector modes = { - ngraph::opset4::TopK::Mode::MIN, - ngraph::opset4::TopK::Mode::MAX +const std::vector modes = { + ov::op::v1::TopK::Mode::MIN, + ov::op::v1::TopK::Mode::MAX }; -const std::vector sortTypes = { - ngraph::opset4::TopK::SortType::SORT_VALUES, - ngraph::opset4::TopK::SortType::SORT_INDICES, +const std::vector sortTypes = { + ov::op::v1::TopK::SortType::SORT_VALUES, + ov::op::v1::TopK::SortType::SORT_INDICES, }; -std::vector inputShapesDynamic = { +std::vector input_shapesDynamic = { { {ov::PartialShape::dynamic(4), {{7, 7, 7, 7}, {7, 8, 7, 9}}}, {{-1, -1, -1, -1}, {{8, 9, 10, 11}, {11, 7, 8, 9}}} @@ -189,12 +183,12 @@ INSTANTIATE_TEST_CASE_P(smoke_TopK_constant_dynamic, TopKLayerGPUTest, ::testing::ValuesIn(axes), ::testing::ValuesIn(modes), ::testing::ValuesIn(sortTypes), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesDynamic), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::ValuesIn(input_shapesDynamic), ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT)), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT)), TopKLayerGPUTest::getTestCaseName); 
INSTANTIATE_TEST_CASE_P(smoke_TopK_parameter_dynamic, TopKLayerGPUTest, @@ -203,13 +197,13 @@ INSTANTIATE_TEST_CASE_P(smoke_TopK_parameter_dynamic, TopKLayerGPUTest, ::testing::ValuesIn(axes), ::testing::ValuesIn(modes), ::testing::ValuesIn(sortTypes), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesDynamic), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::ValuesIn(input_shapesDynamic), ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER)), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER)), TopKLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions + diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/unique.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/unique.cpp index 9e60d64fb7d..1362cc7a488 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/unique.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/unique.cpp @@ -3,82 +3,83 @@ // #include "common_test_utils/ov_tensor_utils.hpp" -#include "ov_models/builders.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/unique.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple, // Input shapes std::tuple, // Is flattened and axis bool, // Sorted - ElementType // Data precision - > + ov::element::Type> // Model type UniqueDynamicGPUTestParams; class UniqueLayerDynamicGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest 
{ + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { - std::vector inputShapes; - std::tuple flatOrAxis; + std::vector input_shapes; + std::tuple flat_or_axis; bool sorted; - ElementType dataPrecision; - std::tie(inputShapes, flatOrAxis, sorted, dataPrecision) = obj.param; + ov::element::Type model_type; + std::tie(input_shapes, flat_or_axis, sorted, model_type) = obj.param; std::ostringstream result; result << "IS=("; - for (size_t i = 0lu; i < inputShapes.size(); i++) { - result << ov::test::utils::partialShape2str({inputShapes[i].first}) - << (i < inputShapes.size() - 1lu ? "_" : ""); + for (size_t i = 0lu; i < input_shapes.size(); i++) { + result << ov::test::utils::partialShape2str({input_shapes[i].first}) + << (i < input_shapes.size() - 1lu ? "_" : ""); } result << ")_TS="; - for (size_t i = 0lu; i < inputShapes.front().second.size(); i++) { + for (size_t i = 0lu; i < input_shapes.front().second.size(); i++) { result << "{"; - for (size_t j = 0lu; j < inputShapes.size(); j++) { - result << ov::test::utils::vec2str(inputShapes[j].second[i]) - << (j < inputShapes.size() - 1lu ? "_" : ""); + for (size_t j = 0lu; j < input_shapes.size(); j++) { + result << ov::test::utils::vec2str(input_shapes[j].second[i]) + << (j < input_shapes.size() - 1lu ? "_" : ""); } result << "}_"; } - if (!std::get<0>(flatOrAxis)) { - result << "axis=" << std::get<1>(flatOrAxis) << "_"; + if (!std::get<0>(flat_or_axis)) { + result << "axis=" << std::get<1>(flat_or_axis) << "_"; } else { result << "flattened" << "_"; } result << "sorted=" << (sorted ? 
"True" : "False") << "_"; - result << "dataPrc=" << dataPrecision; + result << "dataPrc=" << model_type; return result.str(); } protected: void SetUp() override { - std::vector inputShapes; - std::tuple flatOrAxis; + std::vector input_shapes; + std::tuple flat_or_axis; bool sorted, flattened; int axis; - ElementType dataPrecision; + ov::element::Type model_type; - std::tie(inputShapes, flatOrAxis, sorted, dataPrecision) = this->GetParam(); + std::tie(input_shapes, flat_or_axis, sorted, model_type) = this->GetParam(); targetDevice = ov::test::utils::DEVICE_GPU; - init_input_shapes(inputShapes); - flattened = std::get<0>(flatOrAxis); + init_input_shapes(input_shapes); + flattened = std::get<0>(flat_or_axis); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) { - params.push_back(std::make_shared(dataPrecision, shape)); + params.push_back(std::make_shared(model_type, shape)); } params[0]->set_friendly_name("data"); std::shared_ptr uniqueNode; if (flattened) { uniqueNode = std::make_shared(params[0], sorted); } else { - axis = std::get<1>(flatOrAxis); + axis = std::get<1>(flat_or_axis); uniqueNode = std::make_shared( params[0], ov::op::v0::Constant::create(ov::element::i64, ov::Shape({1}), {axis}), @@ -86,12 +87,12 @@ protected: } // Need to create results for all outputs - ngraph::ResultVector results; + ov::ResultVector results; for (auto i = 0U; i < uniqueNode->get_output_size(); ++i) { - results.push_back(std::make_shared(uniqueNode->output(i))); + results.push_back(std::make_shared(uniqueNode->output(i))); } - function = std::make_shared(results, params, "Unique"); + function = std::make_shared(results, params, "Unique"); } void generate_inputs(const std::vector& targetInputStaticShapes) override { @@ -107,7 +108,7 @@ protected: targetInputStaticShapes[0].end(), 1, std::multiplies()); - tensor = utils::create_and_fill_tensor(funcInput.get_element_type(), + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), 
targetInputStaticShapes[0], range, -range / 2, @@ -118,19 +119,16 @@ protected: } }; -TEST_P(UniqueLayerDynamicGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(UniqueLayerDynamicGPUTest, Inference) { run(); } -namespace { - -const std::vector dataPrecision = { - ElementType::f16, - ElementType::i32, +const std::vector model_types = { + ov::element::f16, + ov::element::i32, }; -std::vector> flatOrAxis{{true, 0}, {false, 0}, {false, 1}, {false, -1}}; +std::vector> flat_or_axis{{true, 0}, {false, 0}, {false, 1}, {false, -1}}; std::vector sorted{true, false}; @@ -145,9 +143,9 @@ std::vector> getStaticShapes() { INSTANTIATE_TEST_SUITE_P(smoke_static, UniqueLayerDynamicGPUTest, ::testing::Combine(::testing::ValuesIn(getStaticShapes()), - ::testing::ValuesIn(flatOrAxis), + ::testing::ValuesIn(flat_or_axis), ::testing::ValuesIn(sorted), - ::testing::ValuesIn(dataPrecision)), + ::testing::ValuesIn(model_types)), UniqueLayerDynamicGPUTest::getTestCaseName); std::vector> getDynamicShapes() { @@ -162,10 +160,9 @@ std::vector> getDynamicShapes() { INSTANTIATE_TEST_SUITE_P(smoke_dynamic, UniqueLayerDynamicGPUTest, ::testing::Combine(::testing::ValuesIn(getDynamicShapes()), - ::testing::ValuesIn(flatOrAxis), + ::testing::ValuesIn(flat_or_axis), ::testing::ValuesIn(sorted), - ::testing::ValuesIn(dataPrecision)), + ::testing::ValuesIn(model_types)), UniqueLayerDynamicGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions