diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/interpolate.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/interpolate.cpp index bcf1bb5cc1a..1105165476f 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/interpolate.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/interpolate.cpp @@ -2,41 +2,40 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "common_test_utils/ov_tensor_utils.hpp" +#include "openvino/core/preprocess/pre_post_process.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "test_utils/fusing_test_utils.hpp" -#include -#include "openvino/core/preprocess/pre_post_process.hpp" -#include +#include "transformations/op_conversions/convert_interpolate11_downgrade.hpp" -using namespace ov::test; using namespace CPUTestUtils; -using ngraph::helpers::operator<<; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using InterpolateSpecificParams = std::tuple, // PadBegin - std::vector, // PadEnd - double>; // Cube coef +using InterpolateSpecificParams = + std::tuple, // PadBegin + std::vector, // PadEnd + double>; // Cube coef -using ShapeParams = std::tuple>, // scales or sizes values - std::vector>; // axes + ov::test::utils::InputLayerType, // input type + std::vector>, // scales or sizes values + std::vector>; // axes using InterpolateLayerCPUTestParamsSet = std::tuple>; + ov::AnyMap>; class InterpolateLayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CpuTestWithFusing { @@ -47,7 +46,7 @@ public: ElementType prec; CPUSpecificParams cpuParams; fusingSpecificParams fusingParams; - std::map additionalConfig; + ov::AnyMap additionalConfig; std::tie(specificParams, shapeParams, prec, cpuParams, fusingParams, additionalConfig) = obj.param; ov::op::v11::Interpolate::InterpolateMode mode; @@ -61,11 +60,12 @@ public: ov::op::v11::Interpolate::ShapeCalcMode shapeCalcMode; InputShape inputShapes; - ngraph::helpers::InputLayerType shapeInputType; + ov::test::utils::InputLayerType shapeInputType; std::vector> shapeDataForInput; std::vector axes; std::tie(shapeCalcMode, inputShapes, shapeInputType, shapeDataForInput, axes) = shapeParams; + using ov::test::utils::operator<<; std::ostringstream result; result << "ShapeCalcMode=" << shapeCalcMode << "_"; result << "IS="; @@ -99,7 +99,7 @@ public: if (!additionalConfig.empty()) { result << "_PluginConf"; for (auto& item : additionalConfig) { - result << "_" << item.first << "=" << item.second; + result << "_" << item.first << "=" << item.second.as(); } } @@ -166,7 +166,7 @@ protected: ElementType ngPrc; CPUSpecificParams cpuParams; fusingSpecificParams fusingParams; - std::map additionalConfig; + ov::AnyMap additionalConfig; std::tie(specificParams, shapeParams, ngPrc, cpuParams, fusingParams, additionalConfig) = this->GetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; @@ -183,7 +183,7 @@ protected: std::tie(mode, transfMode, nearMode, antiAlias, padBegin, padEnd, cubeCoef) = specificParams; InputShape dataShape; - ngraph::helpers::InputLayerType shapeInputType; + ov::test::utils::InputLayerType shapeInputType; std::vector> shapeDataForInput; std::vector axes; std::tie(shapeCalcMode, dataShape, shapeInputType, shapeDataForInput, axes) = shapeParams; @@ -201,11 +201,12 @@ protected: std::vector inputShapes; inputShapes.push_back(dataShape); - if (shapeInputType == 
ngraph::helpers::InputLayerType::PARAMETER) { + if (shapeInputType == ov::test::utils::InputLayerType::PARAMETER) { inputShapes.push_back(InputShape({static_cast(axes.size())}, std::vector(dataShape.second.size(), {axes.size()}))); } - if (additionalConfig[InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16] == InferenceEngine::PluginConfigParams::YES) { + auto it = additionalConfig.find(ov::hint::inference_precision.name()); + if (it != additionalConfig.end() && it->second.as() == ov::element::bf16) { inType = outType = ngPrc = ElementType::bf16; rel_threshold = 1e-2f; } else { @@ -217,7 +218,7 @@ protected: ov::ParameterVector params{std::make_shared(ngPrc, inputDynamicShapes.front())}; std::shared_ptr sizesInput, scalesInput; if (shapeCalcMode == ov::op::v11::Interpolate::ShapeCalcMode::SCALES) { - if (shapeInputType == ngraph::helpers::InputLayerType::PARAMETER) { + if (shapeInputType == ov::test::utils::InputLayerType::PARAMETER) { auto paramNode = std::make_shared(ElementType::f32, ov::Shape{scales.front().size()}); params.push_back(paramNode); scalesInput = paramNode; @@ -225,7 +226,7 @@ protected: scalesInput = std::make_shared(ElementType::f32, ov::Shape{scales.front().size()}, scales.front()); } } else { - if (shapeInputType == ngraph::helpers::InputLayerType::PARAMETER) { + if (shapeInputType == ov::test::utils::InputLayerType::PARAMETER) { auto paramNode = std::make_shared(ElementType::i32, ov::Shape{sizes.front().size()}); params.push_back(paramNode); sizesInput = paramNode; @@ -315,24 +316,21 @@ const std::vector interpolateFusingParamsSet{ #endif }; -std::vector> filterAdditionalConfig() { - if (InferenceEngine::with_cpu_x86_avx512f()) { - return { - {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}} - }; +std::vector filterAdditionalConfig() { + if (ov::with_cpu_x86_avx512f()) { + return {{{ov::hint::inference_precision(ov::element::f32)}}, + {{ov::hint::inference_precision(ov::element::bf16)}}}; } else { - return { - // default config as an stub for target without avx512, otherwise all tests with BF16 in its name are skipped - {} - }; + return {// default config as an stub for target without avx512, otherwise all tests with BF16 in its name are + // skipped + {}}; } } // 3D std::vector filterCPUInfoForDevice3D() { std::vector resCPUParams; - if (InferenceEngine::with_cpu_x86_avx2()) { + if (ov::with_cpu_x86_avx2()) { resCPUParams.push_back(CPUSpecificParams{{ncw, x, x, x}, {ncw}, {"jit_avx2"}, "jit_avx2"}); } else { resCPUParams.push_back(CPUSpecificParams{{ncw, x, x, x}, {ncw}, {"ref"}, "ref"}); @@ -340,7 +338,7 @@ std::vector filterCPUInfoForDevice3D() { return resCPUParams; } -std::vector> filterAdditionalConfig3D() { +std::vector filterAdditionalConfig3D() { return { {} }; @@ -362,14 +360,14 @@ const std::vector shapeParams3D = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1}, {{1, 3, 4}, {2, 4, 6}, {1, 3, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 1.25f}, {1.f, 1.f, 1.25f}, {1.f, 1.f, 1.5f}}, defaultAxes3D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1}, {{1, 3, 4}, {2, 4, 6}, {1, 3, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1, 3, 6}, {2, 4, 8}, {1, 3, 6}}, defaultAxes3D.front() } @@ -414,7 +412,7 @@ 
INSTANTIATE_TEST_SUITE_P(InterpolateNN_Layout_Test_3D, InterpolateLayerCPUTest, #if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) const std::vector interpolateFusingParamsSet3D_fixed_C() { std::vector fuseParams; - if (InferenceEngine::with_cpu_x86_avx2()) { + if (ov::with_cpu_x86_avx2()) { fuseParams.push_back(fusingFakeQuantizePerChannelRelu); fuseParams.push_back(fusingMultiplyPerChannel); } @@ -426,14 +424,14 @@ const std::vector shapeParams3D_fixed_C = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{}, {{1, 3, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f}}, defaultAxes3D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, 3, -1}, {{1, 3, 4}, {1, 3, 6}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 3, 8}}, defaultAxes3D.front() } @@ -539,14 +537,14 @@ INSTANTIATE_TEST_SUITE_P(InterpolateCubic_Layout3D_Test, InterpolateLayerCPUTest // 4D std::vector filterCPUInfoForDevice() { std::vector resCPUParams; - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { resCPUParams.push_back(CPUSpecificParams{{nChw16c, x, x, x}, {nChw16c}, {"jit_avx512"}, "jit_avx512"}); resCPUParams.push_back(CPUSpecificParams{{nhwc, x, x, x}, {nhwc}, {"jit_avx512"}, "jit_avx512"}); - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { resCPUParams.push_back(CPUSpecificParams{{nChw8c, x, x, x}, {nChw8c}, {"jit_avx2"}, "jit_avx2"}); resCPUParams.push_back(CPUSpecificParams{{nhwc, x, x, x}, {nhwc}, {"jit_avx2"}, "jit_avx2"}); resCPUParams.push_back(CPUSpecificParams{{nchw, x, x, x}, {nchw}, {"jit_avx2"}, "jit_avx2"}); - } else if (InferenceEngine::with_cpu_x86_sse42()) { + } else if (ov::with_cpu_x86_sse42()) { resCPUParams.push_back(CPUSpecificParams{{nChw8c, x, x, x}, {nChw8c}, {"jit_sse42"}, "jit_sse42"}); resCPUParams.push_back(CPUSpecificParams{{nhwc, x, x, x}, {nhwc}, {"jit_sse42"}, "jit_sse42"}); } else { @@ -568,28 +566,28 @@ const std::vector shapeParams4D_Smoke = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{}, {{1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f}}, defaultAxes4D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{}, {{1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6}}, defaultAxes4D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 1.25f, 1.5f}, {1.f, 1.f, 1.25f, 1.25f}, {1.f, 1.f, 1.25f, 1.5f}}, defaultAxes4D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1, 11, 6, 7}, {2, 7, 8, 7}, {1, 11, 6, 7}}, defaultAxes4D.front() } @@ -599,14 +597,14 @@ const std::vector shapeParams4D_Full = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 
1.f, 1.25f, 1.5f}}, defaultAxes4D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {1, 11, 5, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6}}, defaultAxes4D.front() } @@ -660,14 +658,14 @@ const std::vector shapeParams4D_fixed_C = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{}, {{1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f}}, defaultAxes4D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, 16, -1, -1}, {{1, 16, 4, 4}, {1, 16, 6, 5}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 16, 6, 7}}, defaultAxes4D.front() } @@ -811,14 +809,14 @@ INSTANTIATE_TEST_SUITE_P(InterpolateCubic_Layout_Test, InterpolateLayerCPUTest, ////////////////////////5D///////////////////////////// std::vector filterCPUInfoForDevice5D() { std::vector resCPUParams; - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { resCPUParams.push_back(CPUSpecificParams{{nCdhw16c, x, x, x}, {nCdhw16c}, {"jit_avx512"}, "jit_avx512"}); resCPUParams.push_back(CPUSpecificParams{{ndhwc, x, x, x}, {ndhwc}, {"jit_avx512"}, "jit_avx512"}); - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { resCPUParams.push_back(CPUSpecificParams{{nCdhw8c, x, x, x}, {nCdhw8c}, {"jit_avx2"}, "jit_avx2"}); resCPUParams.push_back(CPUSpecificParams{{ndhwc, x, x, x}, {ndhwc}, {"jit_avx2"}, "jit_avx2"}); resCPUParams.push_back(CPUSpecificParams{{ncdhw, x, x, x}, {ncdhw}, {"jit_avx2"}, "jit_avx2"}); - } else if (InferenceEngine::with_cpu_x86_sse42()) { + } else if (ov::with_cpu_x86_sse42()) { resCPUParams.push_back(CPUSpecificParams{{nCdhw8c, x, x, x}, {nCdhw8c}, {"jit_sse42"}, "jit_sse42"}); resCPUParams.push_back(CPUSpecificParams{{ndhwc, x, x, x}, {ndhwc}, {"jit_sse42"}, "jit_sse42"}); } else { @@ -839,28 +837,28 @@ const std::vector shapeParams5D_Smoke = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{}, {{1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f, 0.5f}}, defaultAxes5D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{}, {{1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6, 2}}, defaultAxes5D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 1.25f, 1.5f, 0.5f}, {1.f, 1.f, 1.25f, 1.25f, 1.25f}, {1.f, 1.f, 1.25f, 1.5f, 0.5f}}, defaultAxes5D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1, 11, 6, 7, 2}, {2, 7, 8, 7, 4}, {1, 11, 6, 7, 2}}, defaultAxes5D.front() }, @@ -870,14 +868,14 @@ const std::vector shapeParams5D_Full = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}}, - 
ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f, 0.5f}}, defaultAxes5D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {1, 11, 5, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6, 4}}, defaultAxes5D.front() } @@ -963,14 +961,14 @@ const std::vector shapeParams4D_corner = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{1, 11, 4, 4}, {{1, 11, 4, 4}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 1.25f, 1.5f}, {1.f, 1.f, 1.25f, 1.25f}}, defaultAxes4D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{1, 11, 4, 4}, {{1, 11, 4, 4}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1, 11, 6, 7}, {1, 11, 8, 7}}, defaultAxes4D.front() } @@ -1016,56 +1014,56 @@ const std::vector shapeParams4D_Pillow_Smoke = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{}, {{1, 3, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{2.0f, 4.0f}}, defaultAxes4D_pillow.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{}, {{2, 4, 16, 16}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{0.25f, 0.5f}}, defaultAxes4D_pillow.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{}, {{1, 3, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{5, 6}}, defaultAxes4D_pillow.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{}, {{2, 4, 16, 16}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{2, 8}}, defaultAxes4D_pillow.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1.25f, 1.5f}, {0.5f, 0.75f}, {1.25f, 1.5f}}, defaultAxes4D_pillow.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.25f, 0.75f}}, defaultAxes4D_pillow.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 17, 4, 4}, {2, 3, 10, 12}, {1, 17, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{6, 8}, {5, 4}, {6, 8}}, defaultAxes4D_pillow.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 17, 4, 4}, {2, 3, 10, 12}, {1, 17, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{6, 8}}, defaultAxes4D_pillow.front() }, @@ -1073,7 +1071,7 @@ const std::vector shapeParams4D_Pillow_Smoke = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 17, 4, 4}, {2, 3, 10, 12}, {1, 17, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{4, 4}, {10, 20}, {10, 4}}, 
defaultAxes4D_pillow.front() } @@ -1081,20 +1079,18 @@ const std::vector shapeParams4D_Pillow_Smoke = { std::vector filterCPUInfoForDevice_pillow() { std::vector resCPUParams; - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { resCPUParams.push_back(CPUSpecificParams{{nhwc, x, x}, {nhwc}, {"jit_avx512"}, "jit_avx512"}); - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { resCPUParams.push_back(CPUSpecificParams{{nhwc, x, x}, {nhwc}, {"jit_avx2"}, "jit_avx2"}); - } else if (InferenceEngine::with_cpu_x86_sse42()) { + } else if (ov::with_cpu_x86_sse42()) { resCPUParams.push_back(CPUSpecificParams{{nhwc, x, x}, {nhwc}, {"jit_sse42"}, "jit_sse42"}); } resCPUParams.push_back(CPUSpecificParams{{nchw, x, x}, {nchw}, {"ref"}, "ref"}); return resCPUParams; } -std::vector> filterPillowAdditionalConfig() { - return { - {{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::NO}} - }; +std::vector filterPillowAdditionalConfig() { + return {{}}; } const auto interpolateCasesBilinearPillow_Smoke = ::testing::Combine( @@ -1144,28 +1140,28 @@ const std::vector shapeParams4D_Pillow_Smoke_nchw_as_nhwc = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{}, {{1, 4, 4, 3}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{2.0f, 4.0f}}, defaultAxes4D_pillow_nchw_as_nhwc.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{}, {{2, 16, 16, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{2, 8}}, defaultAxes4D_pillow_nchw_as_nhwc.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, -1, -1, {2, 20}}, {{1, 4, 4, 11}, {2, 6, 5, 7}, {1, 4, 4, 11}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.25f, 0.75f}}, defaultAxes4D_pillow_nchw_as_nhwc.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, -1, -1, {2, 20}}, {{1, 4, 4, 17}, {2, 10, 12, 3}, {1, 4, 4, 17}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{6, 8}}, defaultAxes4D_pillow_nchw_as_nhwc.front() } @@ -1177,11 +1173,11 @@ const std::vector> pads4D_nchw_as_nhwc = { std::vector filterCPUInfoForDevice_pillow_nchw_as_nhwc() { std::vector resCPUParams; - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { resCPUParams.push_back(CPUSpecificParams{{nchw, x, x}, {nchw}, {"jit_avx512"}, "jit_avx512"}); - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { resCPUParams.push_back(CPUSpecificParams{{nchw, x, x}, {nchw}, {"jit_avx2"}, "jit_avx2"}); - } else if (InferenceEngine::with_cpu_x86_sse42()) { + } else if (ov::with_cpu_x86_sse42()) { resCPUParams.push_back(CPUSpecificParams{{nchw, x, x}, {nchw}, {"jit_sse42"}, "jit_sse42"}); } return resCPUParams; @@ -1225,5 +1221,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateBicubicPillow_LayoutAlign_Test, Interp ::testing::ValuesIn(filterPillowAdditionalConfig())), InterpolateLayerCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/log_softmax.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/log_softmax.cpp index 4ba51e71dec..8a5fe083cb1 100644 --- 
a/src/plugins/intel_cpu/tests/functional/single_layer_tests/log_softmax.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/log_softmax.cpp
@@ -3,20 +3,16 @@
 //
 #include "test_utils/cpu_test_utils.hpp"
-#include "ov_models/builders.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-using namespace ngraph;
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ov::test;
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
-using logSoftmaxLayerTestParams = std::tuple<
-    std::vector,  // inputShape
-    Precision,    // netPrecision
-    int64_t>;     // axis
+using logSoftmaxLayerTestParams = std::tuple,  // inputShape
+                                             ov::element::Type,  // netPrecision
+                                             int64_t>;  // axis
 class LogSoftmaxLayerCPUTest
         : public testing::WithParamInterface,
@@ -25,7 +21,7 @@ class LogSoftmaxLayerCPUTest
 public:
     static std::string getTestCaseName(testing::TestParamInfo obj) {
         std::vector inputShapes;
-        Precision netPrecision;
+        ov::element::Type netPrecision;
         int64_t axis;
         std::tie(inputShapes, netPrecision, axis) = obj.param;
@@ -44,7 +40,7 @@ public:
                 result << ov::test::utils::vec2str(item) << "_";
             }
         }
-        result << "netPRC=" << netPrecision.name();
+        result << "netPRC=" << netPrecision.to_string();
         result << "Axis=" << axis;
         return result.str();
     }
@@ -54,20 +50,20 @@ protected:
         targetDevice = ov::test::utils::DEVICE_CPU;
         std::vector inputShapes;
-        Precision netPrecision;
+        ov::element::Type netPrecision;
         int64_t axis;
         std::tie(inputShapes, netPrecision, axis) = this->GetParam();
-        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+        auto ngPrc = netPrecision;
         inType = outType = ngPrc;
-        selectedType = std::string("unknown_") + netPrecision.name();
+        selectedType = std::string("unknown_") + netPrecision.to_string();
         init_input_shapes(inputShapes);
         ov::ParameterVector params{std::make_shared(ngPrc, inputDynamicShapes.front())};
-        const auto logSoftmax = std::make_shared(params[0], axis);
-        const ngraph::ResultVector results{std::make_shared(logSoftmax)};
-        function = std::make_shared(results, params, "logSoftmax");
+        const auto logSoftmax = std::make_shared(params[0], axis);
+        const ov::ResultVector results{std::make_shared(logSoftmax)};
+        function = std::make_shared(results, params, "logSoftmax");
     }
 };
@@ -77,8 +73,8 @@ TEST_P(LogSoftmaxLayerCPUTest, CompareWithRefs) {
 }
 namespace {
-const std::vector netPrecisions = {
-    Precision::FP32
+const std::vector netPrecisions = {
+    ov::element::f32
 };
 const std::vector> inputShapes2D = {
@@ -120,4 +116,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_LogSoftmax4D_dynamic, LogSoftmaxLayerCPUTest, par
                          LogSoftmaxLayerCPUTest::getTestCaseName);
 } // namespace
-} // namespace CPULayerTestsDefinitions
+} // namespace test
+} // namespace ov
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/loop.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/loop.cpp
index 00d88720669..88492cbe5f9 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/loop.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/loop.cpp
@@ -2,17 +2,15 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
+#include "shared_test_classes/single_layer/loop.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
 #include "ov_models/builders.hpp"
-#include
+#include "common_test_utils/ov_tensor_utils.hpp"
-using namespace InferenceEngine;
-using namespace ov;
-using namespace test;
-using namespace ngraph::helpers;
+using namespace ov::test::utils;
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 enum LOOP_IN_TYPE {
     INVARIANT,
@@ -101,39 +99,39 @@ protected:
         }
         // Set up the cell body, a function from (Xi, Yi) -> (Zo)
         // Body parameters
-        const std::vector body_params_shapes(shapes.size(), ngraph::PartialShape::dynamic());
-        ngraph::ParameterVector body_params;
+        const std::vector body_params_shapes(shapes.size(), ov::PartialShape::dynamic());
+        ov::ParameterVector body_params;
         for (const auto &pshape : body_params_shapes) {
-            body_params.emplace_back(std::make_shared(netType, pshape));
+            body_params.emplace_back(std::make_shared(netType, pshape));
         }
-        auto body_condition_const = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true);
-        auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, exec_cond);
-        std::shared_ptr trip_count_input;
+        auto body_condition_const = std::make_shared(ov::element::boolean, ov::Shape{1}, true);
+        auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, exec_cond);
+        std::shared_ptr trip_count_input;
         int shift = 0;
         if (trip_count_type == InputLayerType::PARAMETER) {
             for (auto& target : targetStaticShapes)
-                target.insert(target.begin(), ngraph::Shape{});
-            trip_count_input = std::make_shared(ngraph::element::i64, ngraph::Shape{1});
+                target.insert(target.begin(), ov::Shape{});
+            trip_count_input = std::make_shared(ov::element::i64, ov::Shape{1});
             trip_count_input->set_friendly_name("trip_count");
-            params.insert(params.begin(), ov::as_type_ptr(trip_count_input));
+            params.insert(params.begin(), ov::as_type_ptr(trip_count_input));
             shift++;
         } else {
-            trip_count_input = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, trip_count);
+            trip_count_input = std::make_shared(ov::element::i64, ov::Shape{1}, trip_count);
         }
         // Body
-        std::shared_ptr Zo = body_params[0];
+        std::shared_ptr Zo = body_params[0];
         for (size_t i = 1; i < body_params.size(); ++i) {
-            Zo = std::make_shared(body_params[i], Zo);
+            Zo = std::make_shared(body_params[i], Zo);
         }
-        auto body = std::make_shared(ngraph::OutputVector{body_condition_const, Zo},
+        auto body = std::make_shared(ov::OutputVector{body_condition_const, Zo},
                                                body_params);
-        auto loop = std::make_shared(trip_count_input, exec_condition);
+        auto loop = std::make_shared(trip_count_input, exec_condition);
         loop->set_function(body);
-        loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0});
+        loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0});
         for (size_t i = 0; i < body_params.size(); ++i) {
             if (types[i] == LOOP_IN_TYPE::INVARIANT) {
@@ -152,10 +150,10 @@
         // start=0, stride=1, part_size=1, end=-1, axis=1
         auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1);
-        auto result0 = std::make_shared(out0);
-        auto result1 = std::make_shared(out1);
-        auto result2 = std::make_shared(out2);
-        function = std::make_shared(ngraph::ResultVector{result0, result1, result2}, params, "loop");
+        auto result0 = std::make_shared(out0);
+        auto result1 = std::make_shared(out1);
+        auto result2 = std::make_shared(out2);
+        function = std::make_shared(ov::ResultVector{result0, result1, result2}, params, "loop");
     }
 };
@@ -177,45 +175,45 @@ protected:
         targetDevice = ov::test::utils::DEVICE_CPU;
         init_input_shapes(shapes);
         for (auto& target : targetStaticShapes)
-            target.insert(target.begin(), ngraph::Shape{});
+            target.insert(target.begin(), ov::Shape{});
         ov::ParameterVector params;
         for (auto&& shape : inputDynamicShapes) {
             params.push_back(std::make_shared(inType, shape));
         }
         // Body parameters
-        const std::vector body_params_shapes(shapes.size(), ngraph::PartialShape::dynamic());
-        ngraph::ParameterVector body_params = { std::make_shared(ngraph::element::i64, ngraph::Shape{}) };
+        const std::vector body_params_shapes(shapes.size(), ov::PartialShape::dynamic());
+        ov::ParameterVector body_params = { std::make_shared(ov::element::i64, ov::Shape{}) };
         for (const auto &pshape : body_params_shapes) {
-            body_params.emplace_back(std::make_shared(inType, pshape));
+            body_params.emplace_back(std::make_shared(inType, pshape));
         }
-        auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{}, exec_cond);
-        auto trip_count_input = std::make_shared(ngraph::element::i64, ngraph::Shape{});
+        auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{}, exec_cond);
+        auto trip_count_input = std::make_shared(ov::element::i64, ov::Shape{});
         trip_count_input->set_friendly_name("trip_count");
         params.insert(params.begin(), trip_count_input);
         // Body
-        auto const_body_cond = std::make_shared(ngraph::element::i64, ngraph::Shape{}, 10);
-        auto const_body_step = std::make_shared(ngraph::element::i64, ngraph::Shape{}, 2);
-        auto less = std::make_shared(body_params[0], const_body_cond);
-        auto exec_idx = std::make_shared(body_params[0], const_body_step);
+        auto const_body_cond = std::make_shared(ov::element::i64, ov::Shape{}, 10);
+        auto const_body_step = std::make_shared(ov::element::i64, ov::Shape{}, 2);
+        auto less = std::make_shared(body_params[0], const_body_cond);
+        auto exec_idx = std::make_shared(body_params[0], const_body_step);
-        auto node_const = std::make_shared(inType, ngraph::Shape{}, 2);
-        auto node = std::make_shared(body_params[1], node_const);
+        auto node_const = std::make_shared(inType, ov::Shape{}, 2);
+        auto node = std::make_shared(body_params[1], node_const);
-        // reference ngraph function is resized by input static shapes in tests but
+        // reference model is resized by input static shapes in tests but
         // loop with pad in body has different input shape in each infer request so tests don't support it.
         // Alternative - eltwise instead of pad
         // const std::vector begin(inputDynamicShapes[0].rank().get_length(), 1);
         // const std::vector end(inputDynamicShapes[0].rank().get_length(), 0);
         // auto node = ngraph::builder::makePad(body_params[1], begin, end, .0f, PadMode::CONSTANT);
-        auto body = std::make_shared(ngraph::OutputVector{less, exec_idx, node}, body_params);
+        auto body = std::make_shared(ov::OutputVector{less, exec_idx, node}, body_params);
-        auto loop = std::make_shared(params[0], exec_condition);
+        auto loop = std::make_shared(params[0], exec_condition);
         loop->set_function(body);
-        loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0});
+        loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0});
         loop->set_merged_input(body_params[0], params[0], exec_idx);
         loop->set_merged_input(body_params[1], params[1], node);
@@ -223,9 +221,9 @@
         auto out0 = loop->get_iter_value(exec_idx, -1);
         auto out1 = loop->get_iter_value(node, -1);
-        auto result0 = std::make_shared(out0);
-        auto result1 = std::make_shared(out1);
-        function = std::make_shared(ngraph::ResultVector{ result0, result1 }, params, "loop");
+        auto result0 = std::make_shared(out0);
+        auto result1 = std::make_shared(out1);
+        function = std::make_shared(ov::ResultVector{ result0, result1 }, params, "loop");
     }
 };
@@ -256,25 +254,25 @@ protected:
         }
         // Set up the cell body, a function from (Xi, Yi) -> (Zo)
         // Body parameters
-        const std::vector body_params_shapes(shapes.size(), ngraph::PartialShape::dynamic());
-        ngraph::ParameterVector body_params;
+        const std::vector body_params_shapes(shapes.size(), ov::PartialShape::dynamic());
+        ov::ParameterVector body_params;
         for (const auto &pshape : body_params_shapes) {
-            body_params.emplace_back(std::make_shared(inType, pshape));
+            body_params.emplace_back(std::make_shared(inType, pshape));
         }
-        auto body_condition_const = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true);
-        auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, exec_cond);
-        std::shared_ptr trip_count_input;
+        auto body_condition_const = std::make_shared(ov::element::boolean, ov::Shape{1}, true);
+        auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, exec_cond);
+        std::shared_ptr trip_count_input;
         int shift = 0;
         if (trip_count_type == InputLayerType::PARAMETER) {
             for (auto& target : targetStaticShapes)
-                target.insert(target.begin(), ngraph::Shape{});
-            trip_count_input = std::make_shared(ngraph::element::i64, ngraph::Shape{1});
+                target.insert(target.begin(), ov::Shape{});
+            trip_count_input = std::make_shared(ov::element::i64, ov::Shape{1});
             trip_count_input->set_friendly_name("trip_count");
-            params.insert(params.begin(), ov::as_type_ptr(trip_count_input));
+            params.insert(params.begin(), ov::as_type_ptr(trip_count_input));
             shift++;
         } else {
-            trip_count_input = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, trip_count);
+            trip_count_input = std::make_shared(ov::element::i64, ov::Shape{1}, trip_count);
         }
         // Body
@@ -288,11 +286,11 @@ protected:
         auto constant = ngraph::builder::makeConstant(inType, std::vector{1}, std::vector{0.5});
         auto eltwise = std::make_shared(body_params[0], constant);
-        auto body = std::make_shared(ngraph::OutputVector{body_condition_const, s, eltwise}, body_params);
+        auto body = std::make_shared(ov::OutputVector{body_condition_const, s, eltwise}, body_params);
-        auto loop = std::make_shared(trip_count_input, exec_condition);
+        auto loop = std::make_shared(trip_count_input, 
exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); loop->set_merged_input(body_params[0], params[shift], eltwise); @@ -303,10 +301,10 @@ protected: // start=0, stride=1, part_size=1, end=-1, axis=1 auto out2 = loop->get_concatenated_slices(s, 0, 1, 1, -1, 1); - auto result0 = std::make_shared(out0); - auto result1 = std::make_shared(out1); - auto result2 = std::make_shared(out2); - function = std::make_shared(ngraph::ResultVector{result0, result1, result2}, params, "loop"); + auto result0 = std::make_shared(out0); + auto result1 = std::make_shared(out1); + auto result2 = std::make_shared(out2); + function = std::make_shared(ov::ResultVector{result0, result1, result2}, params, "loop"); } }; @@ -333,36 +331,36 @@ protected: params.push_back(std::make_shared(inType, shape)); } // Body parameters - const std::vector body_params_shapes(shapes.size(), ngraph::PartialShape::dynamic()); + const std::vector body_params_shapes(shapes.size(), ov::PartialShape::dynamic()); ov::ParameterVector body_params; for (auto&& shape : inputDynamicShapes) { body_params.push_back(std::make_shared(inType, shape)); } - auto body_condition_const = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, exec_cond); - std::shared_ptr trip_count_input; + auto body_condition_const = std::make_shared(ov::element::boolean, ov::Shape{1}, true); + auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, exec_cond); + std::shared_ptr trip_count_input; int shift = 0; if (trip_count_type == InputLayerType::PARAMETER) { for (auto& target : targetStaticShapes) - target.insert(target.begin(), ngraph::Shape{}); - trip_count_input = std::make_shared(ngraph::element::i64, ngraph::Shape{1}); + target.insert(target.begin(), ov::Shape{}); + trip_count_input = std::make_shared(ov::element::i64, ov::Shape{1}); trip_count_input->set_friendly_name("trip_count"); - params.insert(params.begin(), ov::as_type_ptr(trip_count_input)); + params.insert(params.begin(), ov::as_type_ptr(trip_count_input)); shift++; } else { - trip_count_input = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, trip_count); + trip_count_input = std::make_shared(ov::element::i64, ov::Shape{1}, trip_count); } // Body auto constant = ngraph::builder::makeConstant(inType, std::vector{1}, std::vector{10}); - auto add = std::make_shared(body_params[0], constant); + auto add = std::make_shared(body_params[0], constant); auto concat = std::make_shared(ov::NodeVector{body_params[1], add}, 0); - auto body = std::make_shared(ngraph::OutputVector{body_condition_const, concat}, body_params); + auto body = std::make_shared(ov::OutputVector{body_condition_const, concat}, body_params); - auto loop = std::make_shared(trip_count_input, exec_condition); + auto loop = std::make_shared(trip_count_input, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); loop->set_invariant_input(body_params[0], params[shift]); loop->set_merged_input(body_params[1], params[shift + 1], concat); @@ -370,9 +368,9 @@ protected: auto out0 = loop->get_iter_value(body_condition_const, -1); auto out1 = loop->get_iter_value(concat, -1); - auto result0 = std::make_shared(out0); - auto result1 = 
std::make_shared(out1); - function = std::make_shared(ngraph::ResultVector{result0, result1}, params, "loop"); + auto result0 = std::make_shared(out0); + auto result1 = std::make_shared(out1); + function = std::make_shared(ov::ResultVector{result0, result1}, params, "loop"); } }; @@ -669,4 +667,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_LoopForConcat, LoopForConcatLayerCPUTest, LoopLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lrn.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lrn.cpp index 2d47fe19f18..2e0e5b95413 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lrn.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lrn.cpp @@ -2,17 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "common_test_utils/ov_tensor_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" - -using namespace ngraph; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using LRNParams = std::tuple< ElementType, // data precision InputShape, // data shape @@ -59,8 +56,8 @@ protected: for (auto&& shape : inputDynamicShapes) { params.push_back(std::make_shared(inputPrecision, shape)); } - auto axesNode = ngraph::opset1::Constant::create(ngraph::element::i32, { axes.size() }, axes); - auto lrn = std::make_shared(params[0], axesNode, alpha, beta, bias, size); + auto axesNode = ov::op::v0::Constant::create(ov::element::i32, { axes.size() }, axes); + auto lrn = std::make_shared(params[0], axesNode, alpha, beta, bias, size); function = makeNgraphFunction(inputPrecision, params, lrn, "LRN"); } }; @@ -71,7 +68,7 @@ TEST_P(LRNLayerCPUTest, CompareWithRefs) { } const std::vector inputPrecisions = { - ngraph::element::f32, + ov::element::f32, }; const std::vector> axes = { @@ -120,4 +117,5 @@ const auto testCases = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, LRNLayerCPUTest, testCases, LRNLayerCPUTest::getTestCaseName); -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_cell.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_cell.cpp index b03a7a35e53..83a0126444f 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_cell.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_cell.cpp @@ -2,14 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "common_test_utils/node_builders/lstm_cell.hpp" + #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using LSTMCellCpuSpecificParams = typename std::tuple< std::vector, // Shapes @@ -18,7 +19,7 @@ using LSTMCellCpuSpecificParams = typename std::tuple< float, // clip ElementType, // Network precision CPUSpecificParams, // CPU specific params - std::map // Additional config + ov::AnyMap // Additional config >; class LSTMCellLayerCPUTest : public testing::WithParamInterface, @@ -31,7 +32,7 @@ public: float clip = 0.f; ElementType netPrecision; CPUSpecificParams 
cpuParams; - std::map additionalConfig; + ov::AnyMap additionalConfig; std::tie(inputShapes, decompose, activations, clip, netPrecision, cpuParams, additionalConfig) = obj.param; @@ -57,8 +58,7 @@ public: if (!additionalConfig.empty()) { result << "_PluginConf"; for (auto& item : additionalConfig) { - if (item.second == InferenceEngine::PluginConfigParams::YES) - result << "_" << item.first << "=" << item.second; + result << "_" << item.first << "=" << item.second.as(); } } return result.str(); @@ -72,7 +72,7 @@ protected: float clip = 0.f; ElementType netPrecision; CPUSpecificParams cpuParams; - std::map additionalConfig; + ov::AnyMap additionalConfig; abs_threshold = 0.05; std::tie(inputShapes, decompose, activations, clip, netPrecision, cpuParams, additionalConfig) = this->GetParam(); @@ -86,7 +86,8 @@ protected: const size_t hiddenSize = targetStaticShapes.front()[1][1]; const size_t inputSize = targetStaticShapes.front()[0][1]; - if (additionalConfig[InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16] == InferenceEngine::PluginConfigParams::YES) { + auto it = additionalConfig.find(ov::hint::inference_precision.name()); + if (it != additionalConfig.end() && it->second.as() == ov::element::bf16) { selectedType = makeSelectedTypeStr(selectedType, ElementType::bf16); } else { selectedType = makeSelectedTypeStr(selectedType, netPrecision); @@ -100,8 +101,8 @@ protected: paramsOuts.push_back(param); } - std::vector WRB = {{4 * hiddenSize, inputSize}, {4 * hiddenSize, hiddenSize}, {4 * hiddenSize}}; - auto lstmCellOp = ngraph::builder::makeLSTM(paramsOuts, WRB, hiddenSize, activations, {}, {}, clip); + std::vector WRB = {{4 * hiddenSize, inputSize}, {4 * hiddenSize, hiddenSize}, {4 * hiddenSize}}; + auto lstmCellOp = utils::make_lstm(paramsOuts, WRB, hiddenSize, activations, {}, {}, clip); function = makeNgraphFunction(netPrecision, params, lstmCellOp, "LSTMCell"); } @@ -114,9 +115,8 @@ TEST_P(LSTMCellLayerCPUTest, CompareWithRefs) { namespace { /* CPU PARAMS */ -std::vector> additionalConfig - = {{{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}}, - {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}}}; +std::vector additionalConfig = {{{ov::hint::inference_precision(ov::element::bf16)}}, + {{ov::hint::inference_precision(ov::element::f32)}}}; CPUSpecificParams cpuParams{{nc, nc, nc}, {nc}, {"ref_any"}, "ref_any"}; @@ -201,4 +201,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic, LSTMCellLayerCPUTest, ::testing::ValuesIn(additionalConfig)), LSTMCellLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_sequence.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_sequence.cpp index 206c29731b7..d484942d899 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_sequence.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_sequence.cpp @@ -2,41 +2,42 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "common_test_utils/node_builders/lstm_cell.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" #include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" +#include + using namespace CPUTestUtils; -using 
namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using LSTMSequenceCpuSpecificParams = typename std::tuple< - std::vector, // Shapes - ngraph::helpers::SequenceTestsMode, // Pure Sequence or TensorIterator - std::vector, // Activations - float, // Clip - ngraph::op::RecurrentSequenceDirection, // Direction - ElementType, // Network precision - CPUSpecificParams, // CPU specific params - std::map // Additional config ->; +using LSTMSequenceCpuSpecificParams = + typename std::tuple, // Shapes + ov::test::utils::SequenceTestsMode, // Pure Sequence or TensorIterator + std::vector, // Activations + float, // Clip + ov::op::RecurrentSequenceDirection, // Direction + ElementType, // Network precision + CPUSpecificParams, // CPU specific params + ov::AnyMap // Additional config + >; class LSTMSequenceCPUTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest, public CPUTestsBase { public: static std::string getTestCaseName(const testing::TestParamInfo &obj) { std::vector inputShapes; - ngraph::helpers::SequenceTestsMode seqMode; + ov::test::utils::SequenceTestsMode seqMode; std::vector activations; float clip; ov::op::RecurrentSequenceDirection direction; ElementType netPrecision; CPUSpecificParams cpuParams; - std::map additionalConfig; + ov::AnyMap additionalConfig; std::tie(inputShapes, seqMode, activations, clip, direction, netPrecision, cpuParams, additionalConfig) = obj.param; @@ -62,9 +63,8 @@ public: if (!additionalConfig.empty()) { result << "_PluginConf"; - for (auto &item : additionalConfig) { - if (item.second == InferenceEngine::PluginConfigParams::YES) - result << "_" << item.first << "=" << item.second; + for (auto& item : additionalConfig) { + result << "_" << item.first << "=" << item.second.as(); } } return result.str(); @@ -73,13 +73,13 @@ public: protected: void SetUp() override { std::vector inputShapes; - ngraph::helpers::SequenceTestsMode seqMode; + ov::test::utils::SequenceTestsMode seqMode; std::vector activations; float clip; ov::op::RecurrentSequenceDirection direction; ElementType netPrecision; CPUSpecificParams cpuParams; - std::map additionalConfig; + ov::AnyMap additionalConfig; std::tie(inputShapes, seqMode, activations, clip, direction, netPrecision, cpuParams, additionalConfig) = this->GetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; @@ -105,7 +105,8 @@ protected: configuration.insert(additionalConfig.begin(), additionalConfig.end()); - if (additionalConfig[InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16] == InferenceEngine::PluginConfigParams::YES) { + auto it = additionalConfig.find(ov::hint::inference_precision.name()); + if (it != additionalConfig.end() && it->second.as() == ov::element::bf16) { selectedType = makeSelectedTypeStr(selectedType, ElementType::bf16); } else { selectedType = makeSelectedTypeStr(selectedType, netPrecision); @@ -122,8 +123,8 @@ protected: 1lu; if (inputDynamicShapes.size() > 3) { if (!inputDynamicShapes[3].is_dynamic() && - seqMode != ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM && - seqMode != ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM) { + seqMode != ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM && + seqMode != ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM) { params.pop_back(); } else { params[3]->set_element_type(ElementType::i64); @@ -136,23 +137,23 @@ protected: std::vector WRB = {{numDirections, 4 * hiddenSize, inputSize}, 
{numDirections, 4 * hiddenSize, hiddenSize}, {numDirections, 4 * hiddenSize}, {batchSize}}; - auto lstmSequenceOp = ngraph::builder::makeLSTM(paramsOuts, - WRB, - hiddenSize, - activations, - {}, - {}, - clip, - true, - direction, - seqMode, - WRB_range); + auto lstmSequenceOp = utils::make_lstm(paramsOuts, + WRB, + hiddenSize, + activations, + {}, + {}, + clip, + true, + direction, + seqMode, + WRB_range); function = makeNgraphFunction(netPrecision, params, lstmSequenceOp, "lstmSequenceOp"); - if (seqMode != ngraph::helpers::SequenceTestsMode::PURE_SEQ) { + if (seqMode != ov::test::utils::SequenceTestsMode::PURE_SEQ) { ov::pass::Manager manager; - if (direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL) + if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL) manager.register_pass(); manager.register_pass(); manager.run_passes(function); @@ -188,15 +189,14 @@ TEST_P(LSTMSequenceCPUTest, CompareWithRefs) { namespace { /* CPU PARAMS */ -std::vector> additionalConfig - = {{{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}}}; +std::vector additionalConfig = {{{ov::hint::inference_precision(ov::element::f32)}}, + {{ov::hint::inference_precision(ov::element::bf16)}}}; CPUSpecificParams cpuParams{{ntc, tnc, tnc}, {ntc, tnc, tnc}, {"ref_any"}, "ref_any"}; // CPUSpecificParams cpuParamsBatchSizeOne{{tnc, ntc, ntc}, {tnc, ntc, ntc}, {"ref_any"}, "ref_any"}; CPUSpecificParams cpuParamsBatchSizeOne{{tnc, tnc, tnc}, {tnc, tnc, tnc}, {"ref_any"}, "ref_any"}; -std::vector mode{ngraph::helpers::SequenceTestsMode::PURE_SEQ}; +std::vector mode{ov::test::utils::SequenceTestsMode::PURE_SEQ}; // oneDNN supports only sigmoid-tanh-tanh std::vector> activations = {{"sigmoid", "tanh", "tanh"}}; // oneDNN supports only zero clip @@ -236,7 +236,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_static, LSTMSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParams), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), LSTMSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_static_BatchSizeOne, LSTMSequenceCPUTest, @@ -247,7 +247,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_static_BatchSizeOne, LSTMSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParamsBatchSizeOne), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), LSTMSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(nightly_static_bf16, LSTMSequenceCPUTest, @@ -345,7 +345,7 @@ namespace dynamicShapesBatchSwitch { const int seq_length = 1; const int hidden_size = 1024; const int num_directions = 1; - const ngraph::helpers::SequenceTestsMode mode = ngraph::helpers::SequenceTestsMode::PURE_SEQ; + const ov::test::utils::SequenceTestsMode mode = ov::test::utils::SequenceTestsMode::PURE_SEQ; CPUSpecificParams cpuParams{{ntc, tnc, tnc}, {ntc, tnc, tnc}, {"ref_any"}, "ref_any"}; const std::vector shapes = { @@ -396,7 +396,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic_batch, LSTMSequenceCPUTest, ::testing::Values(ov::op::RecurrentSequenceDirection::FORWARD), ::testing::ValuesIn(netPrecisions), ::testing::Values(dynamicShapesBatchSwitch::cpuParams), - ::testing::Values(std::map{{"_dynamic_batch_test", "yes"}})), + ::testing::Values(ov::AnyMap{{"_dynamic_batch_test", "yes"}})), LSTMSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_dynamic, 
LSTMSequenceCPUTest, @@ -407,7 +407,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic, LSTMSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParams), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), LSTMSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_dynamic_BatchSizeOne, LSTMSequenceCPUTest, @@ -418,7 +418,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic_BatchSizeOne, LSTMSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParamsBatchSizeOne), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), LSTMSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(nightly_dynamic, LSTMSequenceCPUTest, @@ -429,7 +429,7 @@ INSTANTIATE_TEST_SUITE_P(nightly_dynamic, LSTMSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParams), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), LSTMSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(nightly_dynamic_bf16, LSTMSequenceCPUTest, @@ -453,5 +453,6 @@ INSTANTIATE_TEST_SUITE_P(nightly_dynamic_bf16_BatchSizeOne, LSTMSequenceCPUTest, ::testing::Values(cpuParamsBatchSizeOne), ::testing::Values(additionalConfig[1])), LSTMSequenceCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul_sparse.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul_sparse.cpp index ef99871333e..8f602c697f5 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul_sparse.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul_sparse.cpp @@ -2,23 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/mat_mul.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "test_utils/fusing_test_utils.hpp" -#include "ov_models/builders.hpp" -#include -#include -#include "shared_test_classes/base/utils/generate_inputs.hpp" -#include "cpu/cpu_config.hpp" #include "common_test_utils/ov_tensor_utils.hpp" +#include "cpu/cpu_config.hpp" +#include "openvino/runtime/intel_cpu/properties.hpp" +#include "ov_models/builders.hpp" +#include "ov_ops/type_relaxed.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "shared_test_classes/base/utils/generate_inputs.hpp" +#include "shared_test_classes/single_layer/mat_mul.hpp" +#include "test_utils/fusing_test_utils.hpp" -using namespace ngraph; -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { struct ShapeRelatedParams { std::vector inputShapes; @@ -32,7 +29,7 @@ typedef std::tuple< ElementType, // Output precision fusingSpecificParams, CPUSpecificParams, - std::map, // Additional config + ov::AnyMap, // Additional config float // Weights sparse rate > MatMulSparseParamSet; @@ -44,7 +41,7 @@ public: ElementType inType, weiType, outType; fusingSpecificParams fusingParams; CPUSpecificParams cpuParams; - std::map additionalConfig; + ov::AnyMap additionalConfig; float weiSparseRate; std::tie(shapeRelatedParams, inType, weiType, outType, fusingParams, cpuParams, additionalConfig, weiSparseRate) = obj.param; @@ -76,7 +73,7 @@ public: if (!additionalConfig.empty()) { result << "_PluginConf"; for (auto& item : 
additionalConfig) { - result << "_" << item.first << "=" << item.second; + result << "_" << item.first << "=" << item.second.as(); } } result << "_weiSparseRate=" << weiSparseRate; @@ -129,12 +126,11 @@ protected: bool transpose_a, bool transpose_b, const std::vector& weiData) { - using namespace ngraph; auto inputParamsFP32 = std::make_shared(element::f32, A.get_partial_shape()); auto tensor = ov::test::utils::create_and_fill_tensor(element::f32, inShapeB.to_shape()); auto matrixBFP32 = std::make_shared(tensor); - auto matMulRelaxed = std::make_shared>( + auto matMulRelaxed = std::make_shared>( ov::op::v0::MatMul(inputParamsFP32, matrixBFP32, transpose_a, transpose_b), element::f32); @@ -147,13 +143,12 @@ protected: void SetUp() override { abs_threshold = 0.5f; - using ngraph::pass::ConvertPrecision; ShapeRelatedParams shapeRelatedParams; ElementType inType, weiType, outType; fusingSpecificParams fusingParams; CPUSpecificParams cpuParams; - std::map additionalConfig; + ov::AnyMap additionalConfig; float weiSparseRate; std::tie(shapeRelatedParams, inType, weiType, outType, fusingParams, cpuParams, additionalConfig, @@ -196,7 +191,7 @@ protected: auto tensor = ov::test::utils::create_and_fill_tensor(element::f32, inShapeB.to_shape()); auto matrixB = std::make_shared(tensor); - auto weiData = generateSparseVector(ngraph::shape_size(inShapeB.get_shape()), weiSparseRate); + auto weiData = generateSparseVector(ov::shape_size(inShapeB.get_shape()), weiSparseRate); auto matMul = makeMatMulRelaxed(params[0], inShapeB, weiType, transpA, transpB, weiData); function = makeNgraphFunction(element::f32, params, matMul, cpuNodeType); @@ -204,9 +199,8 @@ protected: checkFusingPosition = false; functionRefs = ov::clone_model(*function); - ngraph::pass::ConvertPrecision().run_on_model(functionRefs); - ngraph::pass::ConvertPrecision().run_on_model(functionRefs); - functionRefs->validate_nodes_and_infer_types(); + convert_precisions.insert({ov::element::i8, ov::element::f32}); + convert_precisions.insert({ov::element::u8, ov::element::f32}); } }; @@ -236,9 +230,9 @@ std::vector filterSpecificParams(bool sparseExpected) { namespace fullyConnected { // cpu (sparse) configs -const std::map emptyConfig = {}; -const std::map SparseRate50 = {{CPUConfigParams::KEY_CPU_SPARSE_WEIGHTS_DECOMPRESSION_RATE, "0.5"}}; -const std::map SparseRate80 = {{CPUConfigParams::KEY_CPU_SPARSE_WEIGHTS_DECOMPRESSION_RATE, "0.8"}}; +const ov::AnyMap emptyConfig = {}; +const ov::AnyMap SparseRate50 = {{ov::intel_cpu::sparse_weights_decompression_rate(0.5)}}; +const ov::AnyMap SparseRate80 = {{ov::intel_cpu::sparse_weights_decompression_rate(0.8)}}; const std::vector IS2D_sparse_smoke = { {static_shapes_to_test_representation({{64, 64}, {64, 64}}), {false, true}}, @@ -340,4 +334,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_FC_3D_I8_sparse, MatMulSparseCPUTest, testParams3 } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/non_max_suppression.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/non_max_suppression.cpp index 07b1e9c3405..dfc7a48bf25 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/non_max_suppression.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/non_max_suppression.cpp @@ -2,20 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - -#include "shared_test_classes/base/ov_subgraph.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include 
"ov_models/builders.hpp" -#include -#include "test_utils/cpu_test_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/base/utils/ranges.hpp" +#include "test_utils/cpu_test_utils.hpp" -using namespace ov::test; -using namespace ngraph; using namespace CPUTestUtils; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { enum { BATCHES, @@ -42,7 +38,7 @@ using NmsParams = std::tuple{{bounds[BATCHES], bounds[BOXES], 4}, {bounds[BATCHES], bounds[CLASSES], bounds[BOXES]}}; + inputDynamicShapes = std::vector{{bounds[BATCHES], bounds[BOXES], 4}, {bounds[BATCHES], bounds[CLASSES], bounds[BOXES]}}; } else { size_t batches, boxes, classes; std::tie(batches, boxes, classes) = targetInDims.front(); ov::Dimension numBatches(batches), numBoxes(boxes), numClasses(classes); - inputDynamicShapes = std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}}; + inputDynamicShapes = std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}}; } for (const auto &ts : targetInDims) { size_t numBatches, numBoxes, numClasses; std::tie(numBatches, numBoxes, numClasses) = ts; - targetStaticShapes.push_back(std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}}); - if (maxOutBoxesType == ngraph::helpers::InputLayerType::PARAMETER) { - targetStaticShapes.back().push_back(ngraph::Shape{1}); + targetStaticShapes.push_back(std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}}); + if (maxOutBoxesType == ov::test::utils::InputLayerType::PARAMETER) { + targetStaticShapes.back().push_back(ov::Shape{1}); } } - std::shared_ptr maxOutBoxesPerClassNode; + std::shared_ptr maxOutBoxesPerClassNode; ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) { params.push_back(std::make_shared(paramsPrec, shape)); @@ -154,18 +151,18 @@ protected: params[0]->set_friendly_name("param_1"); params[1]->set_friendly_name("param_2"); - if (maxOutBoxesType == ngraph::helpers::InputLayerType::PARAMETER) { - inputDynamicShapes.push_back(ngraph::PartialShape{1}); - params.push_back(std::make_shared(ElementType::i32, inputDynamicShapes.back())); + if (maxOutBoxesType == ov::test::utils::InputLayerType::PARAMETER) { + inputDynamicShapes.push_back(ov::PartialShape{1}); + params.push_back(std::make_shared(ElementType::i32, inputDynamicShapes.back())); params[1]->set_friendly_name("param_3"); maxOutBoxesPerClassNode = params.back(); } else { - maxOutBoxesPerClassNode = builder::makeConstant(maxBoxPrec, ngraph::Shape{}, std::vector{maxOutBoxesPerClass}); + maxOutBoxesPerClassNode = ngraph::builder::makeConstant(maxBoxPrec, ov::Shape{}, std::vector{maxOutBoxesPerClass}); } - auto iouThrNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector{iouThr})->output(0); - auto scoreThrNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector{scoreThr})->output(0); - auto softNmsSigmaNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector{softNmsSigma})->output(0); + auto iouThrNode = ngraph::builder::makeConstant(thrPrec, ov::Shape{}, std::vector{iouThr})->output(0); + auto scoreThrNode = ngraph::builder::makeConstant(thrPrec, ov::Shape{}, std::vector{scoreThr})->output(0); + auto softNmsSigmaNode = ngraph::builder::makeConstant(thrPrec, ov::Shape{}, std::vector{softNmsSigma})->output(0); auto nms = std::make_shared(params[0], params[1], maxOutBoxesPerClassNode, iouThrNode, scoreThrNode, softNmsSigmaNode, boxEncoding, sortResDescend, outType); @@ -419,7 +416,7 @@ const std::vector encodType = 
{o ov::op::v9::NonMaxSuppression::BoxEncodingType::CORNER}; const std::vector sortResDesc = {true, false}; const std::vector outType = {ElementType::i32, ElementType::i64}; -const std::vector maxBoxInputTypes = {ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT}; +const std::vector maxBoxInputTypes = {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT}; const auto nmsParams = ::testing::Combine(::testing::ValuesIn(inShapeParams), ::testing::Combine(::testing::Values(ElementType::f32), @@ -438,4 +435,5 @@ const auto nmsParams = ::testing::Combine(::testing::ValuesIn(inShapeParams), INSTANTIATE_TEST_SUITE_P(smoke_NmsLayerCPUTest, NmsLayerCPUTest, nmsParams, NmsLayerCPUTest::getTestCaseName); -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/nonzero.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/nonzero.cpp index ecce08dedb2..6a52efb79f0 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/nonzero.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/nonzero.cpp @@ -2,18 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include - -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { typedef std::tuple< InputShape, // Input shape definition @@ -87,7 +83,7 @@ protected: inputParams.push_back(std::make_shared(netType, shape)); } - auto nonZero = std::make_shared(inputParams[0]); + auto nonZero = std::make_shared(inputParams[0]); // I8 was used as a special placeholder during calculating of primitive type if input was U8, // real runtime precision is still U8 selectedType = makeSelectedTypeStr("ref", netType == ElementType::u8 ? 
ElementType::i8 : netType); @@ -199,7 +195,7 @@ std::vector inShapesDynamic = { } } }; -std::vector inShapesStatic = { +std::vector inShapesStatic = { { 100 }, { 4, 100 }, { 4, 2, 100 }, @@ -227,4 +223,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_NonZeroDynamicCPUTest, NonZeroLayerCPUTest, } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp index b80b2175603..6593dd0c759 100755 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp @@ -4,26 +4,21 @@ #include "shared_test_classes/single_layer/normalize_l2.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include +#include "common_test_utils/ov_tensor_utils.hpp" -using namespace ngraph; -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace LayerTestsDefinitions; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using NormalizeL2LayerCPUTestParamSet = std::tuple< - InputShape, // input shape - ElementType, // input element type - std::vector, // axes - float, // eps - ngraph::op::EpsMode, // eps_mode - CPUSpecificParams, - fusingSpecificParams>; +using NormalizeL2LayerCPUTestParamSet = std::tuple, // axes + float, // eps + ov::op::EpsMode, // eps_mode + CPUSpecificParams, + fusingSpecificParams>; class NormalizeL2LayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CpuTestWithFusing { @@ -33,7 +28,7 @@ public: ElementType inType; std::vector axes; float eps; - ngraph::op::EpsMode epsMode; + ov::op::EpsMode epsMode; CPUSpecificParams cpuParams; fusingSpecificParams fusingParams; std::tie(shapes, inType, axes, eps, epsMode, cpuParams, fusingParams) = obj.param; @@ -60,7 +55,7 @@ protected: ElementType inType; std::vector axes; float eps; - ngraph::op::EpsMode epsMode; + ov::op::EpsMode epsMode; CPUSpecificParams cpuParams; fusingSpecificParams fusingParams; std::tie(shapes, inType, axes, eps, epsMode, cpuParams, fusingParams) = this->GetParam(); @@ -88,7 +83,7 @@ protected: } } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { @@ -329,6 +324,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Dynamic_4D_FusingPerChannel, NormalizeL2LayerCPUT ::testing::ValuesIn(fusingParamsSetPerChannel)), NormalizeL2LayerCPUTest::getTestCaseName); -} // namespace +} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/one_hot.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/one_hot.cpp index 60d9a417454..25e4e7011be 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/one_hot.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/one_hot.cpp @@ -7,21 +7,20 @@ #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using 
oneHotCPUTestParams = std::tuple< - InputShape, // Input shape - int, // axis to extend - std::pair, // secondary input type && need to generate depth - size_t, // depth - float, // on_value - float, // off_value - InferenceEngine::Precision, // Output precision - CPUSpecificParams>; +using oneHotCPUTestParams = + std::tuple, // secondary input type && need to generate depth + size_t, // depth + float, // on_value + float, // off_value + ov::element::Type, // Output precision + CPUSpecificParams>; class OneHotLayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CPUTestsBase { @@ -29,10 +28,10 @@ public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { InputShape inputShape; int axis; - std::pair inputType; + std::pair inputType; size_t depth; float onValue, offValue; - InferenceEngine::Precision outPrc; + ov::element::Type outPrc; CPUSpecificParams cpuParams; std::tie(inputShape, axis, inputType, depth, onValue, offValue, outPrc, cpuParams) = obj.param; @@ -45,20 +44,20 @@ public: result << ov::test::utils::vec2str(shape) << "_"; } result << "axis=" << axis << "_"; - if (inputType.first == ngraph::helpers::InputLayerType::CONSTANT && !inputType.second) { + if (inputType.first == utils::InputLayerType::CONSTANT && !inputType.second) { result << "depth=" << depth << "_"; - } else if (inputType.first == ngraph::helpers::InputLayerType::CONSTANT && inputType.second) { + } else if (inputType.first == utils::InputLayerType::CONSTANT && inputType.second) { result << "depth=WillBeGenerated" << "_"; } else { result << "depth=PARAMETER" << "_"; } result << "OnVal=" << onValue << "_"; result << "OffVal=" << offValue << "_"; - result << "outPRC=" << outPrc.name(); + result << "outPRC=" << outPrc.to_string(); result << CPUTestsBase::getTestCaseName(cpuParams); return result.str(); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { @@ -81,18 +80,16 @@ protected: targetDevice = ov::test::utils::DEVICE_CPU; InputShape inputShape; - std::pair inputType; - InferenceEngine::Precision outPrc; + std::pair inputType; CPUSpecificParams cpuParams; - std::tie(inputShape, Axis, inputType, Depth, OnValue, OffValue, outPrc, cpuParams) = this->GetParam(); + std::tie(inputShape, Axis, inputType, Depth, OnValue, OffValue, outType, cpuParams) = this->GetParam(); - if (inputType.second && inputType.first == ngraph::helpers::InputLayerType::CONSTANT) { + if (inputType.second && inputType.first == utils::InputLayerType::CONSTANT) { generateDepth(); } std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; selectedType = std::string("ref_any_I32"); - outType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrc); init_input_shapes({inputShape}); if (inputType.second) { @@ -100,7 +97,7 @@ protected: target.push_back({}); } - function = createFunction(inputType.first == ngraph::helpers::InputLayerType::CONSTANT); + function = createFunction(inputType.first == utils::InputLayerType::CONSTANT); if (function->get_parameters().size() == 2) { generateDepth(); functionRefs = createFunction(true); @@ -125,22 +122,22 @@ protected: compare(expectedOutputs, actualOutputs); } - std::shared_ptr createFunction(bool depthConst) { - ov::ParameterVector params{std::make_shared(ngraph::element::i32, inputDynamicShapes.front())}; + std::shared_ptr 
createFunction(bool depthConst) { + ov::ParameterVector params{std::make_shared(ov::element::i32, inputDynamicShapes.front())}; params.front()->set_friendly_name("ParamsIndices"); std::shared_ptr depth; if (depthConst) { - depth = ngraph::op::Constant::create(ngraph::element::i32, ngraph::Shape{ }, {Depth}); + depth = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{ }, {Depth}); } else { - auto depthParam = std::make_shared(ngraph::element::i32, ngraph::Shape{ }); + auto depthParam = std::make_shared(ov::element::i32, ov::Shape{ }); depthParam->set_friendly_name("ParamDepth"); params.push_back(depthParam); depth = depthParam; } - auto on_value_const = std::make_shared(outType, ngraph::Shape{ }, OnValue); - auto off_value_const = std::make_shared(outType, ngraph::Shape{ }, OffValue); - auto oneHot = std::make_shared(params[0], depth, on_value_const, off_value_const, Axis); - return makeNgraphFunction(ngraph::element::i32, params, oneHot, "OneHot"); + auto on_value_const = std::make_shared(outType, ov::Shape{ }, OnValue); + auto off_value_const = std::make_shared(outType, ov::Shape{ }, OffValue); + auto oneHot = std::make_shared(params[0], depth, on_value_const, off_value_const, Axis); + return makeNgraphFunction(ov::element::i32, params, oneHot, "OneHot"); } void generateDepth() { testing::internal::Random random(time(nullptr)); @@ -159,21 +156,21 @@ TEST_P(OneHotLayerCPUTest, CompareWithRefs) { } namespace { -const std::vector outPrc = { - Precision::FP32, - Precision::BF16, - Precision::I8 - // Precision::U8 // Precision cannot be wrapped to constant one hot +const std::vector outPrc = { + ov::element::f32, + ov::element::bf16, + ov::element::i8 + // ov::element::u8 // Precision cannot be wrapped to constant one hot }; -std::vector> secondaryInputTypesStaticCase = { - {ngraph::helpers::InputLayerType::CONSTANT, true}, - {ngraph::helpers::InputLayerType::CONSTANT, false} +std::vector> secondaryInputTypesStaticCase = { + {utils::InputLayerType::CONSTANT, true}, + {utils::InputLayerType::CONSTANT, false} }; -std::vector> secondaryInputTypesDynamicCase = { - {ngraph::helpers::InputLayerType::CONSTANT, true}, - {ngraph::helpers::InputLayerType::CONSTANT, false}, - {ngraph::helpers::InputLayerType::PARAMETER, true} +std::vector> secondaryInputTypesDynamicCase = { + {utils::InputLayerType::CONSTANT, true}, + {utils::InputLayerType::CONSTANT, false}, + {utils::InputLayerType::PARAMETER, true} }; const std::vector staticInputShapes0D = { @@ -328,5 +325,6 @@ const auto testCase_5d_dynamic = ::testing::Combine( ); INSTANTIATE_TEST_SUITE_P(smoke_OneHotCPU_5D_Dynamic, OneHotLayerCPUTest, testCase_5d_dynamic, OneHotLayerCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/pad.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/pad.cpp index 227933e2a3f..a6c06628262 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/pad.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/pad.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "test_utils/cpu_test_utils.hpp" +#include "shared_test_classes/single_layer/pad.hpp" + +#include "common_test_utils/ov_tensor_utils.hpp" +#include "openvino/op/pad.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include -#include +#include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace 
CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using PadLayerCPUTestParamSet = std::tuple< InputShape, // Input shape - ngraph::helpers::InputLayerType, // Secondary input types + ov::test::utils::InputLayerType, // Secondary input types ElementType, // Input element type std::vector, // padsBegin std::vector, // padsEnd @@ -30,7 +30,7 @@ class PadLayerCPUTest : public testing::WithParamInterface obj) { InputShape shapes; - ngraph::helpers::InputLayerType secondaryInputType; + ov::test::utils::InputLayerType secondaryInputType; ElementType elementType; std::vector padsBegin, padsEnd; ov::op::PadMode padMode; @@ -79,7 +79,7 @@ protected: } void SetUp() override { InputShape shapes; - ngraph::helpers::InputLayerType secondaryInputType; + ov::test::utils::InputLayerType secondaryInputType; ov::op::PadMode padMode; ElementType dataType; CPUSpecificParams cpuParams; @@ -99,7 +99,7 @@ protected: params.push_back(std::make_shared(dataType, shape)); } std::shared_ptr pad; - if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) { + if (secondaryInputType == ov::test::utils::InputLayerType::PARAMETER) { ov::Shape inShape = {padsBegin.size()}; auto beginNode = std::make_shared(ElementType::i64, inShape); @@ -155,13 +155,13 @@ const std::vector inputPrecisions = { ElementType::i8 }; -const std::vector inputLayerTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER +const std::vector inputLayerTypes = { + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER }; -const std::vector inputLayerTypesBlocked = { - ngraph::helpers::InputLayerType::CONSTANT, +const std::vector inputLayerTypesBlocked = { + ov::test::utils::InputLayerType::CONSTANT, }; const std::vector argPadValue = {0.f, 2.5f}; @@ -747,5 +747,6 @@ INSTANTIATE_TEST_SUITE_P( /* *======================* *=====================* *======================* */ } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box.cpp index 13142ae4f39..76cc3498f83 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box.cpp @@ -2,21 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include - -#include +#include "openvino/core/partial_shape.hpp" #include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using priorBoxSpecificParams = std::tuple< std::vector, // min_size @@ -32,16 +27,14 @@ using priorBoxSpecificParams = std::tuple< std::vector, // variance bool>; // scale_all_sizes -typedef std::tuple< - priorBoxSpecificParams, - ov::test::ElementType, // net precision - ov::test::ElementType, // Input precision - ov::test::ElementType, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - ov::test::InputShape, // input shape - ov::test::InputShape, // image shape - std::string> priorBoxLayerParams; +typedef std::tuple + priorBoxLayerParams; class 
PriorBoxLayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CPUTestsBase { @@ -49,19 +42,18 @@ public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { ov::test::ElementType netPrecision; ov::test::ElementType inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; ov::test::InputShape inputShapes; ov::test::InputShape imageShapes; std::string targetDevice; priorBoxSpecificParams specParams; std::tie(specParams, netPrecision, - inPrc, outPrc, inLayout, outLayout, + inPrc, outPrc, inputShapes, imageShapes, targetDevice) = obj.param; - ngraph::op::PriorBoxAttrs attributes; + ov::op::v0::PriorBox::Attributes attributes; std::tie( attributes.min_size, attributes.max_size, @@ -83,8 +75,6 @@ public: result << "netPRC=" << netPrecision << separator; result << "inPRC=" << inPrc << separator; result << "outPRC=" << outPrc << separator; - result << "inL=" << inLayout << separator; - result << "outL=" << outLayout << separator; result << "min_size=" << ov::test::utils::vec2str(attributes.min_size) << separator; result << "max_size=" << ov::test::utils::vec2str(attributes.max_size)<< separator; result << "aspect_ratio=" << ov::test::utils::vec2str(attributes.aspect_ratio)<< separator; @@ -106,23 +96,19 @@ protected: void SetUp() override { priorBoxSpecificParams specParams; - InferenceEngine::Layout inLayout; - InferenceEngine::Layout outLayout; ov::test::ElementType netPrecision; ov::test::ElementType inPrc; ov::test::ElementType outPrc; ov::test::InputShape inputShapes; ov::test::InputShape imageShapes; - std::tie(specParams, netPrecision, - inPrc, outPrc, inLayout, outLayout, - inputShapes, imageShapes, targetDevice) = GetParam(); + std::tie(specParams, netPrecision, inPrc, outPrc, inputShapes, imageShapes, targetDevice) = GetParam(); selectedType = makeSelectedTypeStr("ref_any", ov::test::ElementType::i32); targetDevice = ov::test::utils::DEVICE_CPU; init_input_shapes({ inputShapes, imageShapes }); - ngraph::op::PriorBoxAttrs attributes; + ov::op::v0::PriorBox::Attributes attributes; std::tie( attributes.min_size, attributes.max_size, @@ -141,15 +127,15 @@ protected: for (auto&& shape : inputDynamicShapes) { params.push_back(std::make_shared(netPrecision, shape)); } - auto shape_of_1 = std::make_shared(params[0]); - auto shape_of_2 = std::make_shared(params[1]); - auto priorBox = std::make_shared( + auto shape_of_1 = std::make_shared(params[0]); + auto shape_of_2 = std::make_shared(params[1]); + auto priorBox = std::make_shared( shape_of_1, shape_of_2, attributes); - ngraph::ResultVector results{std::make_shared(priorBox)}; - function = std::make_shared (results, params, "priorBox"); + ov::ResultVector results{std::make_shared(priorBox)}; + function = std::make_shared (results, params, "priorBox"); } }; @@ -219,12 +205,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_PriorBox, PriorBoxLayerCPUTest, ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::ElementType::undefined), ::testing::Values(ov::test::ElementType::undefined), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), ::testing::ValuesIn(inputShape), ::testing::ValuesIn(imageShape), ::testing::Values(ov::test::utils::DEVICE_CPU)), PriorBoxLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box_clustered.cpp 
b/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box_clustered.cpp index 06fe498f62b..5aa4df776dd 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box_clustered.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box_clustered.cpp @@ -2,21 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include - -#include -#include "ov_models/builders.hpp" +#include "openvino/core/partial_shape.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { typedef std::tuple< std::vector, // widths @@ -28,16 +22,14 @@ typedef std::tuple< float, // offset std::vector> priorBoxClusteredSpecificParams; -typedef std::tuple< - priorBoxClusteredSpecificParams, - ov::test::ElementType, // net precision - ov::test::ElementType, // Input precision - ov::test::ElementType, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - ov::test::InputShape, // input shape - ov::test::InputShape, // image shape - std::string> priorBoxClusteredLayerParams; +typedef std::tuple + priorBoxClusteredLayerParams; class PriorBoxClusteredLayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CPUTestsBase { @@ -45,18 +37,12 @@ public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { ov::test::ElementType netPrecision; ov::test::ElementType inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; ov::test::InputShape inputShapes, imageShapes; std::string targetDevice; priorBoxClusteredSpecificParams specParams; - std::tie(specParams, - netPrecision, - inPrc, outPrc, inLayout, outLayout, - inputShapes, - imageShapes, - targetDevice) = obj.param; + std::tie(specParams, netPrecision, inPrc, outPrc, inputShapes, imageShapes, targetDevice) = obj.param; - ngraph::op::PriorBoxClusteredAttrs attributes; + ov::op::v0::PriorBoxClustered::Attributes attributes; std::tie( attributes.widths, attributes.heights, @@ -75,8 +61,6 @@ public: result << "netPRC=" << netPrecision << separator; result << "inPRC=" << inPrc << separator; result << "outPRC=" << outPrc << separator; - result << "inL=" << inLayout << separator; - result << "outL=" << outLayout << separator; result << "widths=" << ov::test::utils::vec2str(attributes.widths) << separator; result << "heights=" << ov::test::utils::vec2str(attributes.heights) << separator; result << "variances="; @@ -96,24 +80,19 @@ public: protected: void SetUp() override { priorBoxClusteredSpecificParams specParams; - - InferenceEngine::Layout inLayout; - InferenceEngine::Layout outLayout; ov::test::ElementType netPrecision; ov::test::ElementType inPrc; ov::test::ElementType outPrc; ov::test::InputShape inputShapes; ov::test::InputShape imageShapes; - std::tie(specParams, netPrecision, - inPrc, outPrc, inLayout, outLayout, - inputShapes, imageShapes, targetDevice) = GetParam(); + std::tie(specParams, netPrecision, inPrc, outPrc, inputShapes, imageShapes, targetDevice) = GetParam(); selectedType = makeSelectedTypeStr("ref_any", ov::test::ElementType::i32); targetDevice = ov::test::utils::DEVICE_CPU; init_input_shapes({ inputShapes, imageShapes }); - ngraph::op::PriorBoxClusteredAttrs attributes; + ov::op::v0::PriorBoxClustered::Attributes attributes; std::tie( 
attributes.widths, attributes.heights, @@ -128,15 +107,15 @@ protected: for (auto&& shape : { inputShapes.first, imageShapes.first }) { params.push_back(std::make_shared(netPrecision, shape)); } - auto shape_of_1 = std::make_shared(params[0]); - auto shape_of_2 = std::make_shared(params[1]); - auto priorBoxClustered = std::make_shared( + auto shape_of_1 = std::make_shared(params[0]); + auto shape_of_2 = std::make_shared(params[1]); + auto priorBoxClustered = std::make_shared( shape_of_1, shape_of_2, attributes); - ngraph::ResultVector results{ std::make_shared(priorBoxClustered) }; - function = std::make_shared(results, params, "priorBoxClustered"); + ov::ResultVector results{ std::make_shared(priorBoxClustered) }; + function = std::make_shared(results, params, "priorBoxClustered"); } }; @@ -217,8 +196,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_PriorBoxClustered, PriorBoxClusteredLayerCPUTest, ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::ElementType::undefined), ::testing::Values(ov::test::ElementType::undefined), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), ::testing::ValuesIn(inputShapes), ::testing::ValuesIn(imageShapes), ::testing::Values(ov::test::utils::DEVICE_CPU)), @@ -226,4 +203,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_PriorBoxClustered, PriorBoxClusteredLayerCPUTest, ); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/proposal.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/proposal.cpp index 03240dcfdeb..2e08c459cf0 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/proposal.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/proposal.cpp @@ -2,17 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "common_test_utils/ov_tensor_utils.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace proposalTypes { @@ -51,10 +48,9 @@ using proposalSpecificParams = std::tuple< ratio_type, scale_type>; -using proposalLayerTestCPUParams = std::tuple< - std::vector, // Input shapes - proposalSpecificParams, // Node attributes - Precision>; // Network precision +using proposalLayerTestCPUParams = std::tuple, // Input shapes + proposalSpecificParams, // Node attributes + ov::element::Type>; // Network precision class ProposalLayerCPUTest : public testing::WithParamInterface, public SubgraphBaseTest, public CPUTestsBase { @@ -62,7 +58,7 @@ public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::vector inputShapes; proposalSpecificParams proposalParams; - Precision netPrecision; + ov::element::Type netPrecision; std::tie(inputShapes, proposalParams, netPrecision) = obj.param; base_size_type base_size; @@ -103,7 +99,7 @@ public: result << "framework=" << framework << "_"; result << "ratio=" << ov::test::utils::vec2str(ratio) << "_"; result << "scale=" << ov::test::utils::vec2str(scale) << "_"; - result << "netPRC=" << netPrecision.name(); + result << "netPRC=" << netPrecision.to_string(); return result.str(); } @@ -113,7 +109,7 @@ protected: std::vector inputShapes; proposalSpecificParams proposalParams; - Precision netPrecision; + ov::element::Type 
netPrecision; std::tie(inputShapes, proposalParams, netPrecision) = this->GetParam(); base_size_type base_size; @@ -135,16 +131,15 @@ protected: framework, min_size, nms_thresh, normalize, post_nms_topn, pre_nms_topn, ratio, scale) = proposalParams; - selectedType = std::string("ref_any_") + netPrecision.name(); + selectedType = std::string("ref_any_") + netPrecision.to_string(); init_input_shapes(inputShapes); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params; for (auto&& shape : {inputDynamicShapes[0], inputDynamicShapes[1], inputDynamicShapes[2]}) { - params.push_back(std::make_shared(ngPrc, shape)); + params.push_back(std::make_shared(netPrecision, shape)); } - ngraph::op::ProposalAttrs attrs; + ov::op::v0::Proposal::Attributes attrs; attrs.base_size = base_size; attrs.pre_nms_topn = pre_nms_topn; attrs.post_nms_topn = post_nms_topn; @@ -161,14 +156,14 @@ protected: attrs.framework = framework; attrs.infer_probs = true; - auto proposal = std::make_shared(params[0], params[1], params[2], attrs); + auto proposal = std::make_shared(params[0], params[1], params[2], attrs); - ngraph::ResultVector results{ - std::make_shared(proposal->output(0)), - std::make_shared(proposal->output(1)) + ov::ResultVector results{ + std::make_shared(proposal->output(0)), + std::make_shared(proposal->output(1)) }; - function = std::make_shared(results, params, "Proposal"); + function = std::make_shared(results, params, "Proposal"); } void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); @@ -199,8 +194,8 @@ TEST_P(ProposalLayerCPUTest, CompareWithRefs) { namespace { -const std::vector netPrecision = { - Precision::FP32 +const std::vector netPrecision = { + ov::element::f32 }; std::vector> staticInputShapesCase1 = { @@ -338,4 +333,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_Proposal_Dynamic_Test_Case2, ProposalLayerCPUTest ProposalLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov
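// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch above): the recurring pattern in this
// migration is replacing std::map<std::string, std::string> plugin configs with
// ov::AnyMap plus typed ov:: properties, then reading values back through
// ov::Any::as<T>(). Minimal, self-contained illustration assuming only the
// public OpenVINO 2.0 API; the property names used here
// (ov::hint::inference_precision, ov::intel_cpu::sparse_weights_decompression_rate)
// are the ones that appear in the hunks above.
// ---------------------------------------------------------------------------
#include <iostream>

#include "openvino/core/any.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/intel_cpu/properties.hpp"
#include "openvino/runtime/properties.hpp"

int main() {
    // Old style (removed by this patch):
    //   std::map<std::string, std::string> config{{KEY_ENFORCE_BF16, YES}};
    // New style: typed properties collected in an ov::AnyMap.
    ov::AnyMap additionalConfig{ov::hint::inference_precision(ov::element::bf16),
                                ov::intel_cpu::sparse_weights_decompression_rate(0.8f)};

    // Checking a single entry requires an explicit conversion via ov::Any::as<T>(),
    // which is why the tests now call it->second.as<ov::element::Type>().
    auto it = additionalConfig.find(ov::hint::inference_precision.name());
    if (it != additionalConfig.end() && it->second.as<ov::element::Type>() == ov::element::bf16) {
        std::cout << "bf16 inference precision requested\n";
    }

    // Printing the whole config mirrors getTestCaseName(): any stored ov::Any
    // can be rendered as text with as<std::string>().
    for (const auto& item : additionalConfig) {
        std::cout << item.first << "=" << item.second.as<std::string>() << "\n";
    }
    return 0;
}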