diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/enforce_precision.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/enforce_precision.cpp
index 653f70d8f75..b473ff00dbd 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/enforce_precision.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/enforce_precision.cpp
@@ -4,7 +4,6 @@
 #include "snippets/enforce_precision.hpp"
 
 #include 
-#include 
 
 namespace ov {
 namespace test {
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/fake_quantize_decomposition_test.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/fake_quantize_decomposition_test.cpp
index d01d7a0785c..42ab5f34595 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/fake_quantize_decomposition_test.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/fake_quantize_decomposition_test.cpp
@@ -8,7 +8,6 @@
 #include "snippets/fake_quantize_decomposition_test.hpp"
 
 using namespace LayerTestsDefinitions;
-using namespace ngraph;
 
 namespace {
 
@@ -16,7 +15,7 @@ namespace decompositionInSubgraph {
 const std::vector testValuesDecompositionScalars = {
     {
         ov::element::f32,
-        ngraph::Shape{1, 3, 16, 16},
+        ov::Shape{1, 3, 16, 16},
         ov::element::f32,
         1.f,
         {{}, {}, {}, {}},
     },
 };
@@ -25,23 +24,23 @@
 const std::vector testValuesDecompositionPerChannel = {
     {
         ov::element::f32,
-        ngraph::Shape{1, 3, 16, 16},
+        ov::Shape{1, 3, 16, 16},
         ov::element::f32,
         1.f,
         {{1, 3, 1, 1}, {1, 3, 1, 1}, {1, 3, 1, 1}, {1, 3, 1, 1}},
     },
     {
         ov::element::f32,
-        ngraph::Shape{1, 3, 16, 16},
+        ov::Shape{1, 3, 16, 16},
         ov::element::f32,
         1.f,
         {{1, 3, 1, 1}, {1, 3, 1, 1}, {}, {}},
     },
 };
 
-std::vector, std::pair >> operations = {
-    {std::make_shared(), {"Subgraph", "Abs,fakeQuantize"}},
-    {std::make_shared(), {"Subgraph", "Swish,fakeQuantize"}},
+std::vector, std::pair >> operations = {
+    {std::make_shared(), {"Subgraph", "Abs,fakeQuantize"}},
+    {std::make_shared(), {"Subgraph", "Swish,fakeQuantize"}},
 };
 
 INSTANTIATE_TEST_SUITE_P(
@@ -83,36 +82,36 @@ namespace legacyFuse {
 const std::vector testValuesLegacyFuse = {
     {
         ov::element::f32,
-        ngraph::Shape{1, 3, 16, 16},
+        ov::Shape{1, 3, 16, 16},
         ov::element::f32,
         1.f,
         {{1, 3, 1, 1}, {1, 3, 1, 1}, {}, {}}
     },
     {
         ov::element::f32,
-        ngraph::Shape{1, 3, 16, 16},
+        ov::Shape{1, 3, 16, 16},
         ov::element::f32,
         1.f,
         {{}, {}, {1, 3, 1, 1}, {1, 3, 1, 1}}
     },
     {
         ov::element::f32,
-        ngraph::Shape{1, 3, 16, 16},
+        ov::Shape{1, 3, 16, 16},
         ov::element::f32,
         1.f,
         {{}, {}, {}, {}}
     },
     {
         ov::element::f32,
-        ngraph::Shape{1, 3, 16, 16},
+        ov::Shape{1, 3, 16, 16},
         ov::element::f32,
         1.f,
         {{1, 3, 1, 1}, {1, 3, 1, 1}, {1, 3, 1, 1}, {1, 3, 1, 1}}
     },
 };
 
-std::vector, std::pair>> operations = {
-    {std::make_shared(), {"Convolution", "Convolution,fakeQuantize"}},
+std::vector, std::pair>> operations = {
+    {std::make_shared(), {"Convolution", "Convolution,fakeQuantize"}},
 };
 
 INSTANTIATE_TEST_SUITE_P(
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp
index 630cf4374ac..78c1c8dccf0 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp
@@ -25,7 +25,7 @@ std::vector> input_shapes{
 static inline std::vector> quantized_precisions() {
     std::vector> prc = {};
     // In Snippets MatMul INT8 is supported only on VNNI/AMX platforms
-    if (InferenceEngine::with_cpu_x86_avx512_core_vnni() || InferenceEngine::with_cpu_x86_avx512_core_amx_int8()) {
+    if (ov::with_cpu_x86_avx512_core_vnni() || ov::with_cpu_x86_avx512_core_amx_int8()) {
         prc.emplace_back(std::vector{element::i8, element::i8});
         prc.emplace_back(std::vector{element::u8, element::i8});
     }
@@ -40,7 +40,7 @@ static inline std::vector> precisions(bool only_fp32
     auto quant = quantized_precisions();
     std::copy(quant.begin(), quant.end(), std::back_inserter(prc));
     // In Snippets MatMul BF16 is supported only on bf16/AMX platforms
-    if (InferenceEngine::with_cpu_x86_bfloat16() || InferenceEngine::with_cpu_x86_avx512_core_amx_bf16()) {
+    if (ov::with_cpu_x86_bfloat16() || ov::with_cpu_x86_avx512_core_amx_bf16()) {
         prc.emplace_back(std::vector{element::bf16, element::bf16});
     }
 }
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha.cpp
index 0aae38f4f48..608e2958313 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha.cpp
@@ -30,7 +30,7 @@ const std::vector> inputShapes_3D = {
 };
 
 static inline bool is_bf16_supported() {
-    return InferenceEngine::with_cpu_x86_bfloat16() || InferenceEngine::with_cpu_x86_avx512_core_amx_bf16();
+    return ov::with_cpu_x86_bfloat16() || ov::with_cpu_x86_avx512_core_amx_bf16();
 }
 
 static inline std::vector> precision_f32(size_t count) {
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/precision_propagation_convertion.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/precision_propagation_convertion.cpp
index 2bd4c7ddbdf..f52cbcc38cd 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/precision_propagation_convertion.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/precision_propagation_convertion.cpp
@@ -4,7 +4,6 @@
 #include "snippets/precision_propagation_convertion.hpp"
 
 #include 
-#include 
 
 namespace ov {
 namespace test {
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/transpose_matmul.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/transpose_matmul.cpp
index 580f3a9b667..437c8c5b97e 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/transpose_matmul.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/transpose_matmul.cpp
@@ -18,12 +18,12 @@ static inline std::vector> precisions(bool only_fp32
     };
     if (!only_fp32) {
         // In Snippets MatMul INT8 is supported only on VNNI/AMX platforms
-        if (InferenceEngine::with_cpu_x86_avx512_core_vnni() || InferenceEngine::with_cpu_x86_avx512_core_amx_int8()) {
+        if (ov::with_cpu_x86_avx512_core_vnni() || ov::with_cpu_x86_avx512_core_amx_int8()) {
             prc.emplace_back(std::vector{element::i8, element::i8});
             prc.emplace_back(std::vector{element::u8, element::i8});
         }
         // In Snippets MatMul BF16 is supported only on bf16/AMX platforms
-        if (InferenceEngine::with_cpu_x86_bfloat16() || InferenceEngine::with_cpu_x86_avx512_core_amx_bf16()) {
+        if (ov::with_cpu_x86_bfloat16() || ov::with_cpu_x86_avx512_core_amx_bf16()) {
             prc.emplace_back(std::vector{element::bf16, element::bf16});
         }
     }
@@ -115,12 +115,12 @@ static inline std::vector> precisions(bool only_fp32
     };
     if (!only_fp32) {
         // In Snippets MatMul INT8 is supported only on VNNI/AMX platforms
-        if (InferenceEngine::with_cpu_x86_avx512_core_vnni() || InferenceEngine::with_cpu_x86_avx512_core_amx_int8()) {
+        if (ov::with_cpu_x86_avx512_core_vnni() || ov::with_cpu_x86_avx512_core_amx_int8()) {
             prc.emplace_back(std::vector{element::i8, element::i8});
             prc.emplace_back(std::vector{element::u8, element::i8});
         }
         // In Snippets MatMul BF16 is supported only on bf16/AMX platforms
-        if (InferenceEngine::with_cpu_x86_bfloat16() || InferenceEngine::with_cpu_x86_avx512_core_amx_bf16()) {
+        if (ov::with_cpu_x86_bfloat16() || ov::with_cpu_x86_avx512_core_amx_bf16()) {
             prc.emplace_back(std::vector{element::bf16, element::bf16});
         }
     }
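
The pattern being migrated above is a CPU-capability gate around the precision lists the Snippets tests instantiate: INT8 pairs are added only on VNNI/AMX-INT8 machines and BF16 pairs only on BF16/AMX-BF16 machines, now queried through the ov:: helpers instead of the legacy InferenceEngine:: ones. Below is a minimal standalone sketch of that pattern, assuming the helpers are declared in "openvino/runtime/system_conf.hpp" (header path is an assumption) and using a hypothetical supported_precisions() helper that is not part of the patch.

// Illustrative sketch only; not taken from the patch above.
// Assumes the ov:: CPU-capability helpers from "openvino/runtime/system_conf.hpp".
#include <vector>

#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/system_conf.hpp"

// Returns the (input0, input1) precision pairs a Snippets MatMul-style test
// could instantiate, mirroring the platform gating used in the edited files:
// fp32 always, INT8 only on VNNI/AMX-INT8, BF16 only on BF16/AMX-BF16 hosts.
static std::vector<std::vector<ov::element::Type>> supported_precisions(bool only_fp32) {
    std::vector<std::vector<ov::element::Type>> prc = {
        {ov::element::f32, ov::element::f32},
    };
    if (only_fp32)
        return prc;
    if (ov::with_cpu_x86_avx512_core_vnni() || ov::with_cpu_x86_avx512_core_amx_int8()) {
        prc.push_back({ov::element::i8, ov::element::i8});
        prc.push_back({ov::element::u8, ov::element::i8});
    }
    if (ov::with_cpu_x86_bfloat16() || ov::with_cpu_x86_avx512_core_amx_bf16()) {
        prc.push_back({ov::element::bf16, ov::element::bf16});
    }
    return prc;
}

The tests touched above follow the same shape: build the fp32 baseline unconditionally and append quantized or bf16 pairs only when the host CPU reports the matching ISA, so suites instantiated from the returned list stay runnable on machines without VNNI/AMX support.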