Xuejun/snippet remove inference engine (#21293)

* [CPU Plugin][Snippet] remove InferenceEngine, replace with ov

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>

* [CPU Plugin][Snippet] remove ngraph namespace

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>

---------

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>
Xuejun Zhai authored 2023-11-27 16:00:58 +08:00; committed by GitHub
parent 815980f290
commit 4fdbb2d4e8
6 changed files with 19 additions and 22 deletions
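
All six files follow the same mechanical substitution. A minimal sketch of the migrated spellings, written against the public ov API (the include paths and the helper function below are illustrative assumptions, not part of this diff):

#include <memory>
#include "openvino/core/shape.hpp"
#include "openvino/op/abs.hpp"
#include "openvino/op/convolution.hpp"
#include "openvino/op/swish.hpp"
#include "openvino/runtime/system_conf.hpp"  // assumed location of the ov::with_cpu_x86_* checks

void migration_sketch() {
    ov::Shape shape{1, 3, 16, 16};                            // was ngraph::Shape{1, 3, 16, 16}
    auto abs = std::make_shared<ov::op::v0::Abs>();           // was opset1::Abs
    auto swish = std::make_shared<ov::op::v4::Swish>();       // was ngraph::op::v4::Swish
    auto conv = std::make_shared<ov::op::v1::Convolution>();  // was opset1::Convolution
    bool vnni = ov::with_cpu_x86_avx512_core_vnni();          // was InferenceEngine::with_cpu_x86_avx512_core_vnni()
    (void)shape; (void)abs; (void)swish; (void)conv; (void)vnni;
}

The <ngraph/ngraph.hpp> include and "using namespace ngraph;" lines are simply dropped; the ov:: spellings no longer need them.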

View File

@@ -4,7 +4,6 @@
#include "snippets/enforce_precision.hpp"
#include <gtest/gtest.h>
-#include <ngraph/ngraph.hpp>
namespace ov {
namespace test {

View File

@@ -8,7 +8,6 @@
#include "snippets/fake_quantize_decomposition_test.hpp"
using namespace LayerTestsDefinitions;
-using namespace ngraph;
namespace {
@@ -16,7 +15,7 @@ namespace decompositionInSubgraph {
const std::vector<TestValues> testValuesDecompositionScalars = {
{
ov::element::f32,
-ngraph::Shape{1, 3, 16, 16},
+ov::Shape{1, 3, 16, 16},
ov::element::f32,
1.f,
{{}, {}, {}, {}},
@@ -25,23 +24,23 @@ const std::vector<TestValues> testValuesDecompositionScalars = {
const std::vector<TestValues> testValuesDecompositionPerChannel = {
{
ov::element::f32,
-ngraph::Shape{1, 3, 16, 16},
+ov::Shape{1, 3, 16, 16},
ov::element::f32,
1.f,
{{1, 3, 1, 1}, {1, 3, 1, 1}, {1, 3, 1, 1}, {1, 3, 1, 1}},
},
{
ov::element::f32,
-ngraph::Shape{1, 3, 16, 16},
+ov::Shape{1, 3, 16, 16},
ov::element::f32,
1.f,
{{1, 3, 1, 1}, {1, 3, 1, 1}, {}, {}},
},
};
-std::vector<std::pair<std::shared_ptr<Node>, std::pair<std::string, std::string> >> operations = {
-{std::make_shared<opset1::Abs>(), {"Subgraph", "Abs,fakeQuantize"}},
-{std::make_shared<ngraph::op::v4::Swish>(), {"Subgraph", "Swish,fakeQuantize"}},
+std::vector<std::pair<std::shared_ptr<ov::Node>, std::pair<std::string, std::string> >> operations = {
+{std::make_shared<ov::op::v0::Abs>(), {"Subgraph", "Abs,fakeQuantize"}},
+{std::make_shared<ov::op::v4::Swish>(), {"Subgraph", "Swish,fakeQuantize"}},
};
INSTANTIATE_TEST_SUITE_P(
@@ -83,36 +82,36 @@ namespace legacyFuse {
const std::vector<TestValues> testValuesLegacyFuse = {
{
ov::element::f32,
-ngraph::Shape{1, 3, 16, 16},
+ov::Shape{1, 3, 16, 16},
ov::element::f32,
1.f,
{{1, 3, 1, 1}, {1, 3, 1, 1}, {}, {}}
},
{
ov::element::f32,
-ngraph::Shape{1, 3, 16, 16},
+ov::Shape{1, 3, 16, 16},
ov::element::f32,
1.f,
{{}, {}, {1, 3, 1, 1}, {1, 3, 1, 1}}
},
{
ov::element::f32,
-ngraph::Shape{1, 3, 16, 16},
+ov::Shape{1, 3, 16, 16},
ov::element::f32,
1.f,
{{}, {}, {}, {}}
},
{
ov::element::f32,
-ngraph::Shape{1, 3, 16, 16},
+ov::Shape{1, 3, 16, 16},
ov::element::f32,
1.f,
{{1, 3, 1, 1}, {1, 3, 1, 1}, {1, 3, 1, 1}, {1, 3, 1, 1}}
},
};
-std::vector<std::pair<std::shared_ptr<Node>, std::pair<std::string, std::string>>> operations = {
-{std::make_shared<opset1::Convolution>(), {"Convolution", "Convolution,fakeQuantize"}},
+std::vector<std::pair<std::shared_ptr<ov::Node>, std::pair<std::string, std::string>>> operations = {
+{std::make_shared<ov::op::v1::Convolution>(), {"Convolution", "Convolution,fakeQuantize"}},
};
INSTANTIATE_TEST_SUITE_P(

View File

@@ -25,7 +25,7 @@ std::vector<std::vector<ov::PartialShape>> input_shapes{
static inline std::vector<std::vector<element::Type>> quantized_precisions() {
std::vector<std::vector<element::Type>> prc = {};
// In Snippets MatMul INT8 is supported only on VNNI/AMX platforms
-if (InferenceEngine::with_cpu_x86_avx512_core_vnni() || InferenceEngine::with_cpu_x86_avx512_core_amx_int8()) {
+if (ov::with_cpu_x86_avx512_core_vnni() || ov::with_cpu_x86_avx512_core_amx_int8()) {
prc.emplace_back(std::vector<element::Type>{element::i8, element::i8});
prc.emplace_back(std::vector<element::Type>{element::u8, element::i8});
}
@@ -40,7 +40,7 @@ static inline std::vector<std::vector<element::Type>> precisions(bool only_fp32
auto quant = quantized_precisions();
std::copy(quant.begin(), quant.end(), std::back_inserter(prc));
// In Snippets MatMul BF16 is supported only on bf16/AMX platforms
-if (InferenceEngine::with_cpu_x86_bfloat16() || InferenceEngine::with_cpu_x86_avx512_core_amx_bf16()) {
+if (ov::with_cpu_x86_bfloat16() || ov::with_cpu_x86_avx512_core_amx_bf16()) {
prc.emplace_back(std::vector<element::Type>{element::bf16, element::bf16});
}
}

View File

@@ -30,7 +30,7 @@ const std::vector<std::vector<ov::PartialShape>> inputShapes_3D = {
};
static inline bool is_bf16_supported() {
-return InferenceEngine::with_cpu_x86_bfloat16() || InferenceEngine::with_cpu_x86_avx512_core_amx_bf16();
+return ov::with_cpu_x86_bfloat16() || ov::with_cpu_x86_avx512_core_amx_bf16();
}
static inline std::vector<std::vector<element::Type>> precision_f32(size_t count) {

View File

@@ -4,7 +4,6 @@
#include "snippets/precision_propagation_convertion.hpp"
#include <gtest/gtest.h>
-#include <ngraph/ngraph.hpp>
namespace ov {
namespace test {

View File

@@ -18,12 +18,12 @@ static inline std::vector<std::vector<element::Type>> precisions(bool only_fp32
};
if (!only_fp32) {
// In Snippets MatMul INT8 is supported only on VNNI/AMX platforms
-if (InferenceEngine::with_cpu_x86_avx512_core_vnni() || InferenceEngine::with_cpu_x86_avx512_core_amx_int8()) {
+if (ov::with_cpu_x86_avx512_core_vnni() || ov::with_cpu_x86_avx512_core_amx_int8()) {
prc.emplace_back(std::vector<element::Type>{element::i8, element::i8});
prc.emplace_back(std::vector<element::Type>{element::u8, element::i8});
}
// In Snippets MatMul BF16 is supported only on bf16/AMX platforms
-if (InferenceEngine::with_cpu_x86_bfloat16() || InferenceEngine::with_cpu_x86_avx512_core_amx_bf16()) {
+if (ov::with_cpu_x86_bfloat16() || ov::with_cpu_x86_avx512_core_amx_bf16()) {
prc.emplace_back(std::vector<element::Type>{element::bf16, element::bf16});
}
}
@@ -115,12 +115,12 @@ static inline std::vector<std::vector<element::Type>> precisions(bool only_fp32
};
if (!only_fp32) {
// In Snippets MatMul INT8 is supported only on VNNI/AMX platforms
-if (InferenceEngine::with_cpu_x86_avx512_core_vnni() || InferenceEngine::with_cpu_x86_avx512_core_amx_int8()) {
+if (ov::with_cpu_x86_avx512_core_vnni() || ov::with_cpu_x86_avx512_core_amx_int8()) {
prc.emplace_back(std::vector<element::Type>{element::i8, element::i8});
prc.emplace_back(std::vector<element::Type>{element::u8, element::i8});
}
// In Snippets MatMul BF16 is supported only on bf16/AMX platforms
-if (InferenceEngine::with_cpu_x86_bfloat16() || InferenceEngine::with_cpu_x86_avx512_core_amx_bf16()) {
+if (ov::with_cpu_x86_bfloat16() || ov::with_cpu_x86_avx512_core_amx_bf16()) {
prc.emplace_back(std::vector<element::Type>{element::bf16, element::bf16});
}
}
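
The platform-gated precision lists above repeat one pattern across files. A consolidated sketch of that pattern, with a hypothetical helper name and assumed include paths (for illustration only, not part of this diff):

#include <vector>
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/system_conf.hpp"  // assumed location of the ov::with_cpu_x86_* checks

static std::vector<std::vector<ov::element::Type>> snippets_matmul_precisions(bool only_fp32) {
    std::vector<std::vector<ov::element::Type>> prc = {{ov::element::f32, ov::element::f32}};
    if (!only_fp32) {
        // In Snippets, MatMul INT8 is supported only on VNNI/AMX platforms.
        if (ov::with_cpu_x86_avx512_core_vnni() || ov::with_cpu_x86_avx512_core_amx_int8()) {
            prc.push_back({ov::element::i8, ov::element::i8});
            prc.push_back({ov::element::u8, ov::element::i8});
        }
        // In Snippets, MatMul BF16 is supported only on bf16/AMX platforms.
        if (ov::with_cpu_x86_bfloat16() || ov::with_cpu_x86_avx512_core_amx_bf16()) {
            prc.push_back({ov::element::bf16, ov::element::bf16});
        }
    }
    return prc;
}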