diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp index 7cae04259cb..ddf14ef59a7 100644 --- a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -499,6 +499,9 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std DEBUG_LOG(PrintableModel(*nGraphFunc, "org_")); + Transformations transformations(nGraphFunc, enableLPT, inferencePrecision, isLegacyAPI(), snippetsMode, engConfig); + transformations.UpToLpt(); + if (!is_cpu_map_available()) { ApplyPerformanceHints(config, nGraphFunc); } @@ -510,8 +513,8 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std conf.readProperties(config, modelType); CalculateStreams(conf, nGraphFunc); - Transformations transformations(nGraphFunc, enableLPT, inferencePrecision, isLegacyAPI(), snippetsMode, conf); - transformations.UpToCpuSpecificOpSet(); + transformations.PostLpt(); + transformations.Snippets(); // need to check that all outputs have static shapes // checking that all inputs have static shapes is performed in the common part @@ -783,7 +786,9 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::ma auto supported = GetSupportedNodes(model, [&](std::shared_ptr<ov::Model>& model) { Transformations transformation(model, enableLPT, conf.inferencePrecision, isLegacyAPI(), snippetsMode, engConfig); - transformation.UpToCpuSpecificOpSet(); + transformation.UpToLpt(); + transformation.PostLpt(); + transformation.Snippets(); transformation.CpuSpecificOpSet(); }, [&](const std::shared_ptr<ngraph::Node>& op) { diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp index 4037b54d1e6..ae0e82a1c37 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp @@ -157,14 +157,11 @@ bool
Transformations::fuse_type_to_convert(const std::shared_ptr<ngraph::Node>& return false; } -void Transformations::UpToCpuSpecificOpSet() { +void Transformations::UpToLpt() { const bool useLpt = enableLpt && ngraph::pass::low_precision::LowPrecision::isFunctionQuantized(model) && CPU_DEBUG_CAP_IS_TRANSFORMATION_ENABLED(config.debugCaps, Lpt); - const bool useSnippets = snippetsMode != Config::SnippetsMode::Disable && - CPU_DEBUG_CAP_IS_TRANSFORMATION_ENABLED(config.debugCaps, Snippets); - auto defaultPrecisions = useLpt ? ngraph::pass::low_precision::precision_set::int8_support : std::vector<ov::element::Type>{}; bool hasINT16orINT32Levels = false; @@ -183,11 +180,6 @@ void Transformations::UpToCpuSpecificOpSet() { if (useLpt) Lpt(hasINT16orINT32Levels, defaultPrecisions); - - PostLpt(); - - if (useSnippets) - Snippets(); } void Transformations::CpuSpecificOpSet(void) { @@ -731,8 +723,12 @@ void Transformations::PostSnippets(void) { } void Transformations::Snippets(void) { - CPU_DEBUG_CAP_TRANSFORMATION_SCOPE(this, Snippets); + const bool useSnippets = snippetsMode != Config::SnippetsMode::Disable && + CPU_DEBUG_CAP_IS_TRANSFORMATION_ENABLED(config.debugCaps, Snippets); + if (!useSnippets) + return; + CPU_DEBUG_CAP_TRANSFORMATION_SCOPE(this, Snippets); MainSnippets(); PostSnippets(); } diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.h b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.h index 57ad2e95e12..dc7c734abce 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.h +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.h @@ -39,8 +39,10 @@ public: CPU_DEBUG_CAPS_MAYBE_UNUSED(this->config); } - void UpToCpuSpecificOpSet(); + void UpToLpt(); void CpuSpecificOpSet(); + void PostLpt(); + void Snippets(void); private: std::shared_ptr<ov::Model> model; @@ -54,14 +56,10 @@ private: void Lpt(const bool hasINT16orINT32Levels, const std::vector<ov::element::Type>& defaultPrecisions); - void PostLpt(); - void MainSnippets(void); void 
PostSnippets(void); - void Snippets(void); - static bool fuse_type_to_convert(const std::shared_ptr<ngraph::Node>& node, const precisions_map& precisions); };