[GNA] Add 3.6 and 4.0 targets (#15735)

Szymon Irzabek 2023-03-07 17:14:59 +01:00 committed by GitHub
parent cf7dfff35f
commit e79636bfbb
22 changed files with 138 additions and 93 deletions

View File

@@ -221,19 +221,23 @@ int main(int argc, char* argv[]) {
             }
         }
         gnaPluginConfig[ov::inference_precision.name()] = (FLAGS_qb == 8) ? ov::element::i8 : ov::element::i16;
+        const std::unordered_map<std::string, ov::intel_gna::HWGeneration> StringHWGenerationMap{
+            {"GNA_TARGET_1_0", ov::intel_gna::HWGeneration::GNA_1_0},
+            {"GNA_TARGET_2_0", ov::intel_gna::HWGeneration::GNA_2_0},
+            {"GNA_TARGET_3_0", ov::intel_gna::HWGeneration::GNA_3_0},
+            {"GNA_TARGET_3_1", ov::intel_gna::HWGeneration::GNA_3_1},
+            {"GNA_TARGET_3_5", ov::intel_gna::HWGeneration::GNA_3_5},
+            {"GNA_TARGET_3_5_E", ov::intel_gna::HWGeneration::GNA_3_5_E},
+            {"GNA_TARGET_3_6", ov::intel_gna::HWGeneration::GNA_3_6},
+            {"GNA_TARGET_4_0", ov::intel_gna::HWGeneration::GNA_4_0}};
         auto parse_target = [&](const std::string& target) -> ov::intel_gna::HWGeneration {
             auto hw_target = ov::intel_gna::HWGeneration::UNDEFINED;
-            if (target == "GNA_TARGET_2_0") {
-                hw_target = ov::intel_gna::HWGeneration::GNA_2_0;
-            } else if (target == "GNA_TARGET_3_0") {
-                hw_target = ov::intel_gna::HWGeneration::GNA_3_0;
-            } else if (target == "GNA_TARGET_3_5") {
-                hw_target = ov::intel_gna::HWGeneration::GNA_3_5;
+            const auto key_iter = StringHWGenerationMap.find(target);
+            if (key_iter != StringHWGenerationMap.end()) {
+                hw_target = key_iter->second;
             } else if (!target.empty()) {
                 slog::warn << "Unsupported target: " << target << slog::endl;
             }
             return hw_target;
         };

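For context, elsewhere in speech_sample (not shown in this hunk) the result of parse_target is written into the same gnaPluginConfig map that already receives the inference precision above. A minimal illustrative sketch, assuming the public execution_target and compile_target properties from openvino/runtime/intel_gna/properties.hpp; the literal target strings are examples only:

// Illustrative sketch, not part of the diff.
ov::AnyMap gnaPluginConfig;
gnaPluginConfig[ov::intel_gna::execution_target.name()] = parse_target("GNA_TARGET_3_6");
gnaPluginConfig[ov::intel_gna::compile_target.name()] = parse_target("GNA_TARGET_4_0");
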
View File

@@ -53,13 +53,6 @@ DECLARE_GNA_CONFIG_KEY(PRECISION);
  */
 DECLARE_GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE);
-/**
- * @brief Generation of GNA embedded device to export the model.
- * @deprecated Key is deprecated and will be removed in a future release.
- */
-INFERENCE_ENGINE_DEPRECATED("The config key will be removed")
-DECLARE_GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE_GENERATION);
 /**
  * @brief GNA proc_type setting that should be one of GNA_AUTO, GNA_HW, GNA_HW_WITH_SW_FBACK, GNA_SW_EXACT
  */

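Note that the deprecated FIRMWARE_MODEL_IMAGE_GENERATION key above is removed together with its Config::dumpXNNGeneration backing field (see the gna_plugin_config changes later in this diff). The replacement is presumably the compile-target selection; a hedged migration sketch, not something this commit documents explicitly:

// Before (removed by this commit):
//   config[GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE_GENERATION)] = <generation string>;
// After (assumed migration path): pick the embedded generation via the compile target.
ov::Core core;
core.set_property("GNA", ov::intel_gna::compile_target(ov::intel_gna::HWGeneration::GNA_3_6));
core.set_property("GNA", ov::intel_gna::firmware_model_image_path("model.bin"));  // path is a placeholder
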
View File

@@ -117,9 +117,14 @@ inline std::istream& operator>>(std::istream& is, ExecutionMode& execution_mode)
  */
 enum class HWGeneration {
     UNDEFINED = 0,  //!< GNA HW generation is undefined
+    GNA_1_0 = 4,    //!< GNA HW generation 1.0
     GNA_2_0 = 1,    //!< GNA HW generation 2.0
     GNA_3_0 = 2,    //!< GNA HW generation 3.0
+    GNA_3_1 = 5,    //!< GNA HW generation 3.1
     GNA_3_5 = 3,    //!< GNA HW generation 3.5
+    GNA_3_5_E = 6,  //!< GNA HW generation 3.5 embedded
+    GNA_3_6 = 7,    //!< GNA HW generation 3.6
+    GNA_4_0 = 8,    //!< GNA HW generation 4.0
 };
 /** @cond INTERNAL */
@@ -127,12 +132,22 @@ inline std::ostream& operator<<(std::ostream& os, const HWGeneration& hw_generat
     switch (hw_generation) {
     case HWGeneration::UNDEFINED:
         return os << "UNDEFINED";
+    case HWGeneration::GNA_1_0:
+        return os << "GNA_1_0";
     case HWGeneration::GNA_2_0:
         return os << "GNA_2_0";
     case HWGeneration::GNA_3_0:
         return os << "GNA_3_0";
+    case HWGeneration::GNA_3_1:
+        return os << "GNA_3_1";
     case HWGeneration::GNA_3_5:
         return os << "GNA_3_5";
+    case HWGeneration::GNA_3_5_E:
+        return os << "GNA_3_5_E";
+    case HWGeneration::GNA_3_6:
+        return os << "GNA_3_6";
+    case HWGeneration::GNA_4_0:
+        return os << "GNA_4_0";
     default:
         throw ov::Exception{"Unsupported HW generation!"};
     }
@@ -143,12 +158,22 @@ inline std::istream& operator>>(std::istream& is, HWGeneration& hw_generation) {
     is >> str;
     if (str == "UNDEFINED") {
         hw_generation = HWGeneration::UNDEFINED;
+    } else if (str == "GNA_1_0") {
+        hw_generation = HWGeneration::GNA_1_0;
     } else if (str == "GNA_2_0") {
         hw_generation = HWGeneration::GNA_2_0;
     } else if (str == "GNA_3_0") {
         hw_generation = HWGeneration::GNA_3_0;
+    } else if (str == "GNA_3_1") {
+        hw_generation = HWGeneration::GNA_3_1;
     } else if (str == "GNA_3_5") {
         hw_generation = HWGeneration::GNA_3_5;
+    } else if (str == "GNA_3_5_E") {
+        hw_generation = HWGeneration::GNA_3_5_E;
+    } else if (str == "GNA_3_6") {
+        hw_generation = HWGeneration::GNA_3_6;
+    } else if (str == "GNA_4_0") {
+        hw_generation = HWGeneration::GNA_4_0;
     } else {
         throw ov::Exception{"Unsupported HW generation: " + str};
     }

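With the enum extended, the new generations are immediately usable through the existing execution_target and compile_target properties. A short usage sketch against the public API (illustrative; the device name "GNA" and the property helpers come from the OpenVINO runtime headers, not from this diff):

#include <openvino/openvino.hpp>
#include <openvino/runtime/intel_gna/properties.hpp>

int main() {
    ov::Core core;
    // Execute on available GNA 3.0 hardware, but compile/export for the new 3.6 target.
    core.set_property("GNA", ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::GNA_3_0));
    core.set_property("GNA", ov::intel_gna::compile_target(ov::intel_gna::HWGeneration::GNA_3_6));
    return 0;
}
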
View File

@@ -34,6 +34,7 @@ inline std::ostream& operator<<(std::ostream& os, const std::set<ov::element::Ty
 namespace ov {
 namespace intel_gna {
+using namespace target;
 namespace limitations {
 const std::set<ov::element::Type> SupportedElementTypes::supported_parameter_types = {ov::element::u8,
@@ -71,7 +72,7 @@ bool SupportedElementTypes::is_constant_type_supported(ov::element::Type elem_ty
 }
 bool is_conv_supported(const std::shared_ptr<ngraph::op::ConvolutionIE>& conv_ie,
-                       const ov::intel_gna::common::DeviceVersion& effective_compile_target,
+                       const DeviceVersion& effective_compile_target,
                        const InferenceEngine::Precision gna_precision,
                        bool is_exception_allowed) {
     OPENVINO_ASSERT(conv_ie, "ConvolutionIE node is empty!");
@@ -129,7 +130,7 @@ bool is_conv_supported(const std::shared_ptr<ngraph::op::ConvolutionIE>& conv_ie
 }
 bool is_pooling_supported(const std::shared_ptr<ngraph::opset7::MaxPool> max_pool,
-                          const ov::intel_gna::common::DeviceVersion& effective_compile_target,
+                          const DeviceVersion& effective_compile_target,
                           bool is_exception_allowed) {
     OPENVINO_ASSERT(max_pool, "MaxPool node is empty!");
     auto kernels = max_pool->get_kernel();
@@ -172,7 +173,7 @@ bool is_split_supported(const std::shared_ptr<ov::Node>& node, bool is_exception
 }
 bool is_op_supported(const std::shared_ptr<ov::Node>& node,
-                     const ov::intel_gna::common::DeviceVersion& effective_compile_target,
+                     const DeviceVersion& effective_compile_target,
                      const InferenceEngine::Precision gna_precision,
                      bool is_exception_allowed) {
     if (ov::op::util::is_parameter(node)) {
@@ -210,7 +211,7 @@ bool is_op_supported(const std::shared_ptr<ov::Node>& node,
 }
 void check_all_ops_supported(const std::shared_ptr<ov::Model>& model,
-                             const ov::intel_gna::common::DeviceVersion& effective_compile_target,
+                             const DeviceVersion& effective_compile_target,
                              const InferenceEngine::Precision gna_precision) {
     std::stringstream error;
     // Walk through the transformed model
@@ -627,13 +628,15 @@ bool Validator_35::ShouldUseOnlyConv2DGnaIface() const {
     return true;
 }
-std::unique_ptr<AbstractValidator> AbstractValidator::Create(const common::DeviceVersion& target) {
+std::unique_ptr<AbstractValidator> AbstractValidator::Create(const DeviceVersion& target) {
     switch (target) {
-    case common::DeviceVersion::GNA3_0:
-    case common::DeviceVersion::GNAEmbedded3_1:
+    case DeviceVersion::GNA3_0:
+    case DeviceVersion::GNA3_1:
         return tools::make_unique<Validator_30>();
-    case common::DeviceVersion::GNA3_5:
-    case common::DeviceVersion::GNAEmbedded3_5:
+    case DeviceVersion::GNA3_5:
+    case DeviceVersion::GNAEmbedded3_5:
+    case DeviceVersion::GNA3_6:
+    case DeviceVersion::GNA4_0:
         return tools::make_unique<Validator_35>();
     default:
         return nullptr;
@@ -658,9 +661,9 @@ bool AbstractValidator::ValidationSuccesful(const bool throwOnError,
     return error.empty();
 }
-bool UseOnly16BitConvolutionWeights(const common::DeviceVersion& compile_target) {
-    return (compile_target == common::DeviceVersion::GNA2_0 || compile_target == common::DeviceVersion::GNA3_0) ||
-           compile_target == common::DeviceVersion::GNAEmbedded3_1;
+bool UseOnly16BitConvolutionWeights(const DeviceVersion& compile_target) {
+    return compile_target == DeviceVersion::GNA1_0 || compile_target == DeviceVersion::GNA2_0 ||
+           compile_target == DeviceVersion::GNA3_0 || compile_target == DeviceVersion::GNA3_1;
 }
 }  // namespace cnn2d

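The Create() switch above carries the behavioural part of this file's change: GNA 3.1 now shares the 3.0 validator and GNA 3.6/4.0 reuse the 3.5 validator. A small sketch of what callers can expect (names taken from this file; the SoftwareEmulation case falls through to default and yields no validator):

using namespace ov::intel_gna::target;
using ov::intel_gna::limitations::cnn2d::AbstractValidator;

// GNA 3.6 and 4.0 get the same 2D-convolution limits as GNA 3.5.
auto v36 = AbstractValidator::Create(DeviceVersion::GNA3_6);
auto v40 = AbstractValidator::Create(DeviceVersion::GNA4_0);
// Targets without hardware validation return nullptr.
auto none = AbstractValidator::Create(DeviceVersion::SoftwareEmulation);
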
View File

@@ -95,7 +95,7 @@ private:
  * @return true if supported
  */
 bool is_conv_supported(const std::shared_ptr<ngraph::op::ConvolutionIE>& conv_ie,
-                       const ov::intel_gna::common::DeviceVersion& effective_compile_target,
+                       const target::DeviceVersion& effective_compile_target,
                        const InferenceEngine::Precision gna_precision,
                        bool is_exception_allowed = false);
 /**
@@ -107,7 +107,7 @@ bool is_conv_supported(const std::shared_ptr<ngraph::op::ConvolutionIE>& conv_ie
  * @return true if precision is found in supported
  */
 bool is_pooling_supported(const std::shared_ptr<ngraph::opset7::MaxPool> max_pool,
-                          const ov::intel_gna::common::DeviceVersion& effective_compile_target,
+                          const target::DeviceVersion& effective_compile_target,
                           bool is_exception_allowed = false);
 /**
@@ -136,7 +136,7 @@ bool is_split_supported(const std::shared_ptr<ov::Node>& node, bool is_exception
  * @return true if supported
  */
 bool is_op_supported(const std::shared_ptr<ov::Node>& node,
-                     const ov::intel_gna::common::DeviceVersion& effective_compile_target,
+                     const target::DeviceVersion& effective_compile_target,
                      const InferenceEngine::Precision gna_precision,
                      bool is_exception_allowed = false);
@@ -147,7 +147,7 @@ bool is_op_supported(const std::shared_ptr<ov::Node>& node,
  * @param gna_precision GNA inference precision
  */
 void check_all_ops_supported(const std::shared_ptr<ov::Model>& model,
-                             const ov::intel_gna::common::DeviceVersion& effective_compile_target,
+                             const target::DeviceVersion& effective_compile_target,
                              const InferenceEngine::Precision gna_precision);
 namespace cnn2d {
@@ -279,7 +279,7 @@ public:
                           OvGnaType inPrecision,
                           bool exception = true) const = 0;
-    static std::unique_ptr<AbstractValidator> Create(const common::DeviceVersion& target);
+    static std::unique_ptr<AbstractValidator> Create(const target::DeviceVersion& target);
 };
 class Validator_30 : public AbstractValidator {
@@ -431,7 +431,7 @@ public:
                      bool exception = true) const override;
 };
-bool UseOnly16BitConvolutionWeights(const common::DeviceVersion& compile_target);
+bool UseOnly16BitConvolutionWeights(const target::DeviceVersion& compile_target);
 }  // namespace cnn2d

View File

@@ -12,7 +12,8 @@
 namespace ov {
 namespace intel_gna {
-namespace common {
+using namespace common;
+namespace target {
 static constexpr const char* kGnaTargetUnspecified = "";
 static constexpr const char* kGnaTargetSoftwareEmulation = "GNA_SW_EMULATION";
@@ -22,35 +23,48 @@ static constexpr const char* kGnaTarget3_0 = "GNA_TARGET_3_0";
 static constexpr const char* kGnaTarget3_1 = "GNA_TARGET_3_1";
 static constexpr const char* kGnaTarget3_5 = "GNA_TARGET_3_5";
 static constexpr const char* kGnaTarget3_5_e = "GNA_TARGET_3_5_E";
+static constexpr const char* kGnaTarget3_6 = "GNA_TARGET_3_6";
+static constexpr const char* kGnaTarget4_0 = "GNA_TARGET_4_0";
 static const std::unordered_map<HWGeneration, DeviceVersion> HWGenerationDeviceMap{
+    {HWGeneration::GNA_1_0, DeviceVersion::GNA1_0},
     {HWGeneration::GNA_2_0, DeviceVersion::GNA2_0},
     {HWGeneration::GNA_3_0, DeviceVersion::GNA3_0},
+    {HWGeneration::GNA_3_1, DeviceVersion::GNA3_1},
     {HWGeneration::GNA_3_5, DeviceVersion::GNA3_5},
+    {HWGeneration::GNA_3_5_E, DeviceVersion::GNAEmbedded3_5},
+    {HWGeneration::GNA_3_6, DeviceVersion::GNA3_6},
+    {HWGeneration::GNA_4_0, DeviceVersion::GNA4_0},
     {HWGeneration::UNDEFINED, DeviceVersion::NotSet}};
 static const std::unordered_map<Gna2DeviceVersion, DeviceVersion> GnaDeviceMap{
-    {Gna2DeviceVersionEmbedded1_0, DeviceVersion::GNAEmbedded1_0},
+    {Gna2DeviceVersionEmbedded1_0, DeviceVersion::GNA1_0},
     {Gna2DeviceVersion2_0, DeviceVersion::GNA2_0},
     {Gna2DeviceVersion3_0, DeviceVersion::GNA3_0},
-    {Gna2DeviceVersionEmbedded3_1, DeviceVersion::GNAEmbedded3_1},
+    {Gna2DeviceVersionEmbedded3_1, DeviceVersion::GNA3_1},
     {Gna2DeviceVersion3_5, DeviceVersion::GNA3_5},
     {Gna2DeviceVersionEmbedded3_5, DeviceVersion::GNAEmbedded3_5},
+    {Gna2DeviceVersionEmbedded3_5, DeviceVersion::GNA3_6},
+    {Gna2DeviceVersionEmbedded3_5, DeviceVersion::GNA4_0},
     {Gna2DeviceVersionSoftwareEmulation, DeviceVersion::SoftwareEmulation}};
 static const std::unordered_map<std::string, DeviceVersion> StringDeviceMap{
-    {kGnaTarget1_0, DeviceVersion::GNAEmbedded1_0},
+    {kGnaTarget1_0, DeviceVersion::GNA1_0},
     {kGnaTarget2_0, DeviceVersion::GNA2_0},
     {kGnaTarget3_0, DeviceVersion::GNA3_0},
-    {kGnaTarget3_1, DeviceVersion::GNAEmbedded3_1},
+    {kGnaTarget3_1, DeviceVersion::GNA3_1},
     {kGnaTarget3_5, DeviceVersion::GNA3_5},
     {kGnaTarget3_5_e, DeviceVersion::GNAEmbedded3_5},
+    {kGnaTarget3_6, DeviceVersion::GNA3_6},
+    {kGnaTarget4_0, DeviceVersion::GNA4_0},
     {kGnaTargetSoftwareEmulation, DeviceVersion::SoftwareEmulation},
     {kGnaTargetUnspecified, DeviceVersion::NotSet}};
-static const std::vector<DeviceVersion> EmbeddedDevices{DeviceVersion::GNAEmbedded1_0,
-                                                        DeviceVersion::GNAEmbedded3_1,
-                                                        DeviceVersion::GNAEmbedded3_5};
+static const std::vector<DeviceVersion> EmbeddedDevices{DeviceVersion::GNA1_0,
+                                                        DeviceVersion::GNA3_1,
+                                                        DeviceVersion::GNAEmbedded3_5,
+                                                        DeviceVersion::GNA3_6,
+                                                        DeviceVersion::GNA4_0};
 DeviceVersion HwGenerationToDevice(const HWGeneration& target) {
     return GetValueForKey<HWGeneration, DeviceVersion>(target, HWGenerationDeviceMap);
@@ -124,6 +138,6 @@ DeviceVersion Target::get_effective_compile_target() const {
     }
 }
-}  // namespace common
+}  // namespace target
 }  // namespace intel_gna
 }  // namespace ov

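Taken together, the maps above give the new targets a consistent round trip between the public HWGeneration values, the GNA_TARGET_* strings, and the internal DeviceVersion enum. A sketch of the conversions implied by this hunk (helper declarations are in gna_target.hpp; the asserts state expectations derived from the map entries, they are not tests from this commit):

#include <cassert>
using namespace ov::intel_gna::target;

void conversion_sketch() {
    assert(StringToDevice("GNA_TARGET_3_6") == DeviceVersion::GNA3_6);
    assert(DeviceToString(DeviceVersion::GNA4_0) == "GNA_TARGET_4_0");
    assert(HwGenerationToDevice(ov::intel_gna::HWGeneration::GNA_4_0) == DeviceVersion::GNA4_0);
}
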
View File

@@ -11,17 +11,19 @@
 namespace ov {
 namespace intel_gna {
-namespace common {
+namespace target {
 enum class DeviceVersion {
     NotSet = -1,
     SoftwareEmulation = 0,
-    GNAEmbedded1_0 = 0x10e,
+    GNA1_0 = 0x10e,
     GNA2_0 = 0x20,
     GNA3_0 = 0x30,
-    GNAEmbedded3_1 = 0x31e,
+    GNA3_1 = 0x31e,
     GNA3_5 = 0x35,
     GNAEmbedded3_5 = 0x35e,
+    GNA3_6 = 0x36e,
+    GNA4_0 = 0x40e,
     Default = GNA3_0
 };
@@ -50,6 +52,6 @@ DeviceVersion StringToDevice(const std::string& target);
 std::string DeviceToString(const DeviceVersion& target);
 bool IsEmbeddedDevice(const DeviceVersion& target);
-}  // namespace common
+}  // namespace target
 }  // namespace intel_gna
 }  // namespace ov

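A side note on the renamed and added enumerators: every value ending in the 0xE nibble (0x10e, 0x31e, 0x35e, 0x36e, 0x40e) also appears in the EmbeddedDevices list in gna_target.cpp earlier in this diff, so IsEmbeddedDevice() is expected to treat the new 3.6 and 4.0 targets as embedded. A quick sketch of that expectation (an inference from the diff, not a documented guarantee):

using namespace ov::intel_gna::target;

bool new_targets_look_embedded() {
    return IsEmbeddedDevice(DeviceVersion::GNA3_6) && IsEmbeddedDevice(DeviceVersion::GNA4_0) &&
           !IsEmbeddedDevice(DeviceVersion::GNA3_5);  // plain 3.5 remains a non-embedded target
}
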
View File

@@ -20,7 +20,6 @@
 namespace ov {
 namespace intel_gna {
-using namespace common;
 #define Gna2TlvTypeOVInputScaleFactor GNA2_TLV_IMPL_CHAR_TO_TYPE("OVIS")
 #define Gna2TlvTypeOVOutputScaleFactor GNA2_TLV_IMPL_CHAR_TO_TYPE("OVOS")
@@ -31,7 +30,7 @@ static_assert(std::numeric_limits<float>::is_iec559, "Float is not IEC 559 compa
 typedef std::array<char, sizeof(Gna2TlvRecord) + sizeof(float)> TlvFloatRecord;
 static TlvFloatRecord GetFloatInTLV(Gna2TlvType type, float value) {
-    TlvFloatRecord r;
+    TlvFloatRecord r{};
     reinterpret_cast<Gna2TlvRecord*>(r.data())->type = type;
     reinterpret_cast<Gna2TlvRecord*>(r.data())->length = sizeof(float);
     *reinterpret_cast<float*>(r.data() + sizeof(Gna2TlvRecord)) = value;
@@ -95,11 +94,11 @@ static void WriteStringToTlv(std::ostream& outStream, const Gna2TlvType tlvType,
 void ExportTlvModel(uint32_t modelId,
                     uint32_t deviceIndex,
                     std::ostream& outStream,
-                    const DeviceVersion& compile_target,
+                    const target::DeviceVersion& compile_target,
                     const std::vector<GnaEndpoint>& allInputs,
                     const std::vector<GnaEndpoint>& allOutputs,
                     const GnaAllocations& allAllocations) {
-    if (compile_target == DeviceVersion::GNAEmbedded1_0) {
+    if (compile_target == target::DeviceVersion::GNA1_0) {
         THROW_GNA_EXCEPTION << "Unsupported compile target for TLV export: GNA Embedded 1.0" << std::endl;
     }
@@ -195,7 +194,7 @@ void ExportTlvModel(uint32_t modelId,
                         Gna2TlvTypeOVOutputScaleFactor,
                         allAllocations.Get(Gna2MemoryTagOutput));
     WriteStringToTlv(outStream, Gna2TlvTypeOVString, metadata);
-    const auto& ovVersionString = ov::intel_gna::get_openvino_version_string();
+    const auto& ovVersionString = common::get_openvino_version_string();
     WriteStringToTlv(outStream, Gna2TlvTypeOVVersion, ovVersionString);
 }
@@ -223,7 +222,7 @@ void* ExportSueLegacyUsingGnaApi2(uint32_t modelId, uint32_t deviceIndex, Gna2Mo
     status = Gna2ModelExportConfigSetSource(exportConfig, deviceIndex, modelId);
     GNADeviceHelper::checkGna2Status(status, "Gna2ModelExportConfigSetSource");
-    status = Gna2ModelExportConfigSetTarget(exportConfig, DeviceToGna(DeviceVersion::GNAEmbedded1_0));
+    status = Gna2ModelExportConfigSetTarget(exportConfig, DeviceToGna(target::DeviceVersion::GNA1_0));
     GNADeviceHelper::checkGna2Status(status, "Gna2ModelExportConfigSetTarget");
     void* bufferSueCreekHeader = nullptr;

View File

@@ -73,7 +73,7 @@ void* ExportSueLegacyUsingGnaApi2(uint32_t modelId, uint32_t deviceIndex, Gna2Mo
 void ExportTlvModel(uint32_t modelId,
                     uint32_t deviceIndex,
                     std::ostream& outStream,
-                    const common::DeviceVersion& compileTarget,
+                    const target::DeviceVersion& compileTarget,
                     const std::vector<GnaEndpoint>& inputs,
                     const std::vector<GnaEndpoint>& outputs,
                     const GnaAllocations& allAllocation);

View File

@@ -30,7 +30,7 @@
 namespace ov {
 namespace intel_gna {
-using namespace common;
+using namespace target;
 std::mutex GNADeviceHelper::acrossPluginsSync{};
@@ -570,10 +570,15 @@ uint32_t GNADeviceHelper::retrieveMaxLayersCount() {
     using namespace limitations;
     switch (target->get_effective_execution_target()) {
+    case DeviceVersion::GNA1_0:
     case DeviceVersion::GNA2_0:
         return kMaxLayersCountGNA2_0;
     case DeviceVersion::GNA3_0:
+    case DeviceVersion::GNA3_1:
     case DeviceVersion::GNA3_5:
+    case DeviceVersion::GNAEmbedded3_5:
+    case DeviceVersion::GNA3_6:
+    case DeviceVersion::GNA4_0:
     default:
         return kMaxLayersCountGNA3_X;
     }

View File

@@ -41,7 +41,7 @@ class GNADeviceHelper : public GNADevice {
         static std::string gnaLibraryVersion{", GNA library version: " + GNADeviceHelper::GetGnaLibraryVersion()};
         return gnaLibraryVersion;
     }
-    std::shared_ptr<common::Target> target;
+    std::shared_ptr<target::Target> target;
     std::string modeOfOperation = "default";
     GnaAllocations allAllocations;
     uint32_t nGnaDeviceIndex = 0;
@@ -69,7 +69,7 @@ class GNADeviceHelper : public GNADevice {
     static constexpr const char* kDumpDelimiter = ".";
 public:
-    explicit GNADeviceHelper(std::shared_ptr<common::Target> target = std::make_shared<common::Target>(),
+    explicit GNADeviceHelper(std::shared_ptr<target::Target> target = std::make_shared<target::Target>(),
                              bool isPerformanceMeasuring = false,
                              bool deviceEmbedded = false);
@@ -92,8 +92,8 @@ public:
     void releaseModel(const uint32_t model_id);
     static uint32_t getNumberOfGnaDevices();
     static uint32_t selectGnaDevice();
-    static bool is_hw_target(const common::DeviceVersion device_version) {
-        return common::DeviceVersion::SoftwareEmulation != device_version;
+    static bool is_hw_target(const target::DeviceVersion device_version) {
+        return target::DeviceVersion::SoftwareEmulation != device_version;
     }
     bool is_hw_detected() const {
         return is_hw_target(target->get_detected_device_version());
@@ -170,10 +170,10 @@ private:
     static void enforceLegacyCnns(Gna2Model& gnaModel);
     static void enforceLegacyCnnsWhenNeeded(Gna2Model& gnaModel);
-    static bool is_up_to_20_hw(const common::DeviceVersion device_version) {
-        return device_version <= common::DeviceVersion::GNA2_0 && is_hw_target(device_version);
+    static bool is_up_to_20_hw(const target::DeviceVersion device_version) {
+        return device_version <= target::DeviceVersion::GNA2_0 && is_hw_target(device_version);
     }
-    void createVirtualDevice(const common::DeviceVersion& devVersion);
+    void createVirtualDevice(const target::DeviceVersion& devVersion);
     void updateGnaDeviceVersion();
     void initGnaPerfCounters() {

View File

@@ -221,7 +221,7 @@ void GNAGraphCompiler::fillSplitConnections(InferenceEngine::CNNLayerPtr layer)
     split_connection.emplace(id, layerInfoItem);
 }
-void GNAGraphCompiler::SetValidatorTarget(const DeviceVersion& target) {
+void GNAGraphCompiler::SetValidatorTarget(const target::DeviceVersion& target) {
     auto temp = limitations::cnn2d::AbstractValidator::Create(target);
     cnn2dValidator.reset(temp.release());
 }

View File

@@ -93,7 +93,7 @@ public:
                                 const uint32_t strideH,
                                 const uint32_t strideW) const;
-    void SetValidatorTarget(const common::DeviceVersion& target);
+    void SetValidatorTarget(const target::DeviceVersion& target);
     /**
      * Connects either memory output, or generic output to a layer

View File

@@ -661,7 +661,7 @@ void GNAPlugin::LoadNetwork(const CNNNetwork& _network) {
     const auto effectiveCompileTarget = config.target->get_effective_compile_target();
     graphCompiler.SetValidatorTarget(effectiveCompileTarget);
-    auto transformer = TransformationsPipeline(config, effectiveCompileTarget);
+    auto transformer = TransformationsPipeline(config);
     if (_network.getFunction()) {
         CNNNetwork clonedNetwork = InferenceEngine::cloneNetwork(_network);
@@ -1039,7 +1039,7 @@ void GNAPlugin::DumpXNNToFile() const {
     const auto& inputsDesc = inputs_ptr_->Get();
     const auto& outputsDesc = outputs_.Get();
-    if (config.target->get_effective_compile_target() == common::DeviceVersion::GNAEmbedded1_0) {
+    if (config.target->get_effective_compile_target() == target::DeviceVersion::GNA1_0) {
         auto dump = gnadevice->dumpXnn(modelId);
         dump.header.RwRegionSize = gnamem->getRegionBytes(REGION_SCRATCH);
         dump.header.InputScalingFactor = inputsDesc.begin()->scale_factor;
@@ -1631,7 +1631,7 @@ InferenceEngine::QueryNetworkResult GNAPlugin::QueryNetwork(
     auto supported = GetSupportedNodes(
         model,
         [&](std::shared_ptr<ov::Model>& model) {
-            TransformationsPipeline(qn_config, effectiveCompileTarget).apply(model);
+            TransformationsPipeline(qn_config).apply(model);
         },
         [&](const std::shared_ptr<ngraph::Node>& op) {
             return limitations::is_op_supported(op, effectiveCompileTarget, qn_config.gnaPrecision);

View File

@@ -24,7 +24,7 @@ using namespace InferenceEngine::details;
 namespace ov {
 namespace intel_gna {
-using namespace common;
+using namespace target;
 const uint8_t Config::max_num_requests;
@@ -52,7 +52,7 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& config) {
         auto value = item.second;
         auto check_scale_factor = [&](float scale_factor) {
-            if (AreFpEq(scale_factor, 0.0f) || std::isinf(scale_factor)) {
+            if (common::AreFpEq(scale_factor, 0.0f) || std::isinf(scale_factor)) {
                 THROW_GNA_EXCEPTION << "input scale factor of 0.0f or +-inf not supported";
             }
         };
@@ -127,8 +127,6 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& config) {
         } else if (key == GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE) || key == ov::intel_gna::firmware_model_image_path) {
             embedded_export_path = value;
             OPENVINO_SUPPRESS_DEPRECATED_START
-        } else if (key == GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE_GENERATION)) {
-            dumpXNNGeneration = value;
         } else if (key == GNA_CONFIG_KEY(DEVICE_MODE) || key == ov::intel_gna::execution_mode) {
             auto procType = supported_values.find(value);
             if (procType == supported_values.end()) {
@@ -290,9 +288,6 @@ void Config::AdjustKeyMapValues() {
         }
     }
     keyConfigMap[ov::intel_gna::firmware_model_image_path.name()] = embedded_export_path;
-    IE_SUPPRESS_DEPRECATED_START
-    keyConfigMap[GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE_GENERATION)] = dumpXNNGeneration;
-    IE_SUPPRESS_DEPRECATED_END
     std::string device_mode;
     if (gnaFlags.sw_fp32) {
         device_mode = ov::util::to_string(ov::intel_gna::ExecutionMode::SW_FP32);

View File

@@ -36,8 +36,7 @@ struct Config {
         inference_precision = r.inference_precision;
         gnaPrecision = r.gnaPrecision;
         embedded_export_path = r.embedded_export_path;
-        dumpXNNGeneration = r.dumpXNNGeneration;
-        target = std::make_shared<common::Target>();
+        target = std::make_shared<target::Target>();
         if (r.target) {
             *target = *r.target;
         }
@@ -63,9 +62,8 @@ struct Config {
     InferenceEngine::Precision gnaPrecision = InferenceEngine::Precision::I16;
     std::string embedded_export_path;
-    std::string dumpXNNGeneration;
-    std::shared_ptr<common::Target> target = std::make_shared<common::Target>();
+    std::shared_ptr<target::Target> target = std::make_shared<target::Target>();
     Gna2AccelerationMode pluginGna2AccMode = Gna2AccelerationModeSoftware;
     bool swExactMode = true;

View File

@@ -15,11 +15,9 @@ namespace intel_gna {
 class TransformationsPipeline {
 public:
-    explicit TransformationsPipeline(const Config& config,
-                                     const ov::intel_gna::common::DeviceVersion& effective_compile_target =
-                                         ov::intel_gna::common::DeviceVersion::NotSet)
-        : config(config),
-          effective_compile_target(effective_compile_target) {}
+    explicit TransformationsPipeline(const Config& config) : config(config) {
+        effective_compile_target = config.target->get_effective_compile_target();
+    }
     void apply(const std::shared_ptr<ov::Model>& model);
     IE_SUPPRESS_DEPRECATED_START
     void apply_legacy(const InferenceEngine::CNNNetwork& network, bool runBeforeCopy);
@@ -34,7 +32,7 @@ private:
     bool is_ngraph_passes_used = false;
     bool fake_quantized = false;
    int legacy_pass_index = 0;
-    ov::intel_gna::common::DeviceVersion effective_compile_target;
+    ov::intel_gna::target::DeviceVersion effective_compile_target;
 };
 }  // namespace intel_gna

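The constructor change above is what lets the call sites in gna_plugin.cpp earlier in this diff drop their second argument: the pipeline now reads the effective compile target from config.target itself. Usage after this commit, as seen at those call sites:

// LoadNetwork / QueryNetwork style usage after this change.
auto transformer = TransformationsPipeline(config);
transformer.apply(model);
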
View File

@@ -19,7 +19,7 @@
 namespace ov {
 namespace intel_gna {
-using namespace common;
+using namespace target;
 namespace pass {
 using namespace helper;

View File

@@ -35,7 +35,7 @@ namespace pass {
 class Decompose2DConv : public ngraph::pass::MatcherPass {
 public:
     OPENVINO_RTTI("Decompose2DConv", "0");
-    Decompose2DConv(const common::DeviceVersion& compile_target, const InferenceEngine::Precision& gnaPrecision);
+    Decompose2DConv(const target::DeviceVersion& compile_target, const InferenceEngine::Precision& gnaPrecision);
 };
 /**
@@ -56,7 +56,7 @@ public:
 class Decompose2DConvTransposedWithBias : public ngraph::pass::MatcherPass {
 public:
     OPENVINO_RTTI("Decompose2DConvTransposedWithBias", "0");
-    Decompose2DConvTransposedWithBias(const common::DeviceVersion& compile_target,
+    Decompose2DConvTransposedWithBias(const target::DeviceVersion& compile_target,
                                       const InferenceEngine::Precision& gnaPrecision);
 };
@@ -80,7 +80,7 @@ public:
 class Decompose2DConvTransposedWithBiasAF : public ngraph::pass::MatcherPass {
 public:
     OPENVINO_RTTI("Decompose2DConvTransposedWithBiasAF", "0");
-    Decompose2DConvTransposedWithBiasAF(const common::DeviceVersion& compile_target,
+    Decompose2DConvTransposedWithBiasAF(const target::DeviceVersion& compile_target,
                                         const InferenceEngine::Precision& gnaPrecision);
 };

View File

@@ -11,7 +11,7 @@
 #include "common/gna_target.hpp"
 using namespace ov::intel_gna::limitations;
-using namespace ov::intel_gna::common;
+using namespace ov::intel_gna::target;
 struct GNACnn2DValidatorTestParam {
     DeviceVersion target;

View File

@@ -12,14 +12,13 @@
 using namespace InferenceEngine;
 using namespace ov::intel_gna;
-using namespace ov::intel_gna::common;
+using namespace ov::intel_gna::target;
 IE_SUPPRESS_DEPRECATED_START
 const std::map<std::string, std::string> supportedConfigKeysWithDefaults = {
     {GNA_CONFIG_KEY(SCALE_FACTOR), "1.000000"},
     {GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_0"), "1.000000"},
     {GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE), ""},
-    {GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE_GENERATION), ""},
     {GNA_CONFIG_KEY(EXEC_TARGET), ""},
     {GNA_CONFIG_KEY(COMPILE_TARGET), ""},
     {GNA_CONFIG_KEY(DEVICE_MODE), GNAConfigParams::GNA_SW_EXACT},
@@ -182,15 +181,25 @@ TEST_F(GNAPluginConfigTest, GnaConfigSingleThreadTest) {
 IE_SUPPRESS_DEPRECATED_END
 TEST_F(GNAPluginConfigTest, GnaConfigGnaExecTargetTest) {
+    SetAndCompare(GNA_CONFIG_KEY(EXEC_TARGET), "GNA_TARGET_1_0");
+    EXPECT_EQ(config.target->get_user_set_execution_target(), DeviceVersion::GNA1_0);
     SetAndCompare(GNA_CONFIG_KEY(EXEC_TARGET), "GNA_TARGET_2_0");
     EXPECT_EQ(config.target->get_user_set_execution_target(), DeviceVersion::GNA2_0);
     SetAndCompare(GNA_CONFIG_KEY(EXEC_TARGET), "GNA_TARGET_3_0");
     EXPECT_EQ(config.target->get_user_set_execution_target(), DeviceVersion::GNA3_0);
+    SetAndCompare(GNA_CONFIG_KEY(EXEC_TARGET), "GNA_TARGET_3_1");
+    EXPECT_EQ(config.target->get_user_set_execution_target(), DeviceVersion::GNA3_1);
     ExpectThrow(GNA_CONFIG_KEY(EXEC_TARGET), "GNA_TARGET_3_7");
     SetAndCompare(GNA_CONFIG_KEY(EXEC_TARGET), "GNA_TARGET_3_5");
     EXPECT_EQ(config.target->get_user_set_execution_target(), DeviceVersion::GNA3_5);
+    SetAndCompare(GNA_CONFIG_KEY(EXEC_TARGET), "GNA_TARGET_3_5_E");
+    EXPECT_EQ(config.target->get_user_set_execution_target(), DeviceVersion::GNAEmbedded3_5);
+    SetAndCompare(GNA_CONFIG_KEY(EXEC_TARGET), "GNA_TARGET_3_6");
+    EXPECT_EQ(config.target->get_user_set_execution_target(), DeviceVersion::GNA3_6);
+    SetAndCompare(GNA_CONFIG_KEY(EXEC_TARGET), "GNA_TARGET_4_0");
+    EXPECT_EQ(config.target->get_user_set_execution_target(), DeviceVersion::GNA4_0);
     ExpectThrow(GNA_CONFIG_KEY(EXEC_TARGET), "0");
     ExpectThrow(GNA_CONFIG_KEY(EXEC_TARGET), "GNA_TARGET_1_5");

View File

@@ -884,17 +884,17 @@ void execute_test(modelType model,
     case modelType::TranspConvBcastAddMaxPoolTransp:
     case modelType::TranspConvBcastAddActTransp:
     case modelType::TranspConvBcastAddMaxPoolActTransp:
-        manager.register_pass<ov::intel_gna::pass::Decompose2DConv>(ov::intel_gna::common::DeviceVersion::Default,
+        manager.register_pass<ov::intel_gna::pass::Decompose2DConv>(ov::intel_gna::target::DeviceVersion::Default,
                                                                      gnaPrecision);
         break;
     case modelType::TranspConvTranspBcastAdd:
         manager.register_pass<ov::intel_gna::pass::Decompose2DConvTransposedWithBias>(
-            ov::intel_gna::common::DeviceVersion::Default,
+            ov::intel_gna::target::DeviceVersion::Default,
             gnaPrecision);
         break;
     case modelType::TranspConvTranspBcastAddAct:
         manager.register_pass<ov::intel_gna::pass::Decompose2DConvTransposedWithBiasAF>(
-            ov::intel_gna::common::DeviceVersion::Default,
+            ov::intel_gna::target::DeviceVersion::Default,
             gnaPrecision);
         break;
     }