diff --git a/src/common/legacy/include/legacy/ie_layers.h b/src/common/legacy/include/legacy/ie_layers.h
index b4fa1ef61a3..60b4bc86164 100644
--- a/src/common/legacy/include/legacy/ie_layers.h
+++ b/src/common/legacy/include/legacy/ie_layers.h
@@ -486,7 +486,7 @@ public:
     /**
      * @brief A convolution paddings end array [X, Y, Z, ...]
      */
-    PropertyVector<unsigned int> _pads_end;
+    DEFINE_PROP(_pads_end);
     /**
      * @brief A convolution strides array [X, Y, Z, ...]
      */
diff --git a/src/plugins/intel_gna/src/backend/gna_limitations.cpp b/src/plugins/intel_gna/src/backend/gna_limitations.cpp
index 39b760dce65..ffe9de1bab7 100644
--- a/src/plugins/intel_gna/src/backend/gna_limitations.cpp
+++ b/src/plugins/intel_gna/src/backend/gna_limitations.cpp
@@ -21,6 +21,31 @@ namespace GNAPluginNS {
 namespace GNALimitations {
 namespace Cnn2D {
 
+bool IsEqualToLimit::isValid(const uint32_t val) const {
+    return val == compared_value;
+}
+
+std::string IsEqualToLimit::GetErrorOrEmpty(const uint32_t val) const {
+    std::ostringstream out;
+    if (!isValid(val)) {
+        out << "Unsupported " << what << ", actual value: " << val << ", but should be equal to " << compared_value
+            << "\n";
+    }
+    return out.str();
+}
+
+bool IsLessThanLimit::isValid(const uint32_t val) const {
+    return val < compared_value;
+}
+
+std::string IsLessThanLimit::GetErrorOrEmpty(const uint32_t val) const {
+    std::ostringstream out;
+    if (!isValid(val)) {
+        out << "Unsupported " << what << ", actual value: " << val << ", but should be less than " << compared_value << "\n";
+    }
+    return out.str();
+}
+
 bool RangeLimit::isValid(const uint32_t val) const {
     return val >= min && val <= max;
 }
@@ -178,7 +203,21 @@ bool Validator_30::ValidatePooling2D(const std::string& name,
     return ValidationSuccesful(throwOnError, error, name, "Pooling2D");
 }
 
-bool Validator_30::IsPaddingSupported() const {
+bool Validator_30::ValidateInputPadding(const std::string& name,
+                                        const uint32_t pad_h_begin, const uint32_t pad_h_end,
+                                        const uint32_t pad_w_begin, const uint32_t pad_w_end,
+                                        const uint32_t,
+                                        const uint32_t,
+                                        const bool throwOnError) const {
+    const IsEqualToLimit padding_zero{0, "convolution input padding size (must equal zero)"};
+    auto error = padding_zero.GetErrorOrEmpty(pad_h_begin);
+    error += padding_zero.GetErrorOrEmpty(pad_h_end);
+    error += padding_zero.GetErrorOrEmpty(pad_w_begin);
+    error += padding_zero.GetErrorOrEmpty(pad_w_end);
+    return ValidationSuccesful(throwOnError, error, name, "Convolution2D");
+}
+
+bool Validator_30::ShouldUseOnlyConv2DGnaIface() const {
     return false;
 }
@@ -262,7 +301,28 @@ bool Validator_35::ValidatePooling2D(const std::string& name, const uint32_t win
     return ValidationSuccesful(throwOnError, error, name, "Pooling2D");
 }
 
-bool Validator_35::IsPaddingSupported() const {
+bool Validator_35::ValidateInputPadding(const std::string& name,
+                                        const uint32_t pad_h_begin, const uint32_t pad_h_end,
+                                        const uint32_t pad_w_begin, const uint32_t pad_w_end,
+                                        const uint32_t kernel_h,
+                                        const uint32_t kernel_w,
+                                        const bool throwOnError) const {
+    const IsEqualToLimit padding_h_symmetric{pad_h_end, "convolution input padding along height axis (must be symmetric)"};
+    const IsEqualToLimit padding_w_symmetric{pad_w_end, "convolution input padding along width axis (must be symmetric)"};
+
+    const IsLessThanLimit padding_h_limit{kernel_h, "convolution input padding height (must be less than kernel height)"};
+    const IsLessThanLimit padding_w_limit{kernel_w, "convolution input padding width (must be less than kernel width)"};
+
+    auto error = padding_h_symmetric.GetErrorOrEmpty(pad_h_begin);
+    error += padding_w_symmetric.GetErrorOrEmpty(pad_w_begin);
+
+    error += padding_h_limit.GetErrorOrEmpty(pad_h_begin);
+    error += padding_w_limit.GetErrorOrEmpty(pad_w_begin);
+
+    return ValidationSuccesful(throwOnError, error, name, "Convolution2D");
+}
+
+bool Validator_35::ShouldUseOnlyConv2DGnaIface() const {
     return true;
 }
diff --git a/src/plugins/intel_gna/src/backend/gna_limitations.hpp b/src/plugins/intel_gna/src/backend/gna_limitations.hpp
index 718d8dfe9b7..e1a8467be38 100644
--- a/src/plugins/intel_gna/src/backend/gna_limitations.hpp
+++ b/src/plugins/intel_gna/src/backend/gna_limitations.hpp
@@ -66,6 +66,20 @@ inline bool IsTransposeSupported(const std::vector<size_t>& shape) {
 }
 
 namespace Cnn2D {
+struct IsEqualToLimit {
+    uint32_t compared_value;
+    std::string what;
+    bool isValid(const uint32_t val) const;
+    std::string GetErrorOrEmpty(const uint32_t val) const;
+};
+
+struct IsLessThanLimit {
+    uint32_t compared_value;
+    std::string what;
+    bool isValid(const uint32_t val) const;
+    std::string GetErrorOrEmpty(const uint32_t val) const;
+};
+
 struct RangeLimit {
     uint32_t min;
     uint32_t max;
@@ -140,7 +154,14 @@ public:
                             const uint32_t strideH, const uint32_t strideW,
                             bool exception = true) const = 0;
 
-    virtual bool IsPaddingSupported() const = 0;
+    virtual bool ValidateInputPadding(const std::string& name,
+                                      const uint32_t pad_h_begin, const uint32_t pad_h_end,
+                                      const uint32_t pad_w_begin, const uint32_t pad_w_end,
+                                      const uint32_t kernel_h,
+                                      const uint32_t kernel_w,
+                                      const bool throwOnError = true) const = 0;
+
+    virtual bool ShouldUseOnlyConv2DGnaIface() const = 0;
 
     virtual bool ValidateCnn1D(const std::string& name,
                                const uint32_t inHeight, const uint32_t inWidth, const uint32_t inChannels, const uint32_t kH, const uint32_t kW, const uint32_t kN,
@@ -173,7 +194,14 @@ public:
                        const uint32_t strideH, const uint32_t strideW,
                        bool exception = true) const override;
 
-    bool IsPaddingSupported() const override;
+    bool ValidateInputPadding(const std::string& name,
+                              const uint32_t pad_h_begin, const uint32_t pad_h_end,
+                              const uint32_t pad_w_begin, const uint32_t pad_w_end,
+                              const uint32_t kernel_h,
+                              const uint32_t kernel_w,
+                              const bool throwOnError = true) const override;
+
+    bool ShouldUseOnlyConv2DGnaIface() const override;
 
     bool ValidateCnn1D(const std::string& name,
                        const uint32_t inHeight, const uint32_t inWidth, const uint32_t inChannels, const uint32_t kH, const uint32_t kW, const uint32_t kN,
@@ -233,7 +261,14 @@ public:
                        const uint32_t strideH, const uint32_t strideW,
                        bool exception = true) const override;
 
-    bool IsPaddingSupported() const override;
+    bool ValidateInputPadding(const std::string& name,
+                              const uint32_t pad_h_begin, const uint32_t pad_h_end,
+                              const uint32_t pad_w_begin, const uint32_t pad_w_end,
+                              const uint32_t kernel_h,
+                              const uint32_t kernel_w,
+                              const bool throwOnError = true) const override;
+
+    bool ShouldUseOnlyConv2DGnaIface() const override;
 
     bool ValidateCnn1D(const std::string& name,
                        const uint32_t inHeight, const uint32_t inWidth, const uint32_t inChannels, const uint32_t kH, const uint32_t kW, const uint32_t kN,
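
Aside for reviewers: the two new limit structs are plain aggregates, so a validator composes them by brace-initializing a reference value plus a description, then concatenating the GetErrorOrEmpty() results into one error string. A minimal self-contained sketch of that pattern (the struct body is trimmed from this patch; main() and the sample pad values are illustrative only):

    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>

    // Trimmed copy of the struct added in gna_limitations.hpp/.cpp, inlined for the sketch.
    struct IsEqualToLimit {
        uint32_t compared_value;
        std::string what;
        bool isValid(const uint32_t val) const { return val == compared_value; }
        std::string GetErrorOrEmpty(const uint32_t val) const {
            std::ostringstream out;
            if (!isValid(val)) {
                out << "Unsupported " << what << ", actual value: " << val
                    << ", but should be equal to " << compared_value << "\n";
            }
            return out.str();
        }
    };

    int main() {
        // Same composition as Validator_30::ValidateInputPadding(): every pad must be zero.
        const IsEqualToLimit padding_zero{0, "convolution input padding size (must equal zero)"};
        const uint32_t pad_h_begin = 1, pad_h_end = 1, pad_w_begin = 0, pad_w_end = 0;  // sample values
        auto error = padding_zero.GetErrorOrEmpty(pad_h_begin);
        error += padding_zero.GetErrorOrEmpty(pad_h_end);
        error += padding_zero.GetErrorOrEmpty(pad_w_begin);
        error += padding_zero.GetErrorOrEmpty(pad_w_end);
        std::cout << (error.empty() ? "padding OK\n" : error);  // prints two "Unsupported ..." lines
        return error.empty() ? 0 : 1;
    }
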
"GNA_TARGET_3_0"; static constexpr const char* kGnaTarget3_1 = "GNA_TARGET_3_1"; static constexpr const char* kGnaTarget3_5 = "GNA_TARGET_3_5"; +static constexpr const char* kGnaDefaultTarget = kGnaTarget3_0; } // namespace common } // namespace GNAPluginNS diff --git a/src/plugins/intel_gna/src/gna_device.cpp b/src/plugins/intel_gna/src/gna_device.cpp index cb89f4b941f..06bec0c4b44 100644 --- a/src/plugins/intel_gna/src/gna_device.cpp +++ b/src/plugins/intel_gna/src/gna_device.cpp @@ -254,7 +254,7 @@ Gna2DeviceVersion GNADeviceHelper::parseTarget(const std::string& target) { Gna2DeviceVersion GNADeviceHelper::getDefaultTarget() const { if (detectedGnaDevVersion == Gna2DeviceVersionSoftwareEmulation) - return Gna2DeviceVersion3_0; + return parseTarget(GNAPluginNS::common::kGnaDefaultTarget); return detectedGnaDevVersion; } diff --git a/src/plugins/intel_gna/src/gna_graph_compiler.cpp b/src/plugins/intel_gna/src/gna_graph_compiler.cpp index e8605f69716..b20b5083343 100644 --- a/src/plugins/intel_gna/src/gna_graph_compiler.cpp +++ b/src/plugins/intel_gna/src/gna_graph_compiler.cpp @@ -220,7 +220,7 @@ void GNAPluginNS::GNAGraphCompiler::SetValidatorTarget(const std::string& target } bool GNAPluginNS::GNAGraphCompiler::ShouldUseOnlyConv2DGnaIface() const { - return gna_config.gnaCompileTarget == common::kGnaTarget3_5; + return cnn2dValidator && cnn2dValidator->ShouldUseOnlyConv2DGnaIface(); } void GNAPluginNS::GNAGraphCompiler::ValidateCnn2D(const std::string& name, @@ -257,14 +257,6 @@ void GNAPluginNS::GNAGraphCompiler::ValidatePooling2D(const std::string& name, } } -bool GNAPluginNS::GNAGraphCompiler::IsCnn2DInputPaddingSupported(const std::string& name) const { - if (cnn2dValidator) { - return cnn2dValidator->IsPaddingSupported(); - } else { - THROW_GNA_EXCEPTION << "No Cnn2D input padding validator found for layer " << name; - } -} - void GNAGraphCompiler::DiagonalPrimitive(InferenceEngine::CNNLayerPtr layer) { AffinePrimitive(layer, true); } @@ -335,6 +327,7 @@ void GNAGraphCompiler::ConvolutionPrimitive(InferenceEngine::CNNLayerPtr layer) std::swap(out_height, out_width); std::swap(convolution._kernel_x, convolution._kernel_y); std::swap(convolution._padding_x, convolution._padding_y); + std::swap(convolution._pads_end_x, convolution._pads_end_y); std::swap(convolution._stride_x, convolution._stride_y); std::swap(convolution._dilation_x, convolution._dilation_y); } @@ -636,26 +629,19 @@ void GNAGraphCompiler::finalizeConvolution2DPrimitive(InferenceEngine::CNNLayerP // TODO add function // printConvolution2DLayer(convolution); - auto effectiveInputWidth = in_width; - auto effectiveInputHeight = in_height; - - if (!IsCnn2DInputPaddingSupported(convolution.name) && - (convolution._padding_x != 0 || convolution._padding_y != 0 || - convolution._pads_end.at(X_AXIS) != 0 || convolution._pads_end.at(Y_AXIS) != 0)) { - THROW_GNA_LAYER_EXCEPTION(layer) << "Convolution's input padding is not supported"; + if (!cnn2dValidator) { + THROW_GNA_EXCEPTION << "No Cnn2D validator found for layer " << convolution.name; } - if (convolution._padding_x != convolution._pads_end.at(X_AXIS)) { - THROW_GNA_LAYER_EXCEPTION(layer) << "Convolution's input padding is not symetric along X axis"; - } - if (convolution._padding_y != convolution._pads_end.at(Y_AXIS)) { - THROW_GNA_LAYER_EXCEPTION(layer) << "Convolution's input padding is not symetric along Y axis"; - } - convolution._padding_x = convolution._pads_end.at(X_AXIS); - convolution._padding_y = convolution._pads_end.at(Y_AXIS); + 
diff --git a/src/plugins/intel_gna/src/gna_graph_compiler.hpp b/src/plugins/intel_gna/src/gna_graph_compiler.hpp
index 9221585b649..b542171c37a 100644
--- a/src/plugins/intel_gna/src/gna_graph_compiler.hpp
+++ b/src/plugins/intel_gna/src/gna_graph_compiler.hpp
@@ -80,8 +80,6 @@ public:
                            const uint32_t windowH, const uint32_t windowW,
                            const uint32_t strideH, const uint32_t strideW) const;
 
-    bool IsCnn2DInputPaddingSupported(const std::string& name) const;
-
     void SetValidatorTarget(const std::string& target);
 
     /**
diff --git a/src/plugins/intel_gna/src/gna_plugin.cpp b/src/plugins/intel_gna/src/gna_plugin.cpp
index 94fce97c934..0cf12502d97 100644
--- a/src/plugins/intel_gna/src/gna_plugin.cpp
+++ b/src/plugins/intel_gna/src/gna_plugin.cpp
@@ -344,15 +344,6 @@ GNAPlugin::GNAPlugin() :
     InitGNADevice();
 }
 
-std::string GNAPluginNS::GNAPlugin::GetCompileTarget() const {
-    if (gnadevice) {
-        return gnadevice->GetCompileTarget();
-    } else if (!config.gnaCompileTarget.empty()) {
-        return config.gnaCompileTarget;
-    }
-    return common::kGnaTarget3_0;
-}
-
 GNAPlugin::GNAPlugin(const std::map<std::string, std::string>& configMap) :
     graphCompiler(config) {
     Init();
@@ -669,7 +660,8 @@ void GNAPlugin::LoadNetwork(const CNNNetwork& _network) {
     OV_ITT_SCOPED_TASK(itt::domains::GNAPlugin, "LoadNetwork");
     std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> convertedNetwork;
 
-    std::string effectiveGnaCompileTargetValue = effectiveGnaCompileTarget();
+    const auto effectiveGnaCompileTargetValue = effectiveGnaCompileTarget();
+    graphCompiler.SetValidatorTarget(effectiveGnaCompileTargetValue);
 
     bool isNgraphPassesUsed = false;
     bool fake_quantized = false;
@@ -941,8 +933,6 @@ void GNAPlugin::LoadNetwork(const CNNNetwork& _network) {
         gnaFlags->num_requests = 1;
     }
 
-    graphCompiler.SetValidatorTarget(GetCompileTarget());
-
     // keep inputs information and create input primitives
     inputs_data_map_ = newNet.getInputsInfo();
     if (inputs_data_map_.empty()) {
@@ -1130,9 +1120,12 @@ bool GNAPluginNS::GNAPlugin::isFP32ModeActive() const {
 std::string GNAPluginNS::GNAPlugin::effectiveGnaCompileTarget() const {
     if (gnadevice) {
         return gnadevice->GetCompileTarget();
+    } else if (!config.gnaCompileTarget.empty()) {
+        return config.gnaCompileTarget;
     }
-    return config.gnaCompileTarget;
+    return common::kGnaDefaultTarget;
 }
+
 std::shared_ptr<request::Worker> GNAPlugin::createWorkerForLoadNetwork(bool trivial, bool fp32Mode) {
     return createWorker(createModelWrapperForLoadNetwork(trivial), trivial, fp32Mode);
 }
diff --git a/src/plugins/intel_gna/src/gna_plugin.hpp b/src/plugins/intel_gna/src/gna_plugin.hpp
index 6a1259cdf90..4894a883e22 100644
--- a/src/plugins/intel_gna/src/gna_plugin.hpp
+++ b/src/plugins/intel_gna/src/gna_plugin.hpp
@@ -67,7 +67,6 @@ protected:
     std::vector<InferenceEngine::IVariableStateInternal::Ptr> memoryStates;
     bool trivialTopology = false;
 
-    std::string GetCompileTarget() const;
 
 public:
     explicit GNAPlugin(const std::map<std::string, std::string>& configMap);
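
The net effect of the gna_device.cpp and gna_plugin.cpp changes is a single fallback order for the compile target. A small sketch of that order (the free function and its parameter names are illustrative; the real logic sits in GNADeviceHelper::getDefaultTarget() and GNAPlugin::effectiveGnaCompileTarget()):

    #include <string>

    static constexpr const char* kGnaDefaultTarget = "GNA_TARGET_3_0";  // mirrors common::kGnaDefaultTarget

    // Illustrative condensation of GNAPlugin::effectiveGnaCompileTarget() after this patch:
    // 1) a target reported by an opened device wins, 2) then the user-configured
    // compile target, 3) otherwise the new single-point-of-truth default.
    std::string EffectiveCompileTarget(const std::string& device_target,
                                       const std::string& configured_target) {
        if (!device_target.empty())
            return device_target;      // gnadevice->GetCompileTarget()
        if (!configured_target.empty())
            return configured_target;  // config.gnaCompileTarget
        return kGnaDefaultTarget;
    }

Moving SetValidatorTarget() to the top of LoadNetwork() also appears intended to guarantee the validator exists before anything queries ShouldUseOnlyConv2DGnaIface().
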
diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/convolution_negative.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/convolution_negative.cpp
index 4771e213e53..9d2263f4cdf 100644
--- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/convolution_negative.cpp
+++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/convolution_negative.cpp
@@ -7,6 +7,7 @@
 #include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/test_constants.hpp"
 #include "../skip_tests_check.hpp"
+#include "openvino/runtime/intel_gna/properties.hpp"
 
 using namespace LayerTestsDefinitions;
 
@@ -58,6 +59,10 @@ const std::vector<std::vector<size_t>> strides2DInvalid = {
 };
 const std::vector<std::vector<ptrdiff_t>> padBegins2D = { {0, 0},
 };
+// Padding must be less than kernel size
+const std::vector<std::vector<ptrdiff_t>> padTooBigForKernels2D = {
+    {3, 3},
+};
 const std::vector<std::vector<ptrdiff_t>> padEnds2D = { {0, 0},
 };
 const std::vector<std::vector<ptrdiff_t>> padBegins2DInvalid = { {1, 0}, {1, 1}, {0, 1}
@@ -158,10 +163,19 @@ const auto conv2DParametersInvalidDilation = ::testing::Combine(
     ::testing::ValuesIn(numOutChannels2D),
     ::testing::Values(ngraph::op::PadType::EXPLICIT)
 );
+const auto conv2DParametersInvalidPaddingSize = ::testing::Combine(
+    ::testing::ValuesIn(kernels2D),
+    ::testing::ValuesIn(strides2D),
+    ::testing::ValuesIn(padTooBigForKernels2D),
+    ::testing::ValuesIn(padTooBigForKernels2D),
+    ::testing::ValuesIn(dilations2D),
+    ::testing::ValuesIn(numOutChannels2D),
+    ::testing::Values(ngraph::op::PadType::EXPLICIT));
 
 class GnaConv2DNegativeTest : public ConvolutionLayerTest {
 protected:
     virtual std::string expectedSubstring() = 0;
+    virtual std::string getTarget() = 0;
     void Run() override {
         try {
             ConvolutionLayerTest::LoadNetwork();
@@ -177,19 +191,27 @@ protected:
     }
     void SetUp() override {
         ConvolutionLayerTest::SetUp();
+        const auto target = getTarget();
+        configuration[ov::intel_gna::execution_target.name()] = target;
+        configuration[ov::intel_gna::compile_target.name()] = target;
     }
 };
 
-#define GNA_NEG_INSTANTIATE(whats_wrong, suffix_params, suffix_input, error_message) \
+#define GNA_NEG_INSTANTIATE(whats_wrong, suffix_params, suffix_input, error_message, gna_hw_gen) \
 struct GnaConv2DNegativeTest##whats_wrong : GnaConv2DNegativeTest {     \
     std::string expectedSubstring() override {                          \
         return error_message;                                           \
     }                                                                   \
+    std::string getTarget() override {                                  \
+        std::stringstream s;                                            \
+        s << gna_hw_gen;                                                \
+        return s.str();                                                 \
+    }                                                                   \
 };                                                                      \
 TEST_P(GnaConv2DNegativeTest##whats_wrong, ThrowAsNotSupported) {       \
     Run();                                                              \
 }                                                                       \
-INSTANTIATE_TEST_SUITE_P(smoke_GnaConv2DNegativeTestInvalid##whats_wrong, GnaConv2DNegativeTest##whats_wrong, \
+INSTANTIATE_TEST_SUITE_P(smoke_GnaConv2DNegativeTestInvalid##whats_wrong, GnaConv2DNegativeTest##whats_wrong,     \
     ::testing::Combine(                                                 \
         conv2DParameters##suffix_params,                                \
         ::testing::ValuesIn(netPrecisions),                             \
@@ -201,15 +223,21 @@ INSTANTIATE_TEST_SUITE_P(smoke_GnaConv2DNegativeTestInvalid##whats_wrong, GnaCon
         ::testing::Values(CommonTestUtils::DEVICE_GNA)),                \
     GnaConv2DNegativeTest##whats_wrong::getTestCaseName);
 
-GNA_NEG_INSTANTIATE(FilterNumber, InvalidFilterNumber, Fine, "Unsupported number of kernels")
-GNA_NEG_INSTANTIATE(Kernel, InvalidKernel, Fine, "Unsupported kernel shape")
-GNA_NEG_INSTANTIATE(BigKernelFor56InC, InvalidKernelFor56InC, WithInC56, "Unsupported kernel shape")
-GNA_NEG_INSTANTIATE(BigKernelFor120InC, InvalidKernelFor120InC, WithInC120, "Unsupported kernel shape")
-GNA_NEG_INSTANTIATE(InputH, Fine, InvalidInputH, "Unsupported input height")
-GNA_NEG_INSTANTIATE(InputW, Fine, InvalidInputW, "Unsupported input width")
-GNA_NEG_INSTANTIATE(InputC, Fine, InvalidInputC, "Unsupported number of input channels")
-GNA_NEG_INSTANTIATE(Padding, InvalidPadding, Fine, "Convolution's input padding is not supported")
-GNA_NEG_INSTANTIATE(Stride, InvalidStride, Fine, "Unsupported convolution stride shape")
-GNA_NEG_INSTANTIATE(Dilation, InvalidDilation, Fine, "dilation is not supported on GNA")
+constexpr auto GNA_3_0 = ov::intel_gna::HWGeneration::GNA_3_0;
+constexpr auto GNA_3_5 = ov::intel_gna::HWGeneration::GNA_3_5;
+
+GNA_NEG_INSTANTIATE(FilterNumber, InvalidFilterNumber, Fine, "Unsupported number of kernels", GNA_3_0)
+GNA_NEG_INSTANTIATE(Kernel, InvalidKernel, Fine, "Unsupported kernel shape", GNA_3_0)
+GNA_NEG_INSTANTIATE(BigKernelFor56InC, InvalidKernelFor56InC, WithInC56, "Unsupported kernel shape", GNA_3_0)
+GNA_NEG_INSTANTIATE(BigKernelFor120InC, InvalidKernelFor120InC, WithInC120, "Unsupported kernel shape", GNA_3_0)
+GNA_NEG_INSTANTIATE(InputH, Fine, InvalidInputH, "Unsupported input height", GNA_3_0)
input height") -GNA_NEG_INSTANTIATE(InputW, Fine, InvalidInputW, "Unsupported input width") -GNA_NEG_INSTANTIATE(InputC, Fine, InvalidInputC, "Unsupported number of input channels") -GNA_NEG_INSTANTIATE(Padding, InvalidPadding, Fine, "Convolution's input padding is not supported") -GNA_NEG_INSTANTIATE(Stride, InvalidStride, Fine, "Unsupported convolution stride shape") -GNA_NEG_INSTANTIATE(Dilation, InvalidDilation, Fine, "dilation is not supported on GNA") +constexpr auto GNA_3_0 = ov::intel_gna::HWGeneration::GNA_3_0; +constexpr auto GNA_3_5 = ov::intel_gna::HWGeneration::GNA_3_5; + +GNA_NEG_INSTANTIATE(FilterNumber, InvalidFilterNumber, Fine, "Unsupported number of kernels", GNA_3_0) +GNA_NEG_INSTANTIATE(Kernel, InvalidKernel, Fine, "Unsupported kernel shape", GNA_3_0) +GNA_NEG_INSTANTIATE(BigKernelFor56InC, InvalidKernelFor56InC, WithInC56, "Unsupported kernel shape", GNA_3_0) +GNA_NEG_INSTANTIATE(BigKernelFor120InC, InvalidKernelFor120InC, WithInC120, "Unsupported kernel shape", GNA_3_0) +GNA_NEG_INSTANTIATE(InputH, Fine, InvalidInputH, "Unsupported input height", GNA_3_0) +GNA_NEG_INSTANTIATE(InputW, Fine, InvalidInputW, "Unsupported input width", GNA_3_0) +GNA_NEG_INSTANTIATE(InputC, Fine, InvalidInputC, "Unsupported number of input channels", GNA_3_0) +GNA_NEG_INSTANTIATE(Padding, InvalidPadding, Fine, "Unsupported convolution input padding", GNA_3_0) +GNA_NEG_INSTANTIATE(Stride, InvalidStride, Fine, "Unsupported convolution stride shape", GNA_3_0) +GNA_NEG_INSTANTIATE(Dilation, InvalidDilation, Fine, "dilation is not supported on GNA", GNA_3_0) +GNA_NEG_INSTANTIATE(Dilation35, InvalidDilation, Fine, "dilation is not supported on GNA", GNA_3_5) +GNA_NEG_INSTANTIATE(PaddingSize, InvalidPaddingSize, Fine, "Unsupported convolution input padding", GNA_3_0) +GNA_NEG_INSTANTIATE(PaddingSize35, InvalidPaddingSize, Fine, "Unsupported convolution input padding", GNA_3_5) } // namespace diff --git a/src/plugins/intel_gna/tests/unit/CMakeLists.txt b/src/plugins/intel_gna/tests/unit/CMakeLists.txt index f86b638e278..a9eb9a1b242 100644 --- a/src/plugins/intel_gna/tests/unit/CMakeLists.txt +++ b/src/plugins/intel_gna/tests/unit/CMakeLists.txt @@ -14,7 +14,7 @@ endif() # TODO: fix CVS-71010 and remove BUILD_SHARED_LIBS if(NOT BUILD_SHARED_LIBS) - set(exclude_path EXCLUDED_SOURCE_PATHS "${CMAKE_CURRENT_SOURCE_DIR}/(gna_api_stub|gna_wait_test|gna_export_import_test|gna_infer_request_test).cpp") + set(exclude_path EXCLUDED_SOURCE_PATHS "${CMAKE_CURRENT_SOURCE_DIR}/(gna_api_stub|gna_wait_test|gna_export_import_test|gna_infer_request_test|gna_plugin_load_network_test|gna_mock_api_initializer).cpp") endif() addIeTargetTest( diff --git a/src/plugins/intel_gna/tests/unit/backend/gna_limitations_test.cpp b/src/plugins/intel_gna/tests/unit/backend/gna_limitations_test.cpp index 01244676be5..c2e068b7f8e 100644 --- a/src/plugins/intel_gna/tests/unit/backend/gna_limitations_test.cpp +++ b/src/plugins/intel_gna/tests/unit/backend/gna_limitations_test.cpp @@ -300,7 +300,7 @@ class GNACnn2DValidatorTestPooling2D : public GNACnn2DValidatorTest {}; namespace { TEST_P(GNACnn2DValidatorTestPadding, testPaddingSupported) { - ASSERT_TRUE(validator->IsPaddingSupported() == isPaddingSupported()); + ASSERT_TRUE(validator->ValidateInputPadding("", 1, 1, 1, 1, 2, 2, false) == isPaddingSupported()); } TEST_P(GNACnn2DValidatorTest, testValidateCnn2DInvalid) { diff --git a/src/plugins/intel_gna/tests/unit/gna_mock_api.hpp b/src/plugins/intel_gna/tests/unit/gna_mock_api.hpp index 97fa2ac9650..0116970b373 100644 --- 
diff --git a/src/plugins/intel_gna/tests/unit/CMakeLists.txt b/src/plugins/intel_gna/tests/unit/CMakeLists.txt
index f86b638e278..a9eb9a1b242 100644
--- a/src/plugins/intel_gna/tests/unit/CMakeLists.txt
+++ b/src/plugins/intel_gna/tests/unit/CMakeLists.txt
@@ -14,7 +14,7 @@ endif()
 
 # TODO: fix CVS-71010 and remove BUILD_SHARED_LIBS
 if(NOT BUILD_SHARED_LIBS)
-    set(exclude_path EXCLUDED_SOURCE_PATHS "${CMAKE_CURRENT_SOURCE_DIR}/(gna_api_stub|gna_wait_test|gna_export_import_test|gna_infer_request_test).cpp")
+    set(exclude_path EXCLUDED_SOURCE_PATHS "${CMAKE_CURRENT_SOURCE_DIR}/(gna_api_stub|gna_wait_test|gna_export_import_test|gna_infer_request_test|gna_plugin_load_network_test|gna_mock_api_initializer).cpp")
 endif()
 
 addIeTargetTest(
diff --git a/src/plugins/intel_gna/tests/unit/backend/gna_limitations_test.cpp b/src/plugins/intel_gna/tests/unit/backend/gna_limitations_test.cpp
index 01244676be5..c2e068b7f8e 100644
--- a/src/plugins/intel_gna/tests/unit/backend/gna_limitations_test.cpp
+++ b/src/plugins/intel_gna/tests/unit/backend/gna_limitations_test.cpp
@@ -300,7 +300,7 @@ class GNACnn2DValidatorTestPooling2D : public GNACnn2DValidatorTest {};
 
 namespace {
 TEST_P(GNACnn2DValidatorTestPadding, testPaddingSupported) {
-    ASSERT_TRUE(validator->IsPaddingSupported() == isPaddingSupported());
+    ASSERT_TRUE(validator->ValidateInputPadding("", 1, 1, 1, 1, 2, 2, false) == isPaddingSupported());
 }
 
 TEST_P(GNACnn2DValidatorTest, testValidateCnn2DInvalid) {
diff --git a/src/plugins/intel_gna/tests/unit/gna_mock_api.hpp b/src/plugins/intel_gna/tests/unit/gna_mock_api.hpp
index 97fa2ac9650..0116970b373 100644
--- a/src/plugins/intel_gna/tests/unit/gna_mock_api.hpp
+++ b/src/plugins/intel_gna/tests/unit/gna_mock_api.hpp
@@ -3,24 +3,14 @@
 //
 #pragma once
 
-#include
-#include
-#include
-
-#if defined(_WIN32)
-    #ifdef libGNAStubs_EXPORTS
-        #define GNA_STUBS_EXPORT __declspec(dllexport)
-    #else
-        #define GNA_STUBS_EXPORT __declspec(dllimport)
-    #endif
-#else
-    #define GNA_STUBS_EXPORT
-#endif
+#include
+#include
+#include
 
 class GNACppApi {
 public:
-    GNA_STUBS_EXPORT GNACppApi();
-    GNA_STUBS_EXPORT ~GNACppApi();
+    GNACppApi();
+    ~GNACppApi();
 
     MOCK_METHOD3(Gna2MemoryAlloc, Gna2Status(
         uint32_t sizeRequested,
diff --git a/src/plugins/intel_gna/tests/unit/gna_mock_api_initializer.cpp b/src/plugins/intel_gna/tests/unit/gna_mock_api_initializer.cpp
new file mode 100644
index 00000000000..48a17dd65dc
--- /dev/null
+++ b/src/plugins/intel_gna/tests/unit/gna_mock_api_initializer.cpp
@@ -0,0 +1,68 @@
+// Copyright (C) 2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gna_mock_api_initializer.hpp"
+
+#include "gna_mock_api.hpp"
+#include
+#include
+#include
+
+void GnaMockApiInitializer::init() {
+    using ::testing::_;
+    using ::testing::AtLeast;
+    using ::testing::InSequence;
+    using ::testing::Invoke;
+    using ::testing::Return;
+
+    EXPECT_CALL(_mock_api, Gna2DeviceGetVersion(_, _))
+        .WillOnce(Invoke([this](uint32_t deviceIndex, enum Gna2DeviceVersion* deviceVersion) {
+            *deviceVersion = this->_gna_device_version;
+            return Gna2StatusSuccess;
+        }));
+
+    EXPECT_CALL(_mock_api, Gna2DeviceOpen(_)).WillOnce(Return(Gna2StatusSuccess));
+
+    EXPECT_CALL(_mock_api, Gna2GetLibraryVersion(_, _)).Times(AtLeast(0)).WillRepeatedly(Return(Gna2StatusSuccess));
+
+    EXPECT_CALL(_mock_api, Gna2InstrumentationConfigCreate(_, _, _, _)).WillOnce(Return(Gna2StatusSuccess));
+
+    if (_create_model) {
+        EXPECT_CALL(_mock_api, Gna2MemoryAlloc(_, _, _))
+            .Times(AtLeast(1))
+            .WillRepeatedly(Invoke([this](uint32_t sizeRequested, uint32_t* sizeGranted, void** memoryAddress) {
+                this->_mocked_gna_memory.push_back(std::vector<uint8_t>(sizeRequested));
+                *sizeGranted = sizeRequested;
+                *memoryAddress = this->_mocked_gna_memory.back().data();
+                return Gna2StatusSuccess;
+            }));
+
+        EXPECT_CALL(_mock_api, Gna2ModelCreate(_, _, _))
+            .WillOnce(Invoke([](uint32_t deviceIndex, struct Gna2Model const* model, uint32_t* modelId) {
+                *modelId = 0;
+                return Gna2StatusSuccess;
+            }));
+
+        EXPECT_CALL(_mock_api, Gna2RequestConfigCreate(_, _))
+            .WillOnce(Invoke([](uint32_t modelId, uint32_t* requestConfigId) {
+                *requestConfigId = 0;
+                return Gna2StatusSuccess;
+            }));
+
+        EXPECT_CALL(_mock_api, Gna2InstrumentationConfigAssignToRequestConfig(_, _))
+            .Times(AtLeast(1))
+            .WillRepeatedly(Return(Gna2StatusSuccess));
+    }
+    InSequence seq;
+    EXPECT_CALL(_mock_api, Gna2DeviceClose(_)).WillOnce(Return(Gna2StatusSuccess));
+    if (_create_model) {
+        EXPECT_CALL(_mock_api, Gna2MemoryFree(_)).Times(AtLeast(1)).WillRepeatedly(Return(Gna2StatusSuccess));
+    }
+}
+void GnaMockApiInitializer::set_gna_device_version(const Gna2DeviceVersion val) {
+    _gna_device_version = val;
+}
+void GnaMockApiInitializer::set_create_model(const bool val) {
+    _create_model = val;
+}
+#include "gna_mock_api.hpp" + +#include + +#include "cstdint" +#include "vector" + +class GnaMockApiInitializer { + GNACppApi _mock_api; + std::vector> _mocked_gna_memory; + Gna2DeviceVersion _gna_device_version = Gna2DeviceVersionSoftwareEmulation; + bool _create_model = true; + +public: + void init(); + void set_gna_device_version(const Gna2DeviceVersion val); + void set_create_model(const bool val); +}; diff --git a/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp b/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp new file mode 100644 index 00000000000..b7fd7bf2baf --- /dev/null +++ b/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp @@ -0,0 +1,90 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gna_mock_api_initializer.hpp" +#include "common/gna_target.hpp" +#include "gna_plugin.hpp" +#include "ngraph_functions/builders.hpp" +#include + +namespace { +typedef struct { + const std::vector input_size; + const std::vector filter_size; + const std::vector pads_begin; + const std::vector pads_end; +} ConvModel; + +const std::vector models{ + {{1, 8, 32, 1}, {2, 1}, {1, 0}, {1, 0}}, + {{1, 8, 1, 32}, {1, 2}, {0, 1}, {0, 1}} +}; + +typedef struct { + ConvModel model; + Gna2DeviceVersion mock_target; + bool load_succesfull; +} ConvModelTestParams; + +std::vector all_tests{ + {models[0], Gna2DeviceVersion3_0, false}, + {models[1], Gna2DeviceVersion3_0, false}, + {models[0], Gna2DeviceVersion3_5, true}, + {models[1], Gna2DeviceVersion3_5, true}}; + +class GNAPluginLoadNetworkTest : public ::testing::Test, public ::testing::WithParamInterface { + std::shared_ptr function; + +protected: + void Run() { + SetUp(); + const auto test_parameter = GetParam(); + GnaMockApiInitializer mock; + mock.set_gna_device_version(test_parameter.mock_target); + mock.set_create_model(test_parameter.load_succesfull); + mock.init(); + + GNAPluginNS::GNAPlugin gna_plugin{}; + InferenceEngine::CNNNetwork cnn_network{function}; + bool load_succesfull = true; + try { + gna_plugin.LoadNetwork(cnn_network); + } catch (std::exception&) { + load_succesfull = false; + } + EXPECT_EQ(test_parameter.load_succesfull, load_succesfull); + } + + void SetUp() override { + const std::vector c_strides{1, 1}; + const std::vector c_dilations{1, 1}; + constexpr size_t c_num_out_channels = 8; + const auto& model = GetParam().model; + + using ngraph::element::f32; + auto parameter = std::make_shared(f32, ngraph::Shape{model.input_size}); + + auto conv = std::dynamic_pointer_cast( + ngraph::builder::makeConvolution(parameter, + f32, + model.filter_size, + c_strides, + model.pads_begin, + model.pads_end, + c_dilations, + ngraph::op::PadType::EXPLICIT, + c_num_out_channels)); + auto result = std::make_shared(conv); + function = std::make_shared(result, ov::ParameterVector{parameter}, "convolution"); + } +}; + +// This test covers GNAGraphCompiler::ShouldUseOnlyConv2DGnaIface() +// behavior when specific Gna2DeviceVersion is detected by Gna2DeviceGetVersion() +TEST_P(GNAPluginLoadNetworkTest, ReturnsSpecificGna2DeviceVersion) { + Run(); +} + +INSTANTIATE_TEST_SUITE_P(smoke_LoadConvolution1D, GNAPluginLoadNetworkTest, ::testing::ValuesIn(all_tests)); +} // namespace diff --git a/src/tests_deprecated/unit/engines/gna/gna_mock_api.hpp b/src/tests_deprecated/unit/engines/gna/gna_mock_api.hpp index ef2cc3885f8..ade1207631f 100644 --- a/src/tests_deprecated/unit/engines/gna/gna_mock_api.hpp +++ b/src/tests_deprecated/unit/engines/gna/gna_mock_api.hpp 
@@ -3,7 +3,7 @@
 //
 #pragma once
 
-#include
+#include
 #include
 #include
 #include