[GNA] Fix input padding validator (#14607)

* [GNA] Fix input padding validator

   Assert that kernel size is above padding size
   Add tests
   Simplify compile target handling in graph compiler
   Fix issue 98584
   Swap padding end axes when swapping begin (fix 99008)

* [GNA] Add unit test for ShouldUseOnlyConv2DGnaIface fixed

* Apply review

* Simplify mock for GNA Lib

* Fixup missing includes

* Fix static build issues described in 71010
This commit is contained in:
Krzysztof Bruniecki 2022-12-20 16:04:40 +01:00 committed by GitHub
parent fa55306794
commit 9ba6e48dbf
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 350 additions and 79 deletions

View File

@ -486,7 +486,7 @@ public:
/**
* @brief A convolution paddings end array [X, Y, Z, ...]
*/
PropertyVector<unsigned int> _pads_end;
DEFINE_PROP(_pads_end);
/**
* @brief A convolution strides array [X, Y, Z, ...]
*/

View File

@ -21,6 +21,31 @@ namespace GNAPluginNS {
namespace GNALimitations {
namespace Cnn2D {
// Returns true when the inspected value matches the configured reference value.
bool IsEqualToLimit::isValid(const uint32_t val) const {
    return compared_value == val;
}
// Builds a diagnostic message for an invalid value; returns an empty string
// when the value passes validation.
std::string IsEqualToLimit::GetErrorOrEmpty(const uint32_t val) const {
    if (isValid(val)) {
        return {};
    }
    std::ostringstream out;
    out << "Unsupported " << what << ", actual value: " << val << ", but should be equal to " << compared_value
        << "\n";
    return out.str();
}
// Returns true when the inspected value is strictly below the configured limit.
bool IsLessThanLimit::isValid(const uint32_t val) const {
    return val < compared_value;
}
// Builds a diagnostic message for an invalid value; returns an empty string
// when the value passes validation.
std::string IsLessThanLimit::GetErrorOrEmpty(const uint32_t val) const {
    if (isValid(val)) {
        return {};
    }
    std::ostringstream out;
    out << "Unsupported " << what << ", actual value: " << val << ", but should be less than " << compared_value << "\n";
    return out.str();
}
// Accepts values lying within the inclusive [min, max] interval.
bool RangeLimit::isValid(const uint32_t val) const {
    return min <= val && val <= max;
}
@ -178,7 +203,21 @@ bool Validator_30::ValidatePooling2D(const std::string& name,
return ValidationSuccesful(throwOnError, error, name, "Pooling2D");
}
bool Validator_30::IsPaddingSupported() const {
bool Validator_30::ValidateInputPadding(const std::string& name,
const uint32_t pad_h_begin, const uint32_t pad_h_end,
const uint32_t pad_w_begin, const uint32_t pad_w_end,
const uint32_t,
const uint32_t,
const bool throwOnError) const {
const IsEqualToLimit padding_zero{0, "convolution input padding size (must equal zero)"};
auto error = padding_zero.GetErrorOrEmpty(pad_h_begin);
error += padding_zero.GetErrorOrEmpty(pad_h_end);
error += padding_zero.GetErrorOrEmpty(pad_w_begin);
error += padding_zero.GetErrorOrEmpty(pad_w_end);
return ValidationSuccesful(throwOnError, error, name, "Convolution2D");
}
// GNA 3.0 is not restricted to the native Conv2D GNA interface; the legacy
// convolution path may still be used for this target.
bool Validator_30::ShouldUseOnlyConv2DGnaIface() const {
return false;
}
@ -262,7 +301,28 @@ bool Validator_35::ValidatePooling2D(const std::string& name, const uint32_t win
return ValidationSuccesful(throwOnError, error, name, "Pooling2D");
}
bool Validator_35::IsPaddingSupported() const {
bool Validator_35::ValidateInputPadding(const std::string& name,
const uint32_t pad_h_begin, const uint32_t pad_h_end,
const uint32_t pad_w_begin, const uint32_t pad_w_end,
const uint32_t kernel_h,
const uint32_t kernel_w,
const bool throwOnError) const {
const IsEqualToLimit padding_h_symetric{pad_h_end, "convolution input padding along height axis (must be symmetric)"};
const IsEqualToLimit padding_w_symetric{pad_w_end, "convolution input padding along width axis (must be symmetric)"};
const IsLessThanLimit padding_h_limit{kernel_h, "convolution input padding height (must be less than kernel height)"};
const IsLessThanLimit padding_w_limit{kernel_w, "convolution input padding width (must be less than kernel width)"};
auto error = padding_h_symetric.GetErrorOrEmpty(pad_h_begin);
error += padding_w_symetric.GetErrorOrEmpty(pad_w_begin);
error += padding_h_limit.GetErrorOrEmpty(pad_h_begin);
error += padding_w_limit.GetErrorOrEmpty(pad_w_begin);
return ValidationSuccesful(throwOnError, error, name, "Convolution2D");
}
// GNA 3.5 must compile convolutions exclusively through the native Conv2D
// GNA interface.
bool Validator_35::ShouldUseOnlyConv2DGnaIface() const {
return true;
}

View File

@ -66,6 +66,20 @@ inline bool IsTransposeSupported(const std::vector<size_t>& shape) {
}
namespace Cnn2D {
// Validation helper: checks that a value is exactly equal to compared_value.
struct IsEqualToLimit {
uint32_t compared_value;  // reference value the checked value must equal
std::string what;  // human-readable description of the checked quantity, used in error text
bool isValid(const uint32_t val) const;
std::string GetErrorOrEmpty(const uint32_t val) const;  // empty string when valid, diagnostic otherwise
};
// Validation helper: checks that a value is strictly less than compared_value.
struct IsLessThanLimit {
uint32_t compared_value;  // exclusive upper bound for the checked value
std::string what;  // human-readable description of the checked quantity, used in error text
bool isValid(const uint32_t val) const;
std::string GetErrorOrEmpty(const uint32_t val) const;  // empty string when valid, diagnostic otherwise
};
struct RangeLimit {
uint32_t min;
uint32_t max;
@ -140,7 +154,14 @@ public:
const uint32_t strideH, const uint32_t strideW,
bool exception = true) const = 0;
virtual bool IsPaddingSupported() const = 0;
virtual bool ValidateInputPadding(const std::string& name,
const uint32_t pad_h_begin, const uint32_t pad_h_end,
const uint32_t pad_w_begin, const uint32_t pad_w_end,
const uint32_t kernel_h,
const uint32_t kernel_w,
const bool throwOnError = true) const = 0;
virtual bool ShouldUseOnlyConv2DGnaIface() const = 0;
virtual bool ValidateCnn1D(const std::string& name, const uint32_t inHeight, const uint32_t inWidth,
const uint32_t inChannels, const uint32_t kH, const uint32_t kW, const uint32_t kN,
@ -173,7 +194,14 @@ public:
const uint32_t strideH, const uint32_t strideW,
bool exception = true) const override;
bool IsPaddingSupported() const override;
bool ValidateInputPadding(const std::string& name,
const uint32_t pad_h_begin, const uint32_t pad_h_end,
const uint32_t pad_w_begin, const uint32_t pad_w_end,
const uint32_t kernel_h,
const uint32_t kernel_w,
const bool throwOnError = true) const override;
bool ShouldUseOnlyConv2DGnaIface() const override;
bool ValidateCnn1D(const std::string& name, const uint32_t inHeight, const uint32_t inWidth,
const uint32_t inChannels, const uint32_t kH, const uint32_t kW, const uint32_t kN,
@ -233,7 +261,14 @@ public:
const uint32_t strideH, const uint32_t strideW,
bool exception = true) const override;
bool IsPaddingSupported() const override;
bool ValidateInputPadding(const std::string& name,
const uint32_t pad_h_begin, const uint32_t pad_h_end,
const uint32_t pad_w_begin, const uint32_t pad_w_end,
const uint32_t kernel_h,
const uint32_t kernel_w,
const bool throwOnError = true) const override;
bool ShouldUseOnlyConv2DGnaIface() const override;
bool ValidateCnn1D(const std::string& name, const uint32_t inHeight, const uint32_t inWidth,
const uint32_t inChannels, const uint32_t kH, const uint32_t kW, const uint32_t kN,

View File

@ -11,5 +11,6 @@ static constexpr const char* kGnaTarget2_0 = "GNA_TARGET_2_0";
static constexpr const char* kGnaTarget3_0 = "GNA_TARGET_3_0";
static constexpr const char* kGnaTarget3_1 = "GNA_TARGET_3_1";
static constexpr const char* kGnaTarget3_5 = "GNA_TARGET_3_5";
static constexpr const char* kGnaDefaultTarget = kGnaTarget3_0;
} // namespace common
} // namespace GNAPluginNS

View File

@ -254,7 +254,7 @@ Gna2DeviceVersion GNADeviceHelper::parseTarget(const std::string& target) {
Gna2DeviceVersion GNADeviceHelper::getDefaultTarget() const {
if (detectedGnaDevVersion == Gna2DeviceVersionSoftwareEmulation)
return Gna2DeviceVersion3_0;
return parseTarget(GNAPluginNS::common::kGnaDefaultTarget);
return detectedGnaDevVersion;
}

View File

@ -220,7 +220,7 @@ void GNAPluginNS::GNAGraphCompiler::SetValidatorTarget(const std::string& target
}
bool GNAPluginNS::GNAGraphCompiler::ShouldUseOnlyConv2DGnaIface() const {
return gna_config.gnaCompileTarget == common::kGnaTarget3_5;
return cnn2dValidator && cnn2dValidator->ShouldUseOnlyConv2DGnaIface();
}
void GNAPluginNS::GNAGraphCompiler::ValidateCnn2D(const std::string& name,
@ -257,14 +257,6 @@ void GNAPluginNS::GNAGraphCompiler::ValidatePooling2D(const std::string& name,
}
}
bool GNAPluginNS::GNAGraphCompiler::IsCnn2DInputPaddingSupported(const std::string& name) const {
if (cnn2dValidator) {
return cnn2dValidator->IsPaddingSupported();
} else {
THROW_GNA_EXCEPTION << "No Cnn2D input padding validator found for layer " << name;
}
}
void GNAGraphCompiler::DiagonalPrimitive(InferenceEngine::CNNLayerPtr layer) {
AffinePrimitive(layer, true);
}
@ -335,6 +327,7 @@ void GNAGraphCompiler::ConvolutionPrimitive(InferenceEngine::CNNLayerPtr layer)
std::swap(out_height, out_width);
std::swap(convolution._kernel_x, convolution._kernel_y);
std::swap(convolution._padding_x, convolution._padding_y);
std::swap(convolution._pads_end_x, convolution._pads_end_y);
std::swap(convolution._stride_x, convolution._stride_y);
std::swap(convolution._dilation_x, convolution._dilation_y);
}
@ -636,26 +629,19 @@ void GNAGraphCompiler::finalizeConvolution2DPrimitive(InferenceEngine::CNNLayerP
// TODO add function
// printConvolution2DLayer(convolution);
auto effectiveInputWidth = in_width;
auto effectiveInputHeight = in_height;
if (!IsCnn2DInputPaddingSupported(convolution.name) &&
(convolution._padding_x != 0 || convolution._padding_y != 0 ||
convolution._pads_end.at(X_AXIS) != 0 || convolution._pads_end.at(Y_AXIS) != 0)) {
THROW_GNA_LAYER_EXCEPTION(layer) << "Convolution's input padding is not supported";
if (!cnn2dValidator) {
THROW_GNA_EXCEPTION << "No Cnn2D validator found for layer " << convolution.name;
}
if (convolution._padding_x != convolution._pads_end.at(X_AXIS)) {
THROW_GNA_LAYER_EXCEPTION(layer) << "Convolution's input padding is not symetric along X axis";
}
if (convolution._padding_y != convolution._pads_end.at(Y_AXIS)) {
THROW_GNA_LAYER_EXCEPTION(layer) << "Convolution's input padding is not symetric along Y axis";
}
convolution._padding_x = convolution._pads_end.at(X_AXIS);
convolution._padding_y = convolution._pads_end.at(Y_AXIS);
cnn2dValidator->ValidateInputPadding(convolution.name,
convolution._padding_y,
convolution._pads_end_y,
convolution._padding_x,
convolution._pads_end_x,
convolution._kernel_y,
convolution._kernel_x);
if (convolution._kernel_x > effectiveInputWidth ||
convolution._kernel_y > effectiveInputHeight) {
if (convolution._kernel_x > in_width || convolution._kernel_y > in_height) {
THROW_GNA_LAYER_EXCEPTION(layer) << "Kernel dimensions XY (" << convolution._kernel_x << ", " << convolution._kernel_y << ")"
<< " are bigger than input dimensions WH (" << in_width << "," << in_height << ")";
}

View File

@ -80,8 +80,6 @@ public:
const uint32_t windowH, const uint32_t windowW,
const uint32_t strideH, const uint32_t strideW) const;
bool IsCnn2DInputPaddingSupported(const std::string& name) const;
void SetValidatorTarget(const std::string& target);
/**

View File

@ -344,15 +344,6 @@ GNAPlugin::GNAPlugin() :
InitGNADevice();
}
std::string GNAPluginNS::GNAPlugin::GetCompileTarget() const {
if (gnadevice) {
return gnadevice->GetCompileTarget();
} else if (!config.gnaCompileTarget.empty()) {
return config.gnaCompileTarget;
}
return common::kGnaTarget3_0;
}
GNAPlugin::GNAPlugin(const std::map<std::string, std::string>& configMap) :
graphCompiler(config) {
Init();
@ -669,7 +660,8 @@ void GNAPlugin::LoadNetwork(const CNNNetwork& _network) {
OV_ITT_SCOPED_TASK(itt::domains::GNAPlugin, "LoadNetwork");
std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> convertedNetwork;
std::string effectiveGnaCompileTargetValue = effectiveGnaCompileTarget();
const auto effectiveGnaCompileTargetValue = effectiveGnaCompileTarget();
graphCompiler.SetValidatorTarget(effectiveGnaCompileTargetValue);
bool isNgraphPassesUsed = false;
bool fake_quantized = false;
@ -941,8 +933,6 @@ void GNAPlugin::LoadNetwork(const CNNNetwork& _network) {
gnaFlags->num_requests = 1;
}
graphCompiler.SetValidatorTarget(GetCompileTarget());
// keep inputs information and create input primitives
inputs_data_map_ = newNet.getInputsInfo();
if (inputs_data_map_.empty()) {
@ -1130,9 +1120,12 @@ bool GNAPluginNS::GNAPlugin::isFP32ModeActive() const {
std::string GNAPluginNS::GNAPlugin::effectiveGnaCompileTarget() const {
if (gnadevice) {
return gnadevice->GetCompileTarget();
} else if (!config.gnaCompileTarget.empty()) {
return config.gnaCompileTarget;
}
return config.gnaCompileTarget;
return common::kGnaDefaultTarget;
}
std::shared_ptr<request::Worker> GNAPlugin::createWorkerForLoadNetwork(bool trivial, bool fp32Mode) {
return createWorker(createModelWrapperForLoadNetwork(trivial), trivial, fp32Mode);
}

View File

@ -67,7 +67,6 @@ protected:
std::vector<InferenceEngine::IVariableStateInternal::Ptr> memoryStates;
bool trivialTopology = false;
std::string GetCompileTarget() const;
public:
explicit GNAPlugin(const std::map<std::string, std::string>& configMap);

View File

@ -7,6 +7,7 @@
#include "common_test_utils/test_assertions.hpp"
#include "common_test_utils/test_constants.hpp"
#include "../skip_tests_check.hpp"
#include "openvino/runtime/intel_gna/properties.hpp"
using namespace LayerTestsDefinitions;
@ -58,6 +59,10 @@ const std::vector<std::vector<size_t >> strides2DInvalid = {
};
const std::vector<std::vector<ptrdiff_t>> padBegins2D = { {0, 0},
};
// Padding must be less than kernel size
const std::vector<std::vector<ptrdiff_t>> padTooBigForKernels2D = {
{3, 3},
};
const std::vector<std::vector<ptrdiff_t>> padEnds2D = { {0, 0},
};
const std::vector<std::vector<ptrdiff_t>> padBegins2DInvalid = { {1, 0}, {1, 1}, {0, 1}
@ -158,10 +163,19 @@ const auto conv2DParametersInvalidDilation = ::testing::Combine(
::testing::ValuesIn(numOutChannels2D),
::testing::Values(ngraph::op::PadType::EXPLICIT)
);
const auto conv2DParametersInvalidPaddingSize = ::testing::Combine(
::testing::ValuesIn(kernels2D),
::testing::ValuesIn(strides2D),
::testing::ValuesIn(padTooBigForKernels2D),
::testing::ValuesIn(padTooBigForKernels2D),
::testing::ValuesIn(dilations2D),
::testing::ValuesIn(numOutChannels2D),
::testing::Values(ngraph::op::PadType::EXPLICIT));
class GnaConv2DNegativeTest : public ConvolutionLayerTest {
protected:
virtual std::string expectedSubstring() = 0;
virtual std::string getTarget() = 0;
void Run() override {
try {
ConvolutionLayerTest::LoadNetwork();
@ -177,19 +191,27 @@ protected:
}
void SetUp() override {
ConvolutionLayerTest::SetUp();
const auto target = getTarget();
configuration[ov::intel_gna::execution_target.name()] = target;
configuration[ov::intel_gna::compile_target.name()] = target;
}
};
#define GNA_NEG_INSTANTIATE(whats_wrong, suffix_params, suffix_input, error_message) \
#define GNA_NEG_INSTANTIATE(whats_wrong, suffix_params, suffix_input, error_message, gna_hw_gen) \
struct GnaConv2DNegativeTest##whats_wrong : GnaConv2DNegativeTest { \
std::string expectedSubstring() override { \
return error_message; \
} \
std::string getTarget() override { \
std::stringstream s; \
s << gna_hw_gen; \
return s.str(); \
} \
}; \
TEST_P(GnaConv2DNegativeTest##whats_wrong, ThrowAsNotSupported) { \
Run(); \
} \
INSTANTIATE_TEST_SUITE_P(smoke_GnaConv2DNegativeTestInvalid##whats_wrong, GnaConv2DNegativeTest##whats_wrong, \
INSTANTIATE_TEST_SUITE_P(smoke_GnaConv2DNegativeTestInvalid##whats_wrong, GnaConv2DNegativeTest##whats_wrong, \
::testing::Combine( \
conv2DParameters##suffix_params, \
::testing::ValuesIn(netPrecisions), \
@ -201,15 +223,21 @@ INSTANTIATE_TEST_SUITE_P(smoke_GnaConv2DNegativeTestInvalid##whats_wrong, GnaCon
::testing::Values(CommonTestUtils::DEVICE_GNA)), \
GnaConv2DNegativeTest##whats_wrong::getTestCaseName);
GNA_NEG_INSTANTIATE(FilterNumber, InvalidFilterNumber, Fine, "Unsupported number of kernels")
GNA_NEG_INSTANTIATE(Kernel, InvalidKernel, Fine, "Unsupported kernel shape")
GNA_NEG_INSTANTIATE(BigKernelFor56InC, InvalidKernelFor56InC, WithInC56, "Unsupported kernel shape")
GNA_NEG_INSTANTIATE(BigKernelFor120InC, InvalidKernelFor120InC, WithInC120, "Unsupported kernel shape")
GNA_NEG_INSTANTIATE(InputH, Fine, InvalidInputH, "Unsupported input height")
GNA_NEG_INSTANTIATE(InputW, Fine, InvalidInputW, "Unsupported input width")
GNA_NEG_INSTANTIATE(InputC, Fine, InvalidInputC, "Unsupported number of input channels")
GNA_NEG_INSTANTIATE(Padding, InvalidPadding, Fine, "Convolution's input padding is not supported")
GNA_NEG_INSTANTIATE(Stride, InvalidStride, Fine, "Unsupported convolution stride shape")
GNA_NEG_INSTANTIATE(Dilation, InvalidDilation, Fine, "dilation is not supported on GNA")
constexpr auto GNA_3_0 = ov::intel_gna::HWGeneration::GNA_3_0;
constexpr auto GNA_3_5 = ov::intel_gna::HWGeneration::GNA_3_5;
GNA_NEG_INSTANTIATE(FilterNumber, InvalidFilterNumber, Fine, "Unsupported number of kernels", GNA_3_0)
GNA_NEG_INSTANTIATE(Kernel, InvalidKernel, Fine, "Unsupported kernel shape", GNA_3_0)
GNA_NEG_INSTANTIATE(BigKernelFor56InC, InvalidKernelFor56InC, WithInC56, "Unsupported kernel shape", GNA_3_0)
GNA_NEG_INSTANTIATE(BigKernelFor120InC, InvalidKernelFor120InC, WithInC120, "Unsupported kernel shape", GNA_3_0)
GNA_NEG_INSTANTIATE(InputH, Fine, InvalidInputH, "Unsupported input height", GNA_3_0)
GNA_NEG_INSTANTIATE(InputW, Fine, InvalidInputW, "Unsupported input width", GNA_3_0)
GNA_NEG_INSTANTIATE(InputC, Fine, InvalidInputC, "Unsupported number of input channels", GNA_3_0)
GNA_NEG_INSTANTIATE(Padding, InvalidPadding, Fine, "Unsupported convolution input padding", GNA_3_0)
GNA_NEG_INSTANTIATE(Stride, InvalidStride, Fine, "Unsupported convolution stride shape", GNA_3_0)
GNA_NEG_INSTANTIATE(Dilation, InvalidDilation, Fine, "dilation is not supported on GNA", GNA_3_0)
GNA_NEG_INSTANTIATE(Dilation35, InvalidDilation, Fine, "dilation is not supported on GNA", GNA_3_5)
GNA_NEG_INSTANTIATE(PaddingSize, InvalidPaddingSize, Fine, "Unsupported convolution input padding", GNA_3_0)
GNA_NEG_INSTANTIATE(PaddingSize35, InvalidPaddingSize, Fine, "Unsupported convolution input padding", GNA_3_5)
} // namespace

View File

@ -14,7 +14,7 @@ endif()
# TODO: fix CVS-71010 and remove BUILD_SHARED_LIBS
if(NOT BUILD_SHARED_LIBS)
set(exclude_path EXCLUDED_SOURCE_PATHS "${CMAKE_CURRENT_SOURCE_DIR}/(gna_api_stub|gna_wait_test|gna_export_import_test|gna_infer_request_test).cpp")
set(exclude_path EXCLUDED_SOURCE_PATHS "${CMAKE_CURRENT_SOURCE_DIR}/(gna_api_stub|gna_wait_test|gna_export_import_test|gna_infer_request_test|gna_plugin_load_network_test|gna_mock_api_initializer).cpp")
endif()
addIeTargetTest(

View File

@ -300,7 +300,7 @@ class GNACnn2DValidatorTestPooling2D : public GNACnn2DValidatorTest {};
namespace {
TEST_P(GNACnn2DValidatorTestPadding, testPaddingSupported) {
ASSERT_TRUE(validator->IsPaddingSupported() == isPaddingSupported());
ASSERT_TRUE(validator->ValidateInputPadding("", 1, 1, 1, 1, 2, 2, false) == isPaddingSupported());
}
TEST_P(GNACnn2DValidatorTest, testValidateCnn2DInvalid) {

View File

@ -3,24 +3,14 @@
//
#pragma once
#include <gmock/gmock-generated-function-mockers.h>
#include <gna2-instrumentation-api.h>
#include <gna2-model-export-api.h>
#if defined(_WIN32)
#ifdef libGNAStubs_EXPORTS
#define GNA_STUBS_EXPORT __declspec(dllexport)
#else
#define GNA_STUBS_EXPORT __declspec(dllimport)
#endif
#else
#define GNA_STUBS_EXPORT
#endif
#include <gna2-api.h>
#include <gmock/gmock.h>
#include <cstdint>
class GNACppApi {
public:
GNA_STUBS_EXPORT GNACppApi();
GNA_STUBS_EXPORT ~GNACppApi();
GNACppApi();
~GNACppApi();
MOCK_METHOD3(Gna2MemoryAlloc, Gna2Status(
uint32_t sizeRequested,

View File

@ -0,0 +1,68 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gna_mock_api_initializer.hpp"
#include "gna_mock_api.hpp"
#include <gna2-common-api.h>
#include <cstdint>
#include <vector>
// Arms the GNA2 API mock with the minimal set of expectations needed for
// GNAPlugin to open a (mocked) device, optionally build a model, and shut
// down cleanly. Call set_gna_device_version()/set_create_model() first.
void GnaMockApiInitializer::init() {
using ::testing::_;
using ::testing::AtLeast;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
// Report the configured device version so the plugin selects the matching
// compile target.
EXPECT_CALL(_mock_api, Gna2DeviceGetVersion(_, _))
.WillOnce(Invoke([this](uint32_t deviceIndex, enum Gna2DeviceVersion* deviceVersion) {
*deviceVersion = this->_gna_device_version;
return Gna2StatusSuccess;
}));
EXPECT_CALL(_mock_api, Gna2DeviceOpen(_)).WillOnce(Return(Gna2StatusSuccess));
EXPECT_CALL(_mock_api, Gna2GetLibraryVersion(_, _)).Times(AtLeast(0)).WillRepeatedly(Return(Gna2StatusSuccess));
EXPECT_CALL(_mock_api, Gna2InstrumentationConfigCreate(_, _, _, _)).WillOnce(Return(Gna2StatusSuccess));
// Expectations below are only armed when the test is expected to reach model
// creation (i.e. LoadNetwork should succeed).
if (_create_model) {
// Back each allocation request with a host-side buffer owned by this object
// so returned pointers stay valid for the mock's lifetime.
EXPECT_CALL(_mock_api, Gna2MemoryAlloc(_, _, _))
.Times(AtLeast(1))
.WillRepeatedly(Invoke([this](uint32_t sizeRequested, uint32_t* sizeGranted, void** memoryAddress) {
this->_mocked_gna_memory.push_back(std::vector<uint8_t>(sizeRequested));
*sizeGranted = sizeRequested;
*memoryAddress = this->_mocked_gna_memory.back().data();
return Gna2StatusSuccess;
}));
EXPECT_CALL(_mock_api, Gna2ModelCreate(_, _, _))
.WillOnce(Invoke([](uint32_t deviceIndex, struct Gna2Model const* model, uint32_t* modelId) {
*modelId = 0;
return Gna2StatusSuccess;
}));
EXPECT_CALL(_mock_api, Gna2RequestConfigCreate(_, _))
.WillOnce(Invoke([](uint32_t modelId, uint32_t* requestConfigId) {
*requestConfigId = 0;
return Gna2StatusSuccess;
}));
EXPECT_CALL(_mock_api, Gna2InstrumentationConfigAssignToRequestConfig(_, _))
.Times(AtLeast(1))
.WillRepeatedly(Return(Gna2StatusSuccess));
}
// Teardown expectations: device close must precede memory release attempts,
// hence the InSequence guard.
InSequence seq;
EXPECT_CALL(_mock_api, Gna2DeviceClose(_)).WillOnce(Return(Gna2StatusSuccess));
if (_create_model) {
EXPECT_CALL(_mock_api, Gna2MemoryFree(_)).Times(AtLeast(1)).WillRepeatedly(Return(Gna2StatusSuccess));
}
}
// Sets the device version that the mocked Gna2DeviceGetVersion() will report.
void GnaMockApiInitializer::set_gna_device_version(const Gna2DeviceVersion val) {
_gna_device_version = val;
}
// Controls whether init() arms the model-creation expectations (true when the
// test expects LoadNetwork to succeed).
void GnaMockApiInitializer::set_create_model(const bool val) {
_create_model = val;
}

View File

@ -0,0 +1,23 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "gna_mock_api.hpp"
#include <gna2-common-api.h>
#include "cstdint"
#include "vector"
// Owns a GNA2 C-API mock and configures the expectations a GNAPlugin
// LoadNetwork run needs; also keeps the buffers backing mocked allocations
// alive for the duration of a test.
class GnaMockApiInitializer {
GNACppApi _mock_api;  // gmock of the GNA2 C API
std::vector<std::vector<uint8_t>> _mocked_gna_memory;  // storage behind Gna2MemoryAlloc results
Gna2DeviceVersion _gna_device_version = Gna2DeviceVersionSoftwareEmulation;  // version reported by the mock
bool _create_model = true;  // arm model-creation expectations in init()
public:
void init();
void set_gna_device_version(const Gna2DeviceVersion val);
void set_create_model(const bool val);
};

View File

@ -0,0 +1,90 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gna_mock_api_initializer.hpp"
#include "common/gna_target.hpp"
#include "gna_plugin.hpp"
#include "ngraph_functions/builders.hpp"
#include <gtest/gtest.h>
namespace {
// Shape description of a single Convolution test model.
// C-style `typedef struct` replaced with the idiomatic C++ struct declaration;
// the type names and member layout are unchanged.
struct ConvModel {
    const std::vector<size_t> input_size;      // NHWC-style input shape
    const std::vector<size_t> filter_size;     // kernel H x W
    const std::vector<ptrdiff_t> pads_begin;   // padding before each spatial axis
    const std::vector<ptrdiff_t> pads_end;     // padding after each spatial axis
};

// Models with symmetric, non-zero input padding: rejected on GNA 3.0
// (no padding support) and accepted on GNA 3.5.
const std::vector<ConvModel> models{
    {{1, 8, 32, 1}, {2, 1}, {1, 0}, {1, 0}},
    {{1, 8, 1, 32}, {1, 2}, {0, 1}, {0, 1}}
};

// One test case: the model to load, the device version the mocked GNA
// library reports, and whether LoadNetwork is expected to succeed.
struct ConvModelTestParams {
    ConvModel model;
    Gna2DeviceVersion mock_target;
    bool load_succesfull;  // [sic] spelling kept — referenced by the test fixture
};

std::vector<ConvModelTestParams> all_tests{
    {models[0], Gna2DeviceVersion3_0, false},
    {models[1], Gna2DeviceVersion3_0, false},
    {models[0], Gna2DeviceVersion3_5, true},
    {models[1], Gna2DeviceVersion3_5, true}};
// Parameterized fixture: builds a small single-Convolution ngraph function
// with explicit input padding, mocks the GNA library to report a specific
// device version, and checks whether GNAPlugin::LoadNetwork succeeds.
class GNAPluginLoadNetworkTest : public ::testing::Test, public ::testing::WithParamInterface<ConvModelTestParams> {
std::shared_ptr<ngraph::Function> function;  // model under test, built in SetUp()
protected:
void Run() {
SetUp();
const auto test_parameter = GetParam();
GnaMockApiInitializer mock;
mock.set_gna_device_version(test_parameter.mock_target);
// Model-creation expectations are armed only when loading should succeed.
mock.set_create_model(test_parameter.load_succesfull);
mock.init();
GNAPluginNS::GNAPlugin gna_plugin{};
InferenceEngine::CNNNetwork cnn_network{function};
bool load_succesfull = true;
// LoadNetwork signals unsupported configurations by throwing; translate
// that into a boolean and compare against the expected outcome.
try {
gna_plugin.LoadNetwork(cnn_network);
} catch (std::exception&) {
load_succesfull = false;
}
EXPECT_EQ(test_parameter.load_succesfull, load_succesfull);
}
void SetUp() override {
const std::vector<size_t> c_strides{1, 1};
const std::vector<size_t> c_dilations{1, 1};
constexpr size_t c_num_out_channels = 8;
const auto& model = GetParam().model;
using ngraph::element::f32;
auto parameter = std::make_shared<ngraph::opset9::Parameter>(f32, ngraph::Shape{model.input_size});
// Explicit pad type keeps the pads_begin/pads_end values from the test
// parameters instead of letting ngraph recompute them.
auto conv = std::dynamic_pointer_cast<ngraph::opset9::Convolution>(
ngraph::builder::makeConvolution(parameter,
f32,
model.filter_size,
c_strides,
model.pads_begin,
model.pads_end,
c_dilations,
ngraph::op::PadType::EXPLICIT,
c_num_out_channels));
auto result = std::make_shared<ngraph::opset9::Result>(conv);
function = std::make_shared<ngraph::Function>(result, ov::ParameterVector{parameter}, "convolution");
}
};
// This test covers GNAGraphCompiler::ShouldUseOnlyConv2DGnaIface()
// behavior when specific Gna2DeviceVersion is detected by Gna2DeviceGetVersion()
TEST_P(GNAPluginLoadNetworkTest, ReturnsSpecificGna2DeviceVersion) {
Run();
}
// Instantiate over every (model, mocked device version, expected result) tuple.
INSTANTIATE_TEST_SUITE_P(smoke_LoadConvolution1D, GNAPluginLoadNetworkTest, ::testing::ValuesIn(all_tests));
} // namespace

View File

@ -3,7 +3,7 @@
//
#pragma once
#include <gmock/gmock-generated-function-mockers.h>
#include <gmock/gmock.h>
#include <gna2-instrumentation-api.h>
#include <gna2-inference-api.h>
#include <gna2-model-export-api.h>