[GNA] Additional tests on compact mode (#10969)

* Moved InitGNADevice to plugin constructor

* Added tests for ordering layers

* Added allocator header

* Fixed fused_iterator header

* protected GNAMemRequestsQueue properties

* Fixed unit test names

* Fixed compile issue

* Fixed default initialization

* Fixed deprecated matchers

* Fixed pwl deprecated tests

* Added page alignment

* Reset gnadevice in the tests

* Update src/plugins/intel_gna/gna_fused_iterator.hpp

Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>

* Revert "Update src/plugins/intel_gna/gna_fused_iterator.hpp"

This reverts commit d624bdadaf.

Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
This commit is contained in:
Mikhail Ryzhov
2022-03-31 13:56:25 +03:00
committed by GitHub
parent 3578ee9c3f
commit a9853d2790
9 changed files with 260 additions and 145 deletions

View File

@@ -27,7 +27,7 @@ void * ExportSueLegacyUsingGnaApi2(
status = Gna2ModelExportConfigSetTarget(exportConfig, Gna2DeviceVersionEmbedded1_0);
GNADeviceHelper::checkGna2Status(status, "Gna2ModelExportConfigSetTarget");
void * bufferSueCreekHeader;
void * bufferSueCreekHeader = nullptr;
uint32_t bufferSueCreekHeaderSize;
status = Gna2ModelExport(exportConfig,
@@ -37,7 +37,7 @@ void * ExportSueLegacyUsingGnaApi2(
(*modelHeader) = *(reinterpret_cast<Gna2ModelSueCreekHeader*>(bufferSueCreekHeader));
void * bufferDump;
void * bufferDump = nullptr;
uint32_t bufferDumpSize;
status = Gna2ModelExport(exportConfig,
Gna2ModelExportComponentLegacySueCreekDump,

View File

@@ -111,7 +111,7 @@ void GNADeviceHelper::enforceLegacyCnnsWhenNeeded(Gna2Model& gnaModel) {
uint32_t GNADeviceHelper::createModel(Gna2Model& gnaModel) const {
std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
uint32_t modelId;
uint32_t modelId = 0;
const auto legacyExecTarget = enforceLegacyCnnNeeded();
if (legacyExecTarget) {
enforceLegacyCnns(gnaModel);
@@ -231,7 +231,7 @@ void GNADeviceHelper::checkGna2Status(Gna2Status status, const Gna2Model& gnaMod
decoratedGnaLibVersion();
}
Gna2ModelError error;
Gna2ModelError error{};
auto getLastErrorStatus = Gna2ModelGetLastError(&error);
checkGna2Status(getLastErrorStatus, "Gna2ModelGetLastError");

View File

@@ -8,6 +8,7 @@
#include <list>
#include <string>
#include "gna_graph_tools.hpp"
#include "layers/gna_layer_helpers.hpp"
namespace GNAPluginNS {
/**

View File

@@ -31,7 +31,6 @@
#include "frontend/model_quantizer.hpp"
#include "gna_fused_iterator.hpp"
#include "backend/am_intel_dnn.hpp"
#include "memory/gna_allocator.hpp"
#include "memory/gna_memory_state.hpp"
#include "gna_model_serial.hpp"
#include "runtime/gna_float_runtime.hpp"
@@ -324,11 +323,13 @@ void GNAPlugin::ImportFrames(void *ptr_dst,
GNAPlugin::GNAPlugin() {
Init();
UpdateFieldsFromConfig();
InitGNADevice();
}
GNAPlugin::GNAPlugin(const std::map<std::string, std::string>& configMap) {
Init();
SetConfig(configMap);
InitGNADevice();
}
void GNAPlugin::Init() {
@@ -345,14 +346,18 @@ void GNAPlugin::Init() {
void GNAPlugin::InitGNADevice() {
OV_ITT_SCOPED_TASK(itt::domains::GNA_LT, "InitGNADevice");
gnadevice = std::make_shared<GNADeviceHelper>(config.gnaExecTarget,
config.gnaCompileTarget,
config.swExactMode,
gnaFlags->performance_counting,
!config.dumpXNNPath.empty(),
GetDeviceVersionFromString(config.dumpXNNGeneration));
size_t page_size_bytes = 4096;
gnamem = std::make_shared<gna_memory_type>(memory::make_polymorph<memory::GNAAllocator>(gnadevice), page_size_bytes);
if (gnaFlags->sw_fp32) {
gnamem.reset(new gna_memory_type(memory::make_polymorph<std::allocator<uint8_t>>()));
} else {
gnadevice = std::make_shared<GNADeviceHelper>(config.gnaExecTarget,
config.gnaCompileTarget,
config.swExactMode,
gnaFlags->performance_counting,
!config.dumpXNNPath.empty(),
GetDeviceVersionFromString(config.dumpXNNGeneration));
size_t page_size_bytes = 4096;
gnamem = std::make_shared<gna_memory_type>(memory::make_polymorph<memory::GNAAllocator>(gnadevice), page_size_bytes);
}
graphCompiler.setGNAMemoryPtr(gnamem);
}
@@ -635,10 +640,6 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
OV_ITT_SCOPED_TASK(itt::domains::GNAPlugin, "LoadNetwork");
std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> convertedNetwork;
if (!gnaFlags->sw_fp32) {
InitGNADevice();
}
std::string effectiveGnaCompileTarget = config.gnaCompileTarget;
if (gnadevice) {
effectiveGnaCompileTarget = gnadevice->getEffectiveGnaCompileTarget();
@@ -894,15 +895,7 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
graphCompiler.fillMemoryConnections(memoryPairs);
if (!graphCompiler.memory_connection.empty() && gnaFlags->num_requests != 1) {
// TODO: check if updating the number of threads is needed for sw_fp32
gnaFlags->num_requests = 1;
if (!gnaFlags->sw_fp32)
InitGNADevice();
}
if (gnaFlags->sw_fp32) {
gnamem.reset(new gna_memory_type(memory::make_polymorph<std::allocator<uint8_t>>()));
graphCompiler.setGNAMemoryPtr(gnamem);
}
// keep inputs information and create input primitives
@@ -1508,9 +1501,6 @@ void GNAPlugin::SetName(const std::string & pluginName) noexcept {
InferenceEngine::IExecutableNetworkInternal::Ptr GNAPlugin::ImportNetwork(std::istream& networkModel) {
auto header = GNAModelSerial::ReadHeader(networkModel);
InitGNADevice();
graphCompiler.setGNAMemoryPtr(gnamem);
void *basePtr = nullptr;
gnamem->reserve_ptr(nullptr, &basePtr, header.gnaMemSize);
gnamem->commit();

View File

@@ -17,6 +17,7 @@
#include "gna_lib_ver_selector.hpp"
#include "memory_solver.hpp"
#include "gna_plugin_log.hpp"
#include "memory/gna_allocator.hpp"
#ifdef GNA_HEAP_PROFILER
#include <iomanip>
@@ -32,6 +33,7 @@ namespace memory {
*/
template<class Allocator = std::allocator<uint8_t>>
class GNAMemory : public GNAMemRequestsQueue {
protected:
std::vector<MemRequest> _future_heap;
std::list<std::vector<char>> _local_storage;
size_t _total = 0;

View File

@@ -3,9 +3,18 @@
//
#include <vector>
#include <memory>
#include <gtest/gtest.h>
#include <legacy/ie_layers.h>
#include <legacy/graph_tools.hpp>
#include <legacy/details/ie_cnn_network_tools.h>
#include "ngraph_functions/builders.hpp"
#include "memory/gna_memory.hpp"
#include "gna_plugin.hpp"
#include "gna_fused_iterator.hpp"
#include "gna_data_types.hpp"
using namespace InferenceEngine;
using namespace GNAPluginNS::memory;
@@ -247,4 +256,112 @@ TEST_F(GNAMemoryCompactTest, canOptimizeReservePtrWithOffset) {
mem.commit(isCompact);
ASSERT_EQ(mem.getRWBytes(), 4 * sizeof(float));
ASSERT_EQ(mem.getTotalBytes(), 4 * sizeof(float));
}
// Test helper that exposes GNAMemory internals (the _future_heap request
// queue, made accessible via the newly protected GNAMemRequestsQueue
// members) so a test can inspect queued allocation requests directly.
class GNAMemoryTested : public GNAPluginNS::memory::GNAMemory<GNAPluginNS::memory::PolymorphAllocator<uint8_t>> {
    using GNAMemory::GNAMemory;
public:
    // Asserts that every pair of queued RW (non-bind) allocation requests has
    // intersecting life limits, i.e. their lifetimes overlap pairwise.
    void Test() {
        // filtering RW allocation requests only
        auto filter_req = [] (const MemRequest &re) { return re._region == REGION_RW && re._type != REQUEST_BIND; };
        std::vector<MemRequest> test_reqs;
        // NOTE: the iterator returned by copy_if is not needed — only the
        // filtered copies in test_reqs are used below.
        std::copy_if(_future_heap.begin(), _future_heap.end(), std::back_inserter(test_reqs), filter_req);

        // intercrossing condition: a request with the default (full-range)
        // life limits is treated as crossing everything
        auto is_crossed = [] (const MemRequest &re1, const MemRequest &re2) {
            const std::pair<uint16_t, uint16_t> limits_default {0, UINT16_MAX};
            if (re1._life_limits == limits_default || re2._life_limits == limits_default) {
                return true;
            }
            // either endpoint of re1 falls strictly inside re2's life span
            return (re1._life_limits.first > re2._life_limits.first && re1._life_limits.first < re2._life_limits.second) ||
                   (re1._life_limits.second > re2._life_limits.first && re1._life_limits.second < re2._life_limits.second);
        };

        // verify that all requests are pairwise intercrossed
        for (auto re_it_1 = test_reqs.begin(); re_it_1 != test_reqs.end(); ++re_it_1) {
            for (auto re_it_2 = re_it_1 + 1; re_it_2 != test_reqs.end(); ++re_it_2) {
                ASSERT_TRUE(is_crossed(*re_it_1, *re_it_2));
            }
        }
    }
};
// Plugin subclass for tests: replaces the production memory queue with the
// introspectable GNAMemoryTested and drops the GNA device handle so that
// LoadNetwork runs without touching real device initialization.
class GNAPluginTested : public GNAPluginNS::GNAPlugin {
public:
    // typed alias of the installed memory queue, kept so tests can reach
    // GNAMemoryTested::Test() after LoadNetwork has queued requests
    std::shared_ptr<GNAMemoryTested> gnamem_t;
    GNAPluginTested() : GNAPluginNS::GNAPlugin() {
        gnamem_t = std::make_shared<GNAMemoryTested>(make_polymorph<std::allocator<uint8_t>>());
        // install the test queue both on the plugin and its graph compiler
        gnamem = gnamem_t;
        graphCompiler.setGNAMemoryPtr(gnamem);
        // discard the device created by the base constructor — presumably to
        // force the software-only allocation path; TODO confirm
        gnadevice.reset();
    }
    void Test() {
        gnamem_t->Test();
    }
};
// Fixture for tests verifying the ordering of GNA memory allocation requests
// produced for fused layer chains (conv + activation / pooling).
class GNAMemoryOrderTest : public ::testing::Test {};
// Builds conv -> sigmoid, loads it, and checks that the RW memory requests of
// the fused layers have pairwise overlapping lifetimes.
TEST_F(GNAMemoryOrderTest, orderingFusedLayersActivation) {
    auto plugin = GNAPluginTested();

    ov::Shape input_shape = { 1, 8, 20, 16 };
    ov::Strides strides = { 1, 1 };
    ov::Strides dilations = { 1, 1 };
    // Brace-init is essential: CoordinateDiff(0, 0) would call the vector
    // (count, value) constructor and yield an EMPTY padding vector instead of
    // the intended two zero paddings.
    ov::CoordinateDiff pad_begin{0, 0}, pad_end{0, 0};
    auto weights = ngraph::builder::makeConstant<float>(ov::element::f32, { 8, 8, 1, 1 }, { 1.f });

    auto input = std::make_shared<ngraph::opset8::Parameter>(ov::element::f32, input_shape);
    auto conv = std::make_shared<ngraph::opset8::Convolution>(input, weights, strides, pad_begin, pad_end, dilations);
    auto activation = ngraph::builder::makeActivation(conv, ov::element::f32, ngraph::helpers::ActivationTypes::Sigmoid);
    auto result = std::make_shared<ngraph::opset8::Result>(activation);
    auto function = std::make_shared<ov::Model>(ov::ResultVector({result}), ov::ParameterVector({input}), "convolution");

    InferenceEngine::CNNNetwork cnn_network(function);
    plugin.LoadNetwork(cnn_network);
    plugin.Test();
}
// Builds conv -> 1x1 max-pool, loads it, and checks that the RW memory
// requests of the fused layers have pairwise overlapping lifetimes.
TEST_F(GNAMemoryOrderTest, orderingFusedLayersMaxPool) {
    auto plugin = GNAPluginTested();

    ov::Shape input_shape = { 1, 8, 20, 16 };
    ov::Strides strides = { 1, 1 };
    ov::Strides dilations = { 1, 1 };
    // Brace-init is essential: CoordinateDiff(0, 0) would call the vector
    // (count, value) constructor and yield an EMPTY padding vector instead of
    // the intended two zero paddings.
    ov::CoordinateDiff pad_begin{0, 0}, pad_end{0, 0};
    auto weights = ngraph::builder::makeConstant<float>(ov::element::f32, { 8, 8, 1, 1 }, { 1.f });

    auto input = std::make_shared<ngraph::opset8::Parameter>(ov::element::f32, input_shape);
    auto conv = std::make_shared<ngraph::opset8::Convolution>(input, weights, strides, pad_begin, pad_end, dilations);
    auto maxpool = ngraph::builder::makePooling(conv, {1, 1}, {0, 0}, {0, 0}, {1, 1}, ngraph::op::RoundingType::FLOOR,
                                                ngraph::op::PadType::VALID, false, ngraph::helpers::PoolingTypes::MAX);
    auto result = std::make_shared<ngraph::opset8::Result>(maxpool);
    auto function = std::make_shared<ov::Model>(ov::ResultVector({result}), ov::ParameterVector({input}), "convolution");

    InferenceEngine::CNNNetwork cnn_network(function);
    plugin.LoadNetwork(cnn_network);
    plugin.Test();
}
// Builds conv -> sigmoid -> 1x1 max-pool, loads it, and checks that the RW
// memory requests of the fused layers have pairwise overlapping lifetimes.
TEST_F(GNAMemoryOrderTest, orderingFusedLayersActivationMaxPool) {
    auto plugin = GNAPluginTested();

    ov::Shape input_shape = { 1, 8, 20, 16 };
    ov::Strides strides = { 1, 1 };
    ov::Strides dilations = { 1, 1 };
    // Brace-init is essential: CoordinateDiff(0, 0) would call the vector
    // (count, value) constructor and yield an EMPTY padding vector instead of
    // the intended two zero paddings.
    ov::CoordinateDiff pad_begin{0, 0}, pad_end{0, 0};
    auto weights = ngraph::builder::makeConstant<float>(ov::element::f32, { 8, 8, 1, 1 }, { 1.f });

    auto input = std::make_shared<ngraph::opset8::Parameter>(ov::element::f32, input_shape);
    auto conv = std::make_shared<ngraph::opset8::Convolution>(input, weights, strides, pad_begin, pad_end, dilations);
    auto activation = ngraph::builder::makeActivation(conv, ov::element::f32, ngraph::helpers::ActivationTypes::Sigmoid);
    auto maxpool = ngraph::builder::makePooling(activation, {1, 1}, {0, 0}, {0, 0}, {1, 1}, ngraph::op::RoundingType::FLOOR,
                                                ngraph::op::PadType::VALID, false, ngraph::helpers::PoolingTypes::MAX);
    auto result = std::make_shared<ngraph::opset8::Result>(maxpool);
    auto function = std::make_shared<ov::Model>(ov::ResultVector({result}), ov::ParameterVector({input}), "convolution");

    InferenceEngine::CNNNetwork cnn_network(function);
    plugin.LoadNetwork(cnn_network);
    plugin.Test();
}

View File

@@ -101,12 +101,112 @@ void expect_enqueue_calls(GNACppApi &mockApi){
void GNAPropagateMatcher :: match() {
try {
// matching gna propagate forward call.
GNAPlugin plugin(_env.config);
size_t inputSize = 10;
size_t outputSize = 10;
InputsDataMap inputsInfo;
OutputsDataMap outputsInfo;
StrictMock<GNACppApi> mockApi;
std::vector<uint8_t> data;
if (_env.config[GNA_CONFIG_KEY(DEVICE_MODE)].compare(GNA_CONFIG_VALUE(SW_FP32)) != 0 &&
!_env.matchThrows) {
EXPECT_CALL(mockApi, Gna2MemoryAlloc(_, _, _)).WillOnce(Invoke([&data](
uint32_t sizeRequested,
uint32_t *sizeGranted,
void **memoryAddress
) {
data.resize(sizeRequested);
*sizeGranted = sizeRequested;
*memoryAddress = &data.front();
return Gna2StatusSuccess;
}));
EXPECT_CALL(mockApi, Gna2DeviceGetVersion(_,_)).WillOnce(Invoke([](
uint32_t deviceIndex,
enum Gna2DeviceVersion * deviceVersion) {
*deviceVersion = Gna2DeviceVersionSoftwareEmulation;
return Gna2StatusSuccess;
}));
EXPECT_CALL(mockApi, Gna2DeviceOpen(_)).WillOnce(Return(Gna2StatusSuccess));
EXPECT_CALL(mockApi, Gna2GetLibraryVersion(_,_)).Times(AtLeast(0)).WillRepeatedly(Return(Gna2StatusSuccess));
EXPECT_CALL(mockApi, Gna2InstrumentationConfigCreate(_,_,_,_)).WillOnce(Return(Gna2StatusSuccess));
if(_env.is_setup_of_omp_theads_expected == true) {
EXPECT_CALL(mockApi, Gna2DeviceSetNumberOfThreads(_,_)).WillOnce(Return(Gna2StatusSuccess));
}
std::unique_ptr<NNetComponentMatcher> combined(new NNetComponentMatcher());
for (auto & matchWhat : _env.whatToMatch) {
switch(matchWhat.type) {
case GnaPluginTestEnvironment::matchPrecision :
combined->add(new NNetPrecisionMatcher(_env.nnet_precision, INTEL_AFFINE));
break;
case GnaPluginTestEnvironment::matchProcType :
expect_enqueue_calls(mockApi);
break;
case GnaPluginTestEnvironment::matchPwlInserted :
combined->add(new PWLMatcher(_env.matchInserted, matchWhat.matchQuantity, _env.pwlsToMatchWith));
break;
case GnaPluginTestEnvironment::matchConvInserted:
combined->add(new ConvoluionLayerMatcher(_env.matchInserted, matchWhat.matchQuantity));
break;
case GnaPluginTestEnvironment::matchMaxPoolingInserted:
combined->add(new PoolingLayerMatcher(_env.matchInserted, matchWhat.matchQuantity, true));
break;
case GnaPluginTestEnvironment::matchPwlQuantizeMetrics :
combined->add(new PWLQuantizationMetricsMatcher(_env.type,
_env.quantization_presicion_threshold,
_env.quantization_segments_threshold));
break;
case GnaPluginTestEnvironment::matchCopyInserted :
combined->add(new CopyLayerMatcher(_env.matchInserted, matchWhat.matchQuantity));
break;
case GnaPluginTestEnvironment::matchDiagonalInserted :
combined->add(new DiagLayerMatcher(_env.matchInserted, matchWhat.matchQuantity));
break;
case GnaPluginTestEnvironment::saveArgs :
expect_enqueue_calls(mockApi);
break;
case GnaPluginTestEnvironment::matchInputData :
combined->add(new InputDataMatcher(_env.input_processed));
break;
case GnaPluginTestEnvironment::fillOutputValues :
combined->add(new OutputFiller(_env.fillValue, _env.fillValue));
break;
case GnaPluginTestEnvironment::matchAffineWeightsTranspose:
HasWeightsTranspozed(combined, _env.transposedData, _env.transposeArgs);
break;
case GnaPluginTestEnvironment::matchAffineWeights:
HasWeightsEq(combined, _env.transposedData);
break;
case GnaPluginTestEnvironment::matchAffineWeightsSize:
HasWeightsSizeEq(combined, _env.matched_weight_size);
break;
case GnaPluginTestEnvironment::saveAffineWeights:
SaveWeights(combined, _env.transposedData, _env.transposedArgsForSaving);
break;
default:
expect_enqueue_calls(mockApi);
break;
}
}
if (combined && !combined->empty()) {
expect_enqueue_calls(mockApi);
}
EXPECT_CALL(mockApi, Gna2MemoryFree(_)).WillOnce(Return(Gna2StatusSuccess));
EXPECT_CALL(mockApi, Gna2DeviceClose(_)).WillOnce(Return(Gna2StatusSuccess));
}
GNAPlugin plugin(_env.config);
auto loadNetworkFromIR = [&] () -> InferenceEngine::CNNNetwork {
Core core;
auto weights_fake = make_shared_blob<uint8_t>(TensorDesc(Precision::U8,
@@ -273,102 +373,6 @@ void GNAPropagateMatcher :: match() {
}
};
StrictMock<GNACppApi> mockApi;
std::vector<uint8_t> data;
if (_env.config[GNA_CONFIG_KEY(DEVICE_MODE)].compare(GNA_CONFIG_VALUE(SW_FP32)) != 0 &&
!_env.matchThrows) {
EXPECT_CALL(mockApi, Gna2MemoryAlloc(_, _, _)).WillOnce(Invoke([&data](
uint32_t sizeRequested,
uint32_t *sizeGranted,
void **memoryAddress
) {
data.resize(sizeRequested);
*sizeGranted = sizeRequested;
*memoryAddress = &data.front();
return Gna2StatusSuccess;
}));
EXPECT_CALL(mockApi, Gna2DeviceGetVersion(_,_)).WillOnce(Invoke([](
uint32_t deviceIndex,
enum Gna2DeviceVersion * deviceVersion) {
*deviceVersion = Gna2DeviceVersionSoftwareEmulation;
return Gna2StatusSuccess;
}));
EXPECT_CALL(mockApi, Gna2DeviceOpen(_)).WillOnce(Return(Gna2StatusSuccess));
EXPECT_CALL(mockApi, Gna2GetLibraryVersion(_,_)).Times(AtLeast(0)).WillRepeatedly(Return(Gna2StatusSuccess));
EXPECT_CALL(mockApi, Gna2InstrumentationConfigCreate(_,_,_,_)).WillOnce(Return(Gna2StatusSuccess));
if(_env.is_setup_of_omp_theads_expected == true) {
EXPECT_CALL(mockApi, Gna2DeviceSetNumberOfThreads(_,_)).WillOnce(Return(Gna2StatusSuccess));
}
std::unique_ptr<NNetComponentMatcher> combined(new NNetComponentMatcher());
for (auto & matchWhat : _env.whatToMatch) {
switch(matchWhat.type) {
case GnaPluginTestEnvironment::matchPrecision :
combined->add(new NNetPrecisionMatcher(_env.nnet_precision, INTEL_AFFINE));
break;
case GnaPluginTestEnvironment::matchProcType :
expect_enqueue_calls(mockApi);
break;
case GnaPluginTestEnvironment::matchPwlInserted :
combined->add(new PWLMatcher(_env.matchInserted, matchWhat.matchQuantity, _env.pwlsToMatchWith));
break;
case GnaPluginTestEnvironment::matchConvInserted:
combined->add(new ConvoluionLayerMatcher(_env.matchInserted, matchWhat.matchQuantity));
break;
case GnaPluginTestEnvironment::matchMaxPoolingInserted:
combined->add(new PoolingLayerMatcher(_env.matchInserted, matchWhat.matchQuantity, true));
break;
case GnaPluginTestEnvironment::matchPwlQuantizeMetrics :
combined->add(new PWLQuantizationMetricsMatcher(_env.type,
_env.quantization_presicion_threshold,
_env.quantization_segments_threshold));
break;
case GnaPluginTestEnvironment::matchCopyInserted :
combined->add(new CopyLayerMatcher(_env.matchInserted, matchWhat.matchQuantity));
break;
case GnaPluginTestEnvironment::matchDiagonalInserted :
combined->add(new DiagLayerMatcher(_env.matchInserted, matchWhat.matchQuantity));
break;
case GnaPluginTestEnvironment::saveArgs :
expect_enqueue_calls(mockApi);
break;
case GnaPluginTestEnvironment::matchInputData :
combined->add(new InputDataMatcher(_env.input_processed));
break;
case GnaPluginTestEnvironment::fillOutputValues :
combined->add(new OutputFiller(_env.fillValue, _env.fillValue));
break;
case GnaPluginTestEnvironment::matchAffineWeightsTranspose:
HasWeightsTranspozed(combined, _env.transposedData, _env.transposeArgs);
break;
case GnaPluginTestEnvironment::matchAffineWeights:
HasWeightsEq(combined, _env.transposedData);
break;
case GnaPluginTestEnvironment::matchAffineWeightsSize:
HasWeightsSizeEq(combined, _env.matched_weight_size);
break;
case GnaPluginTestEnvironment::saveAffineWeights:
SaveWeights(combined, _env.transposedData, _env.transposedArgsForSaving);
break;
default:
expect_enqueue_calls(mockApi);
break;
}
}
if (combined && !combined->empty()) {
expect_enqueue_calls(mockApi);
}
}
loadNetwork();
if (!inputsInfo.empty()) {

View File

@@ -642,6 +642,7 @@ class GNATest : public U, public GNATestConfigurability<GNATest<U>> {
return *this;
}
// Fluent setter: forces DEVICE_MODE to GNA_AUTO in the test config so the
// run does not take the SW_FP32-only path (the matcher sets up GNA device
// mocks only when DEVICE_MODE != SW_FP32). Returns *this for chaining.
GNATest & gna() {
    _env.config[GNA_CONFIG_KEY(DEVICE_MODE)] = GNA_CONFIG_VALUE(AUTO);
    return *this;
}
GNATest & from() {

View File

@@ -41,7 +41,7 @@ using namespace GNATestIRs;
TEST_F(PWLAproximationTest, forTanhOnRecursiveAlgoWithPrecisionThresholdIsSuccess) {
assert_that().onInferNgraphModel(detail::CreateActivationFunction<ngraph::opset8::Tanh>({1, 10}))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.propagate_forward()
.called_with()
@@ -51,7 +51,7 @@ TEST_F(PWLAproximationTest, forTanhOnRecursiveAlgoWithPrecisionThresholdIsSucces
TEST_F(PWLAproximationTest, forSigmoidOnRecursiveAlgoWithPrecisionThresholdIsSuccess) {
assert_that().onInferNgraphModel(detail::CreateActivationFunction<ngraph::opset8::Sigmoid>({1, 10}))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.propagate_forward()
.called_with()
@@ -61,7 +61,7 @@ TEST_F(PWLAproximationTest, forSigmoidOnRecursiveAlgoWithPrecisionThresholdIsSuc
TEST_F(PWLAproximationTest, forReLUonRecursiveAlgoWithPrecisionThresholdIsSuccess) {
assert_that().onInferNgraphModel(detail::CreateActivationFunction<ngraph::opset8::Relu>({1, 1, 10, 10}))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.propagate_forward()
.called_with()
@@ -71,7 +71,7 @@ TEST_F(PWLAproximationTest, forReLUonRecursiveAlgoWithPrecisionThresholdIsSucces
TEST_F(PWLAproximationTest, forLeakyReLUonRecursiveAlgoWithPrecisionThresholdIsSuccess) {
assert_that().onInferModel(LeakyReLUActivationModel())
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.propagate_forward()
.called_with()
@@ -81,7 +81,7 @@ TEST_F(PWLAproximationTest, forLeakyReLUonRecursiveAlgoWithPrecisionThresholdIsS
TEST_F(PWLAproximationTest, DISABLED_forIdentityOnRecursiveAlgoWithPrecisionThresholdIsSuccess) {
assert_that().onInferModel(IdentityActivationModel())
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.propagate_forward()
.called_with()
@@ -91,7 +91,7 @@ TEST_F(PWLAproximationTest, DISABLED_forIdentityOnRecursiveAlgoWithPrecisionThre
TEST_F(PWLAproximationTest, forClampOnRecursiveAlgoWithPrecisionThresholdIsSuccess) {
assert_that().onInferNgraphModel(detail::CreateActivationFunction<ngraph::opset8::Clamp>({1, 10}, -50, 50))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.propagate_forward()
.called_with()
@@ -101,7 +101,7 @@ TEST_F(PWLAproximationTest, forClampOnRecursiveAlgoWithPrecisionThresholdIsSucce
TEST_F(PWLAproximationTest, forTanhOnUniformAlgoWithPrecisionThresholdIsSuccess) {
assert_that().onInferNgraphModel(detail::CreateActivationFunction<ngraph::opset8::Tanh>({1, 10}))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.withUniformPWLAlgo()
.propagate_forward()
@@ -112,7 +112,7 @@ TEST_F(PWLAproximationTest, forTanhOnUniformAlgoWithPrecisionThresholdIsSuccess)
TEST_F(PWLAproximationTest, forSigmoidOnUniformAlgoWithPrecisionThresholdIsSuccess) {
assert_that().onInferNgraphModel(detail::CreateActivationFunction<ngraph::opset8::Sigmoid>({1, 10}))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.withUniformPWLAlgo()
.propagate_forward()
@@ -123,7 +123,7 @@ TEST_F(PWLAproximationTest, forSigmoidOnUniformAlgoWithPrecisionThresholdIsSucce
TEST_F(PWLAproximationTest, DISABLED_forIdentityOnUniformAlgoWithPrecisionThresholdIsSuccess) {
assert_that().onInferModel(IdentityActivationModel())
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.withUniformPWLAlgo()
.propagate_forward()
@@ -134,7 +134,7 @@ TEST_F(PWLAproximationTest, DISABLED_forIdentityOnUniformAlgoWithPrecisionThresh
TEST_F(PWLAproximationTest, forClampOnUniformAlgoWithPrecisionThresholdIsSuccess) {
assert_that().onInferNgraphModel(detail::CreateActivationFunction<ngraph::opset8::Clamp>({1, 10}, -50, 50))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.withUniformPWLAlgo()
.propagate_forward()
@@ -145,7 +145,7 @@ TEST_F(PWLAproximationTest, forClampOnUniformAlgoWithPrecisionThresholdIsSuccess
TEST_F(PWLAproximationTest, forSigmoidonRecursiveAlgoWithSegmentThresholdIsSuccess) {
assert_that().onInferNgraphModel(detail::CreateActivationFunction<ngraph::opset8::Sigmoid>({1, 10}))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.propagate_forward()
.called_with()
@@ -155,7 +155,7 @@ TEST_F(PWLAproximationTest, forSigmoidonRecursiveAlgoWithSegmentThresholdIsSucce
TEST_F(PWLAproximationTest, forTanhonRecursiveAlgoWithSegmentThresholdIsSuccess) {
assert_that().onInferNgraphModel(detail::CreateActivationFunction<ngraph::opset8::Tanh>({1, 10}))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.propagate_forward()
.called_with()
@@ -165,7 +165,7 @@ TEST_F(PWLAproximationTest, forTanhonRecursiveAlgoWithSegmentThresholdIsSuccess)
TEST_F(PWLAproximationTest, forReLUonRecursiveAlgoWithSegmentThresholdIsSuccess) {
assert_that().onInferNgraphModel(detail::CreateActivationFunction<ngraph::opset8::Relu>({1, 1, 10, 10}))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.propagate_forward()
.called_with()
@@ -175,7 +175,7 @@ TEST_F(PWLAproximationTest, forReLUonRecursiveAlgoWithSegmentThresholdIsSuccess)
TEST_F(PWLAproximationTest, forLeakyReLUonRecursiveAlgoWithSegmentThresholdIsSuccess) {
assert_that().onInferModel(LeakyReLUActivationModel())
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.propagate_forward()
.called_with()
@@ -185,7 +185,7 @@ TEST_F(PWLAproximationTest, forLeakyReLUonRecursiveAlgoWithSegmentThresholdIsSuc
TEST_F(PWLAproximationTest, DISABLED_forIdentityOnRecursiveAlgoWithSegmentThresholdIsSuccess) {
assert_that().onInferModel(IdentityActivationModel())
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.propagate_forward()
.called_with()
@@ -195,7 +195,7 @@ TEST_F(PWLAproximationTest, DISABLED_forIdentityOnRecursiveAlgoWithSegmentThresh
TEST_F(PWLAproximationTest, forClampOnRecursiveAlgoWithSegmentThresholdIsSuccess) {
assert_that().onInferNgraphModel(detail::CreateActivationFunction<ngraph::opset8::Clamp>({1, 10}, -50, 50))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.propagate_forward()
.called_with()
@@ -205,7 +205,7 @@ TEST_F(PWLAproximationTest, forClampOnRecursiveAlgoWithSegmentThresholdIsSuccess
TEST_F(PWLAproximationTest, forSigmoidonUniformAlgoWithSegmentThresholdIsSuccess) {
assert_that().onInferNgraphModel(detail::CreateActivationFunction<ngraph::opset8::Sigmoid>({1, 10}))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.withUniformPWLAlgo()
.propagate_forward()
@@ -216,7 +216,7 @@ TEST_F(PWLAproximationTest, forSigmoidonUniformAlgoWithSegmentThresholdIsSuccess
TEST_F(PWLAproximationTest, forTanhonUniformAlgoWithSegmentThresholdIsSuccess) {
assert_that().onInferNgraphModel(detail::CreateActivationFunction<ngraph::opset8::Tanh>({1, 10}))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.withUniformPWLAlgo()
.propagate_forward()
@@ -227,7 +227,7 @@ TEST_F(PWLAproximationTest, forTanhonUniformAlgoWithSegmentThresholdIsSuccess) {
TEST_F(PWLAproximationTest, DISABLED_forIdentityOnUniformAlgoWithSegmentThresholdIsSuccess) {
assert_that().onInferModel(IdentityActivationModel())
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.withUniformPWLAlgo()
.propagate_forward()
@@ -238,7 +238,7 @@ TEST_F(PWLAproximationTest, DISABLED_forIdentityOnUniformAlgoWithSegmentThreshol
TEST_F(PWLAproximationTest, forClampOnUniformAlgoWithSegmentThresholdIsSuccess) {
assert_that().onInferNgraphModel(detail::CreateActivationFunction<ngraph::opset8::Clamp>({1, 10}, -50, 50))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.withUniformPWLAlgo()
.propagate_forward()