[IE Myriad] Use instance of InferenceEngine::Core via ie::ICore interface in Myriad plugin (#1316)

* [ci-skip][IE Myriad] ie::ICore pointer passed into FrontEnd from plugin

* [ci-skip][IE Myriad] Added MockICore to fix graph transformer tests

* [ci-skip][IE Myriad] IN renamed to I_N to avoid compile error in Windows build: C2513: 'int': no variable declared before '='
This commit is contained in:
Nikita Kudriavtsev
2020-07-29 11:30:30 +03:00
committed by GitHub
parent 3a87653483
commit a644cb85d2
19 changed files with 130 additions and 72 deletions

View File

@@ -12,6 +12,7 @@
#include <tuple>
#include <set>
#include <ie_icore.hpp>
#include <cpp/ie_cnn_network.h>
#include <details/caseless.hpp>
@@ -30,7 +31,7 @@ class FrontEnd final {
public:
using Ptr = std::shared_ptr<FrontEnd>;
explicit FrontEnd(StageBuilder::Ptr stageBuilder);
explicit FrontEnd(StageBuilder::Ptr stageBuilder, const ie::ICore* core);
ModelPtr buildInitialModel(ie::ICNNNetwork& network);
@@ -204,6 +205,7 @@ private:
private:
StageBuilder::Ptr _stageBuilder;
const ie::ICore* _core = nullptr;
IeParsedNetwork _ieParsedNetwork;
std::unordered_set<ie::DataPtr> _unbatchedOutputs;

View File

@@ -14,6 +14,7 @@
#include <set>
#include <utility>
#include <ie_icore.hpp>
#include <ie_icnn_network.hpp>
#include <details/caseless.hpp>
@@ -164,11 +165,13 @@ CompiledGraph::Ptr compileNetwork(
ie::ICNNNetwork& network,
Platform platform,
const CompilationConfig& config,
const Logger::Ptr& log);
const Logger::Ptr& log,
const ie::ICore* core);
CompiledGraph::Ptr compileSubNetwork(
ie::ICNNNetwork& network,
const CompilationConfig& subConfig);
const CompilationConfig& subConfig,
const ie::ICore* core);
//
// getSupportedLayers
@@ -178,7 +181,8 @@ std::set<std::string> getSupportedLayers(
const ie::ICNNNetwork& network,
Platform platform,
const CompilationConfig& config,
const Logger::Ptr& log);
const Logger::Ptr& log,
const ie::ICore* core);
//
// Blob version and checks

View File

@@ -33,9 +33,10 @@ namespace vpu {
[this](const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) \
{ functor_name(model, layer, inputs, outputs); }
FrontEnd::FrontEnd(StageBuilder::Ptr stageBuilder)
: _stageBuilder(std::move(stageBuilder))
, parsers{{
FrontEnd::FrontEnd(StageBuilder::Ptr stageBuilder, const ie::ICore* core)
: _stageBuilder(std::move(stageBuilder)),
_core(core),
parsers{{
{"Convolution", LAYER_PARSER(parseConvolution)},
{"Pooling", LAYER_PARSER(parsePooling)},
{"ReLU", LAYER_PARSER(parseReLU)},
@@ -120,7 +121,9 @@ FrontEnd::FrontEnd(StageBuilder::Ptr stageBuilder)
{"StaticShapeReshape", LAYER_PARSER(parseReshape)},
{"Mish", LAYER_PARSER(parseMish)},
{"Gelu", LAYER_PARSER(parseGelu)},
}} {}
}} {
VPU_THROW_UNLESS(_core != nullptr, "Argument core is null");
}
ModelPtr FrontEnd::buildInitialModel(ie::ICNNNetwork& network) {
VPU_PROFILE(buildInitialModel);

View File

@@ -145,14 +145,15 @@ void CompileEnv::free() {
namespace {
CompiledGraph::Ptr compileImpl(ie::ICNNNetwork& network) {
CompiledGraph::Ptr compileImpl(ie::ICNNNetwork& network,
const ie::ICore* core) {
const auto& env = CompileEnv::get();
env.log->debug("Compile network [%s]", network.getName());
VPU_LOGGER_SECTION(env.log);
auto stageBuilder = std::make_shared<StageBuilder>();
auto frontEnd = std::make_shared<FrontEnd>(stageBuilder);
auto frontEnd = std::make_shared<FrontEnd>(stageBuilder, core);
auto backEnd = std::make_shared<BackEnd>();
auto passManager = std::make_shared<PassManager>(stageBuilder, backEnd);
@@ -197,7 +198,8 @@ CompiledGraph::Ptr compileNetwork(
ie::ICNNNetwork& network,
Platform platform,
const CompilationConfig& config,
const Logger::Ptr& log) {
const Logger::Ptr& log,
const ie::ICore* core) {
CompileEnv::init(platform, config, log);
AutoScope autoDeinit([] {
CompileEnv::free();
@@ -205,7 +207,7 @@ CompiledGraph::Ptr compileNetwork(
VPU_PROFILE(compileNetwork);
return compileImpl(network);
return compileImpl(network, core);
}
CompiledGraph::Ptr compileModel(
@@ -225,7 +227,8 @@ CompiledGraph::Ptr compileModel(
CompiledGraph::Ptr compileSubNetwork(
ie::ICNNNetwork& network,
const CompilationConfig& subConfig) {
const CompilationConfig& subConfig,
const ie::ICore* core) {
VPU_PROFILE(compileSubNetwork);
const auto& env = CompileEnv::get();
@@ -237,7 +240,7 @@ CompiledGraph::Ptr compileSubNetwork(
CompileEnv::updateConfig(subConfig);
return compileImpl(network);
return compileImpl(network, core);
}
//
@@ -248,7 +251,8 @@ std::set<std::string> getSupportedLayers(
const ie::ICNNNetwork& network,
Platform platform,
const CompilationConfig& config,
const Logger::Ptr& log) {
const Logger::Ptr& log,
const ie::ICore* core) {
CompileEnv::init(platform, config, log);
AutoScope autoDeinit([] {
CompileEnv::free();
@@ -257,7 +261,7 @@ std::set<std::string> getSupportedLayers(
VPU_PROFILE(getSupportedLayers);
auto stageBuilder = std::make_shared<StageBuilder>();
auto frontEnd = std::make_shared<FrontEnd>(stageBuilder);
auto frontEnd = std::make_shared<FrontEnd>(stageBuilder, core);
auto clonedNetworkImpl = ie::cloneNet(network);

View File

@@ -115,7 +115,7 @@ void PassImpl::run(const Model& model) {
continue;
}
int IN = inputDesc.dim(Dim::N);
int I_N = inputDesc.dim(Dim::N);
int IC = inputDesc.dim(Dim::C);
int ID = inputDesc.dim(Dim::D);
int IH = inputDesc.dim(Dim::H);
@@ -144,7 +144,7 @@ void PassImpl::run(const Model& model) {
"but: KO=%d, OC=%d", KO, OC);
// check spacial dims of output
int inputShape[] = {IW, IH, ID, IC, IN};
int inputShape[] = {IW, IH, ID, IC, I_N};
int outputShape[] = {OW, OH, OD, OC, ON};
int weightsShape[] = {KW, KH, KD, KI, KO};
for (int i = 0; i < 3; i++) {
@@ -311,7 +311,7 @@ void PassImpl::run(const Model& model) {
// create subInputs[i], if it was not created previously
if (subInputs[i] == nullptr) {
auto postfix = formatString("@input_depth=%d/%d", i + 1, ID);
DataDesc subInputsDesc(inputDesc.type(), DimsOrder::NCHW, {IW, IH, IC, IN});
DataDesc subInputsDesc(inputDesc.type(), DimsOrder::NCHW, {IW, IH, IC, I_N});
subInputs[i] = model->duplicateData(input, postfix, subInputsDesc);
}
@@ -378,7 +378,7 @@ void PassImpl::run(const Model& model) {
continue; // this subInputs[d] is not needed
}
auto postfix = formatString("@input_depth=%d/%d", d + 1, ID);
DataDesc subInputsDesc3D(inputDesc.type(), DimsOrder::NCDHW, {IW, IH, 1, IC, IN});
DataDesc subInputsDesc3D(inputDesc.type(), DimsOrder::NCDHW, {IW, IH, 1, IC, I_N});
subInputs3D[d] = model->duplicateData(input, postfix + "@3D", subInputsDesc3D);
_stageBuilder->addReshapeStage(model,
stage->name() + "@split",

View File

@@ -116,7 +116,7 @@ void PassImpl::run(const Model& model) {
VPU_THROW_UNLESS(inputDesc.type() == outputDesc.type(), "incompatible data types");
VPU_THROW_UNLESS(inputDesc.dimsOrder() == outputDesc.dimsOrder(), "incompatible dim orders");
int IN = inputDesc.dim(Dim::N);
int I_N = inputDesc.dim(Dim::N);
int IC = inputDesc.dim(Dim::C);
int ID = inputDesc.dim(Dim::D);
int IH = inputDesc.dim(Dim::H);
@@ -128,11 +128,11 @@ void PassImpl::run(const Model& model) {
int OH = outputDesc.dim(Dim::H);
int OW = outputDesc.dim(Dim::W);
VPU_THROW_UNLESS(IN == ON, "incompatible: input batch=%d, output batch=%d", IN, ON);
VPU_THROW_UNLESS(I_N == ON, "incompatible: input batch=%d, output batch=%d", I_N, ON);
VPU_THROW_UNLESS(IC == OC, "incompatible: input channels=%d, output channels=%d", IC, OC);
// check spacial dims of output
int inputShape[] = {IW, IH, ID, IC, IN};
int inputShape[] = {IW, IH, ID, IC, I_N};
int outputShape[] = {OW, OH, OD, OC, ON};
for (int i = 0; i < 3; i++) {
int expectedOutputSize = (inputShape[i]
@@ -318,7 +318,7 @@ void PassImpl::run(const Model& model) {
// create subInputs[i], if it was not created previously
if (subInputs[i] == nullptr) {
auto postfix = formatString("@input_depth=%d/%d", i + 1, ID);
DataDesc subInputsDesc(inputDesc.type(), DimsOrder::NCHW, {IW, IH, IC, IN});
DataDesc subInputsDesc(inputDesc.type(), DimsOrder::NCHW, {IW, IH, IC, I_N});
subInputs[i] = model->duplicateData(input, postfix, subInputsDesc);
}
@@ -358,7 +358,7 @@ void PassImpl::run(const Model& model) {
continue; // this subInputs[d] is not needed
}
auto postfix = formatString("@input_depth=%d/%d", d + 1, ID);
DataDesc subInputsDesc3D(inputDesc.type(), DimsOrder::NCDHW, {IW, IH, 1, IC, IN});
DataDesc subInputsDesc3D(inputDesc.type(), DimsOrder::NCDHW, {IW, IH, 1, IC, I_N});
subInputs3D[d] = model->duplicateData(input, postfix + "@3D", subInputsDesc3D);
_stageBuilder->addReshapeStage(model,
stage->name() + "@split",

View File

@@ -107,14 +107,14 @@ std::pair<int, int> getResolution(const std::string& str) {
ie::CNNNetwork loadSubNetwork(
const std::string& fileName,
const std::pair<int, int>& imgSize, int* zdir_batchsize = nullptr) {
const std::pair<int, int>& imgSize,
const ie::ICore* core,
int* zdir_batchsize = nullptr) {
//
// Load network
//
// ticket 30632 : replace with ICore interface
InferenceEngine::Core reader;
auto network = reader.ReadNetwork(fileName);
auto network = core->ReadNetwork(fileName, std::string());
//
// Set precision of input/output
@@ -206,8 +206,8 @@ void FrontEnd::parseMTCNN(const Model& model, const ie::CNNLayerPtr& layer, cons
// Convert p-nets
for (const auto& p_net_input : pyramid) {
auto pNet = loadSubNetwork(pnet_ir_name, p_net_input);
auto res = compileSubNetwork(pNet, env.config);
auto pNet = loadSubNetwork(pnet_ir_name, p_net_input, _core);
auto res = compileSubNetwork(pNet, env.config, _core);
mergedBlobSize += res->blob.size();
compiledSubNetworks.emplace_back(std::move(res));
}
@@ -215,16 +215,16 @@ void FrontEnd::parseMTCNN(const Model& model, const ie::CNNLayerPtr& layer, cons
int stage2_zdir_batchsize = 1;
// Convert r-net
{
auto rNet = loadSubNetwork(rnet_ir_name, r_net_input, &stage2_zdir_batchsize);
auto res = compileSubNetwork(rNet, env.config);
auto rNet = loadSubNetwork(rnet_ir_name, r_net_input, _core, &stage2_zdir_batchsize);
auto res = compileSubNetwork(rNet, env.config, _core);
mergedBlobSize += res->blob.size();
compiledSubNetworks.emplace_back(std::move(res));
}
// Convert o-net
{
auto oNet = loadSubNetwork(onet_ir_name, o_net_input);
auto res = compileSubNetwork(oNet, env.config);
auto oNet = loadSubNetwork(onet_ir_name, o_net_input, _core);
auto res = compileSubNetwork(oNet, env.config, _core);
mergedBlobSize += res->blob.size();
compiledSubNetworks.emplace_back(std::move(res));
}

View File

@@ -23,8 +23,10 @@ namespace MyriadPlugin {
ExecutableNetwork::ExecutableNetwork(
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr>& devicePool,
const MyriadConfig& config) :
_config(config) {
const MyriadConfig& config,
const ie::ICore* core) :
_config(config),
_core(core) {
VPU_PROFILE(ExecutableNetwork);
_log = std::make_shared<Logger>(
@@ -52,8 +54,9 @@ ExecutableNetwork::ExecutableNetwork(
ICNNNetwork& network,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr>& devicePool,
const MyriadConfig& config) :
ExecutableNetwork(std::move(mvnc), devicePool, config) {
const MyriadConfig& config,
const ie::ICore* core) :
ExecutableNetwork(std::move(mvnc), devicePool, config, core) {
VPU_PROFILE(ExecutableNetwork);
const auto compilerLog = std::make_shared<Logger>(
@@ -67,7 +70,8 @@ ExecutableNetwork::ExecutableNetwork(
network,
static_cast<Platform>(_device->_platform),
_config.compileConfig(),
compilerLog);
compilerLog,
_core);
_actualNumExecutors = compiledGraph->numExecutors;
_graphBlob = std::move(compiledGraph->blob);
@@ -145,8 +149,9 @@ void ExecutableNetwork::Import(std::istream& strm,
ExecutableNetwork::ExecutableNetwork(std::istream& strm,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr> &devicePool,
const MyriadConfig& config) :
ExecutableNetwork(std::move(mvnc), devicePool, config) {
const MyriadConfig& config,
const ie::ICore* core) :
ExecutableNetwork(std::move(mvnc), devicePool, config, core) {
VPU_PROFILE(ExecutableNetwork);
Import(strm, devicePool, config);
}
@@ -155,8 +160,9 @@ ExecutableNetwork::ExecutableNetwork(
const std::string& blobFilename,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr>& devicePool,
const MyriadConfig& config) :
ExecutableNetwork(std::move(mvnc), devicePool, config) {
const MyriadConfig& config,
const ie::ICore* core) :
ExecutableNetwork(std::move(mvnc), devicePool, config, core) {
VPU_PROFILE(ExecutableNetwork);
std::ifstream blobFile{blobFilename, std::ios::binary};
Import(blobFile, devicePool, config);

View File

@@ -35,17 +35,20 @@ public:
explicit ExecutableNetwork(InferenceEngine::ICNNNetwork &network,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr> &devicePool,
const MyriadConfig& config);
const MyriadConfig& config,
const ie::ICore* core);
explicit ExecutableNetwork(std::istream& strm,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr> &devicePool,
const MyriadConfig& config);
const MyriadConfig& config,
const ie::ICore* core);
explicit ExecutableNetwork(const std::string &blobFilename,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr> &devicePool,
const MyriadConfig& config);
const MyriadConfig& config,
const ie::ICore* core);
virtual ~ExecutableNetwork() {
@@ -120,6 +123,7 @@ private:
DevicePtr _device;
GraphMetaInfo _graphMetaData;
MyriadConfig _config;
const ie::ICore* _core = nullptr;
int _actualNumExecutors = 0;
std::vector<std::string> _supportedMetrics;
@@ -131,7 +135,8 @@ private:
ExecutableNetwork(std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr> &devicePool,
const MyriadConfig& config);
const MyriadConfig& config,
const ie::ICore* core);
InferenceEngine::ITaskExecutor::Ptr getNextTaskExecutor() {
std::string id = _taskExecutorGetResultIds.front();

View File

@@ -49,7 +49,9 @@ ExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(
vpu::EliminateShapeOfAfterDSR().run_on_function(function);
}
return std::make_shared<ExecutableNetwork>(*clonedNetwork, _mvnc, _devicePool, parsedConfigCopy);
return std::make_shared<ExecutableNetwork>(*clonedNetwork,
_mvnc, _devicePool,
parsedConfigCopy, GetCore());
}
void Engine::SetConfig(const std::map<std::string, std::string> &config) {
@@ -103,7 +105,8 @@ void Engine::QueryNetwork(
network,
static_cast<Platform>(parsedConfigCopy.platform()),
parsedConfigCopy.compileConfig(),
log);
log,
GetCore());
for (const auto& layerName : layerNames) {
res.supportedLayersMap.insert({ layerName, GetName() });
@@ -141,7 +144,7 @@ InferenceEngine::ExecutableNetwork Engine::ImportNetwork(
const auto executableNetwork =
std::make_shared<ExecutableNetwork>(
model, _mvnc, _devicePool, parsedConfigCopy);
model, _mvnc, _devicePool, parsedConfigCopy, GetCore());
return InferenceEngine::ExecutableNetwork{IExecutableNetwork::Ptr(
new ExecutableNetworkBase<ExecutableNetworkInternal>(executableNetwork),

View File

@@ -0,0 +1,29 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gmock/gmock.h>
#include "ie_icore.hpp"
// GoogleMock test double for the InferenceEngine::ICore interface.
// Added so unit tests (e.g. GraphTransformerTest::SetUp and the VPU blob-reader
// tests in this change) can supply the ie::ICore* that FrontEnd / compileNetwork
// now require, without constructing a real InferenceEngine::Core.
// NOTE(review): the mocked signatures below are assumed to mirror the virtual
// methods declared in ie_icore.hpp — confirm against that header if ICore evolves.
class MockICore : public InferenceEngine::ICore {
public:
// Const, no-arg: returns the shared task executor.
MOCK_QUALIFIED_METHOD0(GetTaskExecutor, const, std::shared_ptr<InferenceEngine::ITaskExecutor>());
// Two const ReadNetwork overloads: (string, weights Blob) and (string, string).
// The plugin code in this commit calls the (string, string) form:
// core->ReadNetwork(fileName, std::string()).
MOCK_QUALIFIED_METHOD2(ReadNetwork, const, InferenceEngine::CNNNetwork(const std::string&, const InferenceEngine::Blob::CPtr&));
MOCK_QUALIFIED_METHOD2(ReadNetwork, const, InferenceEngine::CNNNetwork(const std::string&, const std::string&));
// Non-const: (network, string, config map) -> ExecutableNetwork.
MOCK_METHOD3(LoadNetwork, InferenceEngine::ExecutableNetwork(
const InferenceEngine::CNNNetwork&, const std::string&, const std::map<std::string, std::string>&));
// Non-const: (input stream, string, config map) -> ExecutableNetwork.
MOCK_METHOD3(ImportNetwork, InferenceEngine::ExecutableNetwork(
std::istream&, const std::string&, const std::map<std::string, std::string>&));
// Const: (network, string, config map) -> QueryNetworkResult.
MOCK_QUALIFIED_METHOD3(QueryNetwork, const, InferenceEngine::QueryNetworkResult(
const InferenceEngine::ICNNNetwork&, const std::string&, const std::map<std::string, std::string>&));
// Const: (string, string) -> Parameter.
MOCK_QUALIFIED_METHOD2(GetMetric, const, InferenceEngine::Parameter(const std::string&, const std::string&));
~MockICore() = default;
};

View File

@@ -21,6 +21,7 @@ addIeTargetTest(
LINK_LIBRARIES
vpu_graph_transformer_test_static
unitTestUtils
ngraphFunctions
mvnc
ADD_CPPLINT
LABELS

View File

@@ -262,7 +262,7 @@ void GraphTransformerTest::SetUp() {
consoleOutput());
stageBuilder = std::make_shared<StageBuilder>();
frontEnd = std::make_shared<FrontEnd>(stageBuilder);
frontEnd = std::make_shared<FrontEnd>(stageBuilder, &_mockCore);
backEnd = std::make_shared<BackEnd>();
passManager = std::make_shared<PassManager>(stageBuilder, backEnd);
}

View File

@@ -4,6 +4,10 @@
#pragma once
#include <list>
#include <gtest/gtest.h>
#include <vpu/compile_env.hpp>
#include <vpu/model/stage.hpp>
#include <vpu/model/model.hpp>
@@ -12,9 +16,7 @@
#include <vpu/backend/backend.hpp>
#include <vpu/utils/ie_helpers.hpp>
#include <gtest/gtest.h>
#include <list>
#include <unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp>
namespace vpu {
@@ -144,6 +146,7 @@ public:
TestModel CreateTestModel();
private:
MockICore _mockCore;
Logger::Ptr _log;
std::list<ModelPtr> _models;
};

View File

@@ -3,7 +3,6 @@
//
#include <gtest/gtest.h>
#include <tests_common.hpp>
#include <memory>
#include <ie_common.h>
@@ -13,15 +12,16 @@
#include <vpu/graph_transformer.hpp>
#include <vpu/utils/logger.hpp>
#include <myriad_plugin/myriad_config.h>
#include <ngraph/op/util/attr_types.hpp>
#include <ngraph_functions/subgraph_builders.hpp>
#include <unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp>
using namespace ::testing;
using namespace vpu;
using namespace InferenceEngine;
class VPUBlobReaderHeaderTests: public TestsCommon, public testing::WithParamInterface<std::vector<size_t>> {
class VPUBlobReaderHeaderTests: public ::testing::Test, public testing::WithParamInterface<std::vector<size_t>> {
private:
std::vector<size_t> inputShape;
@@ -50,11 +50,12 @@ public:
CompilationConfig compileConfig;
auto log = std::make_shared<Logger>("GraphCompiler", LogLevel::None, consoleOutput());
_compiledGraph = compileNetwork(_network, Platform::MYRIAD_X, compileConfig, log);
_compiledGraph = compileNetwork(_network, Platform::MYRIAD_X, compileConfig, log, &_mockCore);
}
CNNNetwork _network;
CompiledGraph::Ptr _compiledGraph;
MockICore _mockCore;
};
TEST_P(VPUBlobReaderHeaderTests, canReadCorrectMagicNumber) {
@@ -113,7 +114,7 @@ TEST_P(VPUBlobReaderInputTests, canGetCorrectInputDimsFromImportedNetwork) {
auto actualDims = actual.second->getTensorDesc().getDims();
size_t actualTotalSize = std::accumulate(actualDims.begin(), actualDims.end(), 1, std::multiplies<size_t>());
ASSERT_TRUE(expectedNetworkInputs.count(actual.first) > 0);
ASSERT_GT(expectedNetworkInputs.count(actual.first), 0);
auto expectedDims = expectedNetworkInputs[actual.first]->getTensorDesc().getDims();
size_t expectedTotalSize = std::accumulate(expectedDims.begin(), expectedDims.end(), 1, std::multiplies<size_t>());
@@ -130,11 +131,11 @@ TEST_P(VPUBlobReaderInputTests, canGetCorrectInputNamesFromImportedNetwork) {
auto expectedNetworkInputs = _network.getInputsInfo();
for (auto&& actual : parsedNetworkInputs) {
ASSERT_TRUE(expectedNetworkInputs.count(actual.first) > 0);
ASSERT_GT(expectedNetworkInputs.count(actual.first), 0);
}
for (auto&& expected : expectedNetworkInputs) {
ASSERT_TRUE(parsedNetworkInputs.count(expected.first) > 0);
ASSERT_GT(parsedNetworkInputs.count(expected.first), 0);
}
}
@@ -169,7 +170,7 @@ TEST_P(VPUBlobReaderOutputTests, canGetCorrectOutputDimsFromImportedNetwork) {
auto actualDims = actual.second->getDims();
size_t actualTotalSize = std::accumulate(actualDims.begin(), actualDims.end(), 1, std::multiplies<size_t>());
ASSERT_TRUE(expectedNetworkOutputs.count(actual.first) > 0);
ASSERT_GT(expectedNetworkOutputs.count(actual.first), 0);
auto expectedDims = expectedNetworkOutputs[actual.first]->getDims();
size_t expectedTotalSize = std::accumulate(expectedDims.begin(), expectedDims.end(), 1, std::multiplies<size_t>());
@@ -186,11 +187,11 @@ TEST_P(VPUBlobReaderOutputTests, canGetCorrectOutputNamesFromImportedNetwork) {
auto expectedNetworkOutputs = _network.getOutputsInfo();
for (auto&& actual : parsedNetworkOutputs) {
ASSERT_TRUE(expectedNetworkOutputs.count(actual.first) > 0);
ASSERT_GT(expectedNetworkOutputs.count(actual.first), 0);
}
for (auto&& expected : expectedNetworkOutputs) {
ASSERT_TRUE(parsedNetworkOutputs.count(expected.first) > 0);
ASSERT_GT(parsedNetworkOutputs.count(expected.first), 0);
}
}

View File

@@ -19,9 +19,6 @@ void graphTransformerFunctionalTests::SetUp() {
vpuLayersTests::SetUp();
_stageBuilder = std::make_shared<StageBuilder>();
_frontEnd = std::make_shared<FrontEnd>(_stageBuilder);
_backEnd = std::make_shared<BackEnd>();
_passManager = std::make_shared<PassManager>(_stageBuilder, _backEnd);
_platform = CheckMyriadX() ? Platform::MYRIAD_X : Platform::MYRIAD_2;
}

View File

@@ -32,8 +32,5 @@ protected:
private:
vpu::Platform _platform = vpu::Platform::MYRIAD_X;
vpu::FrontEnd::Ptr _frontEnd;
vpu::PassManager::Ptr _passManager;
vpu::BackEnd::Ptr _backEnd;
InferenceEngine::ExecutableNetwork _executableNetwork;
};

View File

@@ -167,7 +167,7 @@ void GraphTransformerTest::SetUp() {
consoleOutput());
stageBuilder = std::make_shared<StageBuilder>();
frontEnd = std::make_shared<FrontEnd>(stageBuilder);
frontEnd = std::make_shared<FrontEnd>(stageBuilder, &_mockCore);
backEnd = std::make_shared<BackEnd>();
passManager = std::make_shared<PassManager>(stageBuilder, backEnd);
}

View File

@@ -17,6 +17,8 @@
#include <vpu/middleend/pass_manager.hpp>
#include <vpu/backend/backend.hpp>
#include <unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp>
namespace vpu {
template <class Cont, class Cond>
@@ -196,6 +198,7 @@ public:
TestModel CreateTestModel(const DataDesc& dataDesc);
private:
MockICore _mockCore;
Logger::Ptr _log;
std::list<ModelPtr> _models;
};