Gna fix mt with iterations (#3297)

* Enable CoreThreadingTestsWithIterations tests for GNA; synchronize the rest of the GNA library API and the Config class for multithreaded tests; change the models used in CoreThreadingTestsWithIterations to be compatible with GNA
* Use a parameter for model set selection
* Fix style
* Disable the HETERO CoreThreadingTestsWithIterations tests and create issue 45658
This commit is contained in:
parent
d2a23680f2
commit
a497153dcd
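Note on the synchronization pattern: the diff below serializes every entry point of GNADeviceHelper through one static, process-wide mutex (acrossPluginsSync), because the underlying GNA library is not safe to call from several threads or plugin instances at once. A minimal, self-contained sketch of the idiom follows; DeviceHelper and doLibraryCall are invented names for illustration, and only acrossPluginsSync mirrors the diff:

    #include <iostream>
    #include <mutex>
    #include <thread>

    // One static mutex, shared by all instances in the process, guards every
    // call into a non-thread-safe C library.
    class DeviceHelper {
    public:
        void doLibraryCall() {
            std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
            // ... the actual library call would go here; while the lock is
            // held, no other thread or plugin instance can enter the library.
        }
    private:
        static std::mutex acrossPluginsSync;
    };
    std::mutex DeviceHelper::acrossPluginsSync{};

    int main() {
        DeviceHelper a, b;
        std::thread t1([&] { a.doLibraryCall(); });
        std::thread t2([&] { b.doLibraryCall(); });
        t1.join();
        t2.join();
        std::cout << "both calls were serialized\n";
    }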
@@ -29,6 +29,7 @@
 std::mutex GNADeviceHelper::acrossPluginsSync{};

 uint8_t* GNADeviceHelper::alloc(uint32_t size_requested, uint32_t *size_granted) {
+    std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
     void * memPtr = nullptr;
 #if GNA_LIB_VER == 1
     memPtr = GNAAlloc(nGNAHandle, size_requested, size_granted);
@@ -45,6 +46,7 @@ uint8_t* GNADeviceHelper::alloc(uint32_t size_requested, uint32_t *size_granted)
 }

 void GNADeviceHelper::free(void * ptr) {
+    std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
 #if GNA_LIB_VER == 1
     GNAFree(nGNAHandle);
 #else
@@ -58,6 +60,7 @@ uint32_t GNADeviceHelper::propagate(const intel_nnet_type_t *pNeuralNetwork,
                                     const uint32_t *pActiveIndices,
                                     uint32_t nActiveIndices,
                                     intel_gna_proc_t nGNAProcType) {
+    std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
     uint32_t reqId;

     nGNAStatus = GNAPropagateForward(nGNAHandle, pNeuralNetwork,
@@ -68,6 +71,7 @@ uint32_t GNADeviceHelper::propagate(const intel_nnet_type_t *pNeuralNetwork,
 #else

 void GNADeviceHelper::setUpActiveList(const uint32_t requestConfigId, uint32_t layerIndex, uint32_t* ptr_active_indices, uint32_t num_active_indices) {
+    std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
     const auto status = Gna2RequestConfigEnableActiveList(requestConfigId, layerIndex, num_active_indices, ptr_active_indices);
     checkGna2Status(status, "Gna2RequestConfigEnableActiveList");
 }
@@ -76,6 +80,7 @@ void GNADeviceHelper::propagateSync(const uint32_t requestConfigId, Gna2Accelera
 }

 uint32_t GNADeviceHelper::propagate(const uint32_t requestConfigId, Gna2AccelerationMode gna2AccelerationMode) {
+    std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
     uint32_t reqId{};
     if (gna2AccelerationMode == Gna2AccelerationModeHardware &&
         detectedGnaDevVersion == Gna2DeviceVersionSoftwareEmulation) {
@@ -116,6 +121,7 @@ std::string GNADeviceHelper::getGnaLibraryVersion() {
 }

 uint32_t GNADeviceHelper::createModel(Gna2Model& gnaModel) const {
+    std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
     uint32_t modelId;
     if (isUpTo20GnaDevice()) {
         enforceLegacyCnns(gnaModel);
@@ -127,11 +133,13 @@ uint32_t GNADeviceHelper::createModel(Gna2Model& gnaModel) const {
 }

 void GNADeviceHelper::releaseModel(const uint32_t model_id) {
+    std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
     const auto status = Gna2ModelRelease(model_id);
     checkGna2Status(status, "Gna2ModelRelease");
 }

 uint32_t GNADeviceHelper::createRequestConfig(const uint32_t model_id) {
+    std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
     uint32_t reqConfId;
     auto status = Gna2RequestConfigCreate(model_id, &reqConfId);
     checkGna2Status(status, "Gna2RequestConfigCreate");
@@ -327,6 +335,7 @@ const std::map <const std::pair<Gna2OperationType, int32_t>, const std::string>
 #endif

 GnaWaitStatus GNADeviceHelper::wait(uint32_t reqId, int64_t millisTimeout) {
+    std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
 #if GNA_LIB_VER == 2
     const auto status = Gna2RequestWait(reqId, millisTimeout);
     if (status == Gna2StatusWarningDeviceBusy) {
@@ -434,8 +443,8 @@ void GNADeviceHelper::open(uint8_t n_threads) {
 }

 void GNADeviceHelper::close() {
-    std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
 #if GNA_LIB_VER == 1
+    std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
     GNADeviceClose(nGNAHandle);
     nGNAHandle = 0;
 #else
@@ -447,8 +456,11 @@ void GNADeviceHelper::close() {
             gnawarn() << "Request with Id " << requestId << " was not awaited successfully";
         }
     }
-    const auto status = Gna2DeviceClose(nGnaDeviceIndex);
-    checkGna2Status(status, "Gna2DeviceClose");
+    {
+        std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
+        const auto status = Gna2DeviceClose(nGnaDeviceIndex);
+        checkGna2Status(status, "Gna2DeviceClose");
+    }
 #endif
     deviceOpened = false;
 }
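Note on the two close() hunks above: wait() now takes acrossPluginsSync (see the -327,6 hunk), and in the GNA2 path close() first loops over outstanding requests before closing the device. Presumably this is why the function-wide lock was narrowed: holding the non-recursive mutex across the whole body would self-deadlock as soon as close() re-entered a locking call such as wait(). The lock is instead taken inside the GNA_LIB_VER == 1 branch and, for GNA2, in a tight scope around Gna2DeviceClose. A sketch of the scoped-locking shape, using the names from the diff:

    {
        std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
        // only the device-close call runs under the lock
    }   // lock released here, so code after this point may lock again safely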
@@ -210,18 +210,19 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& config) {
 }

 void Config::AdjustKeyMapValues() {
-    key_config_map.clear();
+    std::lock_guard<std::mutex> lockGuard{ mtx4keyConfigMap };
+    keyConfigMap.clear();

     if (inputScaleFactors.empty()) {
         inputScaleFactors.push_back(1.0);
     }
-    key_config_map[GNA_CONFIG_KEY(SCALE_FACTOR)] = std::to_string(inputScaleFactors[0]);
+    keyConfigMap[GNA_CONFIG_KEY(SCALE_FACTOR)] = std::to_string(inputScaleFactors[0]);
     for (int n = 0; n < inputScaleFactors.size(); n++) {
-        key_config_map[GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_") + std::to_string(n)] =
+        keyConfigMap[GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_") + std::to_string(n)] =
             std::to_string(inputScaleFactors[n]);
     }
-    key_config_map[GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE)] = dumpXNNPath;
-    key_config_map[GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE_GENERATION)] = dumpXNNGeneration;
+    keyConfigMap[GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE)] = dumpXNNPath;
+    keyConfigMap[GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE_GENERATION)] = dumpXNNGeneration;

     std::string device_mode;
     if (gnaFlags.sw_fp32) {
@@ -243,32 +244,34 @@ void Config::AdjustKeyMapValues() {
         }
     }
     IE_ASSERT(!device_mode.empty());
-    key_config_map[GNA_CONFIG_KEY(DEVICE_MODE)] = device_mode;
-    key_config_map[GNA_CONFIG_KEY(COMPACT_MODE)] =
+    keyConfigMap[GNA_CONFIG_KEY(DEVICE_MODE)] = device_mode;
+    keyConfigMap[GNA_CONFIG_KEY(COMPACT_MODE)] =
         gnaFlags.compact_mode ? PluginConfigParams::YES: PluginConfigParams::NO;
-    key_config_map[CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)] =
+    keyConfigMap[CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)] =
        gnaFlags.exclusive_async_requests ? PluginConfigParams::YES: PluginConfigParams::NO;
-    key_config_map[GNA_CONFIG_KEY(PRECISION)] = gnaPrecision.name();
-    key_config_map[GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN)] =
+    keyConfigMap[GNA_CONFIG_KEY(PRECISION)] = gnaPrecision.name();
+    keyConfigMap[GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN)] =
        gnaFlags.uniformPwlDesign ? PluginConfigParams::YES: PluginConfigParams::NO;
-    key_config_map[CONFIG_KEY(PERF_COUNT)] =
+    keyConfigMap[CONFIG_KEY(PERF_COUNT)] =
        gnaFlags.performance_counting ? PluginConfigParams::YES: PluginConfigParams::NO;
-    key_config_map[GNA_CONFIG_KEY(LIB_N_THREADS)] = std::to_string(gnaFlags.gna_lib_async_threads_num);
-    key_config_map[CONFIG_KEY(SINGLE_THREAD)] =
+    keyConfigMap[GNA_CONFIG_KEY(LIB_N_THREADS)] = std::to_string(gnaFlags.gna_lib_async_threads_num);
+    keyConfigMap[CONFIG_KEY(SINGLE_THREAD)] =
        gnaFlags.gna_openmp_multithreading ? PluginConfigParams::NO: PluginConfigParams::YES;
 }

 std::string Config::GetParameter(const std::string& name) const {
-    auto result = key_config_map.find(name);
-    if (result == key_config_map.end()) {
+    std::lock_guard<std::mutex> lockGuard{ mtx4keyConfigMap };
+    auto result = keyConfigMap.find(name);
+    if (result == keyConfigMap.end()) {
         THROW_GNA_EXCEPTION << "Unsupported config key: " << name;
     }
     return result->second;
 }

 std::vector<std::string> Config::GetSupportedKeys() const {
+    std::lock_guard<std::mutex> lockGuard{ mtx4keyConfigMap };
     std::vector<std::string> result;
-    for (auto&& configOption : key_config_map) {
+    for (auto&& configOption : keyConfigMap) {
         result.push_back(configOption.first);
     }
     return result;
@@ -14,6 +14,7 @@
 #include "descriptions/gna_flags.hpp"
 #include <vector>
 #include <map>
+#include <mutex>

 namespace GNAPluginNS {

@@ -21,6 +22,21 @@ struct Config {
     Config() {
         AdjustKeyMapValues();
     }
+    Config(const Config& r) {
+        gnaPrecision = r.gnaPrecision;
+        dumpXNNPath = r.dumpXNNPath;
+        dumpXNNGeneration = r.dumpXNNGeneration;
+#if GNA_LIB_VER == 1
+        gna_proc_type = r.gna_proc_type;
+#else
+        pluginGna2AccMode = r.pluginGna2AccMode;
+        pluginGna2DeviceConsistent = r.pluginGna2DeviceConsistent;
+#endif
+        inputScaleFactors = r.inputScaleFactors;
+        gnaFlags = r.gnaFlags;
+        std::lock_guard<std::mutex>(r.mtx4keyConfigMap);
+        keyConfigMap = r.keyConfigMap;
+    }
     void UpdateFromMap(const std::map<std::string, std::string>& configMap);
     void AdjustKeyMapValues();
     std::string GetParameter(const std::string& name) const;
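Note on the copy constructor above: std::lock_guard<std::mutex>(r.mtx4keyConfigMap); constructs an unnamed temporary guard that is destroyed at the end of that very statement, so the mutex is already released when keyConfigMap = r.keyConfigMap; runs on the next line. A named guard (a sketch, not part of the commit) would keep the source map locked for the duration of the copy:

    Config(const Config& r) {
        // ... scalar members copied as above ...
        std::lock_guard<std::mutex> lockGuard{ r.mtx4keyConfigMap };  // named: held until the ctor ends
        keyConfigMap = r.keyConfigMap;                                // the copy now runs under the lock
    }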
@@ -42,7 +58,8 @@ struct Config {
     std::vector<float> inputScaleFactors;
     GNAFlags gnaFlags;

-    std::map<std::string, std::string> key_config_map;
+    mutable std::mutex mtx4keyConfigMap;
+    std::map<std::string, std::string> keyConfigMap;
 };

 }  // namespace GNAPluginNS
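Note on "mutable" above: GetParameter() and GetSupportedKeys() are const member functions, yet locking a mutex modifies it; declaring mtx4keyConfigMap mutable is the standard idiom that lets a const method take the lock. A minimal illustration (Example and get are invented names):

    #include <mutex>

    struct Example {
        mutable std::mutex m;   // mutable: lockable even from const methods
        int value = 0;
        int get() const {
            std::lock_guard<std::mutex> g{ m };  // compiles only because m is mutable
            return value;
        }
    };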
@@ -34,7 +34,7 @@ public:
                                  const std::map<std::string, std::string> &config) override {
         Config updated_config(defaultConfig);
         updated_config.UpdateFromMap(config);
-        auto plg = std::make_shared<GNAPlugin>(updated_config.key_config_map);
+        auto plg = std::make_shared<GNAPlugin>(updated_config.keyConfigMap);
         plgPtr = plg;
         InferenceEngine::CNNNetwork clonedNetwork(InferenceEngine::cloneNetwork(network));
         return std::make_shared<GNAExecutableNetwork>(clonedNetwork, plg);
@@ -49,7 +49,7 @@ public:
                                  const std::map<std::string, std::string> &config) override {
         Config updated_config(defaultConfig);
         updated_config.UpdateFromMap(config);
-        auto plg = std::make_shared<GNAPlugin>(updated_config.key_config_map);
+        auto plg = std::make_shared<GNAPlugin>(updated_config.keyConfigMap);
         plgPtr = plg;

         return make_executable_network(std::make_shared<GNAExecutableNetwork>(modelFileName, plg));
@@ -59,7 +59,7 @@ public:
                                  const std::map<std::string, std::string>& config) override {
         Config updated_config(defaultConfig);
         updated_config.UpdateFromMap(config);
-        auto plg = std::make_shared<GNAPlugin>(updated_config.key_config_map);
+        auto plg = std::make_shared<GNAPlugin>(updated_config.keyConfigMap);
         plgPtr = plg;
         return make_executable_network(std::make_shared<GNAExecutableNetwork>(networkModel, plg));
     }
@@ -22,11 +22,13 @@ INSTANTIATE_TEST_CASE_P(CPU, CoreThreadingTests, testing::ValuesIn(params), Core
 INSTANTIATE_TEST_CASE_P(CPU, CoreThreadingTestsWithIterations,
     testing::Combine(testing::ValuesIn(params),
                      testing::Values(4),
-                     testing::Values(50)),
+                     testing::Values(50),
+                     testing::Values(ModelClass::Default)),
     CoreThreadingTestsWithIterations::getTestCaseName);

 INSTANTIATE_TEST_CASE_P(CPU_Streams, CoreThreadingTestsWithIterations,
     testing::Combine(testing::ValuesIn(paramsStreams),
                      testing::Values(4),
-                     testing::Values(10)),
+                     testing::Values(50),
+                     testing::Values(ModelClass::Default)),
     CoreThreadingTestsWithIterations::getTestCaseName);
@@ -5,19 +5,20 @@
 #include <behavior/core_threading_tests.hpp>

 namespace {

 Params params[] = {
     std::tuple<Device, Config>{ CommonTestUtils::DEVICE_GNA, {{ CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) }}},
     std::tuple<Device, Config>{ CommonTestUtils::DEVICE_HETERO, {{ "TARGET_FALLBACK", CommonTestUtils::DEVICE_GNA }}},
     std::tuple<Device, Config>{ CommonTestUtils::DEVICE_MULTI, {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES), CommonTestUtils::DEVICE_GNA }}},
 };

+// TODO: Consider to append params[1] after issue *-45658 resolved
+std::vector< std::tuple<Device, Config> > paramsWithIterations{ params[0], params[2] };
 }  // namespace

 INSTANTIATE_TEST_CASE_P(GNA, CoreThreadingTests, testing::ValuesIn(params), CoreThreadingTests::getTestCaseName);

-INSTANTIATE_TEST_CASE_P(DISABLED_GNA, CoreThreadingTestsWithIterations,
-    testing::Combine(testing::ValuesIn(params),
-        testing::Values(2),
-        testing::Values(2)),
+INSTANTIATE_TEST_CASE_P(GNA, CoreThreadingTestsWithIterations,
+    testing::Combine(testing::ValuesIn(paramsWithIterations),
+                     testing::Values(3),
+                     testing::Values(4),
+                     testing::Values(ModelClass::ConvPoolRelu)),
     CoreThreadingTestsWithIterations::getTestCaseName);
@@ -52,5 +52,6 @@ INSTANTIATE_TEST_CASE_P(smoke_GPU, CoreThreadingTests, testing::ValuesIn(params)
 INSTANTIATE_TEST_CASE_P(smoke_GPU, CoreThreadingTestsWithIterations,
     testing::Combine(testing::ValuesIn(params),
                      testing::Values(4),
-                     testing::Values(20)),
+                     testing::Values(20),
+                     testing::Values(ModelClass::Default)),
     CoreThreadingTestsWithIterations::getTestCaseName);
@@ -19,5 +19,6 @@ INSTANTIATE_TEST_CASE_P(MYRIAD, CoreThreadingTests, testing::ValuesIn(params), C
 INSTANTIATE_TEST_CASE_P(DISABLED_MYRIAD, CoreThreadingTestsWithIterations,
     testing::Combine(testing::ValuesIn(params),
                      testing::Values(2),
-                     testing::Values(2)),
+                     testing::Values(2),
+                     testing::Values(ModelClass::Default)),
     CoreThreadingTestsWithIterations::getTestCaseName);
@@ -17,6 +17,8 @@
 #include <functional_test_utils/test_model/test_model.hpp>
 #include <common_test_utils/file_utils.hpp>
 #include <common_test_utils/test_assertions.hpp>
+#include <common_test_utils/test_constants.hpp>
+#include <common_test_utils/common_layers_params.hpp>

 #include <gtest/gtest.h>
 #include <thread>
@@ -182,24 +184,31 @@ TEST_P(CoreThreadingTests, smoke_QueryNetwork) {

 using Threads = unsigned int;
 using Iterations = unsigned int;
-using CoreThreadingParams = std::tuple<Params, Threads, Iterations>;

+enum struct ModelClass : unsigned {
+    Default,
+    ConvPoolRelu
+};
+
+using CoreThreadingParams = std::tuple<Params, Threads, Iterations, ModelClass>;

 class CoreThreadingTestsWithIterations : public ::testing::TestWithParam<CoreThreadingParams>,
-    public CoreThreadingTestsBase {
+                                         public CoreThreadingTestsBase {
 public:
     void SetUp() override {
         std::tie(deviceName, config) = std::get<0>(GetParam());
-        numThreads = std::get<1>(GetParam());
-        numIterations = std::get<2>(GetParam());
+        numThreads = std::get<1>(GetParam());
+        numIterations = std::get<2>(GetParam());
+        modelClass = std::get<3>(GetParam());
     }

-    static std::string getTestCaseName(testing::TestParamInfo<std::tuple<Params, Threads, Iterations>> obj) {
+    static std::string getTestCaseName(testing::TestParamInfo<CoreThreadingParams > obj) {
         unsigned int numThreads, numIterations;
         std::string deviceName;
         Config config;
         std::tie(deviceName, config) = std::get<0>(obj.param);
-        numThreads = std::get<1>(obj.param);
-        numIterations = std::get<2>(obj.param);
+        numThreads = std::get<1>(obj.param);
+        numIterations = std::get<2>(obj.param);
         char separator('_');
         std::ostringstream result;
         result << "targetDevice=" << deviceName << separator;
@@ -212,8 +221,24 @@ public:
         return result.str();
     }

+    ModelClass modelClass;
     unsigned int numIterations;
     unsigned int numThreads;

+    std::vector<InferenceEngine::CNNNetwork> networks;
+    void SetupNetworks() {
+        if (modelClass == ModelClass::ConvPoolRelu) {
+            for (unsigned i = 0; i < numThreads; i++) {
+                networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeConvPoolRelu()));
+            }
+        } else {
+            networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::make2InputSubtract()));
+            networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeMultiSingleConv()));
+            networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSingleConv()));
+            networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitConvConcat()));
+            networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitMultiConvConcat()));
+        }
+    }
 };

 // tested function: LoadNetwork, AddExtension
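Note on the model-set parameter: ModelClass::ConvPoolRelu makes SetupNetworks() build one GNA-compatible network per thread, so each thread loads its own copy, while ModelClass::Default keeps the original five-network mix. A hypothetical instantiation for some other device suite (SomeDevice and the thread/iteration counts are invented; the macro and fixture names are the real ones from this diff) would select a set like this:

    INSTANTIATE_TEST_CASE_P(SomeDevice, CoreThreadingTestsWithIterations,
        testing::Combine(testing::ValuesIn(params),
                         testing::Values(4),                          // threads
                         testing::Values(10),                         // iterations
                         testing::Values(ModelClass::ConvPoolRelu)),  // model set
        CoreThreadingTestsWithIterations::getTestCaseName);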
@@ -223,12 +248,7 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetwork) {
     InferenceEngine::Core ie;
     std::atomic<unsigned int> counter{0u};

-    std::vector<InferenceEngine::CNNNetwork> networks;
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::make2InputSubtract()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeMultiSingleConv()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSingleConv()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitConvConcat()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitMultiConvConcat()));
+    SetupNetworks();

     ie.SetConfig(config, deviceName);
     runParallel([&] () {
@@ -244,12 +264,7 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetworkAccuracy) {
     InferenceEngine::Core ie;
     std::atomic<unsigned int> counter{0u};

-    std::vector<InferenceEngine::CNNNetwork> networks;
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::make2InputSubtract()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeMultiSingleConv()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSingleConv()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitConvConcat()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitMultiConvConcat()));
+    SetupNetworks();

     ie.SetConfig(config, deviceName);
     runParallel([&] () {
@@ -297,12 +312,7 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetwork_MultipleIECores) {

     std::atomic<unsigned int> counter{0u};

-    std::vector<InferenceEngine::CNNNetwork> networks;
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::make2InputSubtract()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeMultiSingleConv()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSingleConv()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitConvConcat()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitMultiConvConcat()));
+    SetupNetworks();

     runParallel([&] () {
         auto value = counter++;
@@ -51,7 +51,7 @@ protected:
 };

 TEST_F(GNAPluginConfigTest, GnaConfigDefaultConfigIsExpected) {
-    ASSERT_EQ(config.key_config_map, supportedConfigKeysWithDefaults);
+    ASSERT_EQ(config.keyConfigMap, supportedConfigKeysWithDefaults);
 }

 TEST_F(GNAPluginConfigTest, GnaConfigScaleFactorTest) {