Auto batch smart reshape strict testing (once we moved to dim tracking) (#10253)
* fixed perf-counters
* explicit auto-batching params that should guarantee auto-batching is triggered (to avoid fallback to no-batching when the selected batch size is just 1)
* added makeConvPoolReluNoReshapes and used it wherever applicable to guarantee that auto-batching is actually exercised (not important for things like plugin/executable-network config tests, but important for the inference-request tests)
* moved getDefaultNGraphFunctionForTheDevice to ov_behavior_test_utils.hpp
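The "(4)" suffix in the device strings below pins the auto-batch size explicitly, so batching is guaranteed instead of being left to the plugin's heuristic (which may choose a batch of 1 and silently fall back to plain GPU). As an illustration only (the model path and surrounding scaffolding are hypothetical, not part of this commit), the same configuration the tests use would look like this outside the test suite:

    #include <ie_core.hpp>
    #include <ie_plugin_config.hpp>

    int main() {
        InferenceEngine::Core ie;
        auto net = ie.ReadNetwork("model.xml");  // hypothetical model
        // BATCH device, explicit batch of 4, zero collection timeout
        auto exec = ie.LoadNetwork(net, "BATCH",
            {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), "GPU(4)"},
             {CONFIG_KEY(AUTO_BATCH_TIMEOUT), "0"}});
        return 0;
    }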
@@ -345,23 +345,27 @@ AutoBatchAsyncInferRequest::AutoBatchAsyncInferRequest(
         };
         AutoBatchAsyncInferRequest* _this = nullptr;
     };
-    _pipeline = {{/*TaskExecutor*/ std::make_shared<ThisRequestExecutor>(this), /*task*/ [this, needPerfCounters] {
-        if (this->_inferRequest->_exceptionPtr)  // if the exception happened in the batch1 fallback
-            std::rethrow_exception(this->_inferRequest->_exceptionPtr);
-        auto& batchReq = this->_inferRequest->_myBatchedRequestWrapper;
-        if (batchReq._exceptionPtr)  // when the batchN execution failed
-            std::rethrow_exception(batchReq._exceptionPtr);
-        // in the case of non-batched execution the blobs were set explicitly
-        if (AutoBatchInferRequest::eExecutionFlavor::BATCH_EXECUTED ==
-            this->_inferRequest->_wasBatchedRequestUsed)
-            this->_inferRequest->CopyOutputsIfNeeded();
-        if (needPerfCounters) {
-            try {
-                this->_inferRequest->_perfMap = batchReq._inferRequestBatched->GetPerformanceCounts();
-            } catch (...) {
-            }
-        }
-    }}};
+    _pipeline = {
+        {/*TaskExecutor*/ std::make_shared<ThisRequestExecutor>(this), /*task*/ [this, needPerfCounters] {
+             if (this->_inferRequest->_exceptionPtr)  // if the exception happened in the batch1 fallback
+                 std::rethrow_exception(this->_inferRequest->_exceptionPtr);
+             auto& batchReq = this->_inferRequest->_myBatchedRequestWrapper;
+             if (batchReq._exceptionPtr)  // when the batchN execution failed
+                 std::rethrow_exception(batchReq._exceptionPtr);
+             // in the case of non-batched execution the blobs were set explicitly
+             if (AutoBatchInferRequest::eExecutionFlavor::BATCH_EXECUTED == this->_inferRequest->_wasBatchedRequestUsed)
+                 this->_inferRequest->CopyOutputsIfNeeded();
+             if (needPerfCounters) {
+                 try {
+                     if (AutoBatchInferRequest::eExecutionFlavor::BATCH_EXECUTED ==
+                         this->_inferRequest->_wasBatchedRequestUsed)
+                         this->_inferRequest->_perfMap = batchReq._inferRequestBatched->GetPerformanceCounts();
+                     else
+                         this->_inferRequest->_perfMap = this->_inferRequestWithoutBatch->GetPerformanceCounts();
+                 } catch (...) {
+                 }
+             }
+         }}};
 }

 void AutoBatchAsyncInferRequest::Infer_ThreadUnsafe() {
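The reworked pipeline task above now fetches performance counters from whichever request actually ran: the batched request on the BATCH_EXECUTED path, otherwise the batch-1 fallback request. A minimal sketch of reading those counters on the caller side (assuming `req` is an InferenceEngine::InferRequest created from a network loaded with PERF_COUNT set to YES):

    // Per-layer profiling info; InferenceEngineProfileInfo comes from ie_common.h
    std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perf = req.GetPerformanceCounts();
    for (const auto& it : perf)
        std::cout << it.first << ": " << it.second.realTime_uSec << " us\n";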
@@ -53,7 +53,10 @@ const std::vector<std::map<std::string, std::string>> autoConfig = {
 };

 const std::vector<std::map<std::string, std::string>> autoBatchConfig = {
-    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}},
+    // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU)
+    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , std::string(CommonTestUtils::DEVICE_GPU) + "(4)"},
+     // no timeout to avoid increasing the test time
+     {CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "0 "}}
 };

 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ExecNetSetPrecision,
@@ -20,7 +20,10 @@ const std::vector<std::map<std::string, std::string>> autoConfigs = {
 };

 const std::vector<std::map<std::string, std::string>> autoBatchConfigs = {
-    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}},
+    // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU)
+    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , std::string(CommonTestUtils::DEVICE_GPU) + "(4)"},
+     // no timeout to avoid increasing the test time
+     {CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "0 "}}
 };

 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestCallbackTests,
@@ -19,7 +19,10 @@ const std::vector<std::map<std::string, std::string>> autoconfigs = {
 };

 const std::vector<std::map<std::string, std::string>> auto_batch_configs = {
-    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}},
+    // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU)
+    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , std::string(CommonTestUtils::DEVICE_GPU) + "(4)"},
+     // no timeout to avoid increasing the test time
+     {CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "0 "}}
 };

 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestMultithreadingTests,
@@ -20,6 +20,13 @@ namespace {
                 CommonTestUtils::DEVICE_GPU + std::string(",") + CommonTestUtils::DEVICE_CPU}}
 };

+const std::vector<std::map<std::string, std::string>> AutoBatchConfigs = {
+    // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU)
+    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , std::string(CommonTestUtils::DEVICE_GPU) + "(4)"},
+     // no timeout to avoid increasing the test time
+     {CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "0 "}}
+};
+
 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestPerfCountersTest,
         ::testing::Combine(
                 ::testing::Values(CommonTestUtils::DEVICE_GPU),

@@ -38,4 +45,9 @@ namespace {
                 ::testing::ValuesIn(AutoConfigs)),
         InferRequestPerfCountersTest::getTestCaseName);

+INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, InferRequestPerfCountersTest,
+        ::testing::Combine(
+                ::testing::Values(CommonTestUtils::DEVICE_BATCH),
+                ::testing::ValuesIn(AutoBatchConfigs)),
+        InferRequestPerfCountersTest::getTestCaseName);
 } // namespace
@@ -21,7 +21,10 @@ namespace {
 };

 const std::vector<std::map<std::string, std::string>> autoBatchConfigs = {
-    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}},
+    // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU)
+    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , std::string(CommonTestUtils::DEVICE_GPU) + "(4)"},
+     // no timeout to avoid increasing the test time
+     {CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "0 "}}
 };

 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestWaitTests,
@@ -10,6 +10,13 @@ const std::vector<ov::AnyMap> configs = {
     {},
 };

+const std::vector<ov::AnyMap> autoBatchConfigs = {
+    // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU)
+    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , std::string(CommonTestUtils::DEVICE_GPU) + "(4)"},
+     // no timeout to avoid increasing the test time
+     {CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "0 "}}
+};
+
 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVExecutableNetworkBaseTest,
         ::testing::Combine(
                 ::testing::Values(CommonTestUtils::DEVICE_GPU),

@@ -18,7 +25,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVExecutableNetworkBaseTest,

 INSTANTIATE_TEST_SUITE_P(smoke_AutoBatchBehaviorTests, OVExecutableNetworkBaseTest,
         ::testing::Combine(
-                ::testing::Values(std::string(CommonTestUtils::DEVICE_BATCH) + ":" + CommonTestUtils::DEVICE_GPU),
-                ::testing::ValuesIn(configs)),
+                ::testing::Values(CommonTestUtils::DEVICE_BATCH),
+                ::testing::ValuesIn(autoBatchConfigs)),
         OVExecutableNetworkBaseTest::getTestCaseName);
 } // namespace
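Note the instantiation change above: instead of packing everything into the device string ("BATCH:GPU"), the test now passes the plain "BATCH" device plus an explicit config map. Both spellings target the auto-batching plugin; a sketch, assuming the 2022.1-era InferenceEngine API and an `ie`/`net` pair as in the earlier example:

    // Form 1: device and batch size encoded in the device name
    auto a = ie.LoadNetwork(net, "BATCH:GPU(4)");
    // Form 2: plain "BATCH" device plus an explicit config map (what the tests now use)
    auto b = ie.LoadNetwork(net, "BATCH", {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), "GPU(4)"}});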
@@ -19,7 +19,10 @@ const std::vector<ov::AnyMap> multiConfigs = {
 };

 const std::vector<ov::AnyMap> autoBatchConfigs = {
-    {{ CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}}
+    // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU)
+    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , std::string(CommonTestUtils::DEVICE_GPU) + "(4)"},
+     // no timeout to avoid increasing the test time
+     {CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "0 "}}
 };

 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCallbackTests,
@@ -11,6 +11,13 @@ const std::vector<ov::AnyMap> configs = {
     {},
 };

+const std::vector<ov::AnyMap> autoBatchConfigs = {
+    // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU)
+    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , std::string(CommonTestUtils::DEVICE_GPU) + "(4)"},
+     // no timeout to avoid increasing the test time
+     {CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "0 "}}
+};
+
 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCancellationTests,
         ::testing::Combine(
                 ::testing::Values(CommonTestUtils::DEVICE_GPU),

@@ -19,7 +26,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCancellationTests,

 INSTANTIATE_TEST_SUITE_P(smoke_AutoBatchBehaviorTests, OVInferRequestCancellationTests,
         ::testing::Combine(
-                ::testing::Values(std::string(CommonTestUtils::DEVICE_BATCH) + ":" + CommonTestUtils::DEVICE_GPU),
-                ::testing::ValuesIn(configs)),
+                ::testing::Values(CommonTestUtils::DEVICE_BATCH),
+                ::testing::ValuesIn(autoBatchConfigs)),
         OVInferRequestCancellationTests::getTestCaseName);
 } // namespace
@@ -23,7 +23,10 @@ const std::vector<ov::AnyMap> Autoconfigs = {
 };

 const std::vector<ov::AnyMap> AutoBatchConfigs = {
-    {{ CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}}
+    // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU)
+    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , std::string(CommonTestUtils::DEVICE_GPU) + "(4)"},
+     // no timeout to avoid increasing the test time
+     {CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "0 "}}
 };

 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest,
@@ -20,7 +20,10 @@ const std::vector<ov::AnyMap> Multiconfigs = {
 };

 const std::vector<ov::AnyMap> AutoBatchConfigs = {
-    {{ CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}}
+    // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU)
+    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , std::string(CommonTestUtils::DEVICE_GPU) + "(4)"},
+     // no timeout to avoid increasing the test time
+     {CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "0 "}}
 };

 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestMultithreadingTests,
@@ -40,7 +40,10 @@ const std::vector<ov::AnyMap> Autoconfigs = {
 };

 const std::vector<ov::AnyMap> AutoBatchConfigs = {
-    {{ CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}}
+    // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU)
+    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , std::string(CommonTestUtils::DEVICE_GPU) + "(4)"},
+     // no timeout to avoid increasing the test time
+     {CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "0 "}}
 };

 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestPerfCountersTest,
@@ -24,7 +24,10 @@ const std::vector<ov::AnyMap> Autoconfigs = {
 };

 const std::vector<ov::AnyMap> AutoBatchConfigs = {
-    {{ CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}}
+    // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU)
+    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , std::string(CommonTestUtils::DEVICE_GPU) + "(4)"},
+     // no timeout to avoid increasing the test time
+     {CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "0 "}}
 };

 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestWaitTests,
@@ -20,7 +20,10 @@ const std::vector<ov::AnyMap> MultiConfigs = {
 };

 const std::vector<ov::AnyMap> AutoBatchConfigs = {
-    {{ CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}}
+    // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU)
+    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , std::string(CommonTestUtils::DEVICE_GPU) + "(4)"},
+     // no timeout to avoid increasing the test time
+     {CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "0 "}}
 };

 INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_BehaviorTests, OVRemoteTest,
@@ -80,7 +80,7 @@ public:
         // Skip test according to plugin specific disabledTestPatterns() (if any)
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
         std::tie(targetDevice, configuration) = this->GetParam();
-        function = ngraph::builder::subgraph::makeConvPoolRelu();
+        function = ov::test::behavior::getDefaultNGraphFunctionForTheDevice(targetDevice);
        cnnNet = InferenceEngine::CNNNetwork(function);
         // Load CNNNetwork to target plugins
         execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
@@ -20,6 +20,15 @@ namespace ov {
 namespace test {
 namespace behavior {

+inline std::shared_ptr<ngraph::Function> getDefaultNGraphFunctionForTheDevice(std::string targetDevice,
+                                                                              std::vector<size_t> inputShape = {1, 1, 32, 32},
+                                                                              ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) {
+    // auto-batching (which now relies on the dim tracking) needs a ngraph function without reshapes in that
+    if (targetDevice.find(CommonTestUtils::DEVICE_BATCH) != std::string::npos)
+        return ngraph::builder::subgraph::makeConvPoolReluNoReshapes(inputShape, ngPrc);
+    else  // for compatibility with the GNA that fails on any other ngraph function
+        return ngraph::builder::subgraph::makeConvPoolRelu(inputShape, ngPrc);
+}
+
 typedef std::tuple<
         std::string,  // Device name
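The helper keys off the device string alone, so the fixtures below stay device-agnostic. A small usage sketch (device strings follow the CommonTestUtils constants, i.e. "BATCH" and "GPU"):

    // Picks the reshape-free graph only when the target mentions BATCH
    auto batched = ov::test::behavior::getDefaultNGraphFunctionForTheDevice("BATCH:GPU");  // makeConvPoolReluNoReshapes
    auto plain = ov::test::behavior::getDefaultNGraphFunctionForTheDevice("GPU");          // makeConvPoolRelu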
@@ -49,7 +58,7 @@ public:
         // Skip test according to plugin specific disabledTestPatterns() (if any)
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
         std::tie(targetDevice, configuration) = this->GetParam();
-        function = ngraph::builder::subgraph::makeConvPoolRelu();
+        function = ov::test::behavior::getDefaultNGraphFunctionForTheDevice(targetDevice);
         ov::AnyMap params;
         for (auto&& v : configuration) {
             params.emplace(v.first, v.second);
@@ -30,7 +30,7 @@ public:
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
         std::tie(targetDevice, configuration) = this->GetParam();
         ie = PluginCache::get().ie(targetDevice);
-        function = ngraph::builder::subgraph::makeConvPoolRelu();
+        function = ov::test::behavior::getDefaultNGraphFunctionForTheDevice(targetDevice);
         cnnNet = InferenceEngine::CNNNetwork(function);
     }

@@ -16,7 +16,7 @@ public:
         // Skip test according to plugin specific disabledTestPatterns() (if any)
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
         std::tie(targetDevice, configuration) = this->GetParam();
-        function = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, 640, 640});
+        function = ov::test::behavior::getDefaultNGraphFunctionForTheDevice(targetDevice, {1, 3, 640, 640});
         cnnNet = InferenceEngine::CNNNetwork(function);
         // Load CNNNetwork to target plugins
         execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
@@ -25,7 +25,7 @@ public:
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
         std::tie(streamExecutorNumber, targetDevice, configuration) = this->GetParam();
         // Create CNNNetwork from ngrpah::Function
-        function = ngraph::builder::subgraph::makeConvPoolRelu({1, 1, 32, 32});
+        function = ov::test::behavior::getDefaultNGraphFunctionForTheDevice(targetDevice);
         cnnNet = InferenceEngine::CNNNetwork(function);
     }

@@ -336,7 +336,7 @@ public:
     void SetUp() override {
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
         std::tie(netPrecision, targetDevice, configuration) = this->GetParam();
-        function = ngraph::builder::subgraph::makeConvPoolRelu();
+        function = ov::test::behavior::getDefaultNGraphFunctionForTheDevice(targetDevice);
         cnnNet = InferenceEngine::CNNNetwork(function);
         execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
     }
@@ -14,7 +14,7 @@ public:
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
         std::tie(targetDevice, configuration) = this->GetParam();
         ie = PluginCache::get().ie(targetDevice);
-        function = ngraph::builder::subgraph::makeConvPoolRelu();
+        function = ov::test::behavior::getDefaultNGraphFunctionForTheDevice(targetDevice);
         cnnNet = InferenceEngine::CNNNetwork(function);
         configuration.insert({ InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES });
         // Load CNNNetwork to target plugins
@@ -39,7 +39,7 @@ public:
         // Skip test according to plugin specific disabledTestPatterns() (if any)
         SKIP_IF_CURRENT_TEST_IS_DISABLED();
         std::tie(targetDevice, configuration) = this->GetParam();
-        function = ngraph::builder::subgraph::makeConvPoolRelu();
+        function = ov::test::behavior::getDefaultNGraphFunctionForTheDevice(targetDevice);
     }

     void TearDown() override {
@@ -17,7 +17,7 @@ std::string OVInferRequestPerfCountersTest::getTestCaseName(const testing::TestP
 void OVInferRequestPerfCountersTest::SetUp() {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
     std::tie(targetDevice, configuration) = this->GetParam();
-    function = ngraph::builder::subgraph::makeConvPoolRelu();
+    function = ov::test::behavior::getDefaultNGraphFunctionForTheDevice(targetDevice);
     configuration.insert(ov::enable_profiling(true));
     execNet = core->compile_model(function, targetDevice, configuration);
     req = execNet.create_infer_request();
@@ -47,6 +47,30 @@ inline std::shared_ptr<ngraph::Function> makeConvPoolRelu(std::vector<size_t> in
     return fnPtr;
 }

+inline std::shared_ptr<ngraph::Function> makeConvPoolReluNoReshapes(std::vector<size_t> inputShape = {1, 1, 32, 32},
+                                                                    ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) {
+    auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
+    params.front()->set_friendly_name("Param_1");
+    params.front()->output(0).get_tensor().set_names({"data"});
+    auto conv1 = ngraph::builder::makeConvolution(params.front(), ngPrc, {1, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
+                                                  ngraph::op::PadType::EXPLICIT, 4);
+    conv1->set_friendly_name("Conv_1");
+    conv1->output(0).get_tensor().set_names({"conv"});
+    std::vector<size_t> stride{1, 1}, padB{0, 0}, padE = padB, kernel{1, 2};
+    auto pool1 = std::make_shared<ngraph::opset1::MaxPool>(conv1, stride, padB, padE, kernel,
+                                                           ngraph::op::RoundingType::FLOOR,
+                                                           ngraph::op::PadType::EXPLICIT);
+    pool1->output(0).get_tensor().set_names({"pool"});
+    pool1->set_friendly_name("Pool_1");
+    auto relu1 = std::make_shared<ngraph::opset1::Relu>(pool1);
+    relu1->set_friendly_name("Relu_1");
+    relu1->output(0).get_tensor().set_names({"relu"});
+    ngraph::Shape reluShape = relu1->outputs()[0].get_tensor().get_shape();
+    ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(relu1)};
+    std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(results, params);
+    return fnPtr;
+}
+
 inline std::shared_ptr<ngraph::Function> makeConvPool2Relu2(std::vector<size_t> inputShape = {1, 1, 32, 32},
                                                             ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) {
     auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
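The new builder above deliberately omits the Reshape that makeConvPoolRelu inserts: per the commit's rationale, auto-batching now relies on tracking the batch dimension (N in NCHW) through the graph, and a Reshape that flattens it away breaks that tracking. A hedged usage sketch (the include path is assumed to follow the ngraph_functions test-utils tree):

    #include "ngraph_functions/subgraph_builders.hpp"

    int main() {
        auto fn = ngraph::builder::subgraph::makeConvPoolReluNoReshapes({1, 1, 32, 32});
        // the batch dimension survives every node, e.g. the conv output shape is {1, 4, 32, 30}
        return 0;
    }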