[IE TESTS] Move InferRequestTests (#618)

* [IE TESTS] move Infer_request tests

* fix v0

* [ci-skip][IE TESTS] test update basic class v0

* [ci-skip][IE TESTS] test update basic class v1

* [ci-skip][IE TESTS] test update basic class

* [ci-skip][IE TESTS] test update basic class v3

* [ci-skip][IE TESTS] test update basic class final versions

* [ci-skip][IE TESTS] fix

* [ci-skip][IE TESTS] fix codestyle and comment

Co-authored-by: Irina Efode <irina.efode@intel.com>
Anton Zaytsev 2020-06-03 12:16:00 +03:00 committed by GitHub
parent ed85690136
commit b457553593
78 changed files with 2592 additions and 2386 deletions

View File

@@ -15,4 +15,4 @@ addIeTargetTest(
ADD_CPPLINT
LABELS
TEMPLATE
)

View File

@@ -6,8 +6,6 @@
#include "behavior/config.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {

View File

@@ -6,8 +6,6 @@
#include "behavior/exec_graph_info.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
@@ -24,4 +22,4 @@ namespace {
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
ExecGraphTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,24 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/infer_request.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
InferRequestTests::getTestCaseName);
} // namespace
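Every suite touched by this commit follows the same GoogleTest value-parameterized pattern seen in the file above: a tuple of (network precision, device name, plugin config) fed through ::testing::Combine, with getTestCaseName deriving a readable test name from the tuple. A minimal, self-contained sketch of that pattern (the DemoBehaviorTests fixture below is illustrative only, not part of this commit):

#include <map>
#include <string>
#include <tuple>
#include <gtest/gtest.h>

// Parameter tuple mirrors the Combine() calls in this commit:
// (network precision, device name, plugin config map).
using Params = std::tuple<std::string, std::string, std::map<std::string, std::string>>;

class DemoBehaviorTests : public testing::TestWithParam<Params> {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<Params> &info) {
        std::string precision, device;
        std::map<std::string, std::string> config;
        std::tie(precision, device, config) = info.param;
        // gtest requires alphanumeric case names
        return precision + "_" + device + "_" + std::to_string(config.size()) + "cfg";
    }
};

TEST_P(DemoBehaviorTests, DeviceNameIsNotEmpty) {
    EXPECT_FALSE(std::get<1>(GetParam()).empty());
}

// One instantiation = the full cross product of the value lists.
INSTANTIATE_TEST_CASE_P(smoke_Demo, DemoBehaviorTests,
        ::testing::Combine(
                ::testing::Values(std::string("FP32"), std::string("FP16")),
                ::testing::Values(std::string("TEMPLATE")),
                ::testing::Values(std::map<std::string, std::string>{})),
        DemoBehaviorTests::getTestCaseName);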

View File

@@ -6,8 +6,6 @@
#include "behavior/infer_request_callback.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
@@ -15,15 +13,13 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
};
const std::vector<std::map<std::string, std::string>> configs = {
{{{}}}
{}
};
const std::vector<std::string> devices{CommonTestUtils::DEVICE_CPU};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(devices),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
CallbackTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,25 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/infer_request_config.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
InferConfigTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,28 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_input.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
InferRequestInputTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,28 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_output.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
InferRequestOutputTests::getTestCaseName);
} // namespace

View File

@@ -6,8 +6,6 @@
#include "behavior/set_preprocess.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
@@ -18,11 +16,11 @@ namespace {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreProcessTests,
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
PreProcessTests::getTestCaseName);
PreprocessTest::getTestCaseName);
} // namespace

View File

@@ -1,8 +0,0 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request.hpp"
#include "template_test_data.hpp"
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName);

View File

@@ -1,13 +0,0 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_config.hpp"
#include "template_test_data.hpp"
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues),
getConfigTestCaseName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestConfig,
ValuesIn(BehTestParams::concat(withCorrectConfValues, withCorrectConfValuesNetworkOnly)),
getConfigTestCaseName);

View File

@@ -1,9 +0,0 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_input.hpp"
#include "template_test_data.hpp"
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues),
getTestCaseName);

View File

@@ -1,9 +0,0 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_output.hpp"
#include "template_test_data.hpp"
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues),
getOutputTestCaseName);

View File

@@ -5,15 +5,16 @@
#include "multi-device/multi_device_config.hpp"
#include "behavior/config.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> conf = {
{}
};
const std::vector<std::map<std::string, std::string>> Configs = {
{},
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
@@ -25,13 +26,10 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> MultiConfigs = {
{},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS,
InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS,
InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU},
@@ -71,6 +69,25 @@ namespace {
{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_LIMIT, "NAN"}}
};
const std::vector<std::map<std::string, std::string>> multiconf = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(conf)),
CorrectConfigAPITests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CorrectConfigAPITests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiconf)),
CorrectConfigAPITests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
@@ -86,39 +103,17 @@ namespace {
IncorrectConfigTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigAPITests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(inconfigs)),
IncorrectConfigAPITests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, IncorrectConfigAPITests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiinconfigs)),
IncorrectConfigAPITests::getTestCaseName);
const std::vector<std::map<std::string, std::string>> conf = {
{}
};
const std::vector<std::map<std::string, std::string>> multiconf = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(conf)),
CorrectConfigAPITests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CorrectConfigAPITests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiconf)),
CorrectConfigAPITests::getTestCaseName);
} // namespace

View File

@@ -3,11 +3,8 @@
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/exec_graph_info.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,

View File

@@ -0,0 +1,39 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/infer_request.hpp"
#include "ie_plugin_config.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{},
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}}
};
const std::vector<std::map<std::string, std::string>> Multiconfigs = {
{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(configs)),
InferRequestTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(Multiconfigs)),
InferRequestTests::getTestCaseName);
} // namespace
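The Multiconfigs entry above is the one setting the MULTI virtual device requires: a priority list of the physical devices it should dispatch to. A hedged sketch of the same key used from application code with the classic InferenceEngine API of this era (the loadOnMulti helper is illustrative, not from this PR):

#include <map>
#include <string>
#include <ie_core.hpp>
#include <multi-device/multi_device_config.hpp>

// Load a network on the MULTI virtual device, telling it to dispatch
// infer requests to the CPU plugin -- the same config key the
// Multiconfigs vector above exercises.
InferenceEngine::ExecutableNetwork loadOnMulti(InferenceEngine::Core &ie,
                                               InferenceEngine::CNNNetwork &net) {
    std::map<std::string, std::string> config = {
        {InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "CPU"}};
    return ie.LoadNetwork(net, "MULTI", config);
}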

View File

@@ -6,8 +6,6 @@
#include "behavior/infer_request_callback.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
@@ -22,7 +20,7 @@ const std::vector<std::map<std::string, std::string>> configs = {
const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}}
};/**/
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests,
::testing::Combine(

View File

@@ -0,0 +1,77 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_config.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}}
};
const std::vector<std::map<std::string, std::string>> InConfigs = {
{},
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}},
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}},
{{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::NO}},
{{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}},
{{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_LIMIT, "10"}},
};
const std::vector<std::map<std::string, std::string>> MultiInConfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS,
InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS,
InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::NO}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_LIMIT, "10"}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(configs)),
InferConfigTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
InferConfigTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigInTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(InConfigs)),
InferConfigTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferConfigInTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(MultiInConfigs)),
InferConfigInTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,44 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_input.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16,
InferenceEngine::Precision::U8,
InferenceEngine::Precision::U16,
InferenceEngine::Precision::I16
};
const std::vector<std::map<std::string, std::string>> configs = {
{},
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}
};
const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(configs)),
InferRequestInputTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestInputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
InferRequestInputTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,40 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_output.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32
};
const std::vector<std::map<std::string, std::string>> configs = {
{},
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}
};
const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(configs)),
InferRequestOutputTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestOutputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
InferRequestOutputTests::getTestCaseName);
} // namespace

View File

@@ -2,12 +2,11 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <functional_test_utils/behavior_test_utils.hpp>
#include "multi-device/multi_device_config.hpp"
#include "behavior/set_preprocess.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
@@ -24,17 +23,17 @@ namespace {
{{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreProcessTests,
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(configs)),
PreProcessTests::getTestCaseName);
PreprocessTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreProcessTests,
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreprocessTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
PreProcessTests::getTestCaseName);
PreprocessTest::getTestCaseName);
} // namespace

View File

@@ -6,8 +6,6 @@
#include "gna/gna_config.hpp"
#include "behavior/config.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {

View File

@@ -2,12 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/exec_graph_info.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
@@ -25,4 +20,4 @@ namespace {
::testing::ValuesIn(configs)),
ExecGraphTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,19 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/infer_request.hpp"
#include "ie_plugin_config.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::Values(std::map<std::string, std::string>({}))),
InferRequestTests::getTestCaseName);
} // namespace

View File

@@ -6,8 +6,6 @@
#include "behavior/infer_request_callback.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
@@ -31,9 +29,9 @@ INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests,
CallbackTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CallbackTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
CallbackTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,43 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gna/gna_config.hpp"
#include "behavior/infer_request_config.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> Inconfigs = {
{{InferenceEngine::GNAConfigParams::KEY_GNA_SCALE_FACTOR, "1.0"}},
{{InferenceEngine::GNAConfigParams::KEY_GNA_PRECISION, "I8"}},
{{InferenceEngine::GNAConfigParams::KEY_GNA_FIRMWARE_MODEL_IMAGE, "gfile"}},
{{InferenceEngine::GNAConfigParams::KEY_GNA_DEVICE_MODE, InferenceEngine::GNAConfigParams::GNA_AUTO}},
{{InferenceEngine::GNAConfigParams::KEY_GNA_DEVICE_MODE, InferenceEngine::GNAConfigParams::GNA_SW_FP32}},
{{InferenceEngine::GNAConfigParams::KEY_GNA_DEVICE_MODE, InferenceEngine::GNAConfigParams::GNA_SW}},
{{InferenceEngine::GNAConfigParams::KEY_GNA_DEVICE_MODE, InferenceEngine::GNAConfigParams::GNA_SW_EXACT}},
{{InferenceEngine::GNAConfigParams::KEY_GNA_COMPACT_MODE, InferenceEngine::PluginConfigParams::NO}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(configs)),
InferConfigTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigInTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(Inconfigs)),
InferConfigInTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,25 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/infer_request_input.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::U8,
InferenceEngine::Precision::I16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(configs)),
InferRequestInputTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,24 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/infer_request_output.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(configs)),
InferRequestOutputTests::getTestCaseName);
} // namespace

View File

@@ -9,6 +9,26 @@
std::vector<std::string> disabledTestPatterns() {
return {
// TODO: FIX BUG 31661
// TODO: support InferRequest in GNAPlugin
".*InferRequestTests\\.canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait.*",
// TODO: FIX BUG 23740
".*InferRequestTests\\.CanCreateTwoExeNetworks.*",
// TODO: FIX BUG 26702
".*InferRequestTests\\.FailedAsyncInferWithNegativeTimeForWait.*",
// TODO: FIX BUG 23741
".*InferRequestTests\\.canRun3SyncRequestsConsistentlyFromThreads.*",
// TODO: FIX BUG 23742
".*InferRequestTests\\.canWaitWithotStartAsync.*",
// TODO: FIX BUG 23743
".*InferRequestTests\\.returnDeviceBusyOnSetBlobAfterAsyncInfer.*",
".*InferRequestTests\\.returnDeviceBusyOnGetBlobAfterAsyncInfer.*",
".*InferRequestTests\\.returnDeviceBusyOnGetPerformanceCountAfterAsyncInfer.*",
".*InferRequestTests\\.returnDeviceBusyOnStartInferAfterAsyncInfer.*",
".*InferRequestTests\\.returnDeviceBusyOnGetUserDataAfterAsyncInfer.*",
".*InferRequestTests\\.returnDeviceBusyOnSetUserDataAfterAsyncInfer.*",
// TODO: FIX BUG 31661
".*InferRequestTests\\.canStartSeveralAsyncInsideCompletionCallbackNoSafeDtorWithoutWait.*",
// TODO: FIX BUG 31661
".*Behavior.*CallbackThrowException.*",
// TODO: FIX BUG 32210
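The entries above are regular expressions matched against full gtest names; matching tests bail out early through the SKIP_IF_CURRENT_TEST_IS_DISABLED() macro used in the test bodies. A rough sketch of the matching step, assuming a hypothetical helper isTestDisabled (the real wiring lives in the shared test utilities):

#include <regex>
#include <string>
#include <vector>

std::vector<std::string> disabledTestPatterns();  // per-plugin list, as above

// Hypothetical helper: true if the full test name ("Suite.Case")
// matches any disabled pattern, so the test should be skipped.
bool isTestDisabled(const std::string &fullName) {
    for (const auto &pattern : disabledTestPatterns()) {
        if (std::regex_match(fullName, std::regex(pattern)))
            return true;
    }
    return false;
}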

View File

@@ -6,8 +6,6 @@
#include "behavior/config.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
@@ -38,16 +36,16 @@ namespace {
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::ValuesIn(inconfigs)),
IncorrectConfigTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, IncorrectConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiinconfigs)),
IncorrectConfigTests::getTestCaseName);
@@ -60,31 +58,31 @@ namespace {
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::ValuesIn(conf)),
CorrectConfigAPITests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CorrectConfigAPITests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiconf)),
CorrectConfigAPITests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigAPITests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::ValuesIn(conf)),
IncorrectConfigAPITests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, IncorrectConfigAPITests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiconf)),
IncorrectConfigAPITests::getTestCaseName);
} // namespace

View File

@@ -3,11 +3,8 @@
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/exec_graph_info.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
@ -25,4 +22,4 @@ namespace {
::testing::ValuesIn(configs)),
ExecGraphTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,32 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/infer_request.hpp"
#include "ie_plugin_config.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_GPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::Values(std::map<std::string, std::string>({}))),
InferRequestTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(configs)),
InferRequestTests::getTestCaseName);
} // namespace

View File

@@ -6,8 +6,6 @@
#include "behavior/infer_request_callback.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
@@ -30,9 +28,9 @@ INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests,
CallbackTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CallbackTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
CallbackTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,37 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_config.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_GPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::ValuesIn(configs)),
InferConfigTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
InferConfigTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,42 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_input.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP16,
InferenceEngine::Precision::U8,
InferenceEngine::Precision::I16,
InferenceEngine::Precision::I32
};
const std::vector<std::map<std::string, std::string>> configs = {
{},
{{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}
};
const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU},
{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS,
InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::ValuesIn(configs)),
InferRequestInputTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestInputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
InferRequestInputTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,37 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_output.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32
};
const std::vector<std::map<std::string, std::string>> configs = {
{},
{{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}
};
const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU},
{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::ValuesIn(configs)),
InferRequestOutputTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestOutputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
InferRequestOutputTests::getTestCaseName);
} // namespace

View File

@@ -3,12 +3,12 @@
//
#include "multi-device/multi_device_config.hpp"
#include <functional_test_utils/behavior_test_utils.hpp>
#include "behavior/set_preprocess.hpp"
using namespace LayerTestsDefinitions;
namespace {
using PreprocessBehTest = BehaviorTestsUtils::BehaviorTestsBasic;
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
@@ -22,17 +22,17 @@ namespace {
{{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreProcessTests,
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::ValuesIn(configs)),
PreProcessTests::getTestCaseName);
PreprocessTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreProcessTests,
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreprocessTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
PreProcessTests::getTestCaseName);
PreprocessTest::getTestCaseName);
} // namespace

View File

@@ -6,8 +6,6 @@
#include "vpu/vpu_plugin_config.hpp"
#include "behavior/config.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
@@ -43,7 +41,7 @@ namespace {
const std::vector<std::map<std::string, std::string>> MultiConfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}}
};
@@ -89,15 +87,15 @@ namespace {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), "ON"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{CONFIG_KEY(LOG_LEVEL), "VERBOSE"}},
{CONFIG_KEY(LOG_LEVEL), "VERBOSE"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_CONFIG_KEY(IGNORE_IR_STATISTIC), "ON"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "-1"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "0"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "1"}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigTests,
@@ -122,7 +120,7 @@ namespace {
const std::vector<std::map<std::string, std::string>> multiInconf = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_MYRIAD},
{"some_nonexistent_key", "some_unknown_value"}}
{"some_nonexistent_key", "some_unknown_value"}}
};

View File

@@ -2,12 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/exec_graph_info.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP16
@@ -23,4 +19,4 @@ namespace {
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::ValuesIn(configs)),
ExecGraphTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,31 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/infer_request.hpp"
#include "ie_plugin_config.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::Values(std::map<std::string, std::string>({}))),
InferRequestTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(configs)),
InferRequestTests::getTestCaseName);
} // namespace

View File

@@ -6,8 +6,6 @@
#include "behavior/infer_request_callback.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP16

View File

@@ -0,0 +1,78 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "vpu/vpu_plugin_config.hpp"
#include "behavior/infer_request_config.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_MYRIAD}}
};
const std::vector<std::map<std::string, std::string>> Inconfigs = {
{},
{{VPU_CONFIG_KEY(IGNORE_IR_STATISTIC), CONFIG_VALUE(YES)}},
{{VPU_CONFIG_KEY(IGNORE_IR_STATISTIC), CONFIG_VALUE(NO)}},
{{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(YES)}},
{{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO)}},
{{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_NONE)}},
{{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_ERROR)}},
{{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_WARNING)}},
{{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_INFO)}},
{{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}},
{{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_TRACE)}},
{{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}},
{{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)}},
{{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(YES)}},
{{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(NO)}}
};
const std::vector<std::map<std::string, std::string>> InmultiConfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::ValuesIn(configs)),
InferConfigTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
InferConfigTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigInTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::ValuesIn(Inconfigs)),
InferConfigInTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferConfigInTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(InmultiConfigs)),
InferConfigInTests::getTestCaseName);
} // namespace
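The CONFIG_KEY / CONFIG_VALUE / VPU_CONFIG_KEY macros used throughout this file are thin wrappers over the string constants declared in ie_plugin_config.hpp and the VPU config headers, so a config entry can be spelled either way. A small sketch, assuming the standard macro expansion (e.g. CONFIG_KEY(LOG_LEVEL) resolving to InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL):

#include <map>
#include <string>
#include <ie_plugin_config.hpp>

// Build the LOG_LEVEL=LOG_DEBUG entry the Inconfigs vector above uses.
std::map<std::string, std::string> logDebugConfig() {
    // CONFIG_KEY(LOG_LEVEL)   -> the "LOG_LEVEL" key constant
    // CONFIG_VALUE(LOG_DEBUG) -> the "LOG_DEBUG" value constant
    return {{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}};
}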

View File

@@ -0,0 +1,37 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_input.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP16,
InferenceEngine::Precision::U8
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::ValuesIn(configs)),
InferRequestInputTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestInputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
InferRequestInputTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,37 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_output.hpp"
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP16,
InferenceEngine::Precision::U8
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::ValuesIn(configs)),
InferRequestOutputTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestOutputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
InferRequestOutputTests::getTestCaseName);
} // namespace

View File

@@ -3,12 +3,11 @@
//
#include "multi-device/multi_device_config.hpp"
#include <functional_test_utils/behavior_test_utils.hpp>
#include "behavior/set_preprocess.hpp"
using namespace LayerTestsDefinitions;
namespace {
using PreprocessBehTest = BehaviorTestsUtils::BehaviorTestsBasic;
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP16
};
@@ -21,17 +20,17 @@ namespace {
{{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_MYRIAD}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreProcessTests,
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::ValuesIn(configs)),
PreProcessTests::getTestCaseName);
PreprocessTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreProcessTests,
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreprocessTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
PreProcessTests::getTestCaseName);
PreprocessTest::getTestCaseName);
} // namespace

View File

@@ -18,52 +18,171 @@
#include <vpu/private_plugin_config.hpp>
#include <gna/gna_config.hpp>
#include <multi-device/multi_device_config.hpp>
#include <ie_core.hpp>
#include "ie_common.h"
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include <threading/ie_executor_manager.hpp>
#include <functional_test_utils/behavior_test_utils.hpp>
#include "ngraph_functions/pass/convert_prc.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
namespace LayerTestsDefinitions {
typedef std::tuple<
InferenceEngine::Precision, // Network precision
std::string, // Device name
std::map<std::string, std::string> // Config
> ConfigParams;
using CorrectConfigTests = BehaviorTestsUtils::BehaviorTestsBasic;
class CorrectConfigTests : public testing::WithParamInterface<ConfigParams>,
public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<ConfigParams> obj);
// Setting empty config doesn't throw
TEST_P(CorrectConfigTests, SetEmptyConfig) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
std::map<std::string, std::string> config;
ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
ASSERT_NO_THROW(ie->SetConfig(config, targetDevice));
}
protected:
void SetUp() override;
void TearDown() override;
};
// Setting correct config doesn't throw
TEST_P(CorrectConfigTests, SetCorrectConfig) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice));
}
class IncorrectConfigTests : public testing::WithParamInterface<ConfigParams>,
public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<ConfigParams> obj);
using IncorrectConfigTests = BehaviorTestsUtils::BehaviorTestsBasic;
protected:
void SetUp() override;
void TearDown() override;
};
TEST_P(IncorrectConfigTests, SetConfigWithIncorrectKey) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
ASSERT_THROW(ie->SetConfig(configuration, targetDevice),
InferenceEngine::details::InferenceEngineException);
} else {
ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice));
}
}
class CorrectConfigAPITests : public testing::WithParamInterface<ConfigParams>,
public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<ConfigParams> obj);
TEST_P(IncorrectConfigTests, canNotLoadNetworkWithIncorrectConfig) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
ASSERT_THROW(auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration),
InferenceEngine::details::InferenceEngineException);
}
protected:
void SetUp() override;
void TearDown() override;
};
using IncorrectConfigAPITests = BehaviorTestsUtils::BehaviorTestsBasic;
class IncorrectConfigAPITests : public testing::WithParamInterface<ConfigParams>,
public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<ConfigParams> obj);
TEST_P(IncorrectConfigAPITests, SetConfigWithNoExistingKey) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
if (targetDevice.find(CommonTestUtils::DEVICE_GNA) != std::string::npos) {
ASSERT_THROW(ie->SetConfig(configuration, targetDevice), InferenceEngine::NotFound);
} else {
try {
ie->SetConfig(configuration, targetDevice);
} catch (const InferenceEngine::details::InferenceEngineException &) {}
}
}
protected:
void SetUp() override;
void TearDown() override;
};
using CorrectConfigAPITests = BehaviorTestsUtils::BehaviorTestsBasic;
} // namespace LayerTestsDefinitions
TEST_P(CorrectConfigAPITests, canSetExclusiveAsyncRequests) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load config
std::map<std::string, std::string> config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(YES)}};
config.insert(configuration.begin(), configuration.end());
if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
ASSERT_NO_THROW(ie->SetConfig(config, targetDevice));
}
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, config);
execNet.CreateInferRequest();
if ((targetDevice == CommonTestUtils::DEVICE_HDDL) || (targetDevice == CommonTestUtils::DEVICE_GNA)) {
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
} else if ((targetDevice == CommonTestUtils::DEVICE_FPGA) ||
(targetDevice == CommonTestUtils::DEVICE_KEEMBAY) || (targetDevice == CommonTestUtils::DEVICE_MYRIAD)) {
ASSERT_EQ(2u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
} else if (targetDevice == CommonTestUtils::DEVICE_MULTI) {
} else {
ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
}
}
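A detail this test (and the two below) lean on: std::map::insert never overwrites an existing key, so the EXCLUSIVE_ASYNC_REQUESTS entry placed in config first cannot be clobbered by a conflicting value arriving via the test's configuration parameter. A standalone check of that standard-library behavior:

#include <cassert>
#include <map>
#include <string>

int main() {
    std::map<std::string, std::string> config = {{"EXCLUSIVE_ASYNC_REQUESTS", "YES"}};
    const std::map<std::string, std::string> configuration = {
        {"EXCLUSIVE_ASYNC_REQUESTS", "NO"},   // duplicate key: ignored by insert()
        {"CPU_THROUGHPUT_STREAMS", "AUTO"}};  // new key: merged in
    config.insert(configuration.begin(), configuration.end());
    assert(config["EXCLUSIVE_ASYNC_REQUESTS"] == "YES");
    assert(config["CPU_THROUGHPUT_STREAMS"] == "AUTO");
    return 0;
}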
TEST_P(CorrectConfigAPITests, withoutExclusiveAsyncRequests) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load config
std::map<std::string, std::string> config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(NO)}};
config.insert(configuration.begin(), configuration.end());
if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
ASSERT_NO_THROW(ie->SetConfig(config, targetDevice));
}
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, config);
execNet.CreateInferRequest();
if ((targetDevice == CommonTestUtils::DEVICE_FPGA) || (targetDevice == CommonTestUtils::DEVICE_MYRIAD) ||
(targetDevice == CommonTestUtils::DEVICE_KEEMBAY)) {
ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
} else if (targetDevice == CommonTestUtils::DEVICE_MULTI) {
} else {
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
}
}
TEST_P(CorrectConfigAPITests, reusableCPUStreamsExecutor) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
{
// Load config
std::map<std::string, std::string> config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(NO)}};
config.insert(configuration.begin(), configuration.end());
if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
ASSERT_NO_THROW(ie->SetConfig(config, targetDevice));
}
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, config);
execNet.CreateInferRequest();
if ((targetDevice == CommonTestUtils::DEVICE_FPGA) || (targetDevice == CommonTestUtils::DEVICE_MYRIAD) ||
(targetDevice == CommonTestUtils::DEVICE_KEEMBAY)) {
ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
} else if (targetDevice == CommonTestUtils::DEVICE_MULTI) {
} else {
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
ASSERT_GE(2u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
}
}
if (targetDevice == CommonTestUtils::DEVICE_CPU) {
ASSERT_NE(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
ASSERT_NO_THROW(ie->UnregisterPlugin("CPU"));
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
}
}

View File

@@ -2,33 +2,173 @@
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <vector>
#include <string>
#include <memory>
#include "ie_extension.h"
#include <condition_variable>
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"
#include <sstream>  // for std::istringstream in separateStrToVec below
#include <ie_core.hpp>
#include <details/ie_cnn_network_tools.h>
#include <functional_test_utils/behavior_test_utils.hpp>
#include <exec_graph_info.hpp>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/blob_utils.hpp"
using ExecGraphTests = BehaviorTestsUtils::BehaviorTestsBasic;
// Split a comma-separated list of original layer names into separate strings
inline std::vector<std::string> separateStrToVec(std::string str, const char sep) {
std::vector<std::string> result;
std::istringstream stream(str);
std::string strVal;
while (getline(stream, strVal, sep)) {
result.push_back(strVal);
}
return result;
}
TEST_P(ExecGraphTests, CheckExecGraphInfoBeforeExecution) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
InferenceEngine::CNNNetwork execGraph;
if (targetDevice == CommonTestUtils::DEVICE_CPU || targetDevice == CommonTestUtils::DEVICE_GPU) {
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo());
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
// Store all the original layers from the network
const auto originalLayers = function->get_ops();
std::map<std::string, int> originalLayersMap;
for (const auto &layer : originalLayers) {
if (layer->description() == "Result")
continue;
originalLayersMap[layer->get_friendly_name()] = 0;
}
int IteratorForLayersConstant = 0;
// Store all the layers from the executable graph information represented as CNNNetwork
const std::vector<InferenceEngine::CNNLayerPtr> execGraphLayers =
InferenceEngine::details::CNNNetSortTopologically(execGraph);
for (const auto &execLayer : execGraphLayers) {
IE_SUPPRESS_DEPRECATED_START
// Each layer from the execGraphInfo network must have PM data option set
ASSERT_EQ("not_executed", execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER]);
// Parse origin layer names (fused/merged layers) from the executable graph
// and compare with layers from the original model
auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
if (origFromExecLayer == "")
IteratorForLayersConstant++;
std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
auto origLayer = originalLayersMap.find(layer);
ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
origLayer->second++;
});
IE_SUPPRESS_DEPRECATED_END
}
// All layers from the original IR must be present within ExecGraphInfo
for (auto &layer : originalLayersMap) {
if ((layer.second == 0) && (IteratorForLayersConstant > 0)) {
IteratorForLayersConstant--;
continue;
}
ASSERT_GE(layer.second, 0);
}
} else {
ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice).GetExecGraphInfo(),
InferenceEngine::details::InferenceEngineException);
}
}
TEST_P(ExecGraphTests, CheckExecGraphInfoAfterExecution) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
InferenceEngine::CNNNetwork execGraph;
if (targetDevice == CommonTestUtils::DEVICE_CPU || targetDevice == CommonTestUtils::DEVICE_GPU) {
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo());
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
// Store all the original layers from the network
const auto originalLayers = function->get_ops();
std::map<std::string, int> originalLayersMap;
for (const auto &layer : originalLayers) {
originalLayersMap[layer->get_friendly_name()] = 0;
}
int IteratorForLayersConstant = 0;
// Store all the layers from the executable graph information represented as CNNNetwork
const std::vector<InferenceEngine::CNNLayerPtr> execGraphLayers =
InferenceEngine::details::CNNNetSortTopologically(execGraph);
bool has_layer_with_valid_time = false;
for (const auto &execLayer : execGraphLayers) {
IE_SUPPRESS_DEPRECATED_START
// At least one layer in the topology should be executed and have valid perf counter value
try {
float x = static_cast<float>(std::atof(
execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER].c_str()));
ASSERT_GE(x, 0.0f);
has_layer_with_valid_time = true;
} catch (std::exception &) {}
// Parse origin layer names (fused/merged layers) from the executable graph
// and compare with layers from the original model
auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
if (origFromExecLayer == "")
IteratorForLayersConstant++;
std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
auto origLayer = originalLayersMap.find(layer);
ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
origLayer->second++;
});
IE_SUPPRESS_DEPRECATED_END
}
ASSERT_TRUE(has_layer_with_valid_time);
// All layers from the original IR must be present within ExecGraphInfo
for (auto &layer : originalLayersMap) {
if ((layer.second == 0) && (IteratorForLayersConstant > 0)) {
IteratorForLayersConstant--;
continue;
}
ASSERT_GE(layer.second, 0);
}
} else {
ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice).GetExecGraphInfo(),
InferenceEngine::details::InferenceEngineException);
}
}
TEST_P(ExecGraphTests, CheckExecGraphInfoSerialization) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
InferenceEngine::CNNNetwork execGraph;
if (targetDevice == CommonTestUtils::DEVICE_CPU || targetDevice == CommonTestUtils::DEVICE_GPU) {
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo());
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
execGraph.serialize("exeNetwork.xml", "exeNetwork.bin");
ASSERT_EQ(0, std::remove("exeNetwork.xml"));
} else {
ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice).GetExecGraphInfo(),
InferenceEngine::details::InferenceEngineException);
}
}

View File

@ -0,0 +1,624 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <vector>
#include <string>
#include <memory>
#include "ie_extension.h"
#include <condition_variable>
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"
#include "multi-device/multi_device_config.hpp"
#include <ie_core.hpp>
#include <cpp_interfaces/exception2status.hpp>
#include <thread>
#include <functional_test_utils/behavior_test_utils.hpp>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
using InferRequestTests = BehaviorTestsUtils::BehaviorTestsBasic;
// Setting empty config to LoadNetwork doesn't throw
TEST_P(InferRequestTests, SetEmptyConfig) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
InferenceEngine::IExecutableNetwork::Ptr execNet;
std::map<std::string, std::string> config {};
if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice));
ASSERT_NO_THROW(execNet = ie->LoadNetwork(cnnNet, targetDevice, config));
} else {
ASSERT_THROW(ie->SetConfig(configuration, targetDevice),
InferenceEngine::details::InferenceEngineException);
ASSERT_NO_THROW(execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration));
}
}
// Load correct network to Plugin to get executable network
TEST_P(InferRequestTests, canLoadCorrectNetworkToGetExecutable) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
InferenceEngine::IExecutableNetwork::Ptr execNet;
ASSERT_NO_THROW(execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration));
}
TEST_P(InferRequestTests, CanCreateTwoExeNetworks) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
InferenceEngine::IExecutableNetwork::Ptr execNet;
for (auto i = 0; i < 2; i++) {
ASSERT_NO_THROW(execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration));
}
}
TEST_P(InferRequestTests, CanCreateInferRequest) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
}
TEST_P(InferRequestTests, failToSetNullptrForInput) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr inputBlob = nullptr;
ASSERT_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob),
InferenceEngine::details::InferenceEngineException);
}
TEST_P(InferRequestTests, failToSetEmptyInputBlob) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr blob;
ASSERT_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob),
InferenceEngine::details::InferenceEngineException);
}
TEST_P(InferRequestTests, failToSetEmptyOutputBlob) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr blob;
ASSERT_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob),
InferenceEngine::details::InferenceEngineException);
}
TEST_P(InferRequestTests, failToSetNotAllocatedInput) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr blob =
FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc());
ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob));
}
TEST_P(InferRequestTests, failToSetNotAllocatedOutput) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr blob =
FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob));
}
TEST_P(InferRequestTests, failToSetBlobWithIncorrectName) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
const char incorrect_input_name[] = "incorrect_input_name";
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr blob =
FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc());
blob->allocate();
ASSERT_THROW(req.SetBlob(incorrect_input_name, blob),
InferenceEngine::details::InferenceEngineException);
}
TEST_P(InferRequestTests, failToSetInputWithIncorrectSizes) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr blob =
FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc());
blob->allocate();
blob->getTensorDesc().getDims()[0]*=2;
ASSERT_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob),
InferenceEngine::details::InferenceEngineException);
}
TEST_P(InferRequestTests, failToSetOutputWithIncorrectSizes) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr blob =
FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
blob->allocate();
blob->getTensorDesc().getDims()[0]*=2;
ASSERT_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob),
InferenceEngine::details::InferenceEngineException);
}
TEST_P(InferRequestTests, canInferWithoutSetAndGetInOut) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
ASSERT_NO_THROW(req.Infer());
}
TEST_P(InferRequestTests, canProcessDeallocatedInputBlobAfterGetBlob) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
InferenceEngine::Blob::Ptr blob;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
ASSERT_NO_THROW(req.Infer());
}
TEST_P(InferRequestTests, canProcessDeallocatedInputBlobAfterGetBlobForAsync) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
InferenceEngine::Blob::Ptr blob;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
ASSERT_NO_THROW(req.Infer());
ASSERT_NO_THROW(req.StartAsync());
}
TEST_P(InferRequestTests, canProcessDeallocatedInputBlobAfterGetAndSetBlob) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
InferenceEngine::Blob::Ptr blob;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob));
ASSERT_NO_THROW(req.Infer());
ASSERT_NO_THROW(req.StartAsync());
}
TEST_P(InferRequestTests, canProcessDeallocatedInputBlobAfterSetBlob) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
InferenceEngine::Blob::Ptr blob =
FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc());
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
blob->allocate();
ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob));
blob->deallocate();
ASSERT_THROW(req.Infer(), InferenceEngine::details::InferenceEngineException);
}
TEST_P(InferRequestTests, canProcessDeallocatedOutputBlobAfterGetBlob) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
InferenceEngine::Blob::Ptr blob =
FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
blob->allocate();
ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob));
blob->deallocate();
ASSERT_THROW(req.Infer(), InferenceEngine::details::InferenceEngineException);
}
TEST_P(InferRequestTests, canProcessDeallocatedOutputBlobAfterGetBlobForAsync) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
InferenceEngine::Blob::Ptr blob =
FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
blob->allocate();
ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob));
blob->deallocate();
ASSERT_THROW(req.Infer(), InferenceEngine::details::InferenceEngineException);
ASSERT_THROW(req.StartAsync(), InferenceEngine::details::InferenceEngineException);
}
TEST_P(InferRequestTests, canProcessDeallocatedOutputBlobAfterGetAndSetBlob) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
InferenceEngine::Blob::Ptr blob =
FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
blob->allocate();
ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first));
ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob));
blob->deallocate();
ASSERT_THROW(req.Infer(), InferenceEngine::details::InferenceEngineException);
}
TEST_P(InferRequestTests, canProcessDeallocatedOutputBlobAfterSetBlob) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
InferenceEngine::Blob::Ptr blob =
FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
blob->allocate();
ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob));
blob->deallocate();
ASSERT_THROW(req.Infer(), InferenceEngine::details::InferenceEngineException);
}
TEST_P(InferRequestTests, secondCallGetOutputDoNotReAllocateData) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
InferenceEngine::Blob::Ptr blob1;
InferenceEngine::Blob::Ptr blob2;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
ASSERT_NO_THROW(blob1 = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
ASSERT_NO_THROW(blob2 = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
ASSERT_EQ(blob1.get(), blob2.get());
}
TEST_P(InferRequestTests, CorrectOneAsyncInferWithGetInOutWithInfWait) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
InferenceEngine::Blob::Ptr blob =
FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
req.Infer();
req.StartAsync();
InferenceEngine::StatusCode sts;
sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
ASSERT_EQ(InferenceEngine::StatusCode::OK, sts);
ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first));
}
// Plugin correct infer request with allocating input and result BlobMaps inside plugin
TEST_P(InferRequestTests, canStartAsyncInferWithGetInOutWithStatusOnlyWait) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
InferenceEngine::Blob::Ptr blob =
FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
req.Infer();
req.StartAsync();
InferenceEngine::StatusCode sts;
sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY);
ASSERT_TRUE(sts == InferenceEngine::StatusCode::OK ||
sts == InferenceEngine::StatusCode::RESULT_NOT_READY);
}
// Plugin correct infer request with allocating input and result BlobMaps inside plugin
TEST_P(InferRequestTests, FailedAsyncInferWithNegativeTimeForWait) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
InferenceEngine::Blob::Ptr blob =
FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
req.Infer();
req.StartAsync();
ASSERT_THROW(req.Wait(-2), InferenceEngine::details::InferenceEngineException);
}
TEST_P(InferRequestTests, canRun3SyncRequestsConsistentlyFromThreads) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req1 = execNet.CreateInferRequest();
auto req2 = execNet.CreateInferRequest();
auto req3 = execNet.CreateInferRequest();
InferenceEngine::ResponseDesc response1, response2, response3;
InferenceEngine::StatusCode sts1, sts2, sts3;
std::thread t1([&] { req1.Infer(); sts1 = req1.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); });
std::thread t2([&] { req2.Infer(); sts2 = req2.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); });
std::thread t3([&] { req3.Infer(); sts3 = req3.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); });
t1.join();
t2.join();
t3.join();
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts1) << response1.msg;
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts2) << response2.msg;
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts3) << response3.msg;
}
TEST_P(InferRequestTests, canRun3AsyncRequestsConsistentlyWithWait) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req1 = execNet.CreateInferRequest();
auto req2 = execNet.CreateInferRequest();
auto req3 = execNet.CreateInferRequest();
req1.StartAsync();
ASSERT_NO_THROW(req1.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY));
req2.Infer();
ASSERT_NO_THROW(req2.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY));
req3.Infer();
ASSERT_NO_THROW(req3.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY));
}
TEST_P(InferRequestTests, canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req1 = execNet.CreateInferRequest();
auto req2 = execNet.CreateInferRequest();
auto req3 = execNet.CreateInferRequest();
InferenceEngine::ResponseDesc response1, response2, response3;
InferenceEngine::StatusCode sts1, sts2, sts3;
req1.Infer();
req2.Infer();
req3.Infer();
std::thread t1([&] { req1.StartAsync(); sts1 = req1.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); });
std::thread t2([&] { req2.StartAsync(); sts2 = req2.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); });
std::thread t3([&] { req3.StartAsync(); sts3 = req3.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); });
t1.join();
t2.join();
t3.join();
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts1) << response1.msg;
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts2) << response2.msg;
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts3) << response3.msg;
}
TEST_P(InferRequestTests, canWaitWithoutStartAsync) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req = execNet.CreateInferRequest();
ASSERT_NO_THROW(req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY));
ASSERT_NO_THROW(req.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY));
ASSERT_NO_THROW(req.Wait(1));
}
TEST_P(InferRequestTests, returnDeviceBusyOnSetBlobAfterAsyncInfer) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
auto&& config = configuration;
auto itConfig = config.find(CONFIG_KEY(CPU_THROUGHPUT_STREAMS));
if (itConfig != config.end()) {
if (itConfig->second != "CPU_THROUGHPUT_AUTO") {
if (std::stoi(itConfig->second) == 0) {
GTEST_SKIP() << "Not applicable with disabled streams";
}
}
}
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req = execNet.CreateInferRequest();
auto outputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first);
InferenceEngine::ResponseDesc response;
InferenceEngine::StatusCode sts;
sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY);
ASSERT_EQ(InferenceEngine::StatusCode::INFER_NOT_STARTED, sts) << response.msg;
req.StartAsync();
sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts) << response.msg;
try {
req.SetBlob(cnnNet.getInputsInfo().begin()->first, outputBlob);
}
catch (const std::exception &e) {
std::cout << "Exception: " << e.what() << std::endl;
}
sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY);
ASSERT_TRUE(sts == InferenceEngine::StatusCode::OK ||
sts == InferenceEngine::StatusCode::RESULT_NOT_READY) << response.msg;
}
TEST_P(InferRequestTests, returnDeviceBusyOnGetBlobAfterAsyncInfer) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req = execNet.CreateInferRequest();
auto outputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first);
InferenceEngine::ResponseDesc response;
InferenceEngine::StatusCode sts;
req.StartAsync();
sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts) << response.msg;
try {
req.SetBlob(cnnNet.getInputsInfo().begin()->first, outputBlob);
}
catch (const std::exception &e) {
std::cout << "Exception" << e.what() << std::endl;
}
}
TEST_P(InferRequestTests, returnDeviceBusyOnGetPerformanceCountAfterAsyncInfer) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req = execNet.CreateInferRequest();
auto outputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first);
InferenceEngine::ResponseDesc response;
InferenceEngine::StatusCode sts;
req.StartAsync();
sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts) << response.msg;
std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
try {
perfMap = req.GetPerformanceCounts();
}
catch (const std::exception &e) {
std::cout << "Exception" << e.what() << std::endl;
}
}

View File

@ -13,23 +13,285 @@
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"
#include <ie_core.hpp>
#include <functional_test_utils/behavior_test_utils.hpp>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "behavior/infer_request_callback.hpp"
using CallbackTests = BehaviorTestsUtils::BehaviorTestsBasic;
TEST_P(CallbackTests, canCallSyncAndAsyncWithCompletionCallback) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req = execNet.CreateInferRequest();
bool isCalled = false;
req.SetCompletionCallback<std::function<void(InferenceEngine::InferRequest, InferenceEngine::StatusCode)>>(
[&](InferenceEngine::InferRequest request, InferenceEngine::StatusCode status) {
// HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE
if (targetDevice != CommonTestUtils::DEVICE_HDDL) {
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), status);
}
isCalled = true;
});
req.StartAsync();
InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), waitStatus);
ASSERT_TRUE(isCalled);
}
// test that can wait all callbacks on dtor
TEST_P(CallbackTests, canStartAsyncInsideCompletionCallback) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
struct TestUserData {
bool startAsyncOK = false;
int numIsCalled = 0;
};
TestUserData data;
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req = execNet.CreateInferRequest();
req.SetCompletionCallback<std::function<void(InferenceEngine::InferRequest, InferenceEngine::StatusCode)>>(
[&](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) {
// HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE
if (targetDevice != CommonTestUtils::DEVICE_HDDL) {
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), status);
}
data.numIsCalled++;
// WA for deadlock
request->SetCompletionCallback(nullptr);
InferenceEngine::StatusCode sts = request->StartAsync(nullptr);
if (sts == InferenceEngine::StatusCode::OK) {
data.startAsyncOK = true;
}
});
req.StartAsync();
InferenceEngine::ResponseDesc responseWait;
InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), waitStatus) << responseWait.msg;
ASSERT_EQ(1, data.numIsCalled);
ASSERT_TRUE(data.startAsyncOK);
}
// test that can wait all callbacks on dtor
TEST_P(CallbackTests, canStartSeveralAsyncInsideCompletionCallbackWithSafeDtor) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
const int NUM_ITER = 10;
struct TestUserData {
int numIter = NUM_ITER;
bool startAsyncOK = true;
std::atomic<int> numIsCalled{0};
std::mutex mutex_block_emulation;
std::condition_variable cv_block_emulation;
bool isBlocked = true;
};
TestUserData data;
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req = execNet.CreateInferRequest();
req.SetCompletionCallback<std::function<void(InferenceEngine::InferRequest, InferenceEngine::StatusCode)>>(
[&](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) {
// HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE
if (targetDevice != CommonTestUtils::DEVICE_HDDL) {
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), status);
}
if (--data.numIter) {
InferenceEngine::StatusCode sts = request->StartAsync(nullptr);
if (sts != InferenceEngine::StatusCode::OK) {
data.startAsyncOK = false;
}
}
data.numIsCalled++;
if (!data.numIter) {
data.isBlocked = false;
data.cv_block_emulation.notify_all();
}
});
req.StartAsync();
InferenceEngine::ResponseDesc responseWait;
InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
// intentionally block until notification from callback
std::unique_lock<std::mutex> lock(data.mutex_block_emulation);
data.cv_block_emulation.wait(lock, [&]() { return !data.isBlocked; });
ASSERT_EQ((int) InferenceEngine::StatusCode::OK, waitStatus) << responseWait.msg;
ASSERT_EQ(NUM_ITER, data.numIsCalled);
ASSERT_TRUE(data.startAsyncOK);
}
TEST_P(CallbackTests, inferDoesNotCallCompletionCallback) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req = execNet.CreateInferRequest();
bool isCalled = false;
req.SetCompletionCallback<std::function<void(InferenceEngine::InferRequest, InferenceEngine::StatusCode)>>(
[&](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) {
isCalled = true;
});
req.Infer();
ASSERT_FALSE(isCalled);
}
TEST_P(CallbackTests, canStartAsyncInsideCompletionCallbackNoSafeDtor) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
struct TestUserData {
int numIter = 0;
bool startAsyncOK = true;
bool getDataOK = true;
std::atomic<int> numIsCalled{0};
std::mutex mutex_block_emulation;
std::condition_variable cv_block_emulation;
bool isBlocked = true;
TestUserData(int i) : numIter(i) {}
};
TestUserData data(1);
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req = execNet.CreateInferRequest();
req.SetCompletionCallback<std::function<void(InferenceEngine::InferRequest, InferenceEngine::StatusCode)>>(
[&](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) {
// WA for deadlock
if (!--data.numIter) {
request->SetCompletionCallback(nullptr);
}
InferenceEngine::StatusCode sts = request->StartAsync(nullptr);
if (sts != InferenceEngine::StatusCode::OK) {
data.startAsyncOK = false;
}
data.numIsCalled++;
if (!data.numIter) {
data.isBlocked = false;
data.cv_block_emulation.notify_one();
}
});
req.StartAsync();
InferenceEngine::ResponseDesc responseWait;
InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
// intentionally block until notification from callback
std::unique_lock<std::mutex> lock(data.mutex_block_emulation);
data.cv_block_emulation.wait(lock, [&]() { return !data.isBlocked; });
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), waitStatus);
ASSERT_EQ(1, data.numIsCalled);
ASSERT_TRUE(data.startAsyncOK);
ASSERT_TRUE(data.getDataOK);
}
TEST_P(CallbackTests, canStartSeveralAsyncInsideCompletionCallbackNoSafeDtor) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
struct TestUserData {
int numIter = 0;
bool startAsyncOK = true;
bool getDataOK = true;
std::atomic<int> numIsCalled{0};
std::mutex mutex_block_emulation;
std::condition_variable cv_block_emulation;
bool isBlocked = true;
TestUserData(int i) : numIter(i) {}
};
TestUserData data(10);
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req = execNet.CreateInferRequest();
req.SetCompletionCallback<std::function<void(InferenceEngine::InferRequest, InferenceEngine::StatusCode)>>(
[&](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) {
// WA for deadlock
if (!--data.numIter) {
request->SetCompletionCallback(nullptr);
}
InferenceEngine::StatusCode sts = request->StartAsync(nullptr);
if (sts != InferenceEngine::StatusCode::OK) {
data.startAsyncOK = false;
}
data.numIsCalled++;
if (!data.numIter) {
data.isBlocked = false;
data.cv_block_emulation.notify_one();
}
});
req.StartAsync();
InferenceEngine::ResponseDesc responseWait;
InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
// intentionally block until notification from callback
std::unique_lock<std::mutex> lock(data.mutex_block_emulation);
data.cv_block_emulation.wait(lock, [&]() { return !data.isBlocked; });
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), waitStatus);
ASSERT_EQ(10, data.numIsCalled);
ASSERT_TRUE(data.startAsyncOK);
ASSERT_TRUE(data.getDataOK);
}
TEST_P(CallbackTests, returnGeneralErrorIfCallbackThrowException) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::IInferRequest::Ptr req = static_cast<InferenceEngine::IInferRequest::Ptr &>(execNet.CreateInferRequest());
req->SetCompletionCallback(
[](InferenceEngine::IInferRequest::Ptr, InferenceEngine::StatusCode status) {
THROW_IE_EXCEPTION << "returnGeneralErrorIfCallbackThrowException";
});
InferenceEngine::ResponseDesc resp;
req->StartAsync(&resp);
InferenceEngine::StatusCode waitStatus = InferenceEngine::StatusCode::INFER_NOT_STARTED;
while (InferenceEngine::StatusCode::RESULT_NOT_READY == waitStatus ||
InferenceEngine::StatusCode::INFER_NOT_STARTED == waitStatus) {
waitStatus = req->Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY, &resp);
}
ASSERT_EQ(InferenceEngine::StatusCode::GENERAL_ERROR, waitStatus);
ASSERT_NE(std::string(resp.msg).find("returnGeneralErrorIfCallbackThrowException"), std::string::npos);
}

View File

@ -0,0 +1,96 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <vector>
#include <string>
#include <memory>
#include "ie_extension.h"
#include <condition_variable>
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"
#include <vpu/vpu_plugin_config.hpp>
#include <vpu/private_plugin_config.hpp>
#include <gna/gna_config.hpp>
#include <multi-device/multi_device_config.hpp>
#include <ie_core.hpp>
#include <threading/ie_executor_manager.hpp>
#include <functional_test_utils/behavior_test_utils.hpp>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
using InferConfigTests = BehaviorTestsUtils::BehaviorTestsBasic;
TEST_P(InferConfigTests, canSetExclusiveAsyncRequests) {
ASSERT_EQ(0ul, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load config
std::map<std::string, std::string> config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(YES)}};
config.insert(configuration.begin(), configuration.end());
if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
ASSERT_NO_THROW(ie->SetConfig(config, targetDevice));
}
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, config);
execNet.CreateInferRequest();
if ((targetDevice == CommonTestUtils::DEVICE_HDDL) || (targetDevice == CommonTestUtils::DEVICE_GNA)) {
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
} else if ((targetDevice == CommonTestUtils::DEVICE_FPGA) || (targetDevice == CommonTestUtils::DEVICE_MYRIAD) ||
(targetDevice == CommonTestUtils::DEVICE_KEEMBAY)) {
ASSERT_EQ(2u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
} else if (targetDevice == CommonTestUtils::DEVICE_MULTI) {
} else {
ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
}
}
TEST_P(InferConfigTests, withoutExclusiveAsyncRequests) {
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load config
std::map<std::string, std::string> config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(NO)}};
config.insert(configuration.begin(), configuration.end());
if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
ASSERT_NO_THROW(ie->SetConfig(config, targetDevice));
}
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, config);
execNet.CreateInferRequest();
if ((targetDevice == CommonTestUtils::DEVICE_GNA) || (targetDevice == CommonTestUtils::DEVICE_HDDL)) {
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
} else if (targetDevice == CommonTestUtils::DEVICE_MULTI) {
} else if (targetDevice == CommonTestUtils::DEVICE_MYRIAD) {
ASSERT_EQ(2u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
} else {
ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
}
}
using InferConfigInTests = BehaviorTestsUtils::BehaviorTestsBasic;
TEST_P(InferConfigInTests, CanInferWithConfig) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
auto req = execNet.CreateInferRequest();
ASSERT_NO_THROW(req.Infer());
}

View File

@ -0,0 +1,139 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <vector>
#include <string>
#include <memory>
#include "ie_extension.h"
#include <condition_variable>
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"
#include "multi-device/multi_device_config.hpp"
#include <ie_core.hpp>
#include <cpp_interfaces/exception2status.hpp>
#include <functional_test_utils/behavior_test_utils.hpp>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "behavior/infer_request_input.hpp"
using InferRequestInputTests = BehaviorTestsUtils::BehaviorTestsBasic;
TEST_P(InferRequestInputTests, canSetInputBlobForSyncRequest) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr inputBlob =
FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc());
ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob));
InferenceEngine::Blob::Ptr actualBlob;
ASSERT_NO_THROW(actualBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
ASSERT_EQ(inputBlob, actualBlob);
}
TEST_P(InferRequestInputTests, canInferWithSetInOut) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr inputBlob =
FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc());
req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob);
InferenceEngine::Blob::Ptr outputBlob =
FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
req.SetBlob(cnnNet.getOutputsInfo().begin()->first, outputBlob);
ASSERT_NO_THROW(req.Infer());
}
TEST_P(InferRequestInputTests, canGetInputBlob_deprecatedAPI) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
std::shared_ptr<InferenceEngine::Blob> actualBlob;
ASSERT_NO_THROW(actualBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
ASSERT_TRUE(actualBlob) << "Plugin didn't allocate input blobs";
ASSERT_FALSE(actualBlob->buffer() == nullptr) << "Plugin didn't allocate input blobs";
auto tensorDescription = actualBlob->getTensorDesc();
auto dims = tensorDescription.getDims();
ASSERT_TRUE(cnnNet.getInputsInfo().begin()->second->getTensorDesc().getDims() == dims)
<< "Input blob dimensions don't match network input";
ASSERT_EQ(execNet.GetInputsInfo().begin()->second->getPrecision(), tensorDescription.getPrecision())
<< "Input blob precision doesn't match network input";
}
TEST_P(InferRequestInputTests, getAfterSetInputDoNotChangeInput) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req = execNet.CreateInferRequest();
std::shared_ptr<InferenceEngine::Blob> inputBlob = FuncTestUtils::createAndFillBlob(
cnnNet.getInputsInfo().begin()->second->getTensorDesc());
ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob));
std::shared_ptr<InferenceEngine::Blob> actualBlob;
ASSERT_NO_THROW(actualBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
ASSERT_EQ(inputBlob.get(), actualBlob.get());
}
TEST_P(InferRequestInputTests, canInferWithGetInOut) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr inputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first);
InferenceEngine::Blob::Ptr outputBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first);
ASSERT_NO_THROW(req.Infer());
}
TEST_P(InferRequestInputTests, canStartAsyncInferWithGetInOut) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr inputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first);
InferenceEngine::StatusCode sts;
ASSERT_NO_THROW(req.Infer());
ASSERT_NO_THROW(req.StartAsync());
sts = req.Wait(500);
ASSERT_EQ(InferenceEngine::StatusCode::OK, sts);
InferenceEngine::Blob::Ptr outputBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first);
}

View File

@ -0,0 +1,139 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <vector>
#include <string>
#include <memory>
#include "ie_extension.h"
#include <condition_variable>
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"
#include "multi-device/multi_device_config.hpp"
#include <ie_core.hpp>
#include <cpp_interfaces/exception2status.hpp>
#include <functional_test_utils/behavior_test_utils.hpp>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "behavior/infer_request_output.hpp"
using InferRequestOutputTests = BehaviorTestsUtils::BehaviorTestsBasic;
TEST_P(InferRequestOutputTests, canGetInputBlobForSyncRequest) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr OutputBlob =
FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, OutputBlob));
InferenceEngine::Blob::Ptr actualBlob;
ASSERT_NO_THROW(actualBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first));
ASSERT_EQ(OutputBlob, actualBlob);
}
TEST_P(InferRequestOutputTests, canInferWithSetInOut) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr inputBlob =
FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc());
req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob);
InferenceEngine::Blob::Ptr outputBlob =
FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
req.SetBlob(cnnNet.getOutputsInfo().begin()->first, outputBlob);
ASSERT_NO_THROW(req.Infer());
}
TEST_P(InferRequestOutputTests, canGetOutputBlob_deprecatedAPI) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
std::shared_ptr<InferenceEngine::Blob> actualBlob;
ASSERT_NO_THROW(actualBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first));
ASSERT_TRUE(actualBlob) << "Plugin didn't allocate Output blobs";
ASSERT_FALSE(actualBlob->buffer() == nullptr) << "Plugin didn't allocate Output blobs";
auto tensorDescription = actualBlob->getTensorDesc();
auto dims = tensorDescription.getDims();
ASSERT_TRUE(cnnNet.getOutputsInfo().begin()->second->getTensorDesc().getDims() == dims)
<< "Output blob dimensions don't match the network output";
ASSERT_EQ(execNet.GetOutputsInfo().begin()->second->getPrecision(), tensorDescription.getPrecision())
<< "Output blob precision doesn't match the network output";
}
TEST_P(InferRequestOutputTests, getOutputAfterSetOutputDoNotChangeOutput) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req = execNet.CreateInferRequest();
std::shared_ptr<InferenceEngine::Blob> OutputBlob = FuncTestUtils::createAndFillBlob(
cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, OutputBlob));
std::shared_ptr<InferenceEngine::Blob> actualBlob;
ASSERT_NO_THROW(actualBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first));
ASSERT_EQ(OutputBlob.get(), actualBlob.get());
}
TEST_P(InferRequestOutputTests, canInferWithGetInOut) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr inputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first);
InferenceEngine::Blob::Ptr outputBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first);
ASSERT_NO_THROW(req.Infer());
}
TEST_P(InferRequestOutputTests, canStartAsyncInferWithGetInOut) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::Blob::Ptr inputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first);
InferenceEngine::StatusCode sts;
ASSERT_NO_THROW(req.Infer());
ASSERT_NO_THROW(req.StartAsync());
sts = req.Wait(500);
ASSERT_EQ(InferenceEngine::StatusCode::OK, sts);
InferenceEngine::Blob::Ptr outputBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first);
}
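The Get/Set pairs above verify pointer identity, i.e. the plugin hands back the very blob that was set. For completeness, a minimal sketch of how test data would actually be written through such a blob (illustrative; the createAndFillBlob helper used above presumably does something similar internally):

InferenceEngine::Blob::Ptr blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first);
float *data = blob->buffer().as<float *>();  // raw access to the blob's memory
for (size_t i = 0; i < blob->size(); ++i) {
    data[i] = 0.0f;                          // any deterministic test pattern
}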

View File

@ -2,33 +2,60 @@
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <vector>
#include <string>
#include <memory>
#include "ie_extension.h"
#include <condition_variable>
#include <ie_core.hpp>
#include "common_test_utils/test_assertions.hpp"
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ie_preprocess.hpp"
#include "functional_test_utils/behavior_test_utils.hpp"
namespace LayerTestsDefinitions {
typedef std::tuple<
InferenceEngine::Precision,         // Network precision
std::string,                        // Device name
std::map<std::string, std::string>  // Config
> PreProcessParams;
class PreProcessTests : public testing::WithParamInterface<PreProcessParams>,
public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<PreProcessParams> obj);
protected:
void SetUp() override;
void TearDown() override;
};
} // namespace LayerTestsDefinitions
using PreprocessTest = BehaviorTestsUtils::BehaviorTestsBasic;
TEST_P(PreprocessTest, SetPreProcessToInputInfo) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
auto &preProcess = cnnNet.getInputsInfo().begin()->second->getPreProcess();
preProcess.setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req = execNet.CreateInferRequest();
{
InferenceEngine::ConstInputsDataMap inputsMap = execNet.GetInputsInfo();
const auto &name = inputsMap.begin()->second->name();
const InferenceEngine::PreProcessInfo *info = &req.GetPreProcess(name.c_str());
ASSERT_EQ(info->getResizeAlgorithm(), InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR);
ASSERT_PREPROCESS_INFO_EQ(preProcess, *info);
}
}
TEST_P(PreprocessTest, SetPreProcessToInferRequest) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
auto &preProcess = cnnNet.getInputsInfo().begin()->second->getPreProcess();
preProcess.setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req = execNet.CreateInferRequest();
InferenceEngine::ConstInputsDataMap inputsMap = execNet.GetInputsInfo();
const auto &name = inputsMap.begin()->second->name();
auto inputBlob = FuncTestUtils::createAndFillBlob(
cnnNet.getInputsInfo().begin()->second->getTensorDesc());
req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob);
{
const InferenceEngine::PreProcessInfo *info = &req.GetPreProcess(name.c_str());
ASSERT_EQ(cnnNet.getInputsInfo().begin()->second->getPreProcess().getResizeAlgorithm(),
info->getResizeAlgorithm());
}
}
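A plugin would then instantiate this suite the same way as the other behavior suites in this change; a minimal sketch, with placeholder device name and parameter vectors:

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest,
        ::testing::Combine(
                ::testing::ValuesIn(netPrecisions),
                ::testing::Values("TEMPLATE"),
                ::testing::ValuesIn(configs)),
        PreprocessTest::getTestCaseName);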

View File

@ -1,307 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <tuple>
#include <vector>
#include <string>
#include <ie_core.hpp>
#include "ie_common.h"
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include <threading/ie_executor_manager.hpp>
#include "ngraph_functions/pass/convert_prc.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "behavior/config.hpp"
namespace LayerTestsDefinitions {
std::string CorrectConfigTests::getTestCaseName(testing::TestParamInfo<ConfigParams> obj) {
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> configuration;
std::tie(netPrecision, targetDevice, configuration) = obj.param;
std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice;
if (!configuration.empty()) {
result << "configItem=" << configuration.begin()->first << "_" << configuration.begin()->second;
}
return result.str();
}
void CorrectConfigTests::SetUp() {
InferenceEngine::Precision netPrecision;
std::tie(netPrecision, targetDevice, configuration) = this->GetParam();
function = ngraph::builder::subgraph::makeConvPoolRelu();
}
void CorrectConfigTests::TearDown() {
if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) {
PluginCache::get().reset();
}
}
// Setting empty config doesn't throw
TEST_P(CorrectConfigTests, SetEmptyConfig) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Get Core from cache
auto ie = PluginCache::get().ie();
std::map<std::string, std::string> config;
ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
ASSERT_NO_THROW(ie->SetConfig(config, targetDevice));
function.reset();
}
// Setting correct config doesn't throw
TEST_P(CorrectConfigTests, SetCorrectConfig) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Get Core from cache
auto ie = PluginCache::get().ie();
ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice));
function.reset();
}
std::string IncorrectConfigTests::getTestCaseName(testing::TestParamInfo<ConfigParams> obj) {
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> configuration;
std::tie(netPrecision, targetDevice, configuration) = obj.param;
std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice;
if (!configuration.empty()) {
for (auto& configItem : configuration) {
result << "configItem=" << configItem.first << "_" << configItem.second << "_";
}
}
return result.str();
}
void IncorrectConfigTests::SetUp() {
InferenceEngine::Precision netPrecision;
std::tie(netPrecision, targetDevice, configuration) = this->GetParam();
function = ngraph::builder::subgraph::makeConvPoolRelu();
}
void IncorrectConfigTests::TearDown() {
if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) {
PluginCache::get().reset();
}
}
TEST_P(IncorrectConfigTests, SetConfigWithIncorrectKey) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Get Core from cache
auto ie = PluginCache::get().ie();
if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
ASSERT_THROW(ie->SetConfig(configuration, targetDevice),
InferenceEngine::details::InferenceEngineException);
} else {
ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice));
}
function.reset();
}
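For reference, the configurations driving this negative test are simply key/value pairs absent from the plugin's SUPPORTED_CONFIG_KEYS metric; a hypothetical example of such an entry (illustrative, not part of this change):

std::map<std::string, std::string> incorrectConfig = {{"UNSUPPORTED_KEY", "1"}};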
TEST_P(IncorrectConfigTests, canNotLoadNetworkWithIncorrectConfig) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Get Core from cache
auto ie = PluginCache::get().ie();
ASSERT_THROW(auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration),
InferenceEngine::details::InferenceEngineException);
function.reset();
}
std::string IncorrectConfigAPITests::getTestCaseName(testing::TestParamInfo<ConfigParams> obj) {
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> configuration;
std::tie(netPrecision, targetDevice, configuration) = obj.param;
std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice;
if (!configuration.empty()) {
result << "configItem=" << configuration.begin()->first << "_" << configuration.begin()->second;
}
return result.str();
}
void IncorrectConfigAPITests::SetUp() {
InferenceEngine::Precision netPrecision;
std::tie(netPrecision, targetDevice, configuration) = this->GetParam();
function = ngraph::builder::subgraph::makeConvPoolRelu();
}
void IncorrectConfigAPITests::TearDown() {
if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) {
PluginCache::get().reset();
}
}
TEST_P(IncorrectConfigAPITests, SetConfigWithNoExistingKey) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Get Core from cache
auto ie = PluginCache::get().ie();
ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
if (targetDevice.find(CommonTestUtils::DEVICE_GNA) != std::string::npos) {
ASSERT_THROW(ie->SetConfig(configuration, targetDevice), InferenceEngine::NotFound);
} else {
try {
ie->SetConfig(configuration, targetDevice);
} catch (const InferenceEngine::details::InferenceEngineException &) {}
}
function.reset();
}
std::string CorrectConfigAPITests::getTestCaseName(testing::TestParamInfo<ConfigParams> obj) {
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> configuration;
std::tie(netPrecision, targetDevice, configuration) = obj.param;
std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice;
if (!configuration.empty()) {
result << "configItem=" << configuration.begin()->first << "_" << configuration.begin()->second;
}
return result.str();
}
void CorrectConfigAPITests::SetUp() {
InferenceEngine::Precision netPrecision;
std::tie(netPrecision, targetDevice, configuration) = this->GetParam();
function = ngraph::builder::subgraph::makeConvPoolRelu();
}
void CorrectConfigAPITests::TearDown() {
if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) {
PluginCache::get().reset();
}
}
TEST_P(CorrectConfigAPITests, canSetExclusiveAsyncRequests) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Get Core from cache
auto ie = PluginCache::get().ie();
// Load config
std::map<std::string, std::string> config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(YES)}};
config.insert(configuration.begin(), configuration.end());
if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
ASSERT_NO_THROW(ie->SetConfig(config, targetDevice));
}
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, config);
execNet.CreateInferRequest();
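// NB: the expected executor counts below are per-device snapshots of each
// plugin's threading model (CPU/GPU-class devices use stream executors,
// which are tracked separately from plain executors); the empty MULTI
// branch intentionally skips assertions for that device.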
if ((targetDevice == CommonTestUtils::DEVICE_HDDL) || (targetDevice == CommonTestUtils::DEVICE_GNA) ||
(targetDevice == CommonTestUtils::DEVICE_CPU) || (targetDevice == CommonTestUtils::DEVICE_GPU)) {
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
} else if ((targetDevice == CommonTestUtils::DEVICE_FPGA) ||
(targetDevice == CommonTestUtils::DEVICE_KEEMBAY)) {
ASSERT_EQ(2u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
} else if (targetDevice == CommonTestUtils::DEVICE_MULTI) {
} else {
ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
}
function.reset();
}
TEST_P(CorrectConfigAPITests, withoutExclusiveAsyncRequests) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Get Core from cache
auto ie = PluginCache::get().ie();
// Load config
std::map<std::string, std::string> config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(NO)}};
config.insert(configuration.begin(), configuration.end());
if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
ASSERT_NO_THROW(ie->SetConfig(config, targetDevice));
}
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, config);
execNet.CreateInferRequest();
if ((targetDevice == CommonTestUtils::DEVICE_FPGA) || (targetDevice == CommonTestUtils::DEVICE_MYRIAD) ||
(targetDevice == CommonTestUtils::DEVICE_KEEMBAY)) {
ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
} else if (targetDevice == CommonTestUtils::DEVICE_MULTI) {
} else {
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
}
function.reset();
}
TEST_P(CorrectConfigAPITests, reusableCPUStreamsExecutor) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Get Core from cache
auto ie = PluginCache::get().ie();
{
// Load config
std::map<std::string, std::string> config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(NO)}};
config.insert(configuration.begin(), configuration.end());
if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
ASSERT_NO_THROW(ie->SetConfig(config, targetDevice));
}
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, config);
execNet.CreateInferRequest();
if ((targetDevice == CommonTestUtils::DEVICE_FPGA) || (targetDevice == CommonTestUtils::DEVICE_MYRIAD) ||
(targetDevice == CommonTestUtils::DEVICE_KEEMBAY)) {
ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
} else if (targetDevice == CommonTestUtils::DEVICE_MULTI) {
} else {
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
ASSERT_GE(2u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
}
}
if (targetDevice == CommonTestUtils::DEVICE_CPU) {
ASSERT_NE(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
ASSERT_NO_THROW(ie->UnregisterPlugin("CPU"));
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber());
ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
}
function.reset();
}
} // namespace LayerTestsDefinitions

View File

@ -1,210 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <tuple>
#include <vector>
#include <string>
#include <ie_core.hpp>
#include <details/ie_cnn_network_tools.h>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "exec_graph_info.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "behavior/exec_graph_info.hpp"
namespace LayerTestsDefinitions {
std::string ExecGraphTests::getTestCaseName(testing::TestParamInfo<ExecGraphParams> obj) {
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> configuration;
std::tie(netPrecision, targetDevice, configuration) = obj.param;
std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice;
if (!configuration.empty()) {
result << "configItem=" << configuration.begin()->first << "_" << configuration.begin()->second;
}
return result.str();
}
void ExecGraphTests::SetUp() {
InferenceEngine::Precision netPrecision;
std::tie(netPrecision, targetDevice, configuration) = this->GetParam();
function = ngraph::builder::subgraph::makeConvPoolRelu();
}
void ExecGraphTests::TearDown() {
if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) {
PluginCache::get().reset();
}
}
inline std::vector<std::string> separateStrToVec(std::string str, const char sep) {
std::vector<std::string> result;
std::istringstream stream(str);
std::string strVal;
while (getline(stream, strVal, sep)) {
result.push_back(strVal);
}
return result;
}
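For example, this helper turns the comma-separated ORIGINAL_NAMES value parsed below into individual layer names:

// separateStrToVec("conv1,relu1", ',') -> {"conv1", "relu1"}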
TEST_P(ExecGraphTests, CheckExecGraphInfoBeforeExecution) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
InferenceEngine::CNNNetwork execGraph;
// Get Core from cache
auto ie = PluginCache::get().ie();
if (targetDevice == CommonTestUtils::DEVICE_CPU || targetDevice == CommonTestUtils::DEVICE_GPU) {
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo());
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
// Store all the original layers from the network
const auto originalLayers = function->get_ops();
std::map<std::string, int> originalLayersMap;
for (const auto &layer : originalLayers) {
if (layer->description() == "Result")
continue;
originalLayersMap[layer->get_friendly_name()] = 0;
}
int IteratorForLayersConstant = 0;
// Store all the layers from the executable graph information represented as CNNNetwork
const std::vector<InferenceEngine::CNNLayerPtr> execGraphLayers =
InferenceEngine::details::CNNNetSortTopologically(execGraph);
for (const auto &execLayer : execGraphLayers) {
IE_SUPPRESS_DEPRECATED_START
// Each layer from the execGraphInfo network must have its PERF_COUNTER parameter set
ASSERT_EQ("not_executed", execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER]);
// Parse origin layer names (fused/merged layers) from the executable graph
// and compare with layers from the original model
auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
if (origFromExecLayer == "")
IteratorForLayersConstant++;
std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
auto origLayer = originalLayersMap.find(layer);
ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
origLayer->second++;
});
}
// All layers from the original IR must be present within ExecGraphInfo
for (auto &layer : originalLayersMap) {
if ((layer.second == 0) && (IteratorForLayersConstant > 0)) {
IteratorForLayersConstant--;
continue;
}
ASSERT_GE(layer.second, 0);
}
} else {
ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice).GetExecGraphInfo(),
InferenceEngine::details::InferenceEngineException);
}
IE_SUPPRESS_DEPRECATED_END
function.reset();
}
TEST_P(ExecGraphTests, CheckExecGraphInfoAfterExecution) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
InferenceEngine::CNNNetwork execGraph;
// Get Core from cache
auto ie = PluginCache::get().ie();
if (targetDevice == CommonTestUtils::DEVICE_CPU || targetDevice == CommonTestUtils::DEVICE_GPU) {
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo());
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
// Store all the original layers from the network
const auto originalLayers = function->get_ops();
std::map<std::string, int> originalLayersMap;
for (const auto &layer : originalLayers) {
originalLayersMap[layer->get_friendly_name()] = 0;
}
int IteratorForLayersConstant = 0;
// Store all the layers from the executable graph information represented as CNNNetwork
const std::vector<InferenceEngine::CNNLayerPtr> execGraphLayers =
InferenceEngine::details::CNNNetSortTopologically(execGraph);
bool has_layer_with_valid_time = false;
for (const auto &execLayer : execGraphLayers) {
IE_SUPPRESS_DEPRECATED_START
// At least one layer in the topology should be executed and have valid perf counter value
try {
float x = static_cast<float>(std::atof(
execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER].c_str()));
ASSERT_GE(x, 0.0f);
has_layer_with_valid_time = true;
} catch (std::exception &) {}
// Parse origin layer names (fused/merged layers) from the executable graph
// and compare with layers from the original model
auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
if (origFromExecLayer == "")
IteratorForLayersConstant++;
std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
auto origLayer = originalLayersMap.find(layer);
ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
origLayer->second++;
});
}
ASSERT_TRUE(has_layer_with_valid_time);
// All layers from the original IR must be present within ExecGraphInfo
for (auto &layer : originalLayersMap) {
if ((layer.second == 0) && (IteratorForLayersConstant > 0)) {
IteratorForLayersConstant--;
continue;
}
ASSERT_GE(layer.second, 0);
}
} else {
ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice).GetExecGraphInfo(),
InferenceEngine::details::InferenceEngineException);
}
IE_SUPPRESS_DEPRECATED_END
function.reset();
}
TEST_P(ExecGraphTests, CheckExecGraphInfoSerialization) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
InferenceEngine::CNNNetwork execGraph;
// Get Core from cache
auto ie = PluginCache::get().ie();
if (targetDevice == CommonTestUtils::DEVICE_CPU || targetDevice == CommonTestUtils::DEVICE_GPU) {
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo());
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
execGraph.serialize("exeNetwork.xml", "exeNetwork.bin");
ASSERT_EQ(0, std::remove("exeNetwork.xml"));
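// NB: only the .xml part is cleaned up here; "exeNetwork.bin" is left on
// disk by this test as written.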
} else {
ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice).GetExecGraphInfo(),
InferenceEngine::details::InferenceEngineException);
}
function.reset();
}
} // namespace LayerTestsDefinitions

View File

@ -1,293 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <tuple>
#include <vector>
#include <string>
#include <ie_core.hpp>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "behavior/infer_request_callback.hpp"
namespace LayerTestsDefinitions {
std::string CallbackTests::getTestCaseName(testing::TestParamInfo<CallbackParams> obj) {
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> configuration;
std::tie(netPrecision, targetDevice, configuration) = obj.param;
std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice;
if (!configuration.empty()) {
result << "configItem=" << configuration.begin()->first << "_" << configuration.begin()->second;
}
return result.str();
}
void CallbackTests::SetUp() {
InferenceEngine::Precision netPrecision;
std::tie(netPrecision, targetDevice, configuration) = this->GetParam();
function = ngraph::builder::subgraph::makeConvPoolRelu();
}
void CallbackTests::TearDown() {
if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) {
PluginCache::get().reset();
}
}
void CallbackTests::canStartSeveralAsyncInsideCompletionCallbackNoSafeDtor(int iterNum = 1) {
struct TestUserData {
int numIter = 0;
bool startAsyncOK = true;
bool getDataOK = true;
std::atomic<int> numIsCalled{0};
std::mutex mutex_block_emulation;
std::condition_variable cv_block_emulation;
bool isBlocked = true;
TestUserData(int i) : numIter(i){}
};
TestUserData data(iterNum);
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Get Core from cache
auto ie = PluginCache::get().ie();
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req = execNet.CreateInferRequest();
req.SetCompletionCallback<std::function<void(InferenceEngine::IInferRequest::Ptr, InferenceEngine::StatusCode)>>(
[&](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) {
// WA for deadlock
if (!--data.numIter) {
request->SetCompletionCallback(nullptr);
}
InferenceEngine::StatusCode sts = request->StartAsync(nullptr);
if (sts != InferenceEngine::StatusCode::OK) {
data.startAsyncOK = false;
}
data.numIsCalled++;
if (!data.numIter) {
data.isBlocked = false;
data.cv_block_emulation.notify_one();
}
});
req.StartAsync();
InferenceEngine::ResponseDesc responseWait;
InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
// intentionally block until notification from callback
std::unique_lock<std::mutex> lock(data.mutex_block_emulation);
data.cv_block_emulation.wait(lock, [&]() { return !data.isBlocked; });
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), waitStatus);
ASSERT_EQ(iterNum, data.numIsCalled);
ASSERT_TRUE(data.startAsyncOK);
ASSERT_TRUE(data.getDataOK);
}
TEST_P(CallbackTests, canCallSyncAndAsyncWithCompletionCallback) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Get Core from cache
auto ie = PluginCache::get().ie();
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req = execNet.CreateInferRequest();
bool isCalled = false;
req.SetCompletionCallback<std::function<void(InferenceEngine::InferRequest, InferenceEngine::StatusCode)>>(
[&](InferenceEngine::InferRequest request, InferenceEngine::StatusCode status) {
// HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE
if (targetDevice != CommonTestUtils::DEVICE_HDDL) {
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), status);
}
isCalled = true;
});
req.StartAsync();
InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), waitStatus);
ASSERT_TRUE(isCalled);
function.reset();
}
// test that can wait all callbacks on dtor
TEST_P(CallbackTests, canStartAsyncInsideCompletionCallback) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
struct TestUserData {
bool startAsyncOK = false;
int numIsCalled = 0;
};
TestUserData data;
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Get Core from cache
auto ie = PluginCache::get().ie();
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req = execNet.CreateInferRequest();
req.SetCompletionCallback<std::function<void(InferenceEngine::IInferRequest::Ptr, InferenceEngine::StatusCode)>>(
[&] (InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) {
// HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE
if (targetDevice != CommonTestUtils::DEVICE_HDDL) {
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), status);
}
data.numIsCalled++;
// WA for deadlock
request->SetCompletionCallback(nullptr);
InferenceEngine::StatusCode sts = request->StartAsync(nullptr);
if (sts == InferenceEngine::StatusCode::OK) {
data.startAsyncOK = true;
}
});
req.StartAsync();
InferenceEngine::ResponseDesc responseWait;
InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), waitStatus) << responseWait.msg;
ASSERT_EQ(1, data.numIsCalled);
ASSERT_TRUE(data.startAsyncOK);
function.reset();
}
// test that can wait all callbacks on dtor
TEST_P(CallbackTests, canStartSeveralAsyncInsideCompletionCallbackWithSafeDtor) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
const int NUM_ITER = 10;
struct TestUserData {
int numIter = NUM_ITER;
bool startAsyncOK = true;
std::atomic<int> numIsCalled{0};
std::mutex mutex_block_emulation;
std::condition_variable cv_block_emulation;
bool isBlocked = true;
};
TestUserData data;
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Get Core from cache
auto ie = PluginCache::get().ie();
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req = execNet.CreateInferRequest();
req.SetCompletionCallback<std::function<void(InferenceEngine::IInferRequest::Ptr, InferenceEngine::StatusCode)>>(
[&] (InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) {
// HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE
if (targetDevice != CommonTestUtils::DEVICE_HDDL) {
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), status);
}
if (--data.numIter) {
InferenceEngine::StatusCode sts = request->StartAsync(nullptr);
if (sts != InferenceEngine::StatusCode::OK) {
data.startAsyncOK = false;
}
}
data.numIsCalled++;
if (!data.numIter) {
data.isBlocked = false;
data.cv_block_emulation.notify_all();
}
});
req.StartAsync();
InferenceEngine::ResponseDesc responseWait;
InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
// intentionally block until notification from callback
std::unique_lock<std::mutex> lock(data.mutex_block_emulation);
data.cv_block_emulation.wait(lock, [&]() { return !data.isBlocked; });
ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), waitStatus) << responseWait.msg;
ASSERT_EQ(NUM_ITER, data.numIsCalled);
ASSERT_TRUE(data.startAsyncOK);
function.reset();
}
TEST_P(CallbackTests, inferDoesNotCallCompletionCallback) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Get Core from cache
auto ie = PluginCache::get().ie();
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::InferRequest req = execNet.CreateInferRequest();
bool isCalled = false;
req.SetCompletionCallback<std::function<void(InferenceEngine::IInferRequest::Ptr, InferenceEngine::StatusCode)>>(
[&] (InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) {
isCalled = true;
});
req.Infer();
ASSERT_FALSE(isCalled);
function.reset();
}
TEST_P(CallbackTests, canStartAsyncInsideCompletionCallbackNoSafeDtor) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
canStartSeveralAsyncInsideCompletionCallbackNoSafeDtor(1);
}
TEST_P(CallbackTests, canStartSeveralAsyncInsideCompletionCallbackNoSafeDtor) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
canStartSeveralAsyncInsideCompletionCallbackNoSafeDtor(10);
}
TEST_P(CallbackTests, returnGeneralErrorIfCallbackThrowException) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(function);
// Get Core from cache
auto ie = PluginCache::get().ie();
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
InferenceEngine::IInferRequest::Ptr req = static_cast<InferenceEngine::IInferRequest::Ptr &>(execNet.CreateInferRequest());
req->SetCompletionCallback(
[](InferenceEngine::IInferRequest::Ptr, InferenceEngine::StatusCode status) {
THROW_IE_EXCEPTION << "returnGeneralErrorIfCallbackThrowException";
});
InferenceEngine::ResponseDesc resp;
req->StartAsync(&resp);
InferenceEngine::StatusCode waitStatus = InferenceEngine::StatusCode::INFER_NOT_STARTED;
while (InferenceEngine::StatusCode::RESULT_NOT_READY == waitStatus || InferenceEngine::StatusCode::INFER_NOT_STARTED == waitStatus) {
waitStatus = req->Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY, &resp);
}
ASSERT_EQ(InferenceEngine::StatusCode::GENERAL_ERROR, waitStatus);
ASSERT_NE(std::string(resp.msg).find("returnGeneralErrorIfCallbackThrowException"), std::string::npos);
}
} // namespace LayerTestsDefinitions

View File

@ -1,102 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <tuple>
#include <vector>
#include <string>
#include <ie_core.hpp>
#include <cpp_interfaces/exception2status.hpp>
#include "common_test_utils/test_assertions.hpp"
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ie_preprocess.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "behavior/set_preprocess.hpp"
namespace LayerTestsDefinitions {
std::string PreProcessTests::getTestCaseName(testing::TestParamInfo<PreProcessParams> obj) {
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> configuration;
std::tie(netPrecision, targetDevice, configuration) = obj.param;
std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice;
if (!configuration.empty()) {
result << "configItem=" << configuration.begin()->first << "_" << configuration.begin()->second;
}
return result.str();
}
void PreProcessTests::SetUp() {
InferenceEngine::Precision netPrecision;
std::tie(netPrecision, targetDevice, configuration) = this->GetParam();
function = ngraph::builder::subgraph::makeConvPoolRelu();
}
void PreProcessTests::TearDown() {
if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) {
PluginCache::get().reset();
}
}
TEST_P(PreProcessTests, SetPreProcessToInputInfo) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngrpah::Function
InferenceEngine::CNNNetwork cnnNet(function);
auto &preProcess = cnnNet.getInputsInfo().begin()->second->getPreProcess();
preProcess.setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR);
// Get Core from cache
auto ie = PluginCache::get().ie();
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req = execNet.CreateInferRequest();
{
InferenceEngine::ConstInputsDataMap inputsMap = execNet.GetInputsInfo();
const auto& name = inputsMap.begin()->second->name();
const InferenceEngine::PreProcessInfo *info = &req.GetPreProcess(name.c_str());
ASSERT_EQ(info->getResizeAlgorithm(), InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR);
ASSERT_PREPROCESS_INFO_EQ(preProcess, *info);
}
function.reset();
}
TEST_P(PreProcessTests, SetPreProcessToInferRequest) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Create CNNNetwork from ngrpah::Function
InferenceEngine::CNNNetwork cnnNet(function);
auto &preProcess = cnnNet.getInputsInfo().begin()->second->getPreProcess();
preProcess.setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR);
// Get Core from cache
auto ie = PluginCache::get().ie();
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req = execNet.CreateInferRequest();
InferenceEngine::ConstInputsDataMap inputsMap = execNet.GetInputsInfo();
const auto& name = inputsMap.begin()->second->name();
auto inputBlob = FuncTestUtils::createAndFillBlob(
cnnNet.getInputsInfo().begin()->second->getTensorDesc());
req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob);
{
const InferenceEngine::PreProcessInfo *info = &req.GetPreProcess(name.c_str());
ASSERT_EQ(cnnNet.getInputsInfo().begin()->second->getPreProcess().getResizeAlgorithm(),
info->getResizeAlgorithm());
}
function.reset();
}
} // namespace LayerTestsDefinitions

View File

@ -0,0 +1,74 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <typeindex>
#include <string>
#include <vector>
#include <memory>
#include <tuple>
#include <gtest/gtest.h>
#include <ngraph/node.hpp>
#include <ngraph/function.hpp>
#include <ie_plugin_config.hpp>
#include <ngraph/function.hpp>
#include <ngraph_functions/subgraph_builders.hpp>
#include "gtest/gtest.h"
#include "common_test_utils/common_utils.hpp"
#include "common_test_utils/test_common.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "functional_test_utils/precision_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
namespace BehaviorTestsUtils {
typedef std::tuple<
InferenceEngine::Precision, // Network precision
std::string, // Device name
std::map<std::string, std::string> // Config
> BehaviorParams;
class BehaviorTestsBasic : public testing::WithParamInterface<BehaviorParams>,
public CommonTestUtils::TestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<BehaviorParams> obj) {
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> configuration;
std::tie(netPrecision, targetDevice, configuration) = obj.param;
std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice;
if (!configuration.empty()) {
for (auto& configItem : configuration) {
result << "configItem=" << configItem.first << "_" << configItem.second << "_";
}
}
return result.str();
}
void SetUp() override {
std::tie(netPrecision, targetDevice, configuration) = this->GetParam();
function = ngraph::builder::subgraph::makeConvPoolRelu();
}
void TearDown() override {
if ((targetDevice == CommonTestUtils::DEVICE_GPU) || (!configuration.empty())) {
PluginCache::get().reset();
}
function.reset();
}
std::shared_ptr<InferenceEngine::Core> ie = PluginCache::get().ie();
std::shared_ptr<ngraph::Function> function;
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> configuration;
};
} // namespace BehaviorTestsUtils
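For reference, the new suites in this change consume this fixture through a type alias rather than subclassing; a minimal sketch of the pattern (the suite and test names here are placeholders, not part of the change):

using MyBehaviorTests = BehaviorTestsUtils::BehaviorTestsBasic;
TEST_P(MyBehaviorTests, canCreateInferRequest) {
    SKIP_IF_CURRENT_TEST_IS_DISABLED()
    // The fixture provides 'function', 'ie', 'targetDevice' and 'configuration'
    InferenceEngine::CNNNetwork cnnNet(function);
    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
    ASSERT_NO_THROW(execNet.CreateInferRequest());
}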

View File

@ -1,8 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request.hpp"
#include "cldnn_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName);

View File

@ -1,9 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_config.hpp"
#include "cldnn_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues),
getConfigTestCaseName);

View File

@ -1,9 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_input.hpp"
#include "cldnn_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues),
getTestCaseName);

View File

@ -1,9 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_output.hpp"
#include "cldnn_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues),
getOutputTestCaseName);

View File

@ -1,8 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request.hpp"
#include "gna_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName);

View File

@ -1,51 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_config.hpp"
#include "gna_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestConfig,
ValuesIn(withCorrectConfValues),
getConfigTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues),
getConfigTestCaseName);
bool CheckGnaHw() {
if (auto envVar = std::getenv("IE_GNA_HW")) {
return std::stoi(envVar) != 0;
}
return false;
}
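(For reference, the GNA-HW path below is opted into via the environment: any non-zero value, e.g. IE_GNA_HW=1, makes CheckGnaHw() return true.)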
class BehaviorPluginTestInferRequestWithGnaHw : public BehaviorPluginTestInferRequest {
};
TEST_P(BehaviorPluginTestInferRequestWithGnaHw, CanInferOrFailWithGnaHw) {
TestEnv::Ptr testEnv;
std::map<std::string, std::string> config = GetParam().config;
if (CheckGnaHw()) {
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv, config));
sts = testEnv->inferRequest->Infer(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
} else {
try {
_createAndCheckInferRequest(GetParam(), testEnv, config);
} catch (const InferenceEngineException &ex) {
ASSERT_TRUE(strContains(ex.what(), "Unsuccessful Gna2Status"));
return;
} catch (...) {
FAIL();
}
sts = testEnv->inferRequest->Infer(&response);
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
ASSERT_TRUE(strContains(response.msg, "Bad GNA status"));
}
}
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestWithGnaHw,
ValuesIn(withGnaHwConfValue),
getConfigTestCaseName);

View File

@ -1,9 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_input.hpp"
#include "gna_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues),
getTestCaseName);

View File

@ -1,9 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_output.hpp"
#include "gna_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues),
getOutputTestCaseName);

View File

@ -1,8 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request.hpp"
#include "mkldnn_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName);

View File

@ -1,14 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_config.hpp"
#include "mkldnn_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestConfig,
ValuesIn(BehTestParams::concat(withCorrectConfValues, withCorrectConfValuesNetworkOnly)),
getConfigTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues),
getConfigTestCaseName);

View File

@ -1,9 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_input.hpp"
#include "mkldnn_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues),
getTestCaseName);

View File

@ -1,9 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_output.hpp"
#include "mkldnn_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues),
getOutputTestCaseName);

View File

@ -1,624 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "behavior_test_plugin.h"
#include <thread>
using namespace std;
using namespace ::testing;
using namespace InferenceEngine;
using namespace InferenceEngine::details;
namespace {
std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
std::string config;
for (auto&& cfg : obj.param.config) {
config += "_" + cfg.first + "_" + cfg.second;
}
return obj.param.device + "_" + obj.param.input_blob_precision.name() + config;
}
} // namespace
// Setting empty config to LoadNetwork doesn't throw
TEST_P(BehaviorPluginTestInferRequest, SetEmptyConfig) {
InferenceEngine::Core core;
const std::string device = GetParam().device;
ASSERT_NO_THROW(core.SetConfig(GetParam().config, GetParam().device));
InferenceEngine::CNNNetwork cnnNetwork = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob);
InferenceEngine::IExecutableNetwork::Ptr exeNetwork;
std::map<std::string, std::string> config;
if (device.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
device.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
ASSERT_NO_THROW(exeNetwork = core.LoadNetwork(cnnNetwork, GetParam().device, config));
} else {
ASSERT_NO_THROW(exeNetwork = core.LoadNetwork(cnnNetwork, GetParam().device, GetParam().config));
}
}
// Load incorrect network to Plugin to get executable network
TEST_P(BehaviorPluginTestInferRequest, canNotLoadNetworkToGetExeNetworkWithoutWeights) {
InferenceEngine::Core core;
ASSERT_THROW(core.ReadNetwork(GetParam().model_xml_str, Blob::CPtr()), InferenceEngineException);
}
// Load correct network to Plugin to get executable network
TEST_P(BehaviorPluginTestInferRequest, canLoadCorrectNetworkToGetExecutable) {
InferenceEngine::Core core;
InferenceEngine::CNNNetwork cnnNetwork = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob);
ASSERT_NO_THROW(core.LoadNetwork(cnnNetwork, GetParam().device, GetParam().config));
}
TEST_P(BehaviorPluginTestInferRequest, CanCreateTwoExeNetworks) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
InferenceEngine::Core core;
InferenceEngine::CNNNetwork cnnNetwork = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob);
for (auto i = 0; i < 2; i++) {
ASSERT_NO_THROW(core.LoadNetwork(cnnNetwork, GetParam().device, GetParam().config));
}
}
TEST_P(BehaviorPluginTestInferRequest, CanCreateInferRequest) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
}
TEST_P(BehaviorPluginTestInferRequest, failToSetNullptrForInput) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr inputBlob = nullptr;
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputBlob, &response));
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
std::string refError = NOT_ALLOCATED_str + "Failed to set empty blob with name: \'" + testEnv->inputName + "\'";
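// Truncate the plugin's message at the expected length before comparing,
// since implementations may append extra detail after the common prefix.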
response.msg[refError.length()] = '\0';
ASSERT_EQ(refError, response.msg);
}
TEST_P(BehaviorPluginTestInferRequest, failToSetEmptyInputBlob) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr blob;
sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), blob, &response);
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
std::string refError = NOT_ALLOCATED_str + "Failed to set empty blob with name: \'" + testEnv->inputName + "\'";
response.msg[refError.length()] = '\0';
ASSERT_EQ(refError, response.msg);
}
TEST_P(BehaviorPluginTestInferRequest, failToSetEmptyOutputBlob) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr blob;
sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), blob, &response);
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
std::string refError = NOT_ALLOCATED_str + "Failed to set empty blob with name: \'" + testEnv->outputName + "\'";
response.msg[refError.length()] = '\0';
ASSERT_EQ(refError, response.msg);
}
TEST_P(BehaviorPluginTestInferRequest, failToSetNotAllocatedInput) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr input = makeNotAllocatedBlob(GetParam().input_blob_precision,
TensorDesc::getLayoutByDims(testEnv->inputDims), testEnv->inputDims);
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response));
std::string refError = "Input data was not allocated. Input name: \'" + testEnv->inputName + "\'";
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
response.msg[refError.length()] = '\0';
ASSERT_EQ(refError, response.msg);
}
TEST_P(BehaviorPluginTestInferRequest, failToSetNotAllocatedOutput) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr output = makeNotAllocatedBlob(GetParam().input_blob_precision,
TensorDesc::getLayoutByDims(testEnv->outputDims), testEnv->outputDims);
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), output, &response));
std::string refError = "Input data was not allocated. Input name: \'" + testEnv->outputName + "\'";
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
response.msg[refError.length()] = '\0';
ASSERT_EQ(refError, response.msg);
}
TEST_P(BehaviorPluginTestInferRequest, failToSetBlobWithIncorrectName) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
auto input = makeNotAllocatedBlob(GetParam().input_blob_precision, TensorDesc::getLayoutByDims(testEnv->inputDims),
testEnv->inputDims);
input->allocate();
sts = testEnv->inferRequest->SetBlob(FuncTestUtils::TestModel::incorrect_input_name, input, &response);
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
std::string refError =
NOT_FOUND_str + "Failed to find input or output with name: \'" +
FuncTestUtils::TestModel::incorrect_input_name + "\'";
response.msg[refError.length()] = '\0';
ASSERT_EQ(refError, response.msg);
}
TEST_P(BehaviorPluginTestInferRequest, failToSetInputWithIncorrectSizes) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
SizeVector incorrectSizes = testEnv->inputDims;
/* double the first dim to simulate an input blob of a different size */
incorrectSizes[0] *= 2;
auto input = makeNotAllocatedBlob(GetParam().input_blob_precision, TensorDesc::getLayoutByDims(incorrectSizes),
incorrectSizes);
input->allocate();
int in_size = std::accumulate(testEnv->inputDims.begin(), testEnv->inputDims.end(), 1, std::multiplies<int>());
std::string refError = "Input blob size is not equal network input size (" + std::to_string(input->size()) + "!=" +
std::to_string(in_size) + ").";
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response));
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
response.msg[refError.length()] = '\0';
ASSERT_EQ(refError, response.msg);
}
TEST_P(BehaviorPluginTestInferRequest, failToSetOutputWithIncorrectSizes) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
SizeVector incorrectSizes = testEnv->outputDims;
/* double the first dim to simulate an output blob of a different size */
incorrectSizes[0] *= 2;
Blob::Ptr output = _prepareOutputBlob(GetParam().input_blob_precision, incorrectSizes);
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), output, &response));
int out_size = std::accumulate(testEnv->outputDims.begin(), testEnv->outputDims.end(), 1, std::multiplies<int>());
std::string refError =
"Output blob size is not equal network output size (" + std::to_string(output->size()) + "!=" +
std::to_string(out_size) + ").";
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
response.msg[refError.length()] = '\0';
ASSERT_EQ(refError, response.msg);
}
TEST_P(BehaviorPluginTestInferRequest, failToSetInputBlobWithPrecisionNotMatchInputPrecision) {
std::string refError;
if (GetParam().device != CommonTestUtils::DEVICE_CPU) {
refError =
PARAMETER_MISMATCH_str + "Failed to set Blob with precision not corresponding to user input precision";
} else {
// MKLDNNPlugin now supports input blobs with a format other than the network format,
// so there is no 'not corresponding to user input precision' error anymore...
// ...but it still doesn't support Precision::UNSPECIFIED blobs, so only the message prefix is checked.
refError = PARAMETER_MISMATCH_str + "Failed to set Blob with precision";
}
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
auto inputBlob = prepareInputBlob(Precision::UNSPECIFIED, testEnv->inputDims);
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputBlob, &response));
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
response.msg[refError.length()] = '\0';
if (GetParam().device != CommonTestUtils::DEVICE_CPU) {
ASSERT_EQ(refError, response.msg);
} else {
ASSERT_STR_CONTAINS(response.msg, refError);
}
}
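// The fixtures build blobs through shared test utilities; a minimal sketch of
// what a helper like prepareInputBlob presumably does for an FP32 input (the
// makeFp32Blob name is hypothetical, and the real implementation lives in the
// common test utils):
static Blob::Ptr makeFp32Blob(const SizeVector& dims) {
TensorDesc desc(Precision::FP32, dims, TensorDesc::getLayoutByDims(dims));
Blob::Ptr blob = make_shared_blob<float>(desc);
blob->allocate();
return blob;
}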
TEST_P(BehaviorPluginTestInferRequest, failToSetOutputBlobWithPrecisionNotMatchOutputPrecision) {
std::string refError =
PARAMETER_MISMATCH_str + "Failed to set Blob with precision not corresponding to user output precision";
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
auto outputBlob = _prepareOutputBlob(Precision::UNSPECIFIED, testEnv->outputDims);
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), outputBlob, &response));
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
response.msg[refError.length()] = '\0';
ASSERT_EQ(refError, response.msg);
}
TEST_P(BehaviorPluginTestInferRequest, canInferWithoutSetAndGetInOut) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
}
TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedInputBlobAfterGetBlob) {
std::string refError = "Input data was not allocated";
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr blob;
ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), blob, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
blob->deallocate();
ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
}
TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedInputBlobAfterGetBlobForAsync) {
std::string refError = "Input data was not allocated";
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr blob;
ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), blob, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
blob->deallocate();
ASSERT_NO_THROW(sts = testEnv->inferRequest->StartAsync(&response));
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
}
TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedInputBlobAfterGetAndSetBlob) {
std::string refError = "Input data was not allocated";
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr blob;
ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), blob, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), blob, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
blob->deallocate();
ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
}
TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedInputBlobAfterSetBlob) {
std::string refError = "Input data was not allocated";
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
auto blob = makeNotAllocatedBlob(GetParam().input_blob_precision, TensorDesc::getLayoutByDims(testEnv->inputDims),
testEnv->inputDims);
blob->allocate();
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), blob, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
blob->deallocate();
ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
}
TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedOutputBlobAfterGetBlob) {
std::string refError = "Output data was not allocated";
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr blob;
ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), blob, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
blob->deallocate();
ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
}
TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedOutputBlobAfterGetBlobForAsync) {
std::string refError = "Output data was not allocated";
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr blob;
ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), blob, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
blob->deallocate();
ASSERT_NO_THROW(sts = testEnv->inferRequest->StartAsync(&response));
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
}
TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedOutputBlobAfterGetAndSetBlob) {
std::string refError = "Output data was not allocated";
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr blob;
ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), blob, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), blob, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
blob->deallocate();
ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
}
TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedOutputBlobAfterSetBlob) {
std::string refError = "Output data was not allocated";
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
auto blob = makeNotAllocatedBlob(GetParam().output_blob_precision, TensorDesc::getLayoutByDims(testEnv->outputDims),
testEnv->outputDims);
blob->allocate();
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), blob, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
blob->deallocate();
ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
}
TEST_P(BehaviorPluginTestInferRequest, DISABLED_secondCallGetOutputDoNotReAllocateData) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr getBlob1;
ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), getBlob1, &response));
Blob::Ptr getBlob2;
ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), getBlob2, &response));
ASSERT_EQ(getBlob1.get(), getBlob2.get());
}
TEST_P(BehaviorPluginTestInferRequest, CorrectOneAsyncInferWithGetInOutWithInfWait) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr input;
Blob::Ptr result;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
sts = testEnv->inferRequest->StartAsync(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY, &response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), result, &response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
}
// The plugin should handle an infer request correctly when the input and result BlobMaps are allocated inside the plugin
TEST_P(BehaviorPluginTestInferRequest, canStartAsyncInferWithGetInOutWithStatusOnlyWait) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr input;
Blob::Ptr result;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
sts = testEnv->inferRequest->StartAsync(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response);
ASSERT_TRUE(sts == StatusCode::OK || sts == StatusCode::RESULT_NOT_READY) << response.msg;
}
// The plugin should handle an infer request correctly when the input and result BlobMaps are allocated inside the plugin
TEST_P(BehaviorPluginTestInferRequest, FailedAsyncInferWithNegativeTimeForWait) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
std::string refError = PARAMETER_MISMATCH_str;
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr input;
Blob::Ptr result;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
sts = testEnv->inferRequest->StartAsync(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
ASSERT_NO_THROW(sts = testEnv->inferRequest->Wait(-2, &response));
ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
response.msg[refError.length()] = '\0';
ASSERT_EQ(refError, response.msg);
}
TEST_P(BehaviorPluginTestInferRequest, canRun3SyncRequestsConsistentlyFromThreads) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
IInferRequest::Ptr inferRequest2;
static_cast<IExecutableNetwork::Ptr &>(testEnv->exeNetwork)->CreateInferRequest(inferRequest2, &response);
ASSERT_NE(inferRequest2, nullptr) << response.msg;
IInferRequest::Ptr inferRequest3;
static_cast<IExecutableNetwork::Ptr &>(testEnv->exeNetwork)->CreateInferRequest(inferRequest3, &response);
ASSERT_NE(inferRequest3, nullptr) << response.msg;
Blob::Ptr input1;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input1, &response);
inferRequest2->SetBlob(testEnv->inputName.c_str(), input1, &response);
inferRequest3->SetBlob(testEnv->inputName.c_str(), input1, &response);
InferenceEngine::ResponseDesc response1, response2, response3;
InferenceEngine::StatusCode sts1, sts2, sts3;
std::thread t1([&] { sts1 = testEnv->inferRequest->Infer(&response1); });
std::thread t2([&] { sts2 = inferRequest2->Infer(&response2); });
std::thread t3([&] { sts3 = inferRequest3->Infer(&response3); });
t1.join();
t2.join();
t3.join();
ASSERT_EQ((int) StatusCode::OK, sts1) << response1.msg;
ASSERT_EQ((int) StatusCode::OK, sts2) << response2.msg;
ASSERT_EQ((int) StatusCode::OK, sts3) << response3.msg;
}
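// Note the threading pattern above: the worker threads only record status codes,
// and every gtest assertion runs on the main thread after join(), since a fatal
// googletest assertion cannot abort the whole test from a secondary thread.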
TEST_P(BehaviorPluginTestInferRequest, canRun3AsyncRequestsConsistentlyWithWait) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
IInferRequest::Ptr inferRequest2;
static_cast<IExecutableNetwork::Ptr &>(testEnv->exeNetwork)->CreateInferRequest(inferRequest2, &response);
ASSERT_NE(inferRequest2, nullptr) << response.msg;
IInferRequest::Ptr inferRequest3;
static_cast<IExecutableNetwork::Ptr &>(testEnv->exeNetwork)->CreateInferRequest(inferRequest3, &response);
ASSERT_NE(inferRequest3, nullptr) << response.msg;
Blob::Ptr input1;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input1, &response);
inferRequest2->SetBlob(testEnv->inputName.c_str(), input1, &response);
inferRequest3->SetBlob(testEnv->inputName.c_str(), input1, &response);
sts = testEnv->inferRequest->StartAsync(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
sts = inferRequest2->StartAsync(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
sts = inferRequest3->StartAsync(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY, &response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
sts = inferRequest2->Wait(IInferRequest::WaitMode::RESULT_READY, &response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
sts = inferRequest3->Wait(IInferRequest::WaitMode::RESULT_READY, &response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
}
TEST_P(BehaviorPluginTestInferRequest, canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
IInferRequest::Ptr inferRequest2;
static_cast<IExecutableNetwork::Ptr &>(testEnv->exeNetwork)->CreateInferRequest(inferRequest2, &response);
ASSERT_NE(inferRequest2, nullptr) << response.msg;
IInferRequest::Ptr inferRequest3;
static_cast<IExecutableNetwork::Ptr &>(testEnv->exeNetwork)->CreateInferRequest(inferRequest3, &response);
ASSERT_NE(inferRequest3, nullptr) << response.msg;
Blob::Ptr input1;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input1, &response);
inferRequest2->SetBlob(testEnv->inputName.c_str(), input1, &response);
inferRequest3->SetBlob(testEnv->inputName.c_str(), input1, &response);
InferenceEngine::ResponseDesc response1, response2, response3;
InferenceEngine::StatusCode sts1, sts2, sts3;
std::thread t1([&] { sts1 = testEnv->inferRequest->StartAsync(&response1); });
std::thread t2([&] { sts2 = inferRequest2->StartAsync(&response2); });
std::thread t3([&] { sts3 = inferRequest3->StartAsync(&response3); });
t1.join();
t2.join();
t3.join();
ASSERT_EQ((int) StatusCode::OK, sts1) << response1.msg;
ASSERT_EQ((int) StatusCode::OK, sts2) << response2.msg;
ASSERT_EQ((int) StatusCode::OK, sts3) << response3.msg;
}
TEST_P(BehaviorPluginTestInferRequest, canWaitWithoutStartAsync) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY, &response);
ASSERT_EQ(StatusCode::INFER_NOT_STARTED, sts);
sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response);
ASSERT_EQ(StatusCode::INFER_NOT_STARTED, sts);
sts = testEnv->inferRequest->Wait(1, &response);
ASSERT_EQ(StatusCode::INFER_NOT_STARTED, sts);
}
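// For context: Wait() accepts either a timeout in milliseconds or one of two
// special WaitMode values: RESULT_READY (-1) blocks until inference completes,
// while STATUS_ONLY (0) returns the current status immediately. The test above
// probes all three forms before StartAsync was called and expects
// INFER_NOT_STARTED from each of them.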
TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnSetBlobAfterAsyncInfer) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
auto&& config = GetParam().config;
auto itConfig = config.find(CONFIG_KEY(CPU_THROUGHPUT_STREAMS));
if (itConfig != config.end() && itConfig->second != "CPU_THROUGHPUT_AUTO" &&
std::stoi(itConfig->second) == 0) {
GTEST_SKIP() << "Not applicable with disabled streams";
}
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr input;
sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
ASSERT_EQ((int) StatusCode::OK, sts) << response.msg;
sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response);
ASSERT_EQ(StatusCode::INFER_NOT_STARTED, sts) << response.msg;
sts = testEnv->inferRequest->StartAsync(&response);
ASSERT_EQ((int) StatusCode::OK, sts) << response.msg;
sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response);
if (sts == StatusCode::REQUEST_BUSY) {
ASSERT_TRUE(_wasDeviceBusy(response));
} else {
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
}
response.msg[0] = 0;
sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response);
ASSERT_TRUE(sts == StatusCode::OK || sts == StatusCode::RESULT_NOT_READY) << response.msg;
}
TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnGetBlobAfterAsyncInfer) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr input;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
ResponseDesc response2;
sts = testEnv->inferRequest->StartAsync(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response2);
if (sts == StatusCode::REQUEST_BUSY)
ASSERT_TRUE(_wasDeviceBusy(response2));
else
ASSERT_EQ(StatusCode::OK, sts) << response2.msg;
}
TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnGetPerformanceCountAfterAsyncInfer) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr input;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
std::map<std::string, InferenceEngineProfileInfo> perfMap;
ResponseDesc response2;
sts = testEnv->inferRequest->StartAsync(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
sts = testEnv->inferRequest->GetPerformanceCounts(perfMap, &response2);
if (sts == StatusCode::REQUEST_BUSY)
ASSERT_TRUE(_wasDeviceBusy(response2));
else
ASSERT_EQ(StatusCode::OK, sts);
}
TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnStartInferAfterAsyncInfer) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr input;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
ResponseDesc response2;
sts = testEnv->inferRequest->StartAsync(&response);
ASSERT_EQ(StatusCode::OK, sts);
sts = testEnv->inferRequest->StartAsync(&response2);
if (sts == StatusCode::REQUEST_BUSY)
ASSERT_TRUE(_wasDeviceBusy(response2));
else
ASSERT_EQ(StatusCode::OK, sts);
}
TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnGetUserDataAfterAsyncInfer) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr input;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
ResponseDesc response2;
sts = testEnv->inferRequest->StartAsync(&response);
ASSERT_EQ(StatusCode::OK, sts);
testEnv->inferRequest->GetUserData(nullptr, &response2);
auto waitStatus = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response);
if (waitStatus == StatusCode::RESULT_NOT_READY)
ASSERT_TRUE(_wasDeviceBusy(response2));
else
ASSERT_TRUE(waitStatus == StatusCode::OK);
}
TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnSetUserDataAfterAsyncInfer) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr input;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
ResponseDesc response2;
sts = testEnv->inferRequest->StartAsync(&response);
ASSERT_EQ(StatusCode::OK, sts);
testEnv->inferRequest->SetUserData(nullptr, &response2);
auto waitStatus = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response);
if (waitStatus == StatusCode::RESULT_NOT_READY)
ASSERT_TRUE(_wasDeviceBusy(response2));
else
ASSERT_TRUE(waitStatus == StatusCode::OK);
}
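// Taken together, these tests exercise the legacy C-style IInferRequest flow.
// A condensed sketch of the happy path they rely on (identifiers match the
// fixture above; status checks trimmed for brevity, and fillBlobWithTestData
// is a hypothetical helper):
//
// Blob::Ptr input, output;
// testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);   // plugin-allocated input
// fillBlobWithTestData(input);
// testEnv->inferRequest->StartAsync(&response);                                   // kick off inference
// testEnv->inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY, &response);  // block until done
// testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), output, &response); // fetch results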

View File

@ -1,80 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin.h"
#include <threading/ie_executor_manager.hpp>
using namespace std;
using namespace ::testing;
using namespace InferenceEngine;
using namespace InferenceEngine::details;
namespace {
std::string getConfigTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
std::string config_str = "";
for (auto it = obj.param.config.cbegin(); it != obj.param.config.cend(); it++) {
std::string v = it->second;
std::replace(v.begin(), v.end(), '.', '_');
config_str += it->first + "_" + v + "_";
}
return obj.param.device + "_" + config_str;
}
}  // namespace
TEST_P(BehaviorPluginTestInferRequestConfig, CanInferWithConfig) {
TestEnv::Ptr testEnv;
std::map<std::string, std::string> config = GetParam().config;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv, config));
sts = testEnv->inferRequest->Infer(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
}
TEST_P(BehaviorPluginTestInferRequestConfigExclusiveAsync, canSetExclusiveAsyncRequests) {
ASSERT_EQ(0ul, ExecutorManager::getInstance()->getExecutorsNumber());
TestEnv::Ptr testEnv;
std::map<std::string, std::string> config;
config[PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS] = PluginConfigParams::YES;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv, config));
// TODO: there is no executors to sync. should it be supported natively in HDDL API?
if (GetParam().device == CommonTestUtils::DEVICE_HDDL) {
ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
} else if (GetParam().device == CommonTestUtils::DEVICE_FPGA) {
ASSERT_EQ(2u, ExecutorManager::getInstance()->getExecutorsNumber());
} else if (GetParam().device == CommonTestUtils::DEVICE_MYRIAD) {
ASSERT_EQ(2u, ExecutorManager::getInstance()->getExecutorsNumber());
} else if (GetParam().device == CommonTestUtils::DEVICE_KEEMBAY) {
ASSERT_EQ(2u, ExecutorManager::getInstance()->getExecutorsNumber());
} else if (GetParam().device == CommonTestUtils::DEVICE_GNA) {
ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
} else if (GetParam().device == CommonTestUtils::DEVICE_MULTI) {
// for multi-device the number of Executors is not known (defined by the devices configuration)
} else {
ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
}
}
TEST_P(BehaviorPluginTestInferRequestConfigExclusiveAsync, withoutExclusiveAsyncRequests) {
ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
TestEnv::Ptr testEnv;
std::map<std::string, std::string> config;
config[PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS] = PluginConfigParams::NO;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv, config));
if (GetParam().device == CommonTestUtils::DEVICE_FPGA) {
ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
} else if (GetParam().device == CommonTestUtils::DEVICE_MYRIAD) {
ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
} else if (GetParam().device == CommonTestUtils::DEVICE_KEEMBAY) {
ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
} else if (GetParam().device == CommonTestUtils::DEVICE_MULTI) {
// for multi-device the number of Executors is not known (defined by the devices configuration)
} else {
ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
}
}
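// For context: KEY_EXCLUSIVE_ASYNC_REQUESTS = YES makes a plugin route its async
// requests through a shared task executor, so requests from different executable
// networks run exclusively rather than simultaneously; the executor counts asserted
// in the two tests above reflect how many such shared executors each plugin registers.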

View File

@ -1,125 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin.h"
using namespace std;
using namespace ::testing;
using namespace InferenceEngine;
using namespace InferenceEngine::details;
namespace {
std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
return obj.param.device + "_" + obj.param.input_blob_precision.name() + "_" + getModelName(obj.param.model_xml_str)
+ (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
}
}  // namespace
TEST_P(BehaviorPluginTestInferRequestInput, canSetInputBlobForSyncRequest) {
TestEnv::Ptr testEnv;
Blob::Ptr actualBlob;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
auto inputBlob = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims);
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputBlob, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
ASSERT_NO_THROW(testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), actualBlob, &response));
ASSERT_EQ(inputBlob, actualBlob);
}
TEST_P(BehaviorPluginTestInferRequestInput, canSetInputBlobForAsyncRequest) {
TestEnv::Ptr testEnv;
Blob::Ptr actualBlob;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
auto inputBlob = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims);
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputBlob, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
ASSERT_NO_THROW(testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), actualBlob, &response));
ASSERT_EQ(inputBlob, actualBlob);
}
TEST_P(BehaviorPluginTestInferRequestInput, canInferWithSetInOut) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
auto input = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims);
testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response);
auto output = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims);
testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), output, &response);
sts = testEnv->inferRequest->Infer(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
}
TEST_P(BehaviorPluginTestInferRequestInput, canGetInputBlob_deprecatedAPI) {
TestEnv::Ptr testEnv;
Blob::Ptr input;
auto param = GetParam();
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(param, testEnv));
ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
ASSERT_TRUE(input) << "Plugin didn't allocate input blobs";
ASSERT_FALSE(input->buffer() == nullptr) << "Plugin didn't allocate input blobs";
auto dims = input->getTensorDesc().getDims();
ASSERT_TRUE(testEnv->inputDims == dims) << "Input blob dimensions don't match network input";
ASSERT_EQ(param.input_blob_precision, input->getTensorDesc().getPrecision()) << "Input blob precision doesn't match network input";
}
TEST_P(BehaviorPluginTestInferRequestInput, canGetInputBlob) {
TestEnv::Ptr testEnv;
Blob::Ptr input;
auto param = GetParam();
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(param, testEnv));
ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
ASSERT_TRUE(input) << "Plugin didn't allocate input blobs";
ASSERT_FALSE(input->buffer() == nullptr) << "Plugin didn't allocate input blobs";
auto tensorDescription = input->getTensorDesc();
auto dims = tensorDescription.getDims();
ASSERT_TRUE(testEnv->inputDims == dims) << "Input blob dimensions don't match network input";
ASSERT_EQ(param.input_blob_precision, tensorDescription.getPrecision()) << "Input blob precision doesn't match network input";
}
TEST_P(BehaviorPluginTestInferRequestInput, getInputAfterSetInputDoNotChangeInput) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr inputSetBlob = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims);
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputSetBlob, &response));
Blob::Ptr inputGetBlob;
ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), inputGetBlob, &response));
ASSERT_EQ(inputGetBlob.get(), inputSetBlob.get());
}
TEST_P(BehaviorPluginTestInferRequestInput, canInferWithGetInOut) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr input;
Blob::Ptr result;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), result, &response);
sts = testEnv->inferRequest->Infer(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
}
TEST_P(BehaviorPluginTestInferRequestInput, canStartAsyncInferWithGetInOut) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr input;
Blob::Ptr result;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
sts = testEnv->inferRequest->StartAsync(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
sts = testEnv->inferRequest->Wait(500, &response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), result, &response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
}

View File

@ -1,164 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin.h"
using namespace std;
using namespace ::testing;
using namespace InferenceEngine;
using namespace InferenceEngine::details;
namespace {
std::string getOutputTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
return obj.param.device + "_" + obj.param.output_blob_precision.name()
+ (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
}
}  // namespace
TEST_P(BehaviorPluginTestInferRequestOutput, canSetOutputBlobForAsyncRequest) {
TestEnv::Ptr testEnv;
Blob::Ptr actualBlob;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
auto outputBlob = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims);
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), outputBlob, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
ASSERT_NO_THROW(testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), actualBlob, &response));
ASSERT_EQ(outputBlob, actualBlob);
}
TEST_P(BehaviorPluginTestInferRequestOutput, canSetOutputBlobForSyncRequest) {
TestEnv::Ptr testEnv;
Blob::Ptr actualBlob;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
auto outputBlob = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims);
ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), outputBlob, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
ASSERT_NO_THROW(testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), actualBlob, &response));
ASSERT_EQ(outputBlob, actualBlob);
}
TEST_P(BehaviorPluginTestInferRequestOutput, canInferWithSetInOut) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
auto input = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims);
testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response);
auto output = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims);
testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), output, &response);
sts = testEnv->inferRequest->Infer(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
}
TEST_P(BehaviorPluginTestInferRequestOutput, canGetOutputBlob_deprecatedAPI) {
TestEnv::Ptr testEnv;
Blob::Ptr output;
auto param = GetParam();
StatusCode sts = StatusCode::OK;
ResponseDesc response;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(param, testEnv));
ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), output, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
ASSERT_TRUE(output) << "Plugin didn't allocate output blobs";
ASSERT_FALSE(output->buffer() == nullptr) << "Plugin didn't allocate output blobs";
auto dims = output->getTensorDesc().getDims();
ASSERT_TRUE(testEnv->outputDims == dims) << "Output blob dimensions don't match network output";
// [IE FPGA] The plugin ignores custom output precision: CVS-8122
if (param.device != CommonTestUtils::DEVICE_FPGA && param.output_blob_precision != Precision::FP32) {
ASSERT_EQ(param.output_blob_precision, output->getTensorDesc().getPrecision())
<< "Output blob precision doesn't match network output";
} else if (param.device == CommonTestUtils::DEVICE_FPGA) {
set<Precision> supportedOutputs = {Precision::FP16, Precision::FP32};
ASSERT_TRUE(supportedOutputs.find(output->getTensorDesc().getPrecision()) != supportedOutputs.end()) << "Output blob precision doesn't match network output";
} else {
ASSERT_EQ(Precision::FP32, output->getTensorDesc().getPrecision()) << "Output blob precision doesn't match network output";
}
}
TEST_P(BehaviorPluginTestInferRequestOutput, canGetOutputBlob) {
TestEnv::Ptr testEnv;
Blob::Ptr output;
auto param = GetParam();
StatusCode sts = StatusCode::OK;
ResponseDesc response;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(param, testEnv));
ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), output, &response));
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
ASSERT_TRUE(output) << "Plugin didn't allocate output blobs";
ASSERT_FALSE(output->buffer() == nullptr) << "Plugin didn't allocate output blobs";
auto tensorDescription = output->getTensorDesc();
auto dims = tensorDescription.getDims();
ASSERT_TRUE(testEnv->outputDims == dims) << "Output blob dimensions don't match network output";
// [IE FPGA] The plugin ignores custom output precision: CVS-8122
std::cout << "Device: " << param.device << std::endl;
if (param.device != CommonTestUtils::DEVICE_FPGA && param.output_blob_precision != Precision::FP32) {
ASSERT_EQ(param.output_blob_precision, tensorDescription.getPrecision())
<< "Output blob precision doesn't match network output";
} else if (param.device == CommonTestUtils::DEVICE_FPGA) {
set<Precision> supportedOutputs = {Precision::FP16, Precision::FP32};
ASSERT_TRUE(supportedOutputs.find(tensorDescription.getPrecision()) != supportedOutputs.end()) << "Output blob precision doesn't match network output";
} else {
ASSERT_EQ(Precision::FP32, tensorDescription.getPrecision()) << "Output blob precision doesn't match network output";
}
}
TEST_P(BehaviorPluginTestInferRequestOutput, getOutputAfterSetOutputDoNotChangeOutput) {
TestEnv::Ptr testEnv;
ResponseDesc response;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr outputSetBlob = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims);
ASSERT_EQ(StatusCode::OK, testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), outputSetBlob, &response));
Blob::Ptr outputGetBlob;
ASSERT_EQ(StatusCode::OK, testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), outputGetBlob, &response));
ASSERT_EQ(outputGetBlob.get(), outputSetBlob.get());
}
TEST_P(BehaviorPluginTestInferRequestOutput, canInferWithGetInOut) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr input;
Blob::Ptr result;
StatusCode sts = StatusCode::OK;
ResponseDesc response;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), result, &response);
sts = testEnv->inferRequest->Infer(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
}
TEST_P(BehaviorPluginTestInferRequestOutput, canStartAsyncInferWithGetInOut) {
TestEnv::Ptr testEnv;
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
Blob::Ptr input;
Blob::Ptr result;
StatusCode sts = StatusCode::OK;
ResponseDesc response;
testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
sts = testEnv->inferRequest->StartAsync(&response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
sts = testEnv->inferRequest->Wait(500, &response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), result, &response);
ASSERT_EQ(StatusCode::OK, sts) << response.msg;
}

View File

@ -1,8 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request.hpp"
#include "vpu_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName);

View File

@ -1,13 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_config.hpp"
#include "vpu_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestConfig,
ValuesIn(BehTestParams::concat(deviceAgnosticConfigurations, withCorrectConfValuesNetworkOnly)),
getConfigTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues),
getConfigTestCaseName);

View File

@ -1,9 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_input.hpp"
#include "vpu_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues),
getTestCaseName);

View File

@ -1,9 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_output.hpp"
#include "vpu_test_data.hpp"
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues),
getOutputTestCaseName);