diff --git a/docs/template_plugin/tests/functional/CMakeLists.txt b/docs/template_plugin/tests/functional/CMakeLists.txt index 9e03dba0af2..2f6bd1e8336 100644 --- a/docs/template_plugin/tests/functional/CMakeLists.txt +++ b/docs/template_plugin/tests/functional/CMakeLists.txt @@ -15,4 +15,4 @@ addIeTargetTest( ADD_CPPLINT LABELS TEMPLATE -) \ No newline at end of file +) diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/config.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/config.cpp index 2221bb27255..96b58cdd54d 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/config.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/config.cpp @@ -6,8 +6,6 @@ #include "behavior/config.hpp" -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/exec_graph_info.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/exec_graph_info.cpp index 786ee25c700..4859bb2a1ef 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/exec_graph_info.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/exec_graph_info.cpp @@ -6,8 +6,6 @@ #include "behavior/exec_graph_info.hpp" -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP32, @@ -24,4 +22,4 @@ namespace { ::testing::Values("TEMPLATE"), ::testing::ValuesIn(configs)), ExecGraphTests::getTestCaseName); -} // namespace +} // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request.cpp new file mode 100644 index 00000000000..cfb38e09778 --- /dev/null +++ 
b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/infer_request.hpp" +namespace { + const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16 + }; + + const std::vector> configs = { + {} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values("TEMPLATE"), + ::testing::ValuesIn(configs)), + InferRequestTests::getTestCaseName); +} // namespace diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_callback.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_callback.cpp index 2412a566e5d..6cfa8033026 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_callback.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_callback.cpp @@ -6,8 +6,6 @@ #include "behavior/infer_request_callback.hpp" -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP32, @@ -15,15 +13,13 @@ const std::vector netPrecisions = { }; const std::vector> configs = { - {{{}}} + {} }; -const std::vector devices{CommonTestUtils::DEVICE_CPU}; - INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(devices), + ::testing::Values("TEMPLATE"), ::testing::ValuesIn(configs)), CallbackTests::getTestCaseName); } // namespace diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_config.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_config.cpp new file mode 100644 index 00000000000..2daf91daca7 --- 
/dev/null +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_config.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/infer_request_config.hpp" + +namespace { + const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16 + }; + + const std::vector> configs = { + {} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values("TEMPLATE"), + ::testing::ValuesIn(configs)), + InferConfigTests::getTestCaseName); +} // namespace diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_input.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_input.cpp new file mode 100644 index 00000000000..09d88af9c39 --- /dev/null +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_input.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "multi-device/multi_device_config.hpp" + +#include "behavior/infer_request_input.hpp" + +namespace { + + const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16 + }; + + const std::vector> configs = { + {} + }; + + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values("TEMPLATE"), + ::testing::ValuesIn(configs)), + InferRequestInputTests::getTestCaseName); + +} // namespace diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_output.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_output.cpp new file mode 100644 index 00000000000..9c639531cf2 --- /dev/null +++ 
b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_output.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "multi-device/multi_device_config.hpp" + +#include "behavior/infer_request_output.hpp" + +namespace { + + const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16 + }; + + const std::vector> configs = { + {} + }; + + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values("TEMPLATE"), + ::testing::ValuesIn(configs)), + InferRequestOutputTests::getTestCaseName); + +} // namespace diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/set_preprocess.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/set_preprocess.cpp index ebbb7879463..2c0082daace 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/set_preprocess.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/set_preprocess.cpp @@ -6,8 +6,6 @@ #include "behavior/set_preprocess.hpp" -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP32, @@ -18,11 +16,11 @@ namespace { {} }; - INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreProcessTests, + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::Values("TEMPLATE"), ::testing::ValuesIn(configs)), - PreProcessTests::getTestCaseName); + PreprocessTest::getTestCaseName); } // namespace \ No newline at end of file diff --git a/docs/template_plugin/tests_deprecated/behavior/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp 
b/docs/template_plugin/tests_deprecated/behavior/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp deleted file mode 100644 index 9020021f804..00000000000 --- a/docs/template_plugin/tests_deprecated/behavior/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (C) 2018-2019 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request.hpp" -#include "template_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName); diff --git a/docs/template_plugin/tests_deprecated/behavior/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp b/docs/template_plugin/tests_deprecated/behavior/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp deleted file mode 100644 index 604aabdd980..00000000000 --- a/docs/template_plugin/tests_deprecated/behavior/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) 2018-2019 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_config.hpp" -#include "template_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues), - getConfigTestCaseName); - -INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestConfig, - ValuesIn(BehTestParams::concat(withCorrectConfValues, withCorrectConfValuesNetworkOnly)), - getConfigTestCaseName); diff --git a/docs/template_plugin/tests_deprecated/behavior/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp b/docs/template_plugin/tests_deprecated/behavior/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp deleted file mode 100644 index df8951477b1..00000000000 --- 
a/docs/template_plugin/tests_deprecated/behavior/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) 2018-2019 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_input.hpp" -#include "template_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues), - getTestCaseName); diff --git a/docs/template_plugin/tests_deprecated/behavior/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp b/docs/template_plugin/tests_deprecated/behavior/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp deleted file mode 100644 index 86d35ce6ff6..00000000000 --- a/docs/template_plugin/tests_deprecated/behavior/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) 2018-2019 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_output.hpp" -#include "template_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues), - getOutputTestCaseName); diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/config.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/config.cpp index a2eec29fb9f..ed8434d4d31 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/config.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/config.cpp @@ -5,15 +5,16 @@ #include "multi-device/multi_device_config.hpp" #include "behavior/config.hpp" - -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16 }; + const std::vector> conf 
= { + {} + }; + const std::vector> Configs = { {}, {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, @@ -25,13 +26,10 @@ namespace { }; const std::vector> MultiConfigs = { - {}, {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, - InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, + {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, - InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}}, + {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}}, {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}, {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}}, {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}, @@ -71,6 +69,25 @@ namespace { {InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_LIMIT, "NAN"}} }; + const std::vector> multiconf = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}} + }; + + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(conf)), + CorrectConfigAPITests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CorrectConfigAPITests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiconf)), + 
CorrectConfigAPITests::getTestCaseName); + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigTests, ::testing::Combine( ::testing::ValuesIn(netPrecisions), @@ -86,39 +103,17 @@ namespace { IncorrectConfigTests::getTestCaseName); INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CommonTestUtils::DEVICE_CPU), - ::testing::ValuesIn(inconfigs)), - IncorrectConfigAPITests::getTestCaseName); + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(inconfigs)), + IncorrectConfigAPITests::getTestCaseName); INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(multiinconfigs)), - IncorrectConfigAPITests::getTestCaseName); + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiinconfigs)), + IncorrectConfigAPITests::getTestCaseName); - - const std::vector> conf = { - {} - }; - - const std::vector> multiconf = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}} - }; - - INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CommonTestUtils::DEVICE_CPU), - ::testing::ValuesIn(conf)), - CorrectConfigAPITests::getTestCaseName); - - INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CorrectConfigAPITests, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(multiconf)), - CorrectConfigAPITests::getTestCaseName); } // namespace \ No newline at end of file diff --git 
a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/exec_graph_info.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/exec_graph_info.cpp index 2eb789ce5a5..f834ffced84 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/exec_graph_info.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/exec_graph_info.cpp @@ -3,11 +3,8 @@ // #include "multi-device/multi_device_config.hpp" - #include "behavior/exec_graph_info.hpp" -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP32, diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request.cpp new file mode 100644 index 00000000000..83dbf91a4fa --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request.cpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/infer_request.hpp" +#include "ie_plugin_config.hpp" +namespace { + + const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16 + }; + + const std::vector> configs = { + {}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} + }; + + const std::vector> Multiconfigs = { + {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + 
::testing::ValuesIn(configs)), + InferRequestTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(Multiconfigs)), + InferRequestTests::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request_callback.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request_callback.cpp index 481f948d596..a081f1dfbd1 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request_callback.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request_callback.cpp @@ -6,8 +6,6 @@ #include "behavior/infer_request_callback.hpp" -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP32, @@ -22,7 +20,7 @@ const std::vector> configs = { const std::vector> multiConfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} -};/**/ +}; INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests, ::testing::Combine( diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request_config.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request_config.cpp new file mode 100644 index 00000000000..a1c88276f14 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request_config.cpp @@ -0,0 +1,77 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "multi-device/multi_device_config.hpp" + +#include "behavior/infer_request_config.hpp" + +namespace { + const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16 + }; + + 
const std::vector> configs = { + {} + }; + + const std::vector> multiConfigs = { + {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} + }; + + const std::vector> InConfigs = { + {}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::NO}}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}}, + {{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_LIMIT, "10"}}, + }; + + const std::vector> MultiInConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}, + {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, + InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}, + {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, + InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}, + {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}, + {InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::NO}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}, + {InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}}, + 
    {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}, + {InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_LIMIT, "10"}} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(configs)), + InferConfigTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferConfigTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferConfigTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigInTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(InConfigs)), + InferConfigInTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferConfigInTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(MultiInConfigs)), + InferConfigInTests::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request_input.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request_input.cpp new file mode 100644 index 00000000000..67ed72814b6 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request_input.cpp @@ -0,0 +1,44 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "multi-device/multi_device_config.hpp" + +#include "behavior/infer_request_input.hpp" + +namespace { + + const std::vector<InferenceEngine::Precision> netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16, + InferenceEngine::Precision::U8, +
InferenceEngine::Precision::U16, + InferenceEngine::Precision::I16 + }; + + const std::vector> configs = { + {}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} + }; + + const std::vector> multiConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}, + {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(configs)), + InferRequestInputTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestInputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferRequestInputTests::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request_output.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request_output.cpp new file mode 100644 index 00000000000..32680a86e70 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request_output.cpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "multi-device/multi_device_config.hpp" + +#include "behavior/infer_request_output.hpp" + +namespace { + + const std::vector netPrecisions = { + InferenceEngine::Precision::FP32 + }; + + const std::vector> configs = { + {}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, 
InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} + }; + + const std::vector> multiConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}, + {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(configs)), + InferRequestOutputTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestOutputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferRequestOutputTests::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/set_preprocess.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/set_preprocess.cpp index 0946bba776d..05a5c899dde 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/set_preprocess.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/set_preprocess.cpp @@ -2,12 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // +#include #include "multi-device/multi_device_config.hpp" #include "behavior/set_preprocess.hpp" -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP32, @@ -24,17 +23,17 @@ namespace { {{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}} }; - INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreProcessTests, + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, 
PreprocessTest, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::ValuesIn(configs)), - PreProcessTests::getTestCaseName); + PreprocessTest::getTestCaseName); - INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreProcessTests, + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreprocessTest, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_MULTI), ::testing::ValuesIn(multiConfigs)), - PreProcessTests::getTestCaseName); + PreprocessTest::getTestCaseName); } // namespace \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/config.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/config.cpp index 040c3693ffa..35237f6e16b 100644 --- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/config.cpp +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/config.cpp @@ -6,8 +6,6 @@ #include "gna/gna_config.hpp" #include "behavior/config.hpp" -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/exec_graph_info.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/exec_graph_info.cpp index ff0c4406ec4..3c1655b3534 100644 --- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/exec_graph_info.cpp +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/exec_graph_info.cpp @@ -2,12 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "multi-device/multi_device_config.hpp" - #include "behavior/exec_graph_info.hpp" - -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP32, @@ -25,4 +20,4 @@ namespace { ::testing::ValuesIn(configs)), 
ExecGraphTests::getTestCaseName); -} // namespace +} // namespace \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request.cpp new file mode 100644 index 00000000000..4438be676ff --- /dev/null +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request.cpp @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/infer_request.hpp" +#include "ie_plugin_config.hpp" +namespace { +const std::vector netPrecisions = { + InferenceEngine::Precision::FP32 +}; + +INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GNA), + ::testing::Values(std::map({}))), + InferRequestTests::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request_callback.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request_callback.cpp index 91cac7195fb..69bf5dfaac6 100644 --- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request_callback.cpp +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request_callback.cpp @@ -6,8 +6,6 @@ #include "behavior/infer_request_callback.hpp" -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP32, @@ -31,9 +29,9 @@ INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests, CallbackTests::getTestCaseName); INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CallbackTests, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CommonTestUtils::DEVICE_MULTI), - 
::testing::ValuesIn(multiConfigs)), - CallbackTests::getTestCaseName); + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + CallbackTests::getTestCaseName); } // namespace diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request_config.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request_config.cpp new file mode 100644 index 00000000000..503c07cb3c4 --- /dev/null +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request_config.cpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gna/gna_config.hpp" +#include "behavior/infer_request_config.hpp" + +namespace { + const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16 + }; + + const std::vector> configs = { + {} + }; + + const std::vector> Inconfigs = { + {{InferenceEngine::GNAConfigParams::KEY_GNA_SCALE_FACTOR, "1.0"}}, + {{InferenceEngine::GNAConfigParams::KEY_GNA_PRECISION, "I8"}}, + {{InferenceEngine::GNAConfigParams::KEY_GNA_FIRMWARE_MODEL_IMAGE, "gfile"}}, + {{InferenceEngine::GNAConfigParams::KEY_GNA_DEVICE_MODE, InferenceEngine::GNAConfigParams::GNA_AUTO}}, + {{InferenceEngine::GNAConfigParams::KEY_GNA_DEVICE_MODE, InferenceEngine::GNAConfigParams::GNA_SW_FP32}}, + {{InferenceEngine::GNAConfigParams::KEY_GNA_DEVICE_MODE, InferenceEngine::GNAConfigParams::GNA_SW}}, + {{InferenceEngine::GNAConfigParams::KEY_GNA_DEVICE_MODE, InferenceEngine::GNAConfigParams::GNA_SW_EXACT}}, + {{InferenceEngine::GNAConfigParams::KEY_GNA_COMPACT_MODE, InferenceEngine::PluginConfigParams::NO}} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GNA), + 
::testing::ValuesIn(configs)), + InferConfigTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigInTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GNA), + ::testing::ValuesIn(Inconfigs)), + InferConfigInTests::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request_input.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request_input.cpp new file mode 100644 index 00000000000..c120a91ac3e --- /dev/null +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request_input.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/infer_request_input.hpp" + +namespace { + + const std::vector netPrecisions = { + InferenceEngine::Precision::U8, + InferenceEngine::Precision::I16 + }; + + const std::vector> configs = { + {} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GNA), + ::testing::ValuesIn(configs)), + InferRequestInputTests::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request_output.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request_output.cpp new file mode 100644 index 00000000000..fefc6d4c61f --- /dev/null +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/infer_request_output.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/infer_request_output.hpp" + +namespace { + + const std::vector netPrecisions = { + InferenceEngine::Precision::FP32 + }; + + const 
std::vector> configs = { + {} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GNA), + ::testing::ValuesIn(configs)), + InferRequestOutputTests::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp index c46a5bef221..5b0b6026f1c 100644 --- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp @@ -9,6 +9,26 @@ std::vector disabledTestPatterns() { return { + // TODO: FIX BUG 31661 + // TODO: support InferRequest in GNAPlugin + ".*InferRequestTests\\.canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait.*", + // TODO: FIX BUG 23740 + ".*InferRequestTests\\.CanCreateTwoExeNetworks.*", + // TODO: FIX BUG 26702 + ".*InferRequestTests\\.FailedAsyncInferWithNegativeTimeForWait.*", + // TODO: FIX BUG 23741 + ".*InferRequestTests\\.canRun3SyncRequestsConsistentlyFromThreads.*", + // TODO: FIX BUG 23742 + ".*InferRequestTests\\.canWaitWithotStartAsync.*", + // TODO: FIX BUG 23743 + ".*InferRequestTests\\.returnDeviceBusyOnSetBlobAfterAsyncInfer.*", + ".*InferRequestTests\\.returnDeviceBusyOnGetBlobAfterAsyncInfer.*", + ".*InferRequestTests\\.returnDeviceBusyOnGetPerformanceCountAfterAsyncInfer.*", + ".*InferRequestTests\\.returnDeviceBusyOnStartInferAfterAsyncInfer.*", + ".*InferRequestTests\\.returnDeviceBusyOnGetUserDataAfterAsyncInfer.*", + ".*InferRequestTests\\.returnDeviceBusyOnSetUserDataAfterAsyncInfer.*", + // TODO: FIX BUG 31661 + ".*InferRequestTests\\.canStartSeveralAsyncInsideCompletionCallbackNoSafeDtorWithoutWait.*", // TODO: FIX BUG 31661 ".*Behavior.*CallbackThrowException.*", // TODO: FIX BUG 32210 diff --git 
a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/config.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/config.cpp index bdf2812b135..5d826a4ecf5 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/config.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/config.cpp @@ -6,8 +6,6 @@ #include "behavior/config.hpp" -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP32, @@ -38,16 +36,16 @@ namespace { INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigTests, ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CommonTestUtils::DEVICE_GPU), - ::testing::ValuesIn(inconfigs)), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GPU), + ::testing::ValuesIn(inconfigs)), IncorrectConfigTests::getTestCaseName); INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, IncorrectConfigTests, ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(multiinconfigs)), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiinconfigs)), IncorrectConfigTests::getTestCaseName); @@ -60,31 +58,31 @@ namespace { }; INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CommonTestUtils::DEVICE_GPU), - ::testing::ValuesIn(conf)), - CorrectConfigAPITests::getTestCaseName); + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GPU), + ::testing::ValuesIn(conf)), + CorrectConfigAPITests::getTestCaseName); INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CorrectConfigAPITests, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - 
::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(multiconf)), - CorrectConfigAPITests::getTestCaseName); + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiconf)), + CorrectConfigAPITests::getTestCaseName); INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CommonTestUtils::DEVICE_GPU), - ::testing::ValuesIn(conf)), - IncorrectConfigAPITests::getTestCaseName); + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GPU), + ::testing::ValuesIn(conf)), + IncorrectConfigAPITests::getTestCaseName); INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(multiconf)), - IncorrectConfigAPITests::getTestCaseName); + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiconf)), + IncorrectConfigAPITests::getTestCaseName); } // namespace \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/exec_graph_info.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/exec_graph_info.cpp index 708aeea1841..7f549534d6f 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/exec_graph_info.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/exec_graph_info.cpp @@ -3,11 +3,8 @@ // #include "multi-device/multi_device_config.hpp" - #include "behavior/exec_graph_info.hpp" -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP32, @@ -25,4 +22,4 @@ namespace { 
::testing::ValuesIn(configs)), ExecGraphTests::getTestCaseName); -} // namespace +} // namespace \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request.cpp new file mode 100644 index 00000000000..90a9088f044 --- /dev/null +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request.cpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/infer_request.hpp" +#include "ie_plugin_config.hpp" + +namespace { + const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16 + }; + + const std::vector> configs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_GPU}} + }; + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GPU), + ::testing::Values(std::map({}))), + InferRequestTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(configs)), + InferRequestTests::getTestCaseName); +} // namespace \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request_callback.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request_callback.cpp index 46bb9b41ba3..34d0284b09b 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request_callback.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request_callback.cpp 
@@ -6,8 +6,6 @@ #include "behavior/infer_request_callback.hpp" -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP32, @@ -30,9 +28,9 @@ INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests, CallbackTests::getTestCaseName); INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CallbackTests, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - CallbackTests::getTestCaseName); + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + CallbackTests::getTestCaseName); } // namespace diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request_config.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request_config.cpp new file mode 100644 index 00000000000..77fd8d90083 --- /dev/null +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request_config.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "multi-device/multi_device_config.hpp" + +#include "behavior/infer_request_config.hpp" + +namespace { + + const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16 + }; + + const std::vector> configs = { + {} + }; + + const std::vector> multiConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_GPU}} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GPU), + ::testing::ValuesIn(configs)), + InferConfigTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferConfigTests, + 
::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferConfigTests::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request_input.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request_input.cpp new file mode 100644 index 00000000000..f3f9ce08f1c --- /dev/null +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request_input.cpp @@ -0,0 +1,42 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "multi-device/multi_device_config.hpp" + +#include "behavior/infer_request_input.hpp" + +namespace { + const std::vector netPrecisions = { + InferenceEngine::Precision::FP16, + InferenceEngine::Precision::U8, + InferenceEngine::Precision::I16, + InferenceEngine::Precision::I32 + }; + + const std::vector> configs = { + {}, + {{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}} + }; + + const std::vector> multiConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU}, + {InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, + InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GPU), + ::testing::ValuesIn(configs)), + InferRequestInputTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestInputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferRequestInputTests::getTestCaseName); + +} // namespace 
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request_output.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request_output.cpp new file mode 100644 index 00000000000..8fcb6f8e1a6 --- /dev/null +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request_output.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "multi-device/multi_device_config.hpp" + +#include "behavior/infer_request_output.hpp" + +namespace { + const std::vector netPrecisions = { + InferenceEngine::Precision::FP32 + }; + + const std::vector> configs = { + {}, + {{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}} + }; + + const std::vector> multiConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU}, + {InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GPU), + ::testing::ValuesIn(configs)), + InferRequestOutputTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestOutputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferRequestOutputTests::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/set_preprocess.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/set_preprocess.cpp index 411861cd098..996962c30c5 100644 --- 
a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/set_preprocess.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/set_preprocess.cpp @@ -3,12 +3,12 @@ // #include "multi-device/multi_device_config.hpp" - +#include #include "behavior/set_preprocess.hpp" -using namespace LayerTestsDefinitions; - namespace { + using PreprocessBehTest = BehaviorTestsUtils::BehaviorTestsBasic; + const std::vector netPrecisions = { InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16 @@ -22,17 +22,17 @@ namespace { {{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU}} }; - INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreProcessTests, + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_GPU), ::testing::ValuesIn(configs)), - PreProcessTests::getTestCaseName); + PreprocessTest::getTestCaseName); - INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreProcessTests, + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreprocessTest, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_MULTI), ::testing::ValuesIn(multiConfigs)), - PreProcessTests::getTestCaseName); + PreprocessTest::getTestCaseName); } // namespace \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/config.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/config.cpp index e740e3a0da1..d0cb33f7b51 100644 --- a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/config.cpp +++ b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/config.cpp @@ -6,8 +6,6 @@ #include "vpu/vpu_plugin_config.hpp" #include "behavior/config.hpp" -using namespace LayerTestsDefinitions; - namespace { const 
std::vector netPrecisions = { @@ -43,7 +41,7 @@ namespace { const std::vector> MultiConfigs = { {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}, - {CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}}, + {CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}}, {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}, {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}} }; @@ -89,15 +87,15 @@ namespace { {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}, {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), "ON"}}, {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}, - {CONFIG_KEY(LOG_LEVEL), "VERBOSE"}}, + {CONFIG_KEY(LOG_LEVEL), "VERBOSE"}}, {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}, - {VPU_CONFIG_KEY(IGNORE_IR_STATISTIC), "ON"}}, + {VPU_CONFIG_KEY(IGNORE_IR_STATISTIC), "ON"}}, {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}, - {VPU_MYRIAD_CONFIG_KEY(PLATFORM), "-1"}}, + {VPU_MYRIAD_CONFIG_KEY(PLATFORM), "-1"}}, {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}, - {VPU_MYRIAD_CONFIG_KEY(PLATFORM), "0"}}, + {VPU_MYRIAD_CONFIG_KEY(PLATFORM), "0"}}, {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}, - {VPU_MYRIAD_CONFIG_KEY(PLATFORM), "1"}} + {VPU_MYRIAD_CONFIG_KEY(PLATFORM), "1"}} }; INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigTests, @@ -122,7 +120,7 @@ namespace { const std::vector> multiInconf = { {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_MYRIAD}, - {"some_nonexistent_key", "some_unknown_value"}} + {"some_nonexistent_key", "some_unknown_value"}} }; diff --git 
a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/exec_graph_info.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/exec_graph_info.cpp index 92d56de6b4b..b2f89bac2a8 100644 --- a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/exec_graph_info.cpp +++ b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/exec_graph_info.cpp @@ -2,12 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "multi-device/multi_device_config.hpp" - #include "behavior/exec_graph_info.hpp" -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP16 @@ -23,4 +19,4 @@ namespace { ::testing::Values(CommonTestUtils::DEVICE_MYRIAD), ::testing::ValuesIn(configs)), ExecGraphTests::getTestCaseName); -} // namespace +} // namespace \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request.cpp new file mode 100644 index 00000000000..ef44a9de56c --- /dev/null +++ b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/infer_request.hpp" +#include "ie_plugin_config.hpp" +namespace { +const std::vector netPrecisions = { + InferenceEngine::Precision::FP16 +}; + +const std::vector> configs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}} +}; + +INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MYRIAD), + ::testing::Values(std::map({}))), + InferRequestTests::getTestCaseName); + 
+INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(configs)), + InferRequestTests::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request_callback.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request_callback.cpp index aa671c7101e..7eca4ff5bb9 100644 --- a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request_callback.cpp +++ b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request_callback.cpp @@ -6,8 +6,6 @@ #include "behavior/infer_request_callback.hpp" -using namespace LayerTestsDefinitions; - namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP16 diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request_config.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request_config.cpp new file mode 100644 index 00000000000..b72a3247fe8 --- /dev/null +++ b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request_config.cpp @@ -0,0 +1,78 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "multi-device/multi_device_config.hpp" +#include "vpu/vpu_plugin_config.hpp" +#include "behavior/infer_request_config.hpp" + +namespace { + const std::vector netPrecisions = { + InferenceEngine::Precision::FP16 + }; + + const std::vector> configs = { + {} + }; + + const std::vector> multiConfigs = { + {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_MYRIAD}} + }; + + const std::vector> Inconfigs = { + {}, + {{VPU_CONFIG_KEY(IGNORE_IR_STATISTIC), CONFIG_VALUE(YES)}}, + {{VPU_CONFIG_KEY(IGNORE_IR_STATISTIC), 
CONFIG_VALUE(NO)}}, + + {{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(YES)}}, + {{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO)}}, + + {{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_NONE)}}, + {{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_ERROR)}}, + {{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_WARNING)}}, + {{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_INFO)}}, + {{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}}, + {{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_TRACE)}}, + + {{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}}, + {{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)}}, + + {{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(YES)}}, + {{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(NO)}} + }; + + const std::vector> InmultiConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}, + {CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}, + {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MYRIAD), + ::testing::ValuesIn(configs)), + InferConfigTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferConfigTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferConfigTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigInTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MYRIAD), + ::testing::ValuesIn(Inconfigs)), + InferConfigInTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferConfigInTests, + ::testing::Combine( + 
::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(InmultiConfigs)), + InferConfigInTests::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request_input.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request_input.cpp new file mode 100644 index 00000000000..5296373ab84 --- /dev/null +++ b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request_input.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "multi-device/multi_device_config.hpp" + +#include "behavior/infer_request_input.hpp" + +namespace { + const std::vector netPrecisions = { + InferenceEngine::Precision::FP16, + InferenceEngine::Precision::U8 + }; + + const std::vector> configs = { + {} + }; + + const std::vector> multiConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MYRIAD), + ::testing::ValuesIn(configs)), + InferRequestInputTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestInputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferRequestInputTests::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request_output.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request_output.cpp new file mode 100644 index 00000000000..5adc4ba991b --- /dev/null +++ 
b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/infer_request_output.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "multi-device/multi_device_config.hpp" + +#include "behavior/infer_request_output.hpp" + +namespace { + const std::vector netPrecisions = { + InferenceEngine::Precision::FP16, + InferenceEngine::Precision::U8 + }; + + const std::vector> configs = { + {} + }; + + const std::vector> multiConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}} + }; + + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MYRIAD), + ::testing::ValuesIn(configs)), + InferRequestOutputTests::getTestCaseName); + + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, InferRequestOutputTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferRequestOutputTests::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/set_preprocess.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/set_preprocess.cpp index 7df275ae6a5..ec1d26dc816 100644 --- a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/set_preprocess.cpp +++ b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/set_preprocess.cpp @@ -3,12 +3,11 @@ // #include "multi-device/multi_device_config.hpp" - +#include #include "behavior/set_preprocess.hpp" -using namespace LayerTestsDefinitions; - namespace { + using PreprocessBehTest = BehaviorTestsUtils::BehaviorTestsBasic; const std::vector netPrecisions = { InferenceEngine::Precision::FP16 }; @@ -21,17 +20,17 @@ 
namespace { {{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_MYRIAD}} }; - INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreProcessTests, + INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_MYRIAD), ::testing::ValuesIn(configs)), - PreProcessTests::getTestCaseName); + PreprocessTest::getTestCaseName); - INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreProcessTests, + INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreprocessTest, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_MULTI), ::testing::ValuesIn(multiConfigs)), - PreProcessTests::getTestCaseName); + PreprocessTest::getTestCaseName); } // namespace \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/config.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/config.hpp index f0c980033cb..e4bc36c67a3 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/config.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/config.hpp @@ -18,52 +18,171 @@ #include #include #include +#include +#include "ie_common.h" +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "functional_test_utils/blob_utils.hpp" +#include +#include +#include "ngraph_functions/pass/convert_prc.hpp" +#include "ngraph_functions/subgraph_builders.hpp" -namespace LayerTestsDefinitions { -typedef std::tuple< - InferenceEngine::Precision, // Network precision - std::string, // Device name - std::map // Config -> ConfigParams; +using CorrectConfigTests = BehaviorTestsUtils::BehaviorTestsBasic; -class CorrectConfigTests : public testing::WithParamInterface, - public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(testing::TestParamInfo 
obj); +// Setting empty config doesn't throw +TEST_P(CorrectConfigTests, SetEmptyConfig) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + std::map config; + ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); + ASSERT_NO_THROW(ie->SetConfig(config, targetDevice)); +} -protected: - void SetUp() override; - void TearDown() override; -}; +// Setting correct config doesn't throw +TEST_P(CorrectConfigTests, SetCorrectConfig) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); + ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice)); +} -class IncorrectConfigTests : public testing::WithParamInterface, - public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(testing::TestParamInfo obj); +using IncorrectConfigTests = BehaviorTestsUtils::BehaviorTestsBasic; -protected: - void SetUp() override; - void TearDown() override; -}; +TEST_P(IncorrectConfigTests, SetConfigWithIncorrectKey) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos && + targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) { + ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); + ASSERT_THROW(ie->SetConfig(configuration, targetDevice), + InferenceEngine::details::InferenceEngineException); + } else { + ASSERT_NO_THROW(ie->GetMetric(targetDevice, 
METRIC_KEY(SUPPORTED_CONFIG_KEYS))); + ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice)); + } +} -class CorrectConfigAPITests : public testing::WithParamInterface, - public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(testing::TestParamInfo obj); +TEST_P(IncorrectConfigTests, canNotLoadNetworkWithIncorrectConfig) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + ASSERT_THROW(auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration), + InferenceEngine::details::InferenceEngineException); +} -protected: - void SetUp() override; - void TearDown() override; -}; +using IncorrectConfigAPITests = BehaviorTestsUtils::BehaviorTestsBasic; -class IncorrectConfigAPITests : public testing::WithParamInterface, - public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(testing::TestParamInfo obj); +TEST_P(IncorrectConfigAPITests, SetConfigWithNoExistingKey) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); + if (targetDevice.find(CommonTestUtils::DEVICE_GNA) != std::string::npos) { + ASSERT_THROW(ie->SetConfig(configuration, targetDevice), InferenceEngine::NotFound); + } else { + try { + ie->SetConfig(configuration, targetDevice); + } catch (InferenceEngine::details::InferenceEngineException ex) {} + } +} -protected: - void SetUp() override; - void TearDown() override; -}; +using CorrectConfigAPITests = BehaviorTestsUtils::BehaviorTestsBasic; -} // namespace LayerTestsDefinitions +TEST_P(CorrectConfigAPITests, canSetExclusiveAsyncRequests) { + // Skip test according to plugin specific 
disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load config + std::map config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(YES)}}; + config.insert(configuration.begin(), configuration.end()); + if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos && + targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) { + ASSERT_NO_THROW(ie->SetConfig(config, targetDevice)); + } + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, config); + execNet.CreateInferRequest(); + + if ((targetDevice == CommonTestUtils::DEVICE_HDDL) || (targetDevice == CommonTestUtils::DEVICE_GNA)) { + ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + } else if ((targetDevice == CommonTestUtils::DEVICE_FPGA) || + (targetDevice == CommonTestUtils::DEVICE_KEEMBAY) || (targetDevice == CommonTestUtils::DEVICE_MYRIAD)) { + ASSERT_EQ(2u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + } else if (targetDevice == CommonTestUtils::DEVICE_MULTI) { + } else { + ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + } +} + +TEST_P(CorrectConfigAPITests, withoutExclusiveAsyncRequests) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load config + std::map config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(NO)}}; + config.insert(configuration.begin(), configuration.end()); + if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos && + targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) { + ASSERT_NO_THROW(ie->SetConfig(config, targetDevice)); + } + // Load CNNNetwork to target plugins + auto execNet = 
ie->LoadNetwork(cnnNet, targetDevice, config); + execNet.CreateInferRequest(); + + if ((targetDevice == CommonTestUtils::DEVICE_FPGA) || (targetDevice == CommonTestUtils::DEVICE_MYRIAD) || + (targetDevice == CommonTestUtils::DEVICE_KEEMBAY)) { + ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + } else if (targetDevice == CommonTestUtils::DEVICE_MULTI) { + } else { + ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + } +} + +TEST_P(CorrectConfigAPITests, reusableCPUStreamsExecutor) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber()); + + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + { + // Load config + std::map config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(NO)}}; + config.insert(configuration.begin(), configuration.end()); + if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos && + targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) { + ASSERT_NO_THROW(ie->SetConfig(config, targetDevice)); + } + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, config); + execNet.CreateInferRequest(); + + if ((targetDevice == CommonTestUtils::DEVICE_FPGA) || (targetDevice == CommonTestUtils::DEVICE_MYRIAD) || + (targetDevice == CommonTestUtils::DEVICE_KEEMBAY)) { + ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber()); + } else if (targetDevice == CommonTestUtils::DEVICE_MULTI) { + } else { + ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + 
ASSERT_GE(2u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber()); + } + } + if (targetDevice == CommonTestUtils::DEVICE_CPU) { + ASSERT_NE(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber()); + ASSERT_NO_THROW(ie->UnregisterPlugin("CPU")); + ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber()); + } +} \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/exec_graph_info.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/exec_graph_info.hpp index 87c8093f393..efcd5593b0e 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/exec_graph_info.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/exec_graph_info.hpp @@ -2,33 +2,173 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include -#include -#include #include #include "ie_extension.h" #include #include "functional_test_utils/layer_test_utils.hpp" #include "ngraph_functions/utils/ngraph_helpers.hpp" #include "ngraph_functions/builders.hpp" +#include +#include +#include +#include +#include
+#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "functional_test_utils/blob_utils.hpp" -namespace LayerTestsDefinitions { - typedef std::tuple< - InferenceEngine::Precision, // Network precision - std::string, // Device name - std::map // Config - > ExecGraphParams; +using ExecGraphTests = BehaviorTestsUtils::BehaviorTestsBasic; -class ExecGraphTests : public testing::WithParamInterface, - public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(testing::TestParamInfo obj); +inline std::vector separateStrToVec(std::string str, const char sep) { + std::vector result; -protected: - void SetUp() override; - void TearDown() override; -}; + std::istringstream stream(str); + std::string strVal; -} // namespace LayerTestsDefinitions + while (getline(stream, strVal, sep)) { + result.push_back(strVal); + } + return result; +} + +TEST_P(ExecGraphTests, CheckExecGraphInfoBeforeExecution) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + InferenceEngine::CNNNetwork execGraph; + if (targetDevice == CommonTestUtils::DEVICE_CPU || targetDevice == CommonTestUtils::DEVICE_GPU) { + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice); + ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo()); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + // Store all the original layers from the network + const auto originalLayers = function->get_ops(); + std::map originalLayersMap; + for (const auto &layer : originalLayers) { + if (layer->description() == "Result") + continue; + originalLayersMap[layer->get_friendly_name()] = 0; + } + int IteratorForLayersConstant = 0; + // Store all the layers from the executable graph 
information represented as CNNNetwork + const std::vector execGraphLayers = + InferenceEngine::details::CNNNetSortTopologically(execGraph); + for (const auto &execLayer : execGraphLayers) { + IE_SUPPRESS_DEPRECATED_START + // Each layer from the execGraphInfo network must have PM data option set + ASSERT_EQ("not_executed", execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER]); + // Parse origin layer names (fused/merged layers) from the executable graph + // and compare with layers from the original model + auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES]; + if (origFromExecLayer == "") + IteratorForLayersConstant++; + std::vector origFromExecLayerSep = separateStrToVec(origFromExecLayer, ','); + std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) { + auto origLayer = originalLayersMap.find(layer); + ASSERT_NE(originalLayersMap.end(), origLayer) << layer; + origLayer->second++; + }); + IE_SUPPRESS_DEPRECATED_END + } + // All layers from the original IR must be present with in ExecGraphInfo + for (auto &layer : originalLayersMap) { + if ((layer.second == 0) && (IteratorForLayersConstant > 0)) { + IteratorForLayersConstant--; + continue; + } + ASSERT_GE(layer.second, 0); + } + } else { + ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice).GetExecGraphInfo(), + InferenceEngine::details::InferenceEngineException); + } +} + +TEST_P(ExecGraphTests, CheckExecGraphInfoAfterExecution) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + InferenceEngine::CNNNetwork execGraph; + if (targetDevice == CommonTestUtils::DEVICE_CPU || targetDevice == CommonTestUtils::DEVICE_GPU) { + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice); + ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo()); + 
// Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + // Store all the original layers from the network + const auto originalLayers = function->get_ops(); + std::map originalLayersMap; + for (const auto &layer : originalLayers) { + originalLayersMap[layer->get_friendly_name()] = 0; + } + int IteratorForLayersConstant = 0; + // Store all the layers from the executable graph information represented as CNNNetwork + const std::vector execGraphLayers = + InferenceEngine::details::CNNNetSortTopologically(execGraph); + bool has_layer_with_valid_time = false; + for (const auto &execLayer : execGraphLayers) { + IE_SUPPRESS_DEPRECATED_START + // At least one layer in the topology should be executed and have valid perf counter value + try { + float x = static_cast(std::atof( + execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER].c_str())); + ASSERT_GE(x, 0.0f); + has_layer_with_valid_time = true; + } catch (std::exception &) {} + + // Parse origin layer names (fused/merged layers) from the executable graph + // and compare with layers from the original model + auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES]; + std::vector origFromExecLayerSep = separateStrToVec(origFromExecLayer, ','); + if (origFromExecLayer == "") + IteratorForLayersConstant++; + std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) { + auto origLayer = originalLayersMap.find(layer); + ASSERT_NE(originalLayersMap.end(), origLayer) << layer; + origLayer->second++; + }); + IE_SUPPRESS_DEPRECATED_END + } + ASSERT_TRUE(has_layer_with_valid_time); + + // All layers from the original IR must be present within ExecGraphInfo + for (auto &layer : originalLayersMap) { + if ((layer.second == 0) && (IteratorForLayersConstant > 0)) { + IteratorForLayersConstant--; + continue; + } + ASSERT_GE(layer.second, 0); + } + } else { + ASSERT_THROW(ie->LoadNetwork(cnnNet, 
targetDevice).GetExecGraphInfo(), + InferenceEngine::details::InferenceEngineException); + } +} + +TEST_P(ExecGraphTests, CheckExecGraphInfoSerialization) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + InferenceEngine::CNNNetwork execGraph; + if (targetDevice == CommonTestUtils::DEVICE_CPU || targetDevice == CommonTestUtils::DEVICE_GPU) { + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice); + ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo()); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + execGraph.serialize("exeNetwork.xml", "exeNetwork.bin"); + ASSERT_EQ(0, std::remove("exeNetwork.xml")); + } else { + ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice).GetExecGraphInfo(), + InferenceEngine::details::InferenceEngineException); + } +} \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request.hpp new file mode 100644 index 00000000000..338e7434c7f --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request.hpp @@ -0,0 +1,624 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include "ie_extension.h" +#include +#include "functional_test_utils/layer_test_utils.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ngraph_functions/builders.hpp" +#include "multi-device/multi_device_config.hpp" +#include +#include +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "functional_test_utils/blob_utils.hpp" 
+#include "ngraph_functions/subgraph_builders.hpp" + +using InferRequestTests = BehaviorTestsUtils::BehaviorTestsBasic; + +// Setting empty config to LoadNetwork doesn't throw +TEST_P(InferRequestTests, SetEmptyConfig) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + InferenceEngine::IExecutableNetwork::Ptr execNet; + std::map config {}; + if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos || + targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) { + ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice)); + ASSERT_NO_THROW(execNet = ie->LoadNetwork(cnnNet, targetDevice, config)); + } else { + ASSERT_THROW(ie->SetConfig(configuration, targetDevice), + InferenceEngine::details::InferenceEngineException); + ASSERT_NO_THROW(execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration)); + } +} + +// Load correct network to Plugin to get executable network +TEST_P(InferRequestTests, canLoadCorrectNetworkToGetExecutable) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + InferenceEngine::IExecutableNetwork::Ptr execNet; + ASSERT_NO_THROW(execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration)); +} + +TEST_P(InferRequestTests, CanCreateTwoExeNetworks) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + InferenceEngine::IExecutableNetwork::Ptr execNet; + for (auto i = 0; i < 2; i++) { + ASSERT_NO_THROW(execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration)); + } +} + +TEST_P(InferRequestTests, 
CanCreateInferRequest) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); +} + +TEST_P(InferRequestTests, failToSetNullptrForInput) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr inputBlob = nullptr; + ASSERT_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob), + InferenceEngine::details::InferenceEngineException); +} + +TEST_P(InferRequestTests, failToSetEmptyInputBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr blob; + ASSERT_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob), + InferenceEngine::details::InferenceEngineException); +} + +TEST_P(InferRequestTests, failToSetEmptyOutputBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from 
ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr blob; + ASSERT_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob), + InferenceEngine::details::InferenceEngineException); +} + +TEST_P(InferRequestTests, failToSetNotAllocatedInput) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob)); +} + +TEST_P(InferRequestTests, failToSetNotAllocatedOutput) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob)); +} + +TEST_P(InferRequestTests, failToSetBlobWithIncorrectName) { + // Skip test according to plugin specific disabledTestPatterns() (if 
any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + const char incorrect_input_name[] = "incorrect_input_name"; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + blob->allocate(); + ASSERT_THROW(req.SetBlob(incorrect_input_name, blob), + InferenceEngine::details::InferenceEngineException); +} + +TEST_P(InferRequestTests, failToSetInputWithIncorrectSizes) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + blob->allocate(); + blob->getTensorDesc().getDims()[0]*=2; + ASSERT_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob), + InferenceEngine::details::InferenceEngineException); +} + +TEST_P(InferRequestTests, failToSetOutputWithIncorrectSizes) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = 
execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + blob->allocate(); + blob->getTensorDesc().getDims()[0]*=2; + ASSERT_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob), + InferenceEngine::details::InferenceEngineException); +} + +TEST_P(InferRequestTests, canInferWithoutSetAndGetInOut) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(req.Infer()); +} + +TEST_P(InferRequestTests, canProcessDeallocatedInputBlobAfterGetBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(req.Infer()); +} + +TEST_P(InferRequestTests, canProcessDeallocatedInputBlobAfterGetBlobForAsync) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest 
req; + InferenceEngine::Blob::Ptr blob; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(req.Infer()); + ASSERT_NO_THROW(req.StartAsync()); +} + +TEST_P(InferRequestTests, canProcessDeallocatedInputBlobAfterGetAndSetBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob)); + ASSERT_NO_THROW(req.Infer()); + ASSERT_NO_THROW(req.StartAsync()); +} + +TEST_P(InferRequestTests, canProcessDeallocatedInputBlobAfterSetBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + blob->allocate(); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob)); + blob->deallocate(); + ASSERT_THROW(req.Infer(), InferenceEngine::details::InferenceEngineException); +} + +TEST_P(InferRequestTests, canProcessDeallocatedOutputBlobAfterGetBlob) { + // Skip test according to plugin specific 
disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + blob->allocate(); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob)); + blob->deallocate(); + ASSERT_THROW(req.Infer(), InferenceEngine::details::InferenceEngineException); +} + +TEST_P(InferRequestTests, canProcessDeallocatedOutputBlobAfterGetBlobForAsync) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + blob->allocate(); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob)); + blob->deallocate(); + ASSERT_THROW(req.Infer(), InferenceEngine::details::InferenceEngineException); + ASSERT_THROW(req.StartAsync(), InferenceEngine::details::InferenceEngineException); +} + +TEST_P(InferRequestTests, canProcessDeallocatedOutputBlobAfterGetAndSetBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto 
execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + blob->allocate(); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob)); + blob->deallocate(); + ASSERT_THROW(req.Infer(), InferenceEngine::details::InferenceEngineException); +} + +TEST_P(InferRequestTests, canProcessDeallocatedOutputBlobAfterSetBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + blob->allocate(); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob)); + blob->deallocate(); + ASSERT_THROW(req.Infer(), InferenceEngine::details::InferenceEngineException); +} + +TEST_P(InferRequestTests, secondCallGetOutputDoNotReAllocateData) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob1; + InferenceEngine::Blob::Ptr blob2; + ASSERT_NO_THROW(req = 
execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob1 = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(blob2 = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_EQ(blob1.get(), blob2.get()); +} + +TEST_P(InferRequestTests, CorrectOneAsyncInferWithGetInOutWithInfWait) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + req.Infer(); + req.StartAsync(); + InferenceEngine::StatusCode sts; + sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); + ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); +} + +// Plugin correct infer request with allocating input and result BlobMaps inside plugin +TEST_P(InferRequestTests, canStartAsyncInferWithGetInOutWithStatusOnlyWait) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob = 
req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + req.Infer(); + req.StartAsync(); + InferenceEngine::StatusCode sts; + sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); + ASSERT_TRUE(sts == InferenceEngine::StatusCode::OK || + InferenceEngine::StatusCode::RESULT_NOT_READY); +} + +// Plugin correct infer request with allocating input and result BlobMaps inside plugin +TEST_P(InferRequestTests, FailedAsyncInferWithNegativeTimeForWait) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + req.Infer(); + req.StartAsync(); + ASSERT_THROW(req.Wait(-2), InferenceEngine::details::InferenceEngineException); +} + +TEST_P(InferRequestTests, canRun3SyncRequestsConsistentlyFromThreads) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + auto req1 = execNet.CreateInferRequest(); + auto req2 = execNet.CreateInferRequest(); + auto req3 = execNet.CreateInferRequest(); + InferenceEngine::ResponseDesc response1, response2, response3; + InferenceEngine::StatusCode sts1, sts2, sts3; + + std::thread t1([&] { req1.Infer(); sts1 = 
req1.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); }); + std::thread t2([&] { req2.Infer(); sts2 = req2.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); }); + std::thread t3([&] { req3.Infer(); sts3 = req3.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); }); + + t1.join(); + t2.join(); + t3.join(); + + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), sts1) << response1.msg; + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), sts2) << response2.msg; + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), sts3) << response3.msg; +} + +TEST_P(InferRequestTests, canRun3AsyncRequestsConsistentlyWithWait) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + auto req1 = execNet.CreateInferRequest(); + auto req2 = execNet.CreateInferRequest(); + auto req3 = execNet.CreateInferRequest(); + InferenceEngine::ResponseDesc response1, response2, response3; + InferenceEngine::StatusCode sts1, sts2, sts3; + + req1.StartAsync(); + ASSERT_NO_THROW(req1.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY)); + + req2.Infer(); + ASSERT_NO_THROW(req2.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY)); + + req3.Infer(); + ASSERT_NO_THROW(req3.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY)); +} + +TEST_P(InferRequestTests, canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest 
+ auto req1 = execNet.CreateInferRequest(); + auto req2 = execNet.CreateInferRequest(); + auto req3 = execNet.CreateInferRequest(); + InferenceEngine::ResponseDesc response1, response2, response3; + InferenceEngine::StatusCode sts1, sts2, sts3; + + req1.Infer(); + req2.Infer(); + req3.Infer(); + + std::thread t1([&] { req1.StartAsync(); sts1 = req1.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); }); + std::thread t2([&] { req2.StartAsync(); sts2 = req2.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); }); + std::thread t3([&] { req3.StartAsync(); sts3 = req3.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); }); + + t1.join(); + t2.join(); + t3.join(); + + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), sts1) << response1.msg; + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), sts2) << response2.msg; + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), sts3) << response3.msg; +} + +TEST_P(InferRequestTests, canWaitWithotStartAsync) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + auto req = execNet.CreateInferRequest(); + ASSERT_NO_THROW(req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY)); + ASSERT_NO_THROW(req.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY)); + ASSERT_NO_THROW(req.Wait(1)); +} + +TEST_P(InferRequestTests, returnDeviceBusyOnSetBlobAfterAsyncInfer) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + auto&& config = configuration; + auto itConfig = config.find(CONFIG_KEY(CPU_THROUGHPUT_STREAMS)); + if (itConfig != config.end()) { + if (itConfig->second != "CPU_THROUGHPUT_AUTO") { + if (std::stoi(itConfig->second) 
== 0) { + GTEST_SKIP() << "Not applicable with disabled streams"; + } + } + } + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + auto req = execNet.CreateInferRequest(); + auto outputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first); + InferenceEngine::ResponseDesc response; + + InferenceEngine::StatusCode sts; + sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); + ASSERT_EQ(InferenceEngine::StatusCode::INFER_NOT_STARTED, sts) << response.msg; + req.StartAsync(); + sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), sts) << response.msg; + try { + req.SetBlob(cnnNet.getInputsInfo().begin()->first, outputBlob); + } + catch (const std::exception &e) { + std::cout << "Exception: " << e.what() << std::endl; + } + sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); + ASSERT_TRUE(sts == InferenceEngine::StatusCode::OK || + sts == InferenceEngine::StatusCode::RESULT_NOT_READY) << response.msg; +} + +TEST_P(InferRequestTests, returnDeviceBusyOnGetBlobAfterAsyncInfer) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + auto req = execNet.CreateInferRequest(); + auto outputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first); + InferenceEngine::ResponseDesc response; + InferenceEngine::StatusCode sts; + req.StartAsync(); + sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), sts) << response.msg; + try { + 
req.SetBlob(cnnNet.getInputsInfo().begin()->first, outputBlob); + } + catch (const std::exception &e) { + std::cout << "Exception" << e.what() << std::endl; + } +} + +TEST_P(InferRequestTests, returnDeviceBusyOnGetPerformanceCountAfterAsyncInfer) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + auto req = execNet.CreateInferRequest(); + auto outputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first); + InferenceEngine::ResponseDesc response; + InferenceEngine::StatusCode sts; + req.StartAsync(); + sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), sts) << response.msg; + + std::map perfMap; + + try { + perfMap = req.GetPerformanceCounts(); + } + catch (const std::exception &e) { + std::cout << "Exception" << e.what() << std::endl; + } +} \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_callback.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_callback.hpp index 6765761db7c..fe99e1afaa5 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_callback.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_callback.hpp @@ -13,23 +13,285 @@ #include "functional_test_utils/layer_test_utils.hpp" #include "ngraph_functions/utils/ngraph_helpers.hpp" #include "ngraph_functions/builders.hpp" +#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "functional_test_utils/blob_utils.hpp" +#include "ngraph_functions/pass/convert_prc.hpp" +#include 
"ngraph_functions/subgraph_builders.hpp" +#include "behavior/infer_request_callback.hpp" -namespace LayerTestsDefinitions { -typedef std::tuple< - InferenceEngine::Precision, // Network precision - std::string, // Device name - std::map // Config -> CallbackParams; +using CallbackTests = BehaviorTestsUtils::BehaviorTestsBasic; -class CallbackTests : public testing::WithParamInterface, - public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(testing::TestParamInfo obj); +TEST_P(CallbackTests, canCallSyncAndAsyncWithCompletionCallback) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req = execNet.CreateInferRequest(); + bool isCalled = false; + req.SetCompletionCallback>( + [&](InferenceEngine::InferRequest request, InferenceEngine::StatusCode status) { + // HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE + if (targetDevice != CommonTestUtils::DEVICE_HDDL) { + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), status); + } + isCalled = true; + }); -protected: - void SetUp() override; - void TearDown() override; - void canStartSeveralAsyncInsideCompletionCallbackNoSafeDtor(int iterNum); -}; + req.StartAsync(); + InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); -} // namespace LayerTestsDefinitions + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), waitStatus); + ASSERT_TRUE(isCalled); +} + +// test that can wait all callbacks on dtor +TEST_P(CallbackTests, canStartAsyncInsideCompletionCallback) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + 
struct TestUserData { + bool startAsyncOK = false; + int numIsCalled = 0; + }; + TestUserData data; + + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req = execNet.CreateInferRequest(); + + req.SetCompletionCallback>( + [&](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) { + // HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE + if (targetDevice != CommonTestUtils::DEVICE_HDDL) { + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), status); + } + data.numIsCalled++; + // WA for deadlock + request->SetCompletionCallback(nullptr); + InferenceEngine::StatusCode sts = request->StartAsync(nullptr); + if (sts == InferenceEngine::StatusCode::OK) { + data.startAsyncOK = true; + } + }); + + req.StartAsync(); + InferenceEngine::ResponseDesc responseWait; + InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); + + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), waitStatus) << responseWait.msg; + ASSERT_EQ(1, data.numIsCalled); + ASSERT_TRUE(data.startAsyncOK); +} + +// test that can wait all callbacks on dtor +TEST_P(CallbackTests, canStartSeveralAsyncInsideCompletionCallbackWithSafeDtor) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + const int NUM_ITER = 10; + struct TestUserData { + int numIter = NUM_ITER; + bool startAsyncOK = true; + std::atomic numIsCalled{0}; + std::mutex mutex_block_emulation; + std::condition_variable cv_block_emulation; + bool isBlocked = true; + }; + TestUserData data; + + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, 
targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req = execNet.CreateInferRequest(); + req.SetCompletionCallback>( + [&](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) { + // HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE + if (targetDevice != CommonTestUtils::DEVICE_HDDL) { + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), status); + } + if (--data.numIter) { + InferenceEngine::StatusCode sts = request->StartAsync(nullptr); + if (sts != InferenceEngine::StatusCode::OK) { + data.startAsyncOK = false; + } + } + data.numIsCalled++; + if (!data.numIter) { + data.isBlocked = false; + data.cv_block_emulation.notify_all(); + } + }); + + req.StartAsync(); + InferenceEngine::ResponseDesc responseWait; + InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); + // intentionally block until notification from callback + std::unique_lock lock(data.mutex_block_emulation); + data.cv_block_emulation.wait(lock, [&]() { return !data.isBlocked; }); + + ASSERT_EQ((int) InferenceEngine::StatusCode::OK, waitStatus) << responseWait.msg; + ASSERT_EQ(NUM_ITER, data.numIsCalled); + ASSERT_TRUE(data.startAsyncOK); +} + +TEST_P(CallbackTests, inferDoesNotCallCompletionCallback) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req = execNet.CreateInferRequest(); + bool isCalled = false; + req.SetCompletionCallback>( + [&](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) { + isCalled = true; + }); + req.Infer(); + ASSERT_FALSE(isCalled); +} + +TEST_P(CallbackTests, 
canStartAsyncInsideCompletionCallbackNoSafeDtor) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + struct TestUserData { + int numIter = 0; + bool startAsyncOK = true; + bool getDataOK = true; + std::atomic numIsCalled{0}; + std::mutex mutex_block_emulation; + std::condition_variable cv_block_emulation; + bool isBlocked = true; + + TestUserData(int i) : numIter(i) {} + }; + TestUserData data(1); + + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req = execNet.CreateInferRequest(); + + req.SetCompletionCallback>( + [&](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) { + // WA for deadlock + if (!--data.numIter) { + request->SetCompletionCallback(nullptr); + } + InferenceEngine::StatusCode sts = request->StartAsync(nullptr); + if (sts != InferenceEngine::StatusCode::OK) { + data.startAsyncOK = false; + } + data.numIsCalled++; + if (!data.numIter) { + data.isBlocked = false; + data.cv_block_emulation.notify_one(); + } + }); + req.StartAsync(); + InferenceEngine::ResponseDesc responseWait; + InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); + // intentionally block until notification from callback + std::unique_lock lock(data.mutex_block_emulation); + data.cv_block_emulation.wait(lock, [&]() { return !data.isBlocked; }); + + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), waitStatus); + + ASSERT_EQ(1, data.numIsCalled); + ASSERT_TRUE(data.startAsyncOK); + ASSERT_TRUE(data.getDataOK); +} + +TEST_P(CallbackTests, canStartSeveralAsyncInsideCompletionCallbackNoSafeDtor) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + struct TestUserData { + int numIter = 0; + bool 
startAsyncOK = true; + bool getDataOK = true; + std::atomic numIsCalled{0}; + std::mutex mutex_block_emulation; + std::condition_variable cv_block_emulation; + bool isBlocked = true; + + TestUserData(int i) : numIter(i) {} + }; + TestUserData data(10); + + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req = execNet.CreateInferRequest(); + + req.SetCompletionCallback>( + [&](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) { + // WA for deadlock + if (!--data.numIter) { + request->SetCompletionCallback(nullptr); + } + InferenceEngine::StatusCode sts = request->StartAsync(nullptr); + if (sts != InferenceEngine::StatusCode::OK) { + data.startAsyncOK = false; + } + data.numIsCalled++; + if (!data.numIter) { + data.isBlocked = false; + data.cv_block_emulation.notify_one(); + } + }); + req.StartAsync(); + InferenceEngine::ResponseDesc responseWait; + InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); + // intentionally block until notification from callback + std::unique_lock lock(data.mutex_block_emulation); + data.cv_block_emulation.wait(lock, [&]() { return !data.isBlocked; }); + + ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), waitStatus); + + ASSERT_EQ(10, data.numIsCalled); + ASSERT_TRUE(data.startAsyncOK); + ASSERT_TRUE(data.getDataOK); +} + +TEST_P(CallbackTests, returnGeneralErrorIfCallbackThrowException) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins 
+ auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::IInferRequest::Ptr req = static_cast(execNet.CreateInferRequest()); + req->SetCompletionCallback( + [](InferenceEngine::IInferRequest::Ptr, InferenceEngine::StatusCode status) { + THROW_IE_EXCEPTION << "returnGeneralErrorIfCallbackThrowException"; + }); + + InferenceEngine::ResponseDesc resp; + req->StartAsync(&resp); + InferenceEngine::StatusCode waitStatus = InferenceEngine::StatusCode::INFER_NOT_STARTED; + while (InferenceEngine::StatusCode::RESULT_NOT_READY == waitStatus || + InferenceEngine::StatusCode::INFER_NOT_STARTED == waitStatus) { + waitStatus = req->Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY, &resp); + } + ASSERT_EQ(InferenceEngine::StatusCode::GENERAL_ERROR, waitStatus); + ASSERT_NE(std::string(resp.msg).find("returnGeneralErrorIfCallbackThrowException"), std::string::npos); +} \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_config.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_config.hpp new file mode 100644 index 00000000000..45ef5cda56e --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_config.hpp @@ -0,0 +1,96 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include "ie_extension.h" +#include +#include "functional_test_utils/layer_test_utils.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ngraph_functions/builders.hpp" +#include +#include +#include +#include +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "functional_test_utils/blob_utils.hpp" +#include "ngraph_functions/subgraph_builders.hpp" + +using InferConfigTests = 
BehaviorTestsUtils::BehaviorTestsBasic; + +TEST_P(InferConfigTests, canSetExclusiveAsyncRequests) { + ASSERT_EQ(0ul, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load config + std::map config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(YES)}}; + config.insert(configuration.begin(), configuration.end()); + if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos && + targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) { + ASSERT_NO_THROW(ie->SetConfig(config, targetDevice)); + } + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, config); + execNet.CreateInferRequest(); + + if ((targetDevice == CommonTestUtils::DEVICE_HDDL) || (targetDevice == CommonTestUtils::DEVICE_GNA)) { + ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + } else if ((targetDevice == CommonTestUtils::DEVICE_FPGA) || (targetDevice == CommonTestUtils::DEVICE_MYRIAD) || + (targetDevice == CommonTestUtils::DEVICE_KEEMBAY)) { + ASSERT_EQ(2u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + } else if (targetDevice == CommonTestUtils::DEVICE_MULTI) { + } else { + ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + } +} + +TEST_P(InferConfigTests, withoutExclusiveAsyncRequests) { + ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load config + std::map config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(YES)}}; + 
config.insert(configuration.begin(), configuration.end()); + if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos && + targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) { + ASSERT_NO_THROW(ie->SetConfig(config, targetDevice)); + } + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, config); + execNet.CreateInferRequest(); + + if ((targetDevice == CommonTestUtils::DEVICE_GNA) || (targetDevice == CommonTestUtils::DEVICE_HDDL)) { + ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + } else if (targetDevice == CommonTestUtils::DEVICE_MULTI) { + } else if (targetDevice == CommonTestUtils::DEVICE_MYRIAD) { + ASSERT_EQ(2u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + } else { + ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); + } +} + +using InferConfigInTests = BehaviorTestsUtils::BehaviorTestsBasic; + +TEST_P(InferConfigInTests, CanInferWithConfig) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + auto req = execNet.CreateInferRequest(); + ASSERT_NO_THROW(req.Infer()); +} \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_input.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_input.hpp new file mode 100644 index 00000000000..2c86b8fea9e --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_input.hpp @@ -0,0 +1,139 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include 
"ie_extension.h" +#include +#include "functional_test_utils/layer_test_utils.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ngraph_functions/builders.hpp" +#include "multi-device/multi_device_config.hpp" +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "functional_test_utils/blob_utils.hpp" +#include "ngraph_functions/subgraph_builders.hpp" +#include "behavior/infer_request_input.hpp" + +using InferRequestInputTests = BehaviorTestsUtils::BehaviorTestsBasic; + +TEST_P(InferRequestInputTests, canSetInputBlobForSyncRequest) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr inputBlob = + FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob)); + InferenceEngine::Blob::Ptr actualBlob; + ASSERT_NO_THROW(actualBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_EQ(inputBlob, actualBlob); +} + +TEST_P(InferRequestInputTests, canInferWithSetInOut) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr inputBlob = + 
FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob); + InferenceEngine::Blob::Ptr outputBlob = + FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + req.SetBlob(cnnNet.getInputsInfo().begin()->first, outputBlob); + ASSERT_NO_THROW(req.Infer()); +} + +TEST_P(InferRequestInputTests, canGetInputBlob_deprecatedAPI) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + std::shared_ptr actualBlob; + + ASSERT_NO_THROW(actualBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_TRUE(actualBlob) << "Plugin didn't allocate input blobs"; + ASSERT_FALSE(actualBlob->buffer() == nullptr) << "Plugin didn't allocate input blobs"; + + auto tensorDescription = actualBlob->getTensorDesc(); + auto dims = tensorDescription.getDims(); + ASSERT_TRUE(cnnNet.getInputsInfo().begin()->second->getTensorDesc().getDims() == dims) + << "Input blob dimensions don't match network input"; + + ASSERT_EQ(execNet.GetInputsInfo().begin()->second->getPrecision(), tensorDescription.getPrecision()) + << "Input blob precision don't match network input"; +} + +TEST_P(InferRequestInputTests, getAfterSetInputDoNotChangeInput) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + 
InferenceEngine::InferRequest req = execNet.CreateInferRequest(); + std::shared_ptr inputBlob = FuncTestUtils::createAndFillBlob( + cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob)); + std::shared_ptr actualBlob; + ASSERT_NO_THROW(actualBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_EQ(inputBlob.get(), actualBlob.get()); +} + +TEST_P(InferRequestInputTests, canInferWithGetInOut) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr inputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first); + InferenceEngine::Blob::Ptr outputBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first); + ASSERT_NO_THROW(req.Infer()); +} + +TEST_P(InferRequestInputTests, canStartAsyncInferWithGetInOut) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr inputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first); + InferenceEngine::StatusCode sts; + ASSERT_NO_THROW(req.Infer()); + ASSERT_NO_THROW(req.StartAsync()); + sts = req.Wait(500); + ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); + InferenceEngine::Blob::Ptr outputBlob = 
req.GetBlob(cnnNet.getOutputsInfo().begin()->first); +} \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_output.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_output.hpp new file mode 100644 index 00000000000..6b5310c0f69 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_output.hpp @@ -0,0 +1,139 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include "ie_extension.h" +#include +#include "functional_test_utils/layer_test_utils.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ngraph_functions/builders.hpp" +#include "multi-device/multi_device_config.hpp" +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "functional_test_utils/blob_utils.hpp" +#include "ngraph_functions/subgraph_builders.hpp" +#include "behavior/infer_request_output.hpp" + +using InferRequestOutputTests = BehaviorTestsUtils::BehaviorTestsBasic; + +TEST_P(InferRequestOutputTests, canGetInputBlobForSyncRequest) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr OutputBlob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, OutputBlob)); + InferenceEngine::Blob::Ptr actualBlob; + ASSERT_NO_THROW(actualBlob = 
req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); + ASSERT_EQ(OutputBlob, actualBlob); +} + +TEST_P(InferRequestOutputTests, canInferWithSetInOut) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr inputBlob = + FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob); + InferenceEngine::Blob::Ptr outputBlob = + FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + req.SetBlob(cnnNet.getInputsInfo().begin()->first, outputBlob); + ASSERT_NO_THROW(req.Infer()); +} + +TEST_P(InferRequestOutputTests, canGetOutputBlob_deprecatedAPI) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + std::shared_ptr actualBlob; + + ASSERT_NO_THROW(actualBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); + ASSERT_TRUE(actualBlob) << "Plugin didn't allocate Output blobs"; + ASSERT_FALSE(actualBlob->buffer() == nullptr) << "Plugin didn't allocate Output blobs"; + + auto tensorDescription = actualBlob->getTensorDesc(); + auto dims = tensorDescription.getDims(); + ASSERT_TRUE(cnnNet.getOutputsInfo().begin()->second->getTensorDesc().getDims() == dims) + << 
"Output blob dimensions don't match network Output"; + + ASSERT_EQ(execNet.GetInputsInfo().begin()->second->getPrecision(), tensorDescription.getPrecision()) + << "Output blob precision don't match network Output"; +} + +TEST_P(InferRequestOutputTests, getOutputAfterSetOutputDoNotChangeOutput) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req = execNet.CreateInferRequest(); + std::shared_ptr OutputBlob = FuncTestUtils::createAndFillBlob( + cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, OutputBlob)); + std::shared_ptr actualBlob; + ASSERT_NO_THROW(actualBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); + ASSERT_EQ(OutputBlob.get(), actualBlob.get()); +} + +TEST_P(InferRequestOutputTests, canInferWithGetInOut) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr inputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first); + InferenceEngine::Blob::Ptr outputBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first); + ASSERT_NO_THROW(req.Infer()); +} + +TEST_P(InferRequestOutputTests, canStartAsyncInferWithGetInOut) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create 
CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr inputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first); + InferenceEngine::StatusCode sts; + ASSERT_NO_THROW(req.Infer()); + ASSERT_NO_THROW(req.StartAsync()); + sts = req.Wait(500); + ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); + InferenceEngine::Blob::Ptr outputBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first); +} \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/set_preprocess.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/set_preprocess.hpp index 4c3402d6166..866e31f599d 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/set_preprocess.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/set_preprocess.hpp @@ -2,33 +2,60 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include #include -#include -#include -#include "ie_extension.h" -#include + +#include +#include "common_test_utils/test_assertions.hpp" +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" #include "functional_test_utils/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "functional_test_utils/blob_utils.hpp" +#include "ie_preprocess.hpp" +#include "functional_test_utils/behavior_test_utils.hpp" -namespace LayerTestsDefinitions { - typedef std::tuple< - InferenceEngine::Precision, // Network precision - std::string, // Device name - std::map // Config - > PreProcessParams; +using PreprocessTest = BehaviorTestsUtils::BehaviorTestsBasic; -class PreProcessTests : public 
testing::WithParamInterface, - public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(testing::TestParamInfo obj); +TEST_P(PreprocessTest, SetPreProcessToInputInfo) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); -protected: - void SetUp() override; - void TearDown() override; -}; + auto &preProcess = cnnNet.getInputsInfo().begin()->second->getPreProcess(); + preProcess.setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + auto req = execNet.CreateInferRequest(); + { + InferenceEngine::ConstInputsDataMap inputsMap = execNet.GetInputsInfo(); + const auto &name = inputsMap.begin()->second->name(); + const InferenceEngine::PreProcessInfo *info = &req.GetPreProcess(name.c_str()); + ASSERT_EQ(info->getResizeAlgorithm(), InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR); + ASSERT_PREPROCESS_INFO_EQ(preProcess, *info); + } +} -} // namespace LayerTestsDefinitions \ No newline at end of file +TEST_P(PreprocessTest, SetPreProcessToInferRequest) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + + auto &preProcess = cnnNet.getInputsInfo().begin()->second->getPreProcess(); + preProcess.setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + auto req = execNet.CreateInferRequest(); + InferenceEngine::ConstInputsDataMap inputsMap = execNet.GetInputsInfo(); + const auto &name = inputsMap.begin()->second->name(); + auto inputBlob = 
FuncTestUtils::createAndFillBlob( + cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob); + { + const InferenceEngine::PreProcessInfo *info = &req.GetPreProcess(name.c_str()); + ASSERT_EQ(cnnNet.getInputsInfo().begin()->second->getPreProcess().getResizeAlgorithm(), + info->getResizeAlgorithm()); + } +} diff --git a/inference-engine/tests/functional/plugin/shared/src/behavior/config.cpp b/inference-engine/tests/functional/plugin/shared/src/behavior/config.cpp deleted file mode 100644 index 51c86cc9745..00000000000 --- a/inference-engine/tests/functional/plugin/shared/src/behavior/config.cpp +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include - -#include -#include "ie_common.h" -#include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" -#include "functional_test_utils/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" -#include -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "behavior/config.hpp" - - -namespace LayerTestsDefinitions { -std::string CorrectConfigTests::getTestCaseName(testing::TestParamInfo obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - std::tie(netPrecision, targetDevice, configuration) = obj.param; - std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - if (!configuration.empty()) { - result << "configItem=" << configuration.begin()->first << "_" << configuration.begin()->second; - } - return result.str(); -} - -void CorrectConfigTests::SetUp() { - InferenceEngine::Precision netPrecision; - std::tie(netPrecision, targetDevice, configuration) = this->GetParam(); - function = ngraph::builder::subgraph::makeConvPoolRelu(); -} 
- -void CorrectConfigTests::TearDown() { - if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) { - PluginCache::get().reset(); - } -} - -// Setting empty config doesn't throw -TEST_P(CorrectConfigTests, SetEmptyConfig) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - // Get Core from cache - auto ie = PluginCache::get().ie(); - std::map config; - ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); - ASSERT_NO_THROW(ie->SetConfig(config, targetDevice)); - function.reset(); -} - -// Setting correct config doesn't throw -TEST_P(CorrectConfigTests, SetCorrectConfig) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - // Get Core from cache - auto ie = PluginCache::get().ie(); - ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); - ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice)); - function.reset(); -} - - std::string IncorrectConfigTests::getTestCaseName(testing::TestParamInfo obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - std::tie(netPrecision, targetDevice, configuration) = obj.param; - std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - if (!configuration.empty()) { - for (auto& configItem : configuration) { - result << "configItem=" << configItem.first << "_" << configItem.second << "_"; - } - } - return result.str(); - } - - void IncorrectConfigTests::SetUp() { - InferenceEngine::Precision netPrecision; - std::tie(netPrecision, targetDevice, configuration) = this->GetParam(); - function = 
ngraph::builder::subgraph::makeConvPoolRelu(); - } - - void IncorrectConfigTests::TearDown() { - if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) { - PluginCache::get().reset(); - } - } - -TEST_P(IncorrectConfigTests, SetConfigWithIncorrectKey) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - // Get Core from cache - auto ie = PluginCache::get().ie(); - if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos && - targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) { - ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); - ASSERT_THROW(ie->SetConfig(configuration, targetDevice), - InferenceEngine::details::InferenceEngineException); - } else { - ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); - ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice)); - } - function.reset(); -} - -TEST_P(IncorrectConfigTests, canNotLoadNetworkWithIncorrectConfig) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - // Get Core from cache - auto ie = PluginCache::get().ie(); - ASSERT_THROW(auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration), - InferenceEngine::details::InferenceEngineException); - function.reset(); - } - - std::string IncorrectConfigAPITests::getTestCaseName(testing::TestParamInfo obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - std::tie(netPrecision, targetDevice, configuration) = obj.param; - std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - if (!configuration.empty()) { - result << 
"configItem=" << configuration.begin()->first << "_" << configuration.begin()->second; - } - return result.str(); - } - - void IncorrectConfigAPITests::SetUp() { - InferenceEngine::Precision netPrecision; - std::tie(netPrecision, targetDevice, configuration) = this->GetParam(); - function = ngraph::builder::subgraph::makeConvPoolRelu(); - } - - void IncorrectConfigAPITests::TearDown() { - if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) { - PluginCache::get().reset(); - } - } - -TEST_P(IncorrectConfigAPITests, SetConfigWithNoExistingKey) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - // Get Core from cache - auto ie = PluginCache::get().ie(); - ASSERT_NO_THROW(ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); - if (targetDevice.find(CommonTestUtils::DEVICE_GNA) != std::string::npos) { - ASSERT_THROW(ie->SetConfig(configuration, targetDevice), InferenceEngine::NotFound); - } else { - try { - ie->SetConfig(configuration, targetDevice); - } catch (InferenceEngine::details::InferenceEngineException ex) {} - } - function.reset(); - } - - - std::string CorrectConfigAPITests::getTestCaseName(testing::TestParamInfo obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - std::tie(netPrecision, targetDevice, configuration) = obj.param; - std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - if (!configuration.empty()) { - result << "configItem=" << configuration.begin()->first << "_" << configuration.begin()->second; - } - return result.str(); - } - - void CorrectConfigAPITests::SetUp() { - InferenceEngine::Precision netPrecision; - std::tie(netPrecision, targetDevice, configuration) = this->GetParam(); - function = 
ngraph::builder::subgraph::makeConvPoolRelu(); - } - - void CorrectConfigAPITests::TearDown() { - if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) { - PluginCache::get().reset(); - } - } - -TEST_P(CorrectConfigAPITests, canSetExclusiveAsyncRequests) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - // Get Core from cache - auto ie = PluginCache::get().ie(); - // Load config - std::map config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(YES)}}; - config.insert(configuration.begin(), configuration.end()); - if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos && - targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) { - ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice)); - } - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); - execNet.CreateInferRequest(); - - if ((targetDevice == CommonTestUtils::DEVICE_HDDL) || (targetDevice == CommonTestUtils::DEVICE_GNA) || - (targetDevice == CommonTestUtils::DEVICE_CPU) || (targetDevice == CommonTestUtils::DEVICE_GPU)) { - ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); - } else if ((targetDevice == CommonTestUtils::DEVICE_FPGA) || - (targetDevice == CommonTestUtils::DEVICE_KEEMBAY)) { - ASSERT_EQ(2u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); - } else if (targetDevice == CommonTestUtils::DEVICE_MULTI) { - } else { - ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); - } - - function.reset(); -} - -TEST_P(CorrectConfigAPITests, withoutExclusiveAsyncRequests) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - 
InferenceEngine::CNNNetwork cnnNet(function); - // Get Core from cache - auto ie = PluginCache::get().ie(); - // Load config - std::map config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(NO)}}; - config.insert(configuration.begin(), configuration.end()); - if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos && - targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) { - ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice)); - } - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); - execNet.CreateInferRequest(); - - if ((targetDevice == CommonTestUtils::DEVICE_FPGA) || (targetDevice == CommonTestUtils::DEVICE_MYRIAD) || - (targetDevice == CommonTestUtils::DEVICE_KEEMBAY)) { - ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); - } else if (targetDevice == CommonTestUtils::DEVICE_MULTI) { - } else { - ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); - } - function.reset(); -} - -TEST_P(CorrectConfigAPITests, reusableCPUStreamsExecutor) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - - ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); - ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber()); - - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - // Get Core from cache - auto ie = PluginCache::get().ie(); - { - // Load config - std::map config = {{CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(NO)}}; - config.insert(configuration.begin(), configuration.end()); - if (targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos && - targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) { - ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice)); - } - // Load 
CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); - execNet.CreateInferRequest(); - - if ((targetDevice == CommonTestUtils::DEVICE_FPGA) || (targetDevice == CommonTestUtils::DEVICE_MYRIAD) || - (targetDevice == CommonTestUtils::DEVICE_KEEMBAY)) { - ASSERT_EQ(1u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); - ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber()); - } else if (targetDevice == CommonTestUtils::DEVICE_MULTI) { - } else { - ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); - ASSERT_GE(2u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber()); - } - } - if (targetDevice == CommonTestUtils::DEVICE_CPU) { - ASSERT_NE(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber()); - ASSERT_NO_THROW(ie->UnregisterPlugin("CPU")); - ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getExecutorsNumber()); - ASSERT_EQ(0u, InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber()); - } - function.reset(); -} -} // namespace LayerTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/src/behavior/exec_graph_info.cpp b/inference-engine/tests/functional/plugin/shared/src/behavior/exec_graph_info.cpp deleted file mode 100644 index bfde5ee8254..00000000000 --- a/inference-engine/tests/functional/plugin/shared/src/behavior/exec_graph_info.cpp +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include - -#include -#include
- -#include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" -#include "functional_test_utils/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" -#include "exec_graph_info.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "behavior/exec_graph_info.hpp" - - -namespace LayerTestsDefinitions { - std::string ExecGraphTests::getTestCaseName(testing::TestParamInfo obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - std::tie(netPrecision, targetDevice, configuration) = obj.param; - std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - if (!configuration.empty()) { - result << "configItem=" << configuration.begin()->first << "_" << configuration.begin()->second; - } - return result.str(); - } - - void ExecGraphTests::SetUp() { - InferenceEngine::Precision netPrecision; - std::tie(netPrecision, targetDevice, configuration) = this->GetParam(); - function = ngraph::builder::subgraph::makeConvPoolRelu(); - } - - void ExecGraphTests::TearDown() { - if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) { - PluginCache::get().reset(); - } - } - - inline std::vector separateStrToVec(std::string str, const char sep) { - std::vector result; - - std::istringstream stream(str); - std::string strVal; - - while (getline(stream, strVal, sep)) { - result.push_back(strVal); - } - return result; - } - -TEST_P(ExecGraphTests, CheckExecGraphInfoBeforeExecution) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - InferenceEngine::CNNNetwork execGraph; - // Get Core from cache - auto ie = PluginCache::get().ie(); - if (targetDevice == CommonTestUtils::DEVICE_CPU || 
targetDevice == CommonTestUtils::DEVICE_GPU) { - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice); - ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo()); - // Create InferRequest - InferenceEngine::InferRequest req; - ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - // Store all the original layers from the network - const auto originalLayers = function->get_ops(); - std::map originalLayersMap; - for (const auto &layer : originalLayers) { - if (layer->description() == "Result") - continue; - originalLayersMap[layer->get_friendly_name()] = 0; - } - int IteratorForLayersConstant = 0; - // Store all the layers from the executable graph information represented as CNNNetwork - const std::vector execGraphLayers = - InferenceEngine::details::CNNNetSortTopologically(execGraph); - for (const auto &execLayer : execGraphLayers) { - IE_SUPPRESS_DEPRECATED_START - // Each layer from the execGraphInfo network must have PM data option set - ASSERT_EQ("not_executed", execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER]); - // Parse origin layer names (fused/merged layers) from the executable graph - // and compare with layers from the original model - auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES]; - if (origFromExecLayer == "") - IteratorForLayersConstant++; - std::vector origFromExecLayerSep = separateStrToVec(origFromExecLayer, ','); - std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) { - auto origLayer = originalLayersMap.find(layer); - ASSERT_NE(originalLayersMap.end(), origLayer) << layer; - origLayer->second++; - }); - } - // All layers from the original IR must be present with in ExecGraphInfo - for (auto &layer : originalLayersMap) { - if ((layer.second == 0) && (IteratorForLayersConstant > 0)) { - IteratorForLayersConstant--; - continue; - } - ASSERT_GE(layer.second, 0); - } - } else { - 
ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice).GetExecGraphInfo(), - InferenceEngine::details::InferenceEngineException); - } - IE_SUPPRESS_DEPRECATED_END - function.reset(); -} - -TEST_P(ExecGraphTests, CheckExecGraphInfoAfterExecution) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - InferenceEngine::CNNNetwork execGraph; - // Get Core from cache - auto ie = PluginCache::get().ie(); - if (targetDevice == CommonTestUtils::DEVICE_CPU || targetDevice == CommonTestUtils::DEVICE_GPU) { - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice); - ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo()); - // Create InferRequest - InferenceEngine::InferRequest req; - ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - // Store all the original layers from the network - const auto originalLayers = function->get_ops(); - std::map originalLayersMap; - for (const auto &layer : originalLayers) { - originalLayersMap[layer->get_friendly_name()] = 0; - } - int IteratorForLayersConstant = 0; - // Store all the layers from the executable graph information represented as CNNNetwork - const std::vector execGraphLayers = - InferenceEngine::details::CNNNetSortTopologically(execGraph); - bool has_layer_with_valid_time = false; - for (const auto &execLayer : execGraphLayers) { - IE_SUPPRESS_DEPRECATED_START - // At least one layer in the topology should be executed and have valid perf counter value - try { - float x = static_cast(std::atof( - execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER].c_str())); - ASSERT_GE(x, 0.0f); - has_layer_with_valid_time = true; - } catch (std::exception &) {} - - // Parse origin layer names (fused/merged layers) from the executable graph - // and compare with layers from the original model - auto origFromExecLayer = 
execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES]; - std::vector origFromExecLayerSep = separateStrToVec(origFromExecLayer, ','); - if (origFromExecLayer == "") - IteratorForLayersConstant++; - std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) { - auto origLayer = originalLayersMap.find(layer); - ASSERT_NE(originalLayersMap.end(), origLayer) << layer; - origLayer->second++; - }); - } - ASSERT_TRUE(has_layer_with_valid_time); - - // All layers from the original IR must be present within ExecGraphInfo - for (auto &layer : originalLayersMap) { - if ((layer.second == 0) && (IteratorForLayersConstant > 0)) { - IteratorForLayersConstant--; - continue; - } - ASSERT_GE(layer.second, 0); - } - } else { - ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice).GetExecGraphInfo(), - InferenceEngine::details::InferenceEngineException); - } - IE_SUPPRESS_DEPRECATED_END - function.reset(); -} - -TEST_P(ExecGraphTests, CheckExecGraphInfoSerialization) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - InferenceEngine::CNNNetwork execGraph; - // Get Core from cache - auto ie = PluginCache::get().ie(); - if (targetDevice == CommonTestUtils::DEVICE_CPU || targetDevice == CommonTestUtils::DEVICE_GPU) { - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice); - ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo()); - // Create InferRequest - InferenceEngine::InferRequest req; - ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - execGraph.serialize("exeNetwork.xml", "exeNetwork.bin"); - ASSERT_EQ(0, std::remove("exeNetwork.xml")); - } else { - ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice).GetExecGraphInfo(), - InferenceEngine::details::InferenceEngineException); - } - function.reset(); -} -} // namespace 
LayerTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/src/behavior/infer_request_callback.cpp b/inference-engine/tests/functional/plugin/shared/src/behavior/infer_request_callback.cpp deleted file mode 100644 index 544b3729096..00000000000 --- a/inference-engine/tests/functional/plugin/shared/src/behavior/infer_request_callback.cpp +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include - -#include - -#include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" -#include "functional_test_utils/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" - -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "behavior/infer_request_callback.hpp" - - -namespace LayerTestsDefinitions { -std::string CallbackTests::getTestCaseName(testing::TestParamInfo obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - std::tie(netPrecision, targetDevice, configuration) = obj.param; - std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - if (!configuration.empty()) { - result << "configItem=" << configuration.begin()->first << "_" << configuration.begin()->second; - } - return result.str(); -} - -void CallbackTests::SetUp() { - InferenceEngine::Precision netPrecision; - std::tie(netPrecision, targetDevice, configuration) = this->GetParam(); - function = ngraph::builder::subgraph::makeConvPoolRelu(); -} - -void CallbackTests::TearDown() { - if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) { - PluginCache::get().reset(); - } -} - -void CallbackTests::canStartSeveralAsyncInsideCompletionCallbackNoSafeDtor(int iterNum = 1) { - struct TestUserData { - int numIter = 0; - bool 
startAsyncOK = true; - bool getDataOK = true; - std::atomic numIsCalled{0}; - std::mutex mutex_block_emulation; - std::condition_variable cv_block_emulation; - bool isBlocked = true; - - TestUserData(int i) : numIter(i){} - }; - TestUserData data(iterNum); - - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - // Get Core from cache - auto ie = PluginCache::get().ie(); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); - // Create InferRequest - InferenceEngine::InferRequest req = execNet.CreateInferRequest(); - - req.SetCompletionCallback>( - [&](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) { - // WA for deadlock - if (!--data.numIter) { - request->SetCompletionCallback(nullptr); - } - InferenceEngine::StatusCode sts = request->StartAsync(nullptr); - if (sts != InferenceEngine::StatusCode::OK) { - data.startAsyncOK = false; - } - data.numIsCalled++; - if (!data.numIter) { - data.isBlocked = false; - data.cv_block_emulation.notify_one(); - } - }); - req.StartAsync(); - InferenceEngine::ResponseDesc responseWait; - InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); - // intentionally block until notification from callback - - // intentionally block until notification from callback - std::unique_lock lock(data.mutex_block_emulation); - data.cv_block_emulation.wait(lock, [&]() { return !data.isBlocked; }); - - ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), waitStatus); - - ASSERT_EQ(iterNum, data.numIsCalled); - ASSERT_TRUE(data.startAsyncOK); - ASSERT_TRUE(data.getDataOK); -} - -TEST_P(CallbackTests, canCallSyncAndAsyncWithCompletionCallback) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - 
// Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - // Get Core from cache - auto ie = PluginCache::get().ie(); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); - // Create InferRequest - InferenceEngine::InferRequest req = execNet.CreateInferRequest(); - bool isCalled = false; - req.SetCompletionCallback>( - [&](InferenceEngine::InferRequest request, InferenceEngine::StatusCode status) { - // HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE - if (targetDevice != CommonTestUtils::DEVICE_HDDL) { - ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), status); - } - isCalled = true; - }); - - req.StartAsync(); - InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); - - ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), waitStatus); - ASSERT_TRUE(isCalled); - function.reset(); -} - -// test that can wait all callbacks on dtor -TEST_P(CallbackTests, canStartAsyncInsideCompletionCallback) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - - struct TestUserData { - bool startAsyncOK = false; - int numIsCalled = 0; - }; - TestUserData data; - - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - // Get Core from cache - auto ie = PluginCache::get().ie(); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); - // Create InferRequest - InferenceEngine::InferRequest req = execNet.CreateInferRequest(); - - req.SetCompletionCallback>( - [&] (InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) { - // HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE - if (targetDevice != CommonTestUtils::DEVICE_HDDL) { - ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), status); - } - 
data.numIsCalled++; - // WA for deadlock - request->SetCompletionCallback(nullptr); - InferenceEngine::StatusCode sts = request->StartAsync(nullptr); - if (sts == InferenceEngine::StatusCode::OK) { - data.startAsyncOK = true; - } - }); - - req.StartAsync(); - InferenceEngine::ResponseDesc responseWait; - InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); - - ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), waitStatus) << responseWait.msg; - ASSERT_EQ(1, data.numIsCalled); - ASSERT_TRUE(data.startAsyncOK); - function.reset(); -} - -// test that can wait all callbacks on dtor -TEST_P(CallbackTests, canStartSeveralAsyncInsideCompletionCallbackWithSafeDtor) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - - const int NUM_ITER = 10; - struct TestUserData { - int numIter = NUM_ITER; - bool startAsyncOK = true; - std::atomic numIsCalled{0}; - std::mutex mutex_block_emulation; - std::condition_variable cv_block_emulation; - bool isBlocked = true; - }; - TestUserData data; - - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - // Get Core from cache - auto ie = PluginCache::get().ie(); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); - // Create InferRequest - InferenceEngine::InferRequest req = execNet.CreateInferRequest(); - req.SetCompletionCallback>( - [&] (InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) { - // HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE - if (targetDevice != CommonTestUtils::DEVICE_HDDL) { - ASSERT_EQ(static_cast(InferenceEngine::StatusCode::OK), status); - } - if (--data.numIter) { - InferenceEngine::StatusCode sts = request->StartAsync(nullptr); - if (sts != InferenceEngine::StatusCode::OK) { - data.startAsyncOK = false; - } - } - data.numIsCalled++; - if 
(!data.numIter) { - data.isBlocked = false; - data.cv_block_emulation.notify_all(); - } - }); - - req.StartAsync(); - InferenceEngine::ResponseDesc responseWait; - InferenceEngine::StatusCode waitStatus = req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); - // intentionally block until notification from callback - std::unique_lock lock(data.mutex_block_emulation); - data.cv_block_emulation.wait(lock, [&]() { return !data.isBlocked; }); - - ASSERT_EQ((int) InferenceEngine::StatusCode::OK, waitStatus) << responseWait.msg; - ASSERT_EQ(NUM_ITER, data.numIsCalled); - ASSERT_TRUE(data.startAsyncOK); - function.reset(); -} - -TEST_P(CallbackTests, inferDoesNotCallCompletionCallback) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - // Get Core from cache - auto ie = PluginCache::get().ie(); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); - // Create InferRequest - InferenceEngine::InferRequest req = execNet.CreateInferRequest(); - bool isCalled = false; - req.SetCompletionCallback>( - [&] (InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) { - isCalled = true; - }); - req.Infer(); - ASSERT_FALSE(isCalled); - function.reset(); -} - -TEST_P(CallbackTests, canStartAsyncInsideCompletionCallbackNoSafeDtor) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - canStartSeveralAsyncInsideCompletionCallbackNoSafeDtor(1); -} - -TEST_P(CallbackTests, canStartSeveralAsyncInsideCompletionCallbackNoSafeDtor) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - canStartSeveralAsyncInsideCompletionCallbackNoSafeDtor(10); -} - -TEST_P(CallbackTests, returnGeneralErrorIfCallbackThrowException) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - 
InferenceEngine::CNNNetwork cnnNet(function); - // Get Core from cache - auto ie = PluginCache::get().ie(); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); - // Create InferRequest - InferenceEngine::IInferRequest::Ptr req = static_cast(execNet.CreateInferRequest()); - req->SetCompletionCallback( - [](InferenceEngine::IInferRequest::Ptr, InferenceEngine::StatusCode status) { - THROW_IE_EXCEPTION << "returnGeneralErrorIfCallbackThrowException"; - }); - - InferenceEngine::ResponseDesc resp; - req->StartAsync(&resp); - InferenceEngine::StatusCode waitStatus = InferenceEngine::StatusCode::INFER_NOT_STARTED; - while (InferenceEngine::StatusCode::RESULT_NOT_READY == waitStatus || InferenceEngine::StatusCode::INFER_NOT_STARTED == waitStatus) { - waitStatus = req->Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY, &resp); - } - ASSERT_EQ(InferenceEngine::StatusCode::GENERAL_ERROR, waitStatus); - ASSERT_NE(std::string(resp.msg).find("returnGeneralErrorIfCallbackThrowException"), std::string::npos); -} - -} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/src/behavior/set_preprocess.cpp b/inference-engine/tests/functional/plugin/shared/src/behavior/set_preprocess.cpp deleted file mode 100644 index 8eea245b423..00000000000 --- a/inference-engine/tests/functional/plugin/shared/src/behavior/set_preprocess.cpp +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include - -#include -#include -#include "common_test_utils/test_assertions.hpp" -#include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" -#include "functional_test_utils/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" -#include "ie_preprocess.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include 
"ngraph_functions/subgraph_builders.hpp" -#include "behavior/set_preprocess.hpp" - -namespace LayerTestsDefinitions { - std::string PreProcessTests::getTestCaseName(testing::TestParamInfo obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - std::tie(netPrecision, targetDevice, configuration) = obj.param; - std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - if (!configuration.empty()) { - result << "configItem=" << configuration.begin()->first << "_" << configuration.begin()->second; - } - return result.str(); - } - - void PreProcessTests::SetUp() { - InferenceEngine::Precision netPrecision; - std::tie(netPrecision, targetDevice, configuration) = this->GetParam(); - function = ngraph::builder::subgraph::makeConvPoolRelu(); - } - - void PreProcessTests::TearDown() { - if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) { - PluginCache::get().reset(); - } - } - -TEST_P(PreProcessTests, SetPreProcessToInputInfo) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - - auto &preProcess = cnnNet.getInputsInfo().begin()->second->getPreProcess(); - preProcess.setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR); - - // Get Core from cache - auto ie = PluginCache::get().ie(); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); - // Create InferRequest - auto req = execNet.CreateInferRequest(); - { - InferenceEngine::ConstInputsDataMap inputsMap = execNet.GetInputsInfo(); - const auto& name = inputsMap.begin()->second->name(); - const InferenceEngine::PreProcessInfo *info = &req.GetPreProcess(name.c_str()); - ASSERT_EQ(info->getResizeAlgorithm(), InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR); - 
ASSERT_PREPROCESS_INFO_EQ(preProcess, *info); - } - function.reset(); - } - -TEST_P(PreProcessTests, SetPreProcessToInferRequest) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - - auto &preProcess = cnnNet.getInputsInfo().begin()->second->getPreProcess(); - preProcess.setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR); - - // Get Core from cache - auto ie = PluginCache::get().ie(); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); - // Create InferRequest - auto req = execNet.CreateInferRequest(); - InferenceEngine::ConstInputsDataMap inputsMap = execNet.GetInputsInfo(); - const auto& name = inputsMap.begin()->second->name(); - auto inputBlob = FuncTestUtils::createAndFillBlob( - cnnNet.getInputsInfo().begin()->second->getTensorDesc()); - req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob); - { - const InferenceEngine::PreProcessInfo *info = &req.GetPreProcess(name.c_str()); - ASSERT_EQ(cnnNet.getInputsInfo().begin()->second->getPreProcess().getResizeAlgorithm(), - info->getResizeAlgorithm()); - } - function.reset(); - } - -} // namespace LayerTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/behavior_test_utils.hpp b/inference-engine/tests/ie_test_utils/functional_test_utils/behavior_test_utils.hpp new file mode 100644 index 00000000000..127a95d2e4b --- /dev/null +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/behavior_test_utils.hpp @@ -0,0 +1,74 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "gtest/gtest.h" +#include "common_test_utils/common_utils.hpp" 
+#include "common_test_utils/test_common.hpp" + +#include "functional_test_utils/skip_tests_config.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "functional_test_utils/blob_utils.hpp" +#include "functional_test_utils/precision_utils.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ngraph_functions/pass/convert_prc.hpp" + +namespace BehaviorTestsUtils { + typedef std::tuple< + InferenceEngine::Precision, // Network precision + std::string, // Device name + std::map // Config + > BehaviorParams; + +class BehaviorTestsBasic : public testing::WithParamInterface, + public CommonTestUtils::TestsCommon { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + InferenceEngine::Precision netPrecision; + std::string targetDevice; + std::map configuration; + std::tie(netPrecision, targetDevice, configuration) = obj.param; + std::ostringstream result; + result << "netPRC=" << netPrecision.name() << "_"; + result << "targetDevice=" << targetDevice; + if (!configuration.empty()) { + for (auto& configItem : configuration) { + result << "configItem=" << configItem.first << "_" << configItem.second << "_"; + } + } + return result.str(); + } + + void SetUp() override { + std::tie(netPrecision, targetDevice, configuration) = this->GetParam(); + function = ngraph::builder::subgraph::makeConvPoolRelu(); + } + + void TearDown() override { + if ((targetDevice == CommonTestUtils::DEVICE_GPU) || (!configuration.empty())) { + PluginCache::get().reset(); + } + function.reset(); + } + + std::shared_ptr ie = PluginCache::get().ie(); + std::shared_ptr function; + InferenceEngine::Precision netPrecision; + std::string targetDevice; + std::map configuration; +}; + +} // namespace BehaviorTestsUtils diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp 
b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp deleted file mode 100644 index d6312a9cb89..00000000000 --- a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request.hpp" -#include "cldnn_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName); diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp deleted file mode 100644 index 52338059f8d..00000000000 --- a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_config.hpp" -#include "cldnn_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues), - getConfigTestCaseName); diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp deleted file mode 100644 index d71e2d5d0d0..00000000000 --- a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) 
2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_input.hpp" -#include "cldnn_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues), - getTestCaseName); diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp deleted file mode 100644 index 1ebe6221d8b..00000000000 --- a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_output.hpp" -#include "cldnn_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues), - getOutputTestCaseName); diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp deleted file mode 100644 index 9918dc508fc..00000000000 --- a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request.hpp" -#include "gna_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName); diff --git 
a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp deleted file mode 100644 index 851d6b1d79c..00000000000 --- a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_config.hpp" -#include "gna_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestConfig, - ValuesIn(withCorrectConfValues), - getConfigTestCaseName); - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues), - getConfigTestCaseName); - -bool CheckGnaHw() { - if (auto envVar = std::getenv("IE_GNA_HW")) { - return std::stoi(envVar) != 0; - } - return false; -} - -class BehaviorPluginTestInferRequestWithGnaHw : public BehaviorPluginTestInferRequest { -}; - -TEST_P(BehaviorPluginTestInferRequestWithGnaHw, CanInferOrFailWithGnaHw) { - TestEnv::Ptr testEnv; - std::map config = GetParam().config; - - if (CheckGnaHw()) { - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv, config)); - sts = testEnv->inferRequest->Infer(&response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - } else { - try { - _createAndCheckInferRequest(GetParam(), testEnv, config); - } catch (InferenceEngineException ex) { - ASSERT_TRUE(strContains(ex.what(), "Unsuccessful Gna2Status")); - return; - } catch (...) 
{ - FAIL(); - } - - sts = testEnv->inferRequest->Infer(&response); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts); - ASSERT_TRUE(strContains(response.msg, "Bad GNA status")); - } -} - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestWithGnaHw, - ValuesIn(withGnaHwConfValue), - getConfigTestCaseName); diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp deleted file mode 100644 index 2c9c0d1ede8..00000000000 --- a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_input.hpp" -#include "gna_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues), - getTestCaseName); diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp deleted file mode 100644 index d9c222058d5..00000000000 --- a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_output.hpp" -#include "gna_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues), - getOutputTestCaseName); diff --git 
a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp deleted file mode 100644 index f74a84c4bb1..00000000000 --- a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request.hpp" -#include "mkldnn_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName); diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp deleted file mode 100644 index 408d020f192..00000000000 --- a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_config.hpp" -#include "mkldnn_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestConfig, - ValuesIn(BehTestParams::concat(withCorrectConfValues, withCorrectConfValuesNetworkOnly)), - getConfigTestCaseName); - - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues), - getConfigTestCaseName); diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp 
b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp deleted file mode 100644 index 12f48efdd7a..00000000000 --- a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_input.hpp" -#include "mkldnn_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues), - getTestCaseName); diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp deleted file mode 100644 index 575a8653417..00000000000 --- a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_output.hpp" -#include "mkldnn_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues), - getOutputTestCaseName); diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request.hpp deleted file mode 100644 index 503e6fabfb0..00000000000 --- a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request.hpp +++ /dev/null @@ -1,624 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// 
SPDX-License-Identifier: Apache-2.0 -// - -#include -#include "behavior_test_plugin.h" -#include - -using namespace std; -using namespace ::testing; -using namespace InferenceEngine; -using namespace InferenceEngine::details; - -namespace { -std::string getTestCaseName(testing::TestParamInfo obj) { - std::string config; - for (auto&& cfg : obj.param.config) { - config += "_" + cfg.first + "_" + cfg.second; - } - return obj.param.device + "_" + obj.param.input_blob_precision.name() + config; -} -} - -// Setting empty config to LoadNetwork doesn't throw -TEST_P(BehaviorPluginTestInferRequest, SetEmptyConfig) { - InferenceEngine::Core core; - - const std::string device = GetParam().device; - ASSERT_NO_THROW(core.SetConfig(GetParam().config, GetParam().device)); - - InferenceEngine::CNNNetwork cnnNetwork = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob); - InferenceEngine::IExecutableNetwork::Ptr exeNetwork; - std::map config; - if (device.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos && - device.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) { - ASSERT_NO_THROW(exeNetwork = core.LoadNetwork(cnnNetwork, GetParam().device, config)); - } else { - ASSERT_NO_THROW(exeNetwork = core.LoadNetwork(cnnNetwork, GetParam().device, GetParam().config)); - } -} - -// Load incorrect network to Plugin to get executable network -TEST_P(BehaviorPluginTestInferRequest, canNotLoadNetworkToGetExeNetworkWithoutWeights) { - InferenceEngine::Core core; - ASSERT_THROW(core.ReadNetwork(GetParam().model_xml_str, Blob::CPtr()), InferenceEngineException); -} - -// Load correct network to Plugin to get executable network -TEST_P(BehaviorPluginTestInferRequest, canLoadCorrectNetworkToGetExecutable) { - InferenceEngine::Core core; - InferenceEngine::CNNNetwork cnnNetwork = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob); - ASSERT_NO_THROW(core.LoadNetwork(cnnNetwork, GetParam().device, GetParam().config)); -} - 
-TEST_P(BehaviorPluginTestInferRequest, CanCreateTwoExeNetworks) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - InferenceEngine::Core core; - InferenceEngine::CNNNetwork cnnNetwork = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob); - - for (auto i = 0; i < 2; i++) { - ASSERT_NO_THROW(core.LoadNetwork(cnnNetwork, GetParam().device, GetParam().config)); - } -} - -TEST_P(BehaviorPluginTestInferRequest, CanCreateInferRequest) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); -} - -TEST_P(BehaviorPluginTestInferRequest, failToSetNullptrForInput) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr inputBlob = nullptr; - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputBlob, &response)); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts); - std::string refError = NOT_ALLOCATED_str + "Failed to set empty blob with name: \'" + testEnv->inputName + "\'"; - response.msg[refError.length()] = '\0'; - ASSERT_EQ(refError, response.msg); -} - -TEST_P(BehaviorPluginTestInferRequest, failToSetEmptyInputBlob) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr blob; - sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), blob, &response); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts); - std::string refError = NOT_ALLOCATED_str + "Failed to set empty blob with name: \'" + testEnv->inputName + "\'"; - response.msg[refError.length()] = '\0'; - ASSERT_EQ(refError, response.msg); -} - -TEST_P(BehaviorPluginTestInferRequest, failToSetEmptyOutputBlob) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr blob; - sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), blob, &response); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts); - std::string refError = NOT_ALLOCATED_str + "Failed to set empty 
blob with name: \'" + testEnv->outputName + "\'"; - response.msg[refError.length()] = '\0'; - ASSERT_EQ(refError, response.msg); -} - -TEST_P(BehaviorPluginTestInferRequest, failToSetNotAllocatedInput) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr input = makeNotAllocatedBlob(GetParam().input_blob_precision, - TensorDesc::getLayoutByDims(testEnv->inputDims), testEnv->inputDims); - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response)); - std::string refError = "Input data was not allocated. Input name: \'" + testEnv->inputName + "\'"; - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts); - response.msg[refError.length()] = '\0'; - ASSERT_EQ(refError, response.msg); -} - -TEST_P(BehaviorPluginTestInferRequest, failToSetNotAllocatedOutput) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr output = makeNotAllocatedBlob(GetParam().input_blob_precision, - TensorDesc::getLayoutByDims(testEnv->outputDims), testEnv->outputDims); - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), output, &response)); - std::string refError = "Input data was not allocated. 
Input name: \'" + testEnv->outputName + "\'"; - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts); - response.msg[refError.length()] = '\0'; - ASSERT_EQ(refError, response.msg); -} - -TEST_P(BehaviorPluginTestInferRequest, failToSetBlobWithIncorrectName) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - auto input = makeNotAllocatedBlob(GetParam().input_blob_precision, TensorDesc::getLayoutByDims(testEnv->inputDims), - testEnv->inputDims); - input->allocate(); - sts = testEnv->inferRequest->SetBlob(FuncTestUtils::TestModel::incorrect_input_name, input, &response); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts); - std::string refError = - NOT_FOUND_str + "Failed to find input or output with name: \'" + - FuncTestUtils::TestModel::incorrect_input_name + "\'"; - response.msg[refError.length()] = '\0'; - ASSERT_EQ(refError, response.msg); -} - -TEST_P(BehaviorPluginTestInferRequest, failToSetInputWithIncorrectSizes) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - SizeVector incorrectSizes = testEnv->inputDims; - /* to use 2x size of first dim to simulate using of an input blob of another size */ - incorrectSizes[0] *= 2; - auto input = makeNotAllocatedBlob(GetParam().input_blob_precision, TensorDesc::getLayoutByDims(incorrectSizes), - incorrectSizes); - input->allocate(); - int in_size = std::accumulate(testEnv->inputDims.begin(), testEnv->inputDims.end(), 1, std::multiplies()); - std::string refError = "Input blob size is not equal network input size (" + std::to_string(input->size()) + "!=" + - std::to_string(in_size) + ")."; - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response)); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts); - response.msg[refError.length()] = '\0'; - ASSERT_EQ(refError, response.msg); -} - -TEST_P(BehaviorPluginTestInferRequest, failToSetOutputWithIncorrectSizes) { - TestEnv::Ptr testEnv; - 
ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - SizeVector incorrectSizes = testEnv->outputDims; - /* to use 2x size of first dim to simulate using of an output blob of another size */ - incorrectSizes[0] *= 2; - Blob::Ptr output = _prepareOutputBlob(GetParam().input_blob_precision, incorrectSizes); - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), output, &response)); - int out_size = std::accumulate(testEnv->outputDims.begin(), testEnv->outputDims.end(), 1, std::multiplies()); - std::string refError = - "Output blob size is not equal network output size (" + std::to_string(output->size()) + "!=" + - std::to_string(out_size) + ")."; - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts); - response.msg[refError.length()] = '\0'; - ASSERT_EQ(refError, response.msg); -} - -TEST_P(BehaviorPluginTestInferRequest, failToSetInputBlobWithPrecisionNotMatchInputPrecision) { - - std::string refError; - if (GetParam().device != CommonTestUtils::DEVICE_CPU) { - // MKLDNNPlugin now supports input blobs with format other than the network format, - // so there is no 'not corresponding user input precision' error - - refError = - PARAMETER_MISMATCH_str + "Failed to set Blob with precision not corresponding to user input precision"; - } else { - // ...but it still doesn't support Precision::UNSPECIFIED blobs. 
- - refError = PARAMETER_MISMATCH_str + "Failed to set Blob with precision"; - } - - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - auto inputBlob = prepareInputBlob(Precision::UNSPECIFIED, testEnv->inputDims); - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputBlob, &response)); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts); - response.msg[refError.length()] = '\0'; - - if (GetParam().device != CommonTestUtils::DEVICE_CPU) { - ASSERT_EQ(refError, response.msg); - } else { - ASSERT_STR_CONTAINS(response.msg, refError); - } - - -} - -TEST_P(BehaviorPluginTestInferRequest, failToSetOutputBlobWithPrecisionNotMatchOutputPrecision) { - std::string refError = - PARAMETER_MISMATCH_str + "Failed to set Blob with precision not corresponding to user output precision"; - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - auto outputBlob = _prepareOutputBlob(Precision::UNSPECIFIED, testEnv->outputDims); - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), outputBlob, &response)); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts); - response.msg[refError.length()] = '\0'; - ASSERT_EQ(refError, response.msg); -} - -TEST_P(BehaviorPluginTestInferRequest, canInferWithoutSetAndGetInOut) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response)); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; -} - -TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedInputBlobAfterGetBlob) { - std::string refError = "Input data was not allocated"; - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr blob; - ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), blob, &response)); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - 
blob->deallocate(); - ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response)); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg; - EXPECT_THAT(std::string(response.msg), HasSubstr(refError)); -} - -TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedInputBlobAfterGetBlobForAsync) { - std::string refError = "Input data was not allocated"; - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr blob; - ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), blob, &response)); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - blob->deallocate(); - ASSERT_NO_THROW(sts = testEnv->inferRequest->StartAsync(&response)); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg; - EXPECT_THAT(std::string(response.msg), HasSubstr(refError)); -} - -TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedInputBlobAfterGetAndSetBlob) { - std::string refError = "Input data was not allocated"; - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr blob; - ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), blob, &response)); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), blob, &response)); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - blob->deallocate(); - ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response)); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg; - EXPECT_THAT(std::string(response.msg), HasSubstr(refError)); -} - -TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedInputBlobAfterSetBlob) { - std::string refError = "Input data was not allocated"; - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - auto blob = makeNotAllocatedBlob(GetParam().input_blob_precision, 
TensorDesc::getLayoutByDims(testEnv->inputDims), - testEnv->inputDims); - blob->allocate(); - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), blob, &response)); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - blob->deallocate(); - ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response)); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg; - EXPECT_THAT(std::string(response.msg), HasSubstr(refError)); -} - -TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedOutputBlobAfterGetBlob) { - std::string refError = "Output data was not allocated"; - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr blob; - ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), blob, &response)); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - blob->deallocate(); - ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response)); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg; - EXPECT_THAT(std::string(response.msg), HasSubstr(refError)); -} - -TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedOutputBlobAfterGetBlobForAsync) { - std::string refError = "Output data was not allocated"; - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr blob; - ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), blob, &response)); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - blob->deallocate(); - ASSERT_NO_THROW(sts = testEnv->inferRequest->StartAsync(&response)); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg; - EXPECT_THAT(std::string(response.msg), HasSubstr(refError)); -} - -TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedOutputBlobAfterGetAndSetBlob) { - std::string refError = "Output data was not allocated"; - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - 
Blob::Ptr blob; - ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), blob, &response)); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), blob, &response)); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - blob->deallocate(); - ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response)); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg; - EXPECT_THAT(std::string(response.msg), HasSubstr(refError)); -} - -TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedOutputBlobAfterSetBlob) { - std::string refError = "Output data was not allocated"; - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - auto blob = makeNotAllocatedBlob(GetParam().output_blob_precision, TensorDesc::getLayoutByDims(testEnv->outputDims), - testEnv->outputDims); - blob->allocate(); - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), blob, &response)); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - blob->deallocate(); - ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response)); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg; - EXPECT_THAT(std::string(response.msg), HasSubstr(refError)); -} - -TEST_P(BehaviorPluginTestInferRequest, DISABLED_secondCallGetOutputDoNotReAllocateData) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr getBlob1; - ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), getBlob1, &response)); - Blob::Ptr getBlob2; - ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), getBlob2, &response)); - ASSERT_EQ(getBlob1.get(), getBlob2.get()); -} - -TEST_P(BehaviorPluginTestInferRequest, CorrectOneAsyncInferWithGetInOutWithInfWait) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), 
testEnv)); - Blob::Ptr input; - Blob::Ptr result; - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response); - - sts = testEnv->inferRequest->StartAsync(&response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - - sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY, &response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - - testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), result, &response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; -} - -// Plugin correct infer request with allocating input and result BlobMaps inside plugin -TEST_P(BehaviorPluginTestInferRequest, canStartAsyncInferWithGetInOutWithStatusOnlyWait) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr input; - Blob::Ptr result; - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response); - - sts = testEnv->inferRequest->StartAsync(&response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - - sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response); - ASSERT_TRUE(sts == StatusCode::OK || StatusCode::RESULT_NOT_READY) << response.msg; -} - -// Plugin correct infer request with allocating input and result BlobMaps inside plugin -TEST_P(BehaviorPluginTestInferRequest, FailedAsyncInferWithNegativeTimeForWait) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - std::string refError = PARAMETER_MISMATCH_str; - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr input; - Blob::Ptr result; - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response); - - sts = testEnv->inferRequest->StartAsync(&response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - - ASSERT_NO_THROW(sts = testEnv->inferRequest->Wait(-2, &response)); - ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg; - response.msg[refError.length()] = '\0'; - ASSERT_EQ(refError, response.msg); -} - 
-TEST_P(BehaviorPluginTestInferRequest, canRun3SyncRequestsConsistentlyFromThreads) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - IInferRequest::Ptr inferRequest2; - static_cast(testEnv->exeNetwork)->CreateInferRequest(inferRequest2, &response); - ASSERT_NE(inferRequest2, nullptr) << response.msg; - IInferRequest::Ptr inferRequest3; - static_cast(testEnv->exeNetwork)->CreateInferRequest(inferRequest3, &response); - ASSERT_NE(inferRequest3, nullptr) << response.msg; - - Blob::Ptr input1; - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input1, &response); - inferRequest2->SetBlob(testEnv->inputName.c_str(), input1, &response); - inferRequest3->SetBlob(testEnv->inputName.c_str(), input1, &response); - - InferenceEngine::ResponseDesc response1, response2, response3; - InferenceEngine::StatusCode sts1, sts2, sts3; - std::thread t1([&] { sts1 = testEnv->inferRequest->Infer(&response1); }); - std::thread t2([&] { sts2 = inferRequest2->Infer(&response2); }); - std::thread t3([&] { sts3 = inferRequest3->Infer(&response3); }); - - t1.join(); - t2.join(); - t3.join(); - - ASSERT_EQ((int) StatusCode::OK, sts1) << response1.msg; - ASSERT_EQ((int) StatusCode::OK, sts2) << response2.msg; - ASSERT_EQ((int) StatusCode::OK, sts3) << response3.msg; -} - -TEST_P(BehaviorPluginTestInferRequest, canRun3AsyncRequestsConsistentlyWithWait) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - IInferRequest::Ptr inferRequest2; - static_cast(testEnv->exeNetwork)->CreateInferRequest(inferRequest2, &response); - ASSERT_NE(inferRequest2, nullptr) << response.msg; - IInferRequest::Ptr inferRequest3; - static_cast(testEnv->exeNetwork)->CreateInferRequest(inferRequest3, &response); - ASSERT_NE(inferRequest3, nullptr) << response.msg; - Blob::Ptr input1; - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input1, &response); - 
inferRequest2->SetBlob(testEnv->inputName.c_str(), input1, &response); - inferRequest3->SetBlob(testEnv->inputName.c_str(), input1, &response); - - sts = testEnv->inferRequest->StartAsync(&response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - sts = inferRequest2->StartAsync(&response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - sts = inferRequest3->StartAsync(&response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY, &response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - sts = inferRequest2->Wait(IInferRequest::WaitMode::RESULT_READY, &response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - sts = inferRequest3->Wait(IInferRequest::WaitMode::RESULT_READY, &response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; -} - -TEST_P(BehaviorPluginTestInferRequest, canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - IInferRequest::Ptr inferRequest2; - static_cast(testEnv->exeNetwork)->CreateInferRequest(inferRequest2, &response); - ASSERT_NE(inferRequest2, nullptr) << response.msg; - IInferRequest::Ptr inferRequest3; - static_cast(testEnv->exeNetwork)->CreateInferRequest(inferRequest3, &response); - ASSERT_NE(inferRequest3, nullptr) << response.msg; - Blob::Ptr input1; - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input1, &response); - inferRequest2->SetBlob(testEnv->inputName.c_str(), input1, &response); - inferRequest3->SetBlob(testEnv->inputName.c_str(), input1, &response); - - InferenceEngine::ResponseDesc response1, response2, response3; - InferenceEngine::StatusCode sts1, sts2, sts3; - std::thread t1([&] { sts1 = testEnv->inferRequest->StartAsync(&response1); }); - std::thread t2([&] { sts2 = inferRequest2->StartAsync(&response2); }); - std::thread t3([&] { sts3 = 
inferRequest3->StartAsync(&response3); }); - - t1.join(); - t2.join(); - t3.join(); - - ASSERT_EQ((int) StatusCode::OK, sts1) << response1.msg; - ASSERT_EQ((int) StatusCode::OK, sts2) << response2.msg; - ASSERT_EQ((int) StatusCode::OK, sts3) << response3.msg; -} - -TEST_P(BehaviorPluginTestInferRequest, canWaitWithotStartAsync) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY, &response); - ASSERT_EQ(StatusCode::INFER_NOT_STARTED, sts); - sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response); - ASSERT_EQ(StatusCode::INFER_NOT_STARTED, sts); - sts = testEnv->inferRequest->Wait(1, &response); - ASSERT_EQ(StatusCode::INFER_NOT_STARTED, sts); -} - -TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnSetBlobAfterAsyncInfer) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - auto&& config = GetParam().config; - auto itConfig = config.find(CONFIG_KEY(CPU_THROUGHPUT_STREAMS)); - if (itConfig != config.end()) { - if (itConfig->second != "CPU_THROUGHPUT_AUTO") { - if (std::stoi(itConfig->second) == 0) { - GTEST_SKIP() << "Not applicable with disabled streams"; - } - } - } - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr input; - sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response); - ASSERT_EQ((int) StatusCode::OK, sts) << response.msg; - - sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response); - ASSERT_EQ(StatusCode::INFER_NOT_STARTED, sts) << response.msg; - - std::map perfMap; - - sts = testEnv->inferRequest->StartAsync(&response); - ASSERT_EQ((int) StatusCode::OK, sts) << response.msg; - - sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response); - if (sts == StatusCode::REQUEST_BUSY) { - ASSERT_TRUE(_wasDeviceBusy(response)); - } else { - 
ASSERT_EQ(StatusCode::OK, sts) << response.msg; - } - response.msg[0] = 0; - - sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response); - ASSERT_TRUE(sts == StatusCode::OK || sts == StatusCode::RESULT_NOT_READY) << response.msg; -} - -TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnGetBlobAfterAsyncInfer) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr input; - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response); - ResponseDesc response2; - - sts = testEnv->inferRequest->StartAsync(&response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response2); - if (sts == StatusCode::REQUEST_BUSY) - ASSERT_TRUE(_wasDeviceBusy(response2)); - else - ASSERT_EQ(StatusCode::OK, sts) << response.msg; -} - -TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnGetPerformanceCountAfterAsyncInfer) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr input; - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response); - std::map perfMap; - ResponseDesc response2; - - sts = testEnv->inferRequest->StartAsync(&response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - sts = testEnv->inferRequest->GetPerformanceCounts(perfMap, &response2); - if (sts == StatusCode::REQUEST_BUSY) - ASSERT_TRUE(_wasDeviceBusy(response2)); - else - ASSERT_EQ(StatusCode::OK, sts); -} - -TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnStartInferAfterAsyncInfer) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr input; - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response); - ResponseDesc response2; - - sts = 
testEnv->inferRequest->StartAsync(&response); - ASSERT_EQ(StatusCode::OK, sts); - sts = testEnv->inferRequest->StartAsync(&response2); - if (sts == StatusCode::REQUEST_BUSY) - ASSERT_TRUE(_wasDeviceBusy(response2)); - else - ASSERT_EQ(StatusCode::OK, sts); -} - -TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnGetUserDataAfterAsyncInfer) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr input; - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response); - ResponseDesc response2; - - sts = testEnv->inferRequest->StartAsync(&response); - ASSERT_EQ(StatusCode::OK, sts); - testEnv->inferRequest->GetUserData(nullptr, &response2); - auto waitStatus = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response); - if (waitStatus == StatusCode::RESULT_NOT_READY) - ASSERT_TRUE(_wasDeviceBusy(response2)); - else - ASSERT_TRUE(waitStatus == StatusCode::OK); -} - -TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnSetUserDataAfterAsyncInfer) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr input; - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response); - ResponseDesc response2; - - sts = testEnv->inferRequest->StartAsync(&response); - ASSERT_EQ(StatusCode::OK, sts); - testEnv->inferRequest->SetUserData(nullptr, &response2); - auto waitStatus = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response); - if (waitStatus == StatusCode::RESULT_NOT_READY) - ASSERT_TRUE(_wasDeviceBusy(response2)); - else - ASSERT_TRUE(waitStatus == StatusCode::OK); -} diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_config.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_config.hpp 
deleted file mode 100644 index ed20ec49bb1..00000000000 --- a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_config.hpp +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin.h" -#include - -using namespace std; -using namespace ::testing; -using namespace InferenceEngine; -using namespace InferenceEngine::details; - -namespace { -std::string getConfigTestCaseName(testing::TestParamInfo obj) { - std::string config_str = ""; - for (auto it = obj.param.config.cbegin(); it != obj.param.config.cend(); it++) { - std::string v = it->second; - std::replace(v.begin(), v.end(), '.', '_'); - config_str += it->first + "_" + v + "_"; - } - return obj.param.device + "_" + config_str; -} -} - -TEST_P(BehaviorPluginTestInferRequestConfig, CanInferWithConfig) { - TestEnv::Ptr testEnv; - std::map config = GetParam().config; - - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv, config)); - sts = testEnv->inferRequest->Infer(&response); - - ASSERT_EQ(StatusCode::OK, sts) << response.msg; -} - -TEST_P(BehaviorPluginTestInferRequestConfigExclusiveAsync, canSetExclusiveAsyncRequests) { - ASSERT_EQ(0ul, ExecutorManager::getInstance()->getExecutorsNumber()); - TestEnv::Ptr testEnv; - std::map config; - config[PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS] = PluginConfigParams::YES; - - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv, config)); - - // TODO: there is no executors to sync. should it be supported natively in HDDL API? 
- if (GetParam().device == CommonTestUtils::DEVICE_HDDL) { - ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber()); - } else if (GetParam().device == CommonTestUtils::DEVICE_FPGA) { - ASSERT_EQ(2u, ExecutorManager::getInstance()->getExecutorsNumber()); - } else if (GetParam().device == CommonTestUtils::DEVICE_MYRIAD) { - ASSERT_EQ(2u, ExecutorManager::getInstance()->getExecutorsNumber()); - } else if (GetParam().device == CommonTestUtils::DEVICE_KEEMBAY) { - ASSERT_EQ(2u, ExecutorManager::getInstance()->getExecutorsNumber()); - } else if (GetParam().device == CommonTestUtils::DEVICE_GNA) { - ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber()); - } else if (GetParam().device == CommonTestUtils::DEVICE_MULTI) { - // for multi-device the number of Executors is not known (defined by the devices configuration) - } else { - ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber()); - } -} - -TEST_P(BehaviorPluginTestInferRequestConfigExclusiveAsync, withoutExclusiveAsyncRequests) { - ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber()); - TestEnv::Ptr testEnv; - std::map config; - config[PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS] = PluginConfigParams::NO; - - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv, config)); - - if (GetParam().device == CommonTestUtils::DEVICE_FPGA) { - ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber()); - } else if (GetParam().device == CommonTestUtils::DEVICE_MYRIAD) { - ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber()); - } else if (GetParam().device == CommonTestUtils::DEVICE_KEEMBAY) { - ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber()); - } else if (GetParam().device == CommonTestUtils::DEVICE_MULTI) { - // for multi-device the number of Executors is not known (defined by the devices configuration) - } else { - ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber()); - } -} diff --git 
a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_input.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_input.hpp deleted file mode 100644 index 96cb02bccc1..00000000000 --- a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_input.hpp +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin.h" - -using namespace std; -using namespace ::testing; -using namespace InferenceEngine; -using namespace InferenceEngine::details; - -namespace { -std::string getTestCaseName(testing::TestParamInfo obj) { - return obj.param.device + "_" + obj.param.input_blob_precision.name() + "_" + getModelName(obj.param.model_xml_str) - + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : ""); -} -} - -TEST_P(BehaviorPluginTestInferRequestInput, canSetInputBlobForSyncRequest) { - TestEnv::Ptr testEnv; - Blob::Ptr actualBlob; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - auto inputBlob = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims); - - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputBlob, &response)); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - ASSERT_NO_THROW(testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), actualBlob, &response)); - - ASSERT_EQ(inputBlob, actualBlob); -} - -TEST_P(BehaviorPluginTestInferRequestInput, canSetInputBlobForAsyncRequest) { - TestEnv::Ptr testEnv; - Blob::Ptr actualBlob; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - auto inputBlob = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims); - - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputBlob, &response)); - 
ASSERT_EQ(StatusCode::OK, sts) << response.msg; - ASSERT_NO_THROW(testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), actualBlob, &response)); - - ASSERT_EQ(inputBlob, actualBlob); -} - -TEST_P(BehaviorPluginTestInferRequestInput, canInferWithSetInOut) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - auto input = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims); - testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response); - auto output = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims); - testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), output, &response); - sts = testEnv->inferRequest->Infer(&response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; -} - -TEST_P(BehaviorPluginTestInferRequestInput, canGetInputBlob_deprecatedAPI) { - TestEnv::Ptr testEnv; - Blob::Ptr input; - auto param = GetParam(); - - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(param, testEnv)); - ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response)); - - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - ASSERT_TRUE(input) << "Plugin didn't allocate input blobs"; - ASSERT_FALSE(input->buffer() == nullptr) << "Plugin didn't allocate input blobs"; - auto dims = input->getTensorDesc().getDims(); - ASSERT_TRUE(testEnv->inputDims == dims) << "Input blob dimensions don't match network input"; - - ASSERT_EQ(param.input_blob_precision, input->getTensorDesc().getPrecision()) << "Input blob precision don't match network input"; -} - -TEST_P(BehaviorPluginTestInferRequestInput, canGetInputBlob) { - TestEnv::Ptr testEnv; - Blob::Ptr input; - auto param = GetParam(); - - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(param, testEnv)); - ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response)); - - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - ASSERT_TRUE(input) << 
"Plugin didn't allocate input blobs"; - ASSERT_FALSE(input->buffer() == nullptr) << "Plugin didn't allocate input blobs"; - - auto tensorDescription = input->getTensorDesc(); - auto dims = tensorDescription.getDims(); - ASSERT_TRUE(testEnv->inputDims == dims) << "Input blob dimensions don't match network input"; - - ASSERT_EQ(param.input_blob_precision, tensorDescription.getPrecision()) << "Input blob precision don't match network input"; -} - -TEST_P(BehaviorPluginTestInferRequestInput, getInputAfterSetInputDoNotChangeInput) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr inputSetBlob = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims); - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputSetBlob, &response)); - Blob::Ptr inputGetBlob; - ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), inputGetBlob, &response)); - ASSERT_EQ(inputGetBlob.get(), inputSetBlob.get()); -} - -TEST_P(BehaviorPluginTestInferRequestInput, canInferWithGetInOut) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr input; - Blob::Ptr result; - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response); - testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), result, &response); - sts = testEnv->inferRequest->Infer(&response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; -} - -TEST_P(BehaviorPluginTestInferRequestInput, canStartAsyncInferWithGetInOut) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr input; - Blob::Ptr result; - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response); - sts = testEnv->inferRequest->StartAsync(&response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - sts = testEnv->inferRequest->Wait(500, &response); - ASSERT_EQ(StatusCode::OK, 
sts) << response.msg; - testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), result, &response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; -} diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_output.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_output.hpp deleted file mode 100644 index ced2b092c19..00000000000 --- a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_output.hpp +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin.h" - -using namespace std; -using namespace ::testing; -using namespace InferenceEngine; -using namespace InferenceEngine::details; - -namespace { -std::string getOutputTestCaseName(testing::TestParamInfo obj) { - return obj.param.device + "_" + obj.param.output_blob_precision.name() - + (obj.param.config.size() ? 
"_" + obj.param.config.begin()->second : ""); -} - -} - -TEST_P(BehaviorPluginTestInferRequestOutput, canSetOutputBlobForAsyncRequest) { - TestEnv::Ptr testEnv; - Blob::Ptr actualBlob; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - auto outputBlob = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims); - - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), outputBlob, &response)); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - ASSERT_NO_THROW(testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), actualBlob, &response)); - - ASSERT_EQ(outputBlob, actualBlob); -} - -TEST_P(BehaviorPluginTestInferRequestOutput, canSetOutputBlobForSyncRequest) { - TestEnv::Ptr testEnv; - Blob::Ptr actualBlob; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - auto outputBlob = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims); - - ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), outputBlob, &response)); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - ASSERT_NO_THROW(testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), actualBlob, &response)); - - ASSERT_EQ(outputBlob, actualBlob); -} - -TEST_P(BehaviorPluginTestInferRequestOutput, canInferWithSetInOut) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - auto input = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims); - testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response); - auto output = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims); - testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), output, &response); - - sts = testEnv->inferRequest->Infer(&response); - - ASSERT_EQ(StatusCode::OK, sts) << response.msg; -} - -TEST_P(BehaviorPluginTestInferRequestOutput, canGetOutputBlob_deprecatedAPI) { - TestEnv::Ptr testEnv; - 
Blob::Ptr output; - auto param = GetParam(); - - StatusCode sts = StatusCode::OK; - ResponseDesc response; - - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(param, testEnv)); - ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), output, &response)); - - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - ASSERT_TRUE(output) << "Plugin didn't allocate output blobs"; - ASSERT_FALSE(output->buffer() == nullptr) << "Plugin didn't allocate output blobs"; - auto dims = output->getTensorDesc().getDims(); - ASSERT_TRUE(testEnv->outputDims == dims) << "Output blob dimensions don't match network output"; - // [IE FPGA] The plugin ignores custom output precision: CVS-8122 - if (param.device != CommonTestUtils::DEVICE_FPGA && param.output_blob_precision != Precision::FP32) { - ASSERT_EQ(param.output_blob_precision, output->getTensorDesc().getPrecision()) - << "Output blob precision don't match network output"; - } else if (param.device == CommonTestUtils::DEVICE_FPGA) { - set supportedOutputs = {Precision::FP16, Precision::FP32}; - ASSERT_TRUE(supportedOutputs.find(output->getTensorDesc().getPrecision()) != supportedOutputs.end()) << "Output blob precision don't match network output"; - } else { - ASSERT_EQ(Precision::FP32, output->getTensorDesc().getPrecision()) << "Output blob precision don't match network output"; - } -} - -TEST_P(BehaviorPluginTestInferRequestOutput, canGetOutputBlob) { - TestEnv::Ptr testEnv; - Blob::Ptr output; - auto param = GetParam(); - - StatusCode sts = StatusCode::OK; - ResponseDesc response; - - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(param, testEnv)); - ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), output, &response)); - - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - ASSERT_TRUE(output) << "Plugin didn't allocate output blobs"; - ASSERT_FALSE(output->buffer() == nullptr) << "Plugin didn't allocate output blobs"; - - auto tensorDescription = 
output->getTensorDesc(); - auto dims = tensorDescription.getDims(); - ASSERT_TRUE(testEnv->outputDims == dims) << "Output blob dimensions don't match network output"; - // [IE FPGA] The plugin ignores custom output precision: CVS-8122 - std::cout << "Device: " << param.device << std::endl; - if (param.device != CommonTestUtils::DEVICE_FPGA && param.output_blob_precision != Precision::FP32) { - ASSERT_EQ(param.output_blob_precision, tensorDescription.getPrecision()) - << "Output blob precision don't match network output"; - } else if (param.device == CommonTestUtils::DEVICE_FPGA) { - set supportedOutputs = {Precision::FP16, Precision::FP32}; - ASSERT_TRUE(supportedOutputs.find(tensorDescription.getPrecision()) != supportedOutputs.end()) << "Output blob precision don't match network output"; - } else { - ASSERT_EQ(Precision::FP32, tensorDescription.getPrecision()) << "Output blob precision don't match network output"; - } -} - -TEST_P(BehaviorPluginTestInferRequestOutput, getOutputAfterSetOutputDoNotChangeOutput) { - TestEnv::Ptr testEnv; - ResponseDesc response; - - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr outputSetBlob = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims); - ASSERT_EQ(StatusCode::OK, testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), outputSetBlob, &response)); - Blob::Ptr outputGetBlob; - ASSERT_EQ(StatusCode::OK, testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), outputGetBlob, &response)); - ASSERT_EQ(outputGetBlob.get(), outputSetBlob.get()); -} - -TEST_P(BehaviorPluginTestInferRequestOutput, canInferWithGetInOut) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr input; - Blob::Ptr result; - - StatusCode sts = StatusCode::OK; - ResponseDesc response; - - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response); - testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), 
result, &response); - sts = testEnv->inferRequest->Infer(&response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; -} - -TEST_P(BehaviorPluginTestInferRequestOutput, canStartAsyncInferWithGetInOut) { - TestEnv::Ptr testEnv; - ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv)); - Blob::Ptr input; - Blob::Ptr result; - - StatusCode sts = StatusCode::OK; - ResponseDesc response; - - testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response); - - sts = testEnv->inferRequest->StartAsync(&response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - - sts = testEnv->inferRequest->Wait(500, &response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; - - testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), result, &response); - ASSERT_EQ(StatusCode::OK, sts) << response.msg; -} diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp deleted file mode 100644 index 0721ad56ca8..00000000000 --- a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request.hpp" -#include "vpu_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName); diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp deleted file mode 100644 index 437662c1592..00000000000 --- 
a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_config.hpp" -#include "vpu_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestConfig, - ValuesIn(BehTestParams::concat(deviceAgnosticConfigurations, withCorrectConfValuesNetworkOnly)), - getConfigTestCaseName); - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues), - getConfigTestCaseName); diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp deleted file mode 100644 index e8633b7b08f..00000000000 --- a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_input.hpp" -#include "vpu_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues), - getTestCaseName); diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp deleted file mode 100644 index c7e891db1b2..00000000000 --- a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp +++ /dev/null @@ -1,9 
+0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior_test_plugin_infer_request_output.hpp" -#include "vpu_test_data.hpp" - -INSTANTIATE_TEST_CASE_P(smoke_BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues), - getOutputTestCaseName);