From c39e6fcfd8d3205d7b9faf5e6eea8ec9e1353cd1 Mon Sep 17 00:00:00 2001 From: Anton Pankratv Date: Mon, 25 Oct 2021 10:36:17 +0300 Subject: [PATCH] added ov InferRequest behaviour tests (#7811) --- .../behavior/ov_infer_request/callback.cpp | 21 ++ .../ov_infer_request/cancellation.cpp | 19 ++ .../infer_request_dynamic.cpp | 12 +- .../inference_chaining.cpp | 2 +- .../behavior/ov_infer_request/io_tensor.cpp | 22 ++ .../ov_infer_request/multithreading.cpp | 23 ++ .../behavior/ov_infer_request/wait.cpp | 22 ++ .../include/openvino/runtime/exception.hpp | 22 ++ .../src/cpp/ie_infer_request.cpp | 26 +- .../behavior/ov_infer_request/callback.cpp | 39 +++ .../ov_infer_request/cancellation.cpp | 19 ++ .../infer_request_dynamic.cpp} | 20 +- .../inference_chaining.cpp | 2 +- .../behavior/ov_infer_request/io_tensor.cpp | 84 +++++ .../ov_infer_request/multithreading.cpp | 41 +++ .../ov_infer_request/perf_counters.cpp | 59 ++++ .../behavior/ov_infer_request/wait.cpp | 45 +++ .../include/base/ov_behavior_test_utils.hpp | 3 +- .../infer_request/inference_chaining.hpp | 202 ------------ .../behavior/ov_infer_request/callback.hpp | 17 + .../ov_infer_request/cancellation.hpp | 19 ++ .../infer_request_dynamic.hpp | 62 ++++ .../ov_infer_request/inference_chaining.hpp | 61 ++++ .../behavior/ov_infer_request/io_tensor.hpp | 46 +++ .../ov_infer_request/multithreading.hpp | 17 + .../ov_infer_request/perf_counters.hpp | 19 ++ .../behavior/ov_infer_request/wait.hpp | 22 ++ .../behavior/ov_infer_request/callback.cpp | 132 ++++++++ .../ov_infer_request/cancellation.cpp | 66 ++++ .../infer_request_dynamic.cpp} | 290 +++++++++--------- .../ov_infer_request/inference_chaining.cpp | 188 ++++++++++++ .../behavior/ov_infer_request/io_tensor.cpp | 231 ++++++++++++++ .../ov_infer_request/multithreading.cpp | 92 ++++++ .../ov_infer_request/perf_counters.cpp | 42 +++ .../src/behavior/ov_infer_request/wait.cpp | 84 +++++ .../common_test_utils/test_assertions.hpp | 22 ++ .../common_test_utils/test_common.cpp | 1 + .../common_test_utils/test_common.hpp | 1 + .../functional_test_utils/ov_tensor_utils.hpp | 19 ++ .../src/ov_tensor_utils.cpp | 53 ++++ .../core/include/openvino/runtime/tensor.hpp | 8 +- 41 files changed, 1791 insertions(+), 384 deletions(-) create mode 100644 docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp create mode 100644 docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp rename docs/template_plugin/tests/functional/shared_tests_instances/behavior/{infer_request => ov_infer_request}/infer_request_dynamic.cpp (79%) rename docs/template_plugin/tests/functional/shared_tests_instances/behavior/{infer_request => ov_infer_request}/inference_chaining.cpp (94%) create mode 100644 docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp create mode 100644 docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp create mode 100644 docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp create mode 100644 inference-engine/src/inference_engine/include/openvino/runtime/exception.hpp create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/callback.cpp create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp rename 
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/{infer_request/infer_requset_dynamic.cpp => ov_infer_request/infer_request_dynamic.cpp} (86%) rename inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/{infer_request => ov_infer_request}/inference_chaining.cpp (94%) create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/wait.cpp delete mode 100644 inference-engine/tests/functional/plugin/shared/include/behavior/infer_request/inference_chaining.hpp create mode 100644 inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/callback.hpp create mode 100644 inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/cancellation.hpp create mode 100644 inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp create mode 100644 inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference_chaining.hpp create mode 100644 inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/io_tensor.hpp create mode 100644 inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/multithreading.hpp create mode 100644 inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/perf_counters.hpp create mode 100644 inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/wait.hpp create mode 100644 inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/callback.cpp create mode 100644 inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/cancellation.cpp rename inference-engine/tests/functional/plugin/shared/{include/behavior/infer_request/infer_request_dynamic.hpp => src/behavior/ov_infer_request/infer_request_dynamic.cpp} (55%) create mode 100644 inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference_chaining.cpp create mode 100644 inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp create mode 100644 inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/multithreading.cpp create mode 100644 inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/perf_counters.cpp create mode 100644 inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp create mode 100644 inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/ov_tensor_utils.hpp create mode 100644 inference-engine/tests/ie_test_utils/functional_test_utils/src/ov_tensor_utils.cpp diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp new file mode 100644 index 00000000000..a9d30193c0f --- /dev/null +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// 
SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/ov_infer_request/callback.hpp" + +using namespace ov::test::behavior; + +namespace { +const std::vector> configs = { + {} +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCallbackTests, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), + ::testing::ValuesIn(configs)), + OVInferRequestCallbackTests::getTestCaseName); +} // namespace diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp new file mode 100644 index 00000000000..479b3fce0c0 --- /dev/null +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/cancellation.hpp" + +using namespace ov::test::behavior; + +namespace { +const std::vector> configs = { + {}, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCancellationTests, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), + ::testing::ValuesIn(configs)), + OVInferRequestCancellationTests::getTestCaseName); +} // namespace diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request/infer_request_dynamic.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp similarity index 79% rename from docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request/infer_request_dynamic.cpp rename to docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index edfdfa24af4..ed9c5c698f6 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request/infer_request_dynamic.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -4,9 +4,9 @@ #include -#include "behavior/infer_request/infer_request_dynamic.hpp" +#include "behavior/ov_infer_request/infer_request_dynamic.hpp" -using namespace BehaviorTestsDefinitions; +using namespace ov::test::behavior; namespace { @@ -17,22 +17,22 @@ const std::vector> configs = { const std::vector> HeteroConfigs = { {{"TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE}}}; -INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestDynamicTests, +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestDynamicTests, ::testing::Combine( ::testing::Values(ngraph::builder::subgraph::makeSplitConvConcat()), ::testing::Values(std::vector, std::vector>>{{{1, 4, 20, 20}, {1, 10, 18, 18}}, {{2, 4, 20, 20}, {2, 10, 18, 18}}}), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(configs)), - InferRequestDynamicTests::getTestCaseName); + OVInferRequestDynamicTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, InferRequestDynamicTests, +INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestDynamicTests, ::testing::Combine( ::testing::Values(ngraph::builder::subgraph::makeSplitConvConcat()), ::testing::Values(std::vector, std::vector>>{{{1, 4, 20, 20}, {1, 10, 18, 18}}, {{2, 4, 20, 20}, {2, 10, 18, 18}}}), ::testing::Values(CommonTestUtils::DEVICE_HETERO), ::testing::ValuesIn(HeteroConfigs)), - 
InferRequestDynamicTests::getTestCaseName); + OVInferRequestDynamicTests::getTestCaseName); } // namespace diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request/inference_chaining.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp similarity index 94% rename from docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request/inference_chaining.cpp rename to docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp index 00e6da05b86..e5cbc7c9da8 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request/inference_chaining.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "behavior/infer_request/inference_chaining.hpp" +#include "behavior/ov_infer_request/inference_chaining.hpp" #include "common_test_utils/test_constants.hpp" using namespace ov::test::behavior; diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp new file mode 100644 index 00000000000..76ece4a8529 --- /dev/null +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -0,0 +1,22 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/ov_infer_request/io_tensor.hpp" + +using namespace ov::test::behavior; + +namespace { +const std::vector> configs = { + {} +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), + ::testing::ValuesIn(configs)), + OVInferRequestIOTensorTest::getTestCaseName); + +} // namespace diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp new file mode 100644 index 00000000000..cde9ffd9032 --- /dev/null +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/ov_infer_request/multithreading.hpp" + +using namespace ov::test::behavior; + +namespace { + +const std::vector> configs = { + {} +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestMultithreadingTests, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), + ::testing::ValuesIn(configs)), + OVInferRequestMultithreadingTests::getTestCaseName); + +} // namespace diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp new file mode 100644 index 00000000000..00803264b66 --- /dev/null +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -0,0 +1,22 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/ov_infer_request/wait.hpp" + +using namespace 
ov::test::behavior; + +namespace { +const std::vector> configs = { + {} +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestWaitTests, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), + ::testing::ValuesIn(configs)), + OVInferRequestWaitTests::getTestCaseName); + +} // namespace diff --git a/inference-engine/src/inference_engine/include/openvino/runtime/exception.hpp b/inference-engine/src/inference_engine/include/openvino/runtime/exception.hpp new file mode 100644 index 00000000000..129c3acee7e --- /dev/null +++ b/inference-engine/src/inference_engine/include/openvino/runtime/exception.hpp @@ -0,0 +1,22 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/except.hpp" +#include "openvino/runtime/common.hpp" + +namespace ov { +namespace runtime { +/// Thrown in case of canceled asynchronous operation +class OPENVINO_RUNTIME_API Cancelled : public Exception { + using Exception::Exception; +}; + +/// Thrown in case of busy infer request +class OPENVINO_RUNTIME_API Busy : public Exception { + using Exception::Exception; +}; +} // namespace runtime +} // namespace ov \ No newline at end of file diff --git a/inference-engine/src/inference_engine/src/cpp/ie_infer_request.cpp b/inference-engine/src/inference_engine/src/cpp/ie_infer_request.cpp index 0c55184e299..01d2adb4b1f 100644 --- a/inference-engine/src/inference_engine/src/cpp/ie_infer_request.cpp +++ b/inference-engine/src/inference_engine/src/cpp/ie_infer_request.cpp @@ -13,7 +13,7 @@ #include "ie_infer_async_request_base.hpp" #include "ie_ngraph_utils.hpp" #include "ie_remote_context.hpp" -#include "openvino/core/except.hpp" +#include "openvino/runtime/exception.hpp" #include "openvino/runtime/infer_request.hpp" #include "transformations/utils/utils.hpp" @@ -50,6 +50,8 @@ namespace InferenceEngine { OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized."); \ try { \ __VA_ARGS__; \ + } catch (const ::InferenceEngine::RequestBusy& ex) { \ + throw ov::runtime::Busy(ex.what()); \ } catch (const std::exception& ex) { \ throw ov::Exception(ex.what()); \ } catch (...) { \ @@ -405,11 +407,29 @@ void InferRequest::start_async() { } void InferRequest::wait() { - OV_INFER_REQ_CALL_STATEMENT(_impl->Wait(ie::InferRequest::RESULT_READY);) + OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized."); + try { + _impl->Wait(ie::InferRequest::RESULT_READY); + } catch (const ie::InferCancelled& e) { + throw Cancelled{e.what()}; + } catch (const std::exception& ex) { + throw Exception(ex.what()); + } catch (...) { + OPENVINO_UNREACHABLE("Unexpected exception"); + } } bool InferRequest::wait_for(const std::chrono::milliseconds timeout) { - OV_INFER_REQ_CALL_STATEMENT(return _impl->Wait(timeout.count()) == ie::OK;) + OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized."); + try { + return _impl->Wait(timeout.count()) == ie::OK; + } catch (const ie::InferCancelled& e) { + throw Cancelled{e.what()}; + } catch (const std::exception& ex) { + throw Exception(ex.what()); + } catch (...) 
{ + OPENVINO_UNREACHABLE("Unexpected exception"); + } } void InferRequest::set_callback(std::function callback) { diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/callback.cpp new file mode 100644 index 00000000000..b53e4155fef --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/ov_infer_request/callback.hpp" + +using namespace ov::test::behavior; + +namespace { +const std::vector> configs = { + {}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} +}; + +const std::vector> multiConfigs = { + {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCallbackTests, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(configs)), + OVInferRequestCallbackTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestCallbackTests, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + OVInferRequestCallbackTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestCallbackTests, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_AUTO), + ::testing::ValuesIn(multiConfigs)), + OVInferRequestCallbackTests::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp new file mode 100644 index 00000000000..132cf7d88eb --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/cancellation.hpp" + +using namespace ov::test::behavior; + +namespace { +const std::vector> configs = { + {}, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCancellationTests, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(configs)), + OVInferRequestCancellationTests::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/infer_requset_dynamic.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp similarity index 86% rename from inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/infer_requset_dynamic.cpp rename to inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index ff940261d53..8330d9ef26c 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/infer_requset_dynamic.cpp +++ 
b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -4,16 +4,12 @@ #include -#include "behavior/infer_request/infer_request_dynamic.hpp" +#include "behavior/ov_infer_request/infer_request_dynamic.hpp" -using namespace BehaviorTestsDefinitions; +using namespace ov::test::behavior; namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32 -}; - const std::vector> configs = { {} }; @@ -64,7 +60,7 @@ std::shared_ptr getFunction2() { return std::make_shared(concat, params, "SplitAddConcat"); } -INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_1, InferRequestDynamicTests, +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_1, OVInferRequestDynamicTests, ::testing::Combine( ::testing::Values(getFunction1()), ::testing::Values(std::vector, std::vector>>{ @@ -72,9 +68,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_1, InferRequestDynamicTests, {{2, 4, 20, 20}, {2, 4, 20, 20}}}), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::ValuesIn(configs)), - InferRequestDynamicTests::getTestCaseName); + OVInferRequestDynamicTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_2, InferRequestDynamicTests, +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_2, OVInferRequestDynamicTests, ::testing::Combine( ::testing::Values(getFunction2()), ::testing::Values(std::vector, std::vector>>{ @@ -82,9 +78,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_2, InferRequestDynamicTests, {{2, 4, 20, 20}, {2, 2, 20, 40}}}), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::ValuesIn(configs)), - InferRequestDynamicTests::getTestCaseName); + OVInferRequestDynamicTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, InferRequestDynamicTests, +INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestDynamicTests, ::testing::Combine( ::testing::Values(getFunction2()), ::testing::Values(std::vector, std::vector>>{ @@ -92,6 +88,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, InferRequestDynamicTests, {{2, 4, 20, 20}, {2, 2, 20, 40}}}), ::testing::Values(CommonTestUtils::DEVICE_HETERO), ::testing::ValuesIn(HeteroConfigs)), - InferRequestDynamicTests::getTestCaseName); + OVInferRequestDynamicTests::getTestCaseName); } // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/inference_chaining.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp similarity index 94% rename from inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/inference_chaining.cpp rename to inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp index 78e510c8505..007fc174531 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/inference_chaining.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "behavior/infer_request/inference_chaining.hpp" +#include "behavior/ov_infer_request/inference_chaining.hpp" #include "common_test_utils/test_constants.hpp" using namespace ov::test::behavior; diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp 
b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp new file mode 100644 index 00000000000..be79d7a6c42 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -0,0 +1,84 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/ov_infer_request/io_tensor.hpp" + +using namespace ov::test::behavior; + +namespace { +const std::vector> configs = { + {}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} +}; + +const std::vector> Multiconfigs = { + {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} +}; + +const std::vector> Autoconfigs = { + {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(configs)), + OVInferRequestIOTensorTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorTest, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(Multiconfigs)), + OVInferRequestIOTensorTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorTest, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestIOTensorTest::getTestCaseName); + +std::vector prcs = { + ov::element::boolean, + ov::element::bf16, + ov::element::f16, + ov::element::f32, + ov::element::f64, + ov::element::i4, + ov::element::i8, + ov::element::i16, + ov::element::i32, + ov::element::i64, + ov::element::u1, + ov::element::u4, + ov::element::u8, + ov::element::u16, + ov::element::u32, + ov::element::u64, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, + ::testing::Combine( + ::testing::ValuesIn(prcs), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(configs)), + OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, + ::testing::Combine( + ::testing::ValuesIn(prcs), + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(Multiconfigs)), + OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, + ::testing::Combine( + ::testing::ValuesIn(prcs), + ::testing::Values(CommonTestUtils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp new file mode 100644 index 00000000000..a1385a56a38 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include 
+ +#include "behavior/ov_infer_request/multithreading.hpp" + +using namespace ov::test::behavior; + +namespace { + +const std::vector> configs = { + {}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} +}; + +const std::vector> Multiconfigs = { + {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestMultithreadingTests, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(configs)), + OVInferRequestMultithreadingTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestMultithreadingTests, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(Multiconfigs)), + OVInferRequestMultithreadingTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestMultithreadingTests, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_AUTO), + ::testing::ValuesIn(Multiconfigs)), + OVInferRequestMultithreadingTests::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp new file mode 100644 index 00000000000..5b015b301e0 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp @@ -0,0 +1,59 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/perf_counters.hpp" + +using namespace ov::test::behavior; + +namespace { +TEST_P(OVInferRequestPerfCountersTest, CheckOperationInProfilingInfo) { + req = execNet.create_infer_request(); + ASSERT_NO_THROW(req.infer()); + + std::vector profiling_info; + ASSERT_NO_THROW(profiling_info = req.get_profiling_info()); + + for (const auto& op : function->get_ops()) { + auto op_is_in_profiling_info = std::any_of(std::begin(profiling_info), std::end(profiling_info), + [&] (const ov::runtime::ProfilingInfo& info) { + if (info.node_name.find(op->get_friendly_name() + "_") != std::string::npos || info.node_name == op->get_friendly_name()) { + return true; + } else { + return false; + } + }); + ASSERT_TRUE(op_is_in_profiling_info) << "For op: " << op; + } +} + +const std::vector> configs = { + {} +}; + +const std::vector> Multiconfigs = { + {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} +}; + +const std::vector> Autoconfigs = { + {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestPerfCountersTest, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(configs)), + OVInferRequestPerfCountersTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestPerfCountersTest, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(Multiconfigs)), + OVInferRequestPerfCountersTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestPerfCountersTest, + ::testing::Combine( + 
::testing::Values(CommonTestUtils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestPerfCountersTest::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/wait.cpp new file mode 100644 index 00000000000..b7d6dcca6ba --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -0,0 +1,45 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/ov_infer_request/wait.hpp" + +using namespace ov::test::behavior; + +namespace { + +const std::vector> configs = { + {}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, + {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} +}; + +const std::vector> Multiconfigs = { + {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} +}; + +const std::vector> Autoconfigs = { + {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestWaitTests, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(configs)), + OVInferRequestWaitTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestWaitTests, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_MULTI), + ::testing::ValuesIn(Multiconfigs)), + OVInferRequestWaitTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestWaitTests, + ::testing::Combine( + ::testing::Values(CommonTestUtils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestWaitTests::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp b/inference-engine/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp index d0b841a6827..8cce834dc2b 100644 --- a/inference-engine/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp @@ -49,6 +49,7 @@ public: SKIP_IF_CURRENT_TEST_IS_DISABLED() std::tie(targetDevice, configuration) = this->GetParam(); function = ngraph::builder::subgraph::makeConvPoolRelu(); + execNet = core->compile_model(function, targetDevice, configuration); } void TearDown() override { @@ -59,7 +60,7 @@ public: protected: ov::runtime::ExecutableNetwork execNet; - std::shared_ptr core = utils::PluginCache::get().core();; + std::shared_ptr core = utils::PluginCache::get().core(); std::string targetDevice; std::map configuration; std::shared_ptr function; diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request/inference_chaining.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request/inference_chaining.hpp deleted file mode 100644 index 4ed968e984b..00000000000 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request/inference_chaining.hpp +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include -#include -#include -#include 
-#include -#include - -#include "base/behavior_test_utils.hpp" -#include "openvino/core/attribute_visitor.hpp" -#include "openvino/core/function.hpp" -#include "openvino/core/node.hpp" -#include "openvino/core/partial_shape.hpp" -#include "openvino/core/rank.hpp" -#include "openvino/core/shape.hpp" -#include "openvino/core/type/element_type.hpp" -#include "openvino/core/type/element_type_traits.hpp" -#include "openvino/op/parameter.hpp" -#include "openvino/runtime/infer_request.hpp" -#include "openvino/runtime/tensor.hpp" - -namespace ov { -namespace test { -namespace behavior { - -class OVInferenceChaining : public OVInferRequestTests { -protected: - static std::shared_ptr getFirstStaticFunction(const ov::PartialShape &shape = {3}) { - auto params = ngraph::builder::makeDynamicParams(element::Type_t::f32, {shape, shape, shape}); - params[0]->get_output_tensor(0).set_names({"input_tensor_0"}); - params[0]->set_friendly_name("param_0"); - params[1]->get_output_tensor(0).set_names({"input_tensor_1"}); - params[1]->set_friendly_name("param_1"); - params[2]->get_output_tensor(0).set_names({"input_tensor_2"}); - params[2]->set_friendly_name("param_2"); - auto eltwise = ngraph::builder::makeEltwise(params[0], params[1], ngraph::helpers::EltwiseTypes::ADD); - auto eltwise2 = ngraph::builder::makeEltwise(eltwise, params[2], ngraph::helpers::EltwiseTypes::ADD); - eltwise2->get_output_tensor(0).set_names({"result_tensor_0"}); - eltwise2->set_friendly_name("result_0"); - - return std::make_shared(eltwise2, ov::ParameterVector(params)); - } - - static std::shared_ptr getSecondStaticFunction(const ov::PartialShape &shape = {3}) { - auto params = ngraph::builder::makeDynamicParams(element::Type_t::f32, {shape, shape}); - params[0]->get_output_tensor(0).set_names({"input_tensor_0"}); - params[0]->set_friendly_name("param_0"); - params[1]->get_output_tensor(0).set_names({"input_tensor_1"}); - params[1]->set_friendly_name("param_1"); - auto eltwise = ngraph::builder::makeEltwise(params[0], params[1], ngraph::helpers::EltwiseTypes::MULTIPLY); - eltwise->get_output_tensor(0).set_names({"result_tensor_0"}); - eltwise->set_friendly_name("result_0"); - - return std::make_shared(eltwise, ov::ParameterVector(params)); - } - - static std::shared_ptr getThirdStaticFunction(const ov::PartialShape &shape = {3}) { - auto params = ngraph::builder::makeDynamicParams(element::Type_t::f32, {shape, shape, shape, shape}); - params[0]->get_output_tensor(0).set_names({"input_tensor_0"}); - params[0]->set_friendly_name("param_0"); - params[1]->get_output_tensor(0).set_names({"input_tensor_1"}); - params[1]->set_friendly_name("param_1"); - params[2]->get_output_tensor(0).set_names({"input_tensor_2"}); - params[2]->set_friendly_name("param_2"); - params[3]->get_output_tensor(0).set_names({"input_tensor_3"}); - params[3]->set_friendly_name("param_3"); - auto eltwise = ngraph::builder::makeEltwise(params[0], params[1], ngraph::helpers::EltwiseTypes::ADD); - auto eltwise2 = ngraph::builder::makeEltwise(eltwise, params[2], ngraph::helpers::EltwiseTypes::ADD); - auto eltwise3 = ngraph::builder::makeEltwise(eltwise2, params[3], ngraph::helpers::EltwiseTypes::MULTIPLY); - eltwise3->get_output_tensor(0).set_names({"result_tensor_0"}); - eltwise3->set_friendly_name("result_0"); - - return std::make_shared(eltwise3, ov::ParameterVector(params)); - } - - template - ov::runtime::Tensor tensor(const std::vector &v) { - auto type = ov::element::from(); - ov::runtime::Tensor tensor(type, {v.size()}); - std::memcpy(tensor.data(), v.data(), 
v.size() * type.size()); - - return tensor; - } - - std::shared_ptr function0; - std::shared_ptr function1; - std::shared_ptr function2; - - bool outputToInput = true; - -public: - void Run() { - ov::runtime::ExecutableNetwork execNet0, execNet1, execNet2; - ASSERT_NO_THROW(execNet0 = core->compile_model(function0, targetDevice, configuration)); - ASSERT_NO_THROW(execNet1 = core->compile_model(function1, targetDevice, configuration)); - ASSERT_NO_THROW(execNet2 = core->compile_model(function2, targetDevice, configuration)); - - ov::runtime::InferRequest r0, r1, r2; - ASSERT_NO_THROW(r0 = execNet0.create_infer_request()); - ASSERT_NO_THROW(r1 = execNet1.create_infer_request()); - ASSERT_NO_THROW(r2 = execNet2.create_infer_request()); - - // perform inference chaining - if (outputToInput) { - ASSERT_NO_THROW(r1.set_tensor("input_tensor_0", r0.get_tensor("result_tensor_0"))); - } else { - ASSERT_NO_THROW(r0.set_tensor("result_tensor_0", r1.get_tensor("input_tensor_0"))); - } - - // create input tensors - ov::runtime::Tensor t0 = tensor(std::vector{1.0f, 2.0f, 3.0f}); - ov::runtime::Tensor t1 = tensor(std::vector{4.0f, 5.0f, 6.0f}); - ov::runtime::Tensor t2 = tensor(std::vector{7.0f, 8.0f, 9.0f}); - ov::runtime::Tensor t3 = tensor(std::vector{2.0f, 3.0f, 2.0f}); - - ASSERT_NO_THROW(r0.set_tensor("input_tensor_0", t0)); - ASSERT_NO_THROW(r0.set_tensor("input_tensor_1", t1)); - ASSERT_NO_THROW(r0.set_tensor("input_tensor_2", t2)); - ASSERT_NO_THROW(r1.set_tensor("input_tensor_1", t3)); - - ASSERT_NO_THROW(r2.set_tensor("input_tensor_0", t0)); - ASSERT_NO_THROW(r2.set_tensor("input_tensor_1", t1)); - ASSERT_NO_THROW(r2.set_tensor("input_tensor_2", t2)); - ASSERT_NO_THROW(r2.set_tensor("input_tensor_3", t3)); - - ASSERT_NO_THROW(r0.infer()); - ASSERT_NO_THROW(r1.infer()); - ASSERT_NO_THROW(r2.infer()); - - // check results - std::vector reference1 = {12.0f, 15.0f, 18.0f}; - std::vector reference2 = {24.0f, 45.0f, 36.0f}; - - auto rti = r0.get_tensor("result_tensor_0"); - auto rt0 = r1.get_tensor("result_tensor_0"); - auto rt1 = r2.get_tensor("result_tensor_0"); - - for (size_t i = 0; i < reference1.size(); ++i) { - EXPECT_EQ(reference1[i], rti.data()[i]); - EXPECT_EQ(reference2[i], rt0.data()[i]); - EXPECT_EQ(reference2[i], rt1.data()[i]); - } - } -}; - -TEST_P(OVInferenceChaining, StaticOutputToStaticInput) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - - function0 = getFirstStaticFunction(); - function1 = getSecondStaticFunction(); - function2 = getThirdStaticFunction(); - - Run(); -} - -TEST_P(OVInferenceChaining, StaticOutputToDynamicInput) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - - const auto dynamic = ov::PartialShape::dynamic(ov::Rank(1)); - function0 = getFirstStaticFunction(); - function1 = getSecondStaticFunction(dynamic); - function2 = getThirdStaticFunction(dynamic); - - Run(); -} - -TEST_P(OVInferenceChaining, DynamicOutputToDynamicInput) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - - const auto dynamic = ov::PartialShape::dynamic(); - function0 = getFirstStaticFunction(dynamic); - function1 = getSecondStaticFunction(dynamic); - function2 = getThirdStaticFunction(dynamic); - - Run(); -} - -TEST_P(OVInferenceChaining, DynamicInputToDynamicOutput) { - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - - 
this->outputToInput = false; - - const auto dynamic = ov::PartialShape::dynamic(); - function0 = getFirstStaticFunction(dynamic); - function1 = getSecondStaticFunction(dynamic); - function2 = getThirdStaticFunction(dynamic); - - Run(); -} - -} // namespace behavior -} // namespace test -} // namespace ov diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/callback.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/callback.hpp new file mode 100644 index 00000000000..09d478cf58a --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/callback.hpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "base/ov_behavior_test_utils.hpp" + +namespace ov { +namespace test { +namespace behavior { +struct OVInferRequestCallbackTests : public OVInferRequestTests { + static std::string getTestCaseName(const testing::TestParamInfo& obj); +}; +} // namespace behavior +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/cancellation.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/cancellation.hpp new file mode 100644 index 00000000000..e041d076aa0 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/cancellation.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "base/ov_behavior_test_utils.hpp" + +namespace ov { +namespace test { +namespace behavior { +struct OVInferRequestCancellationTests : public OVInferRequestTests { + static std::string getTestCaseName(const testing::TestParamInfo& obj); +}; +} // namespace behavior +} // namespace test +} // namespace ov diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp new file mode 100644 index 00000000000..ca63a7cee10 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp @@ -0,0 +1,62 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include "functional_test_utils/ov_plugin_cache.hpp" +#include "ie_extension.h" +#include +#include "openvino/core/shape.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ngraph_functions/builders.hpp" +#include "transformations/utils/utils.hpp" +#include +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "functional_test_utils/blob_utils.hpp" +#include "ngraph_functions/subgraph_builders.hpp" +#include "shared_test_classes/subgraph/basic_lstm.hpp" + +// TODO [mandrono]: move current test case inside CPU plug-in and return the original tests +namespace ov { +namespace test { +namespace behavior { + +using OVInferRequestDynamicParams = std::tuple< + std::shared_ptr, // ov function + std::vector, std::vector>>, // input/expected output shapes per inference + std::string, // Device name + 
std::map // Config +>; + +class OVInferRequestDynamicTests : public testing::WithParamInterface, + public CommonTestUtils::TestsCommon { +public: + static std::string getTestCaseName(testing::TestParamInfo obj); + +protected: + void SetUp() override; + + void TearDown() override; + + std::shared_ptr ie = utils::PluginCache::get().core(); + std::shared_ptr function; + std::string targetDevice; + std::map configuration; + std::vector, std::vector>> inOutShapes; +}; +} // namespace behavior +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference_chaining.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference_chaining.hpp new file mode 100644 index 00000000000..a439affd984 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference_chaining.hpp @@ -0,0 +1,61 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include +#include +#include + +#include "base/behavior_test_utils.hpp" +#include "openvino/core/attribute_visitor.hpp" +#include "openvino/core/function.hpp" +#include "openvino/core/node.hpp" +#include "openvino/core/partial_shape.hpp" +#include "openvino/core/rank.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/type/element_type.hpp" +#include "openvino/core/type/element_type_traits.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/runtime/infer_request.hpp" +#include "openvino/runtime/tensor.hpp" + +namespace ov { +namespace test { +namespace behavior { + +class OVInferenceChaining : public OVInferRequestTests { +protected: + static std::shared_ptr getFirstStaticFunction(const ov::PartialShape &shape = {3}); + + static std::shared_ptr getSecondStaticFunction(const ov::PartialShape &shape = {3}); + + static std::shared_ptr getThirdStaticFunction(const ov::PartialShape &shape = {3}); + + template + ov::runtime::Tensor tensor(const std::vector &v) { + auto type = ov::element::from(); + ov::runtime::Tensor tensor(type, {v.size()}); + std::memcpy(tensor.data(), v.data(), v.size() * type.size()); + + return tensor; + } + + std::shared_ptr function0; + std::shared_ptr function1; + std::shared_ptr function2; + + bool outputToInput = true; + +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + + void Run(); +}; +} // namespace behavior +} // namespace test +} // namespace ov diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/io_tensor.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/io_tensor.hpp new file mode 100644 index 00000000000..f857fa76f3e --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/io_tensor.hpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "shared_test_classes/subgraph/basic_lstm.hpp" +#include "base/ov_behavior_test_utils.hpp" + +namespace ov { +namespace test { +namespace behavior { + +struct OVInferRequestIOTensorTest : public OVInferRequestTests { + static std::string getTestCaseName(const testing::TestParamInfo& obj); + void SetUp() override; + void TearDown() override; + runtime::InferRequest req; + ov::Output input; + ov::Output output; +}; + +using 
OVInferRequestSetPrecisionParams = std::tuple< + element::Type, // element type + std::string, // Device name + std::map // Config +>; +struct OVInferRequestIOTensorSetPrecisionTest : public testing::WithParamInterface, + public CommonTestUtils::TestsCommon { + static std::string getTestCaseName(const testing::TestParamInfo& obj); + void SetUp() override; + void TearDown() override; + std::shared_ptr core = utils::PluginCache::get().core(); + std::shared_ptr function; + runtime::ExecutableNetwork execNet; + runtime::InferRequest req; + std::string target_device; + runtime::ConfigMap config; + element::Type element_type; +}; +} // namespace behavior +} // namespace test +} // namespace ov diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/multithreading.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/multithreading.hpp new file mode 100644 index 00000000000..00824d97a76 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/multithreading.hpp @@ -0,0 +1,17 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "base/ov_behavior_test_utils.hpp" + +namespace ov { +namespace test { +namespace behavior { +struct OVInferRequestMultithreadingTests : public OVInferRequestTests { + static std::string getTestCaseName(const testing::TestParamInfo& obj); +}; +} // namespace behavior +} // namespace test +} // namespace ov diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/perf_counters.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/perf_counters.hpp new file mode 100644 index 00000000000..bb87f203791 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/perf_counters.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "base/ov_behavior_test_utils.hpp" + +namespace ov { +namespace test { +namespace behavior { +struct OVInferRequestPerfCountersTest : public OVInferRequestTests { + static std::string getTestCaseName(const testing::TestParamInfo& obj); + void SetUp() override; + runtime::InferRequest req; +}; +} // namespace behavior +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/wait.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/wait.hpp new file mode 100644 index 00000000000..856461a0a6e --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_infer_request/wait.hpp @@ -0,0 +1,22 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "base/ov_behavior_test_utils.hpp" + +namespace ov { +namespace test { +namespace behavior { +struct OVInferRequestWaitTests : public OVInferRequestTests { + static std::string getTestCaseName(const testing::TestParamInfo& obj); + void SetUp() override; + void TearDown() override; + runtime::InferRequest req; + ov::Output input; + ov::Output output; +}; +} // namespace behavior +} // namespace test +} // namespace ov diff --git a/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/callback.cpp b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/callback.cpp new file mode 
100644 index 00000000000..dbb327236dc --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/callback.cpp @@ -0,0 +1,132 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "shared_test_classes/subgraph/basic_lstm.hpp" +#include "behavior/ov_infer_request/callback.hpp" + +namespace ov { +namespace test { +namespace behavior { + +std::string OVInferRequestCallbackTests::getTestCaseName(const testing::TestParamInfo& obj) { + return OVInferRequestTests::getTestCaseName(obj); +} + +TEST_P(OVInferRequestCallbackTests, canCallAsyncWithCompletionCallback) { + runtime::InferRequest req; + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + bool is_called = false; + OV_ASSERT_NO_THROW(req.set_callback([&] (std::exception_ptr exception_ptr) { + // HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE + ASSERT_EQ(exception_ptr, nullptr); + is_called = true; + })); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + ASSERT_TRUE(is_called); +} + +TEST_P(OVInferRequestCallbackTests, syncInferDoesNotCallCompletionCallback) { + runtime::InferRequest req; + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + bool is_called = false; + req.set_callback([&] (std::exception_ptr exception_ptr) { + ASSERT_EQ(nullptr, exception_ptr); + is_called = true; + }); + req.infer(); + ASSERT_FALSE(is_called); +} + +// test that can wait all callbacks on dtor +TEST_P(OVInferRequestCallbackTests, canStartSeveralAsyncInsideCompletionCallbackWithSafeDtor) { + const int NUM_ITER = 10; + struct TestUserData { + std::atomic numIter = {0}; + std::promise promise; + }; + TestUserData data; + + runtime::InferRequest req; + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(req.set_callback([&] (std::exception_ptr exception_ptr) { + if (exception_ptr) { + data.promise.set_exception(exception_ptr); + } else { + if (data.numIter.fetch_add(1) != NUM_ITER) { + req.start_async(); + } else { + data.promise.set_value(true); + } + } + })); + auto future = data.promise.get_future(); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + future.wait(); + auto callbackStatus = future.get(); + ASSERT_TRUE(callbackStatus); + auto dataNumIter = data.numIter - 1; + ASSERT_EQ(NUM_ITER, dataNumIter); +} + +TEST_P(OVInferRequestCallbackTests, returnGeneralErrorIfCallbackThrowException) { + runtime::InferRequest req; + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(req.set_callback([] (std::exception_ptr) { + OPENVINO_UNREACHABLE("Throw"); + })); + OV_ASSERT_NO_THROW(req.start_async()); + ASSERT_THROW(req.wait(), ov::Exception); +} + +TEST_P(OVInferRequestCallbackTests, ReturnResultNotReadyFromWaitInAsyncModeForTooSmallTimeout) { + // GetNetwork(3000, 380) make inference around 20ms on GNA SW + // so increases chances for getting RESULT_NOT_READY + OV_ASSERT_NO_THROW(execNet = core->compile_model( + SubgraphTestsDefinitions::Basic_LSTM_S::GetNetwork(300, 38), targetDevice, configuration)); + runtime::InferRequest req; + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + std::promise callbackTimeStamp; + auto callbackTimeStampFuture = callbackTimeStamp.get_future(); + // add a callback to the request and capture the timestamp + OV_ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) { + if (exception_ptr) { + callbackTimeStamp.set_exception(exception_ptr); + } else { + 
callbackTimeStamp.set_value(std::chrono::system_clock::now()); + } + })); + OV_ASSERT_NO_THROW(req.start_async()); + bool ready = false; + OV_ASSERT_NO_THROW(ready = req.wait_for({})); + // get timestamp taken AFTER return from the wait(STATUS_ONLY) + const auto afterWaitTimeStamp = std::chrono::system_clock::now(); + // IF the callback timestamp is larger than the afterWaitTimeStamp + // then we should observe false ready result + if (afterWaitTimeStamp < callbackTimeStampFuture.get()) { + ASSERT_FALSE(ready); + } + OV_ASSERT_NO_THROW(req.wait()); +} + +TEST_P(OVInferRequestCallbackTests, ImplDoesNotCopyCallback) { + runtime::InferRequest req; + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + { + auto somePtr = std::make_shared(42); + OV_ASSERT_NO_THROW(req.set_callback([somePtr] (std::exception_ptr exception_ptr) { + ASSERT_EQ(nullptr, exception_ptr); + ASSERT_EQ(1, somePtr.use_count()); + })); + } + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); +} + +} // namespace behavior +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/cancellation.cpp b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/cancellation.cpp new file mode 100644 index 00000000000..ff5b9163893 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/cancellation.cpp @@ -0,0 +1,66 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/ov_infer_request/cancellation.hpp" +#include "openvino/runtime/exception.hpp" + +namespace ov { +namespace test { +namespace behavior { + +std::string OVInferRequestCancellationTests::getTestCaseName(const testing::TestParamInfo& obj) { + return OVInferRequestTests::getTestCaseName(obj); +} + +TEST_P(OVInferRequestCancellationTests, canCancelAsyncRequest) { + runtime::InferRequest req; + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.cancel()); + try { + req.wait(); + } catch (const ov::runtime::Cancelled&) { + SUCCEED(); + } +} + +TEST_P(OVInferRequestCancellationTests, CanResetAfterCancelAsyncRequest) { + runtime::InferRequest req; + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.cancel()); + try { + req.wait(); + } catch (const ov::runtime::Cancelled&) { + SUCCEED(); + } + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); +} + +TEST_P(OVInferRequestCancellationTests, canCancelBeforeAsyncRequest) { + runtime::InferRequest req; + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(req.cancel()); +} + +TEST_P(OVInferRequestCancellationTests, canCancelInferRequest) { + runtime::InferRequest req; + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + auto infer = std::async(std::launch::async, [&req]{req.infer();}); + while (!req.wait_for({})) { + } + OV_ASSERT_NO_THROW(req.cancel()); + try { + infer.get(); + } catch (const ov::runtime::Cancelled&) { + SUCCEED(); + } +} + +} // namespace behavior +} // namespace test +} // namespace ov diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request/infer_request_dynamic.hpp b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp similarity index 55% rename from 
inference-engine/tests/functional/plugin/shared/include/behavior/infer_request/infer_request_dynamic.hpp rename to inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp index 31c5b29e040..b0c8c1e29e1 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request/infer_request_dynamic.hpp +++ b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -2,8 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - #include #include #include @@ -14,7 +12,6 @@ #include "functional_test_utils/ov_plugin_cache.hpp" #include "ie_extension.h" #include -#include "openvino/core/partial_shape.hpp" #include "openvino/core/shape.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ngraph_functions/utils/ngraph_helpers.hpp" @@ -29,213 +26,198 @@ #include "functional_test_utils/blob_utils.hpp" #include "ngraph_functions/subgraph_builders.hpp" #include "shared_test_classes/subgraph/basic_lstm.hpp" +#include "behavior/ov_infer_request/infer_request_dynamic.hpp" -// TODO [mandrono]: move current test case inside CPU plug-in and return the original tests -namespace BehaviorTestsDefinitions { +namespace ov { +namespace test { +namespace behavior { -typedef std::tuple< - std::shared_ptr, // ov function - std::vector, std::vector>>, // input/expected output shapes per inference - std::string, // Device name - std::map // Config -> InferRequestDynamicParams; - -class InferRequestDynamicTests : public testing::WithParamInterface, - public CommonTestUtils::TestsCommon { -public: - static std::string getTestCaseName(testing::TestParamInfo obj) { - std::shared_ptr func; - std::vector, std::vector>> inOutShapes; - std::string targetDevice; - std::map configuration; - std::tie(func, inOutShapes, targetDevice, configuration) = obj.param; - std::ostringstream result; - result << "function=" << func->get_friendly_name() << "_"; - result << "inOutShape=("; - for (const auto& inOutShape : inOutShapes) { - result << "(" << CommonTestUtils::vec2str(inOutShape.first) << "_" << CommonTestUtils::vec2str(inOutShape.second) << ")"; - } - result << ")_"; - result << "targetDevice=" << targetDevice; - if (!configuration.empty()) { - for (auto& configItem : configuration) { - result << "configItem=" << configItem.first << "_" << configItem.second << "_"; - } - } - return result.str(); - } - -protected: - void SetUp() override { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - std::tie(function, inOutShapes, targetDevice, configuration) = this->GetParam(); - } - - void TearDown() override { - if (!configuration.empty()) { - ov::test::utils::PluginCache::get().reset(); - } - } - - std::shared_ptr ie = ov::test::utils::PluginCache::get().core(); - std::shared_ptr function; +std::string OVInferRequestDynamicTests::getTestCaseName(testing::TestParamInfo obj) { + std::shared_ptr func; + std::vector, std::vector>> inOutShapes; std::string targetDevice; std::map configuration; - std::vector, std::vector>> inOutShapes; -}; + std::tie(func, inOutShapes, targetDevice, configuration) = obj.param; + std::ostringstream result; + result << "function=" << func->get_friendly_name() << "_"; + result << "inOutShape=("; + for (const auto& inOutShape : inOutShapes) { + result << "(" << CommonTestUtils::vec2str(inOutShape.first) << "_" << CommonTestUtils::vec2str(inOutShape.second) << ")"; + } + result << ")_"; + result << "targetDevice=" << targetDevice; + if (!configuration.empty()) { + for (auto& configItem : 
configuration) { + result << "configItem=" << configItem.first << "_" << configItem.second << "_"; + } + } + return result.str(); +} -TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithoutSetShape) { +void OVInferRequestDynamicTests::SetUp() { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + std::tie(function, inOutShapes, targetDevice, configuration) = this->GetParam(); +} + +void OVInferRequestDynamicTests::TearDown() { + if (!configuration.empty()) { + PluginCache::get().reset(); + } + function.reset(); +} + +TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithoutSetShape) { const std::string tensor_name = "input_tensor"; std::map shapes; shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20}; - ASSERT_NO_THROW(function->reshape(shapes)); + OV_ASSERT_NO_THROW(function->reshape(shapes)); // Load ov::Function to target plugins auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest ov::runtime::InferRequest req; ov::runtime::Tensor tensor; - ASSERT_NO_THROW(req = execNet.create_infer_request()); - ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); } -TEST_P(InferRequestDynamicTests, InferDynamicNetworkBoundWithoutSetShape) { +TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkBoundWithoutSetShape) { const std::string tensor_name = "input_tensor"; std::map shapes; shapes[tensor_name] = {ov::Dimension(0, 5), 4, 20, 20}; - ASSERT_NO_THROW(function->reshape(shapes)); + OV_ASSERT_NO_THROW(function->reshape(shapes)); // Load ov::Function to target plugins auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest ov::runtime::InferRequest req; ov::runtime::Tensor tensor; - ASSERT_NO_THROW(req = execNet.create_infer_request()); - ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); } -TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetTensor) { +TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithGetTensor) { const std::string tensor_name = "input_tensor"; const ov::Shape refShape = inOutShapes[0].first; const ov::Shape refOutShape = inOutShapes[0].second; std::map shapes; shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20}; - ASSERT_NO_THROW(function->reshape(shapes)); + OV_ASSERT_NO_THROW(function->reshape(shapes)); // Load ov::Function to target plugins auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest ov::runtime::InferRequest req; ov::runtime::Tensor tensor, otensor; const std::string outputname = function->outputs().back().get_any_name(); - ASSERT_NO_THROW(req = execNet.create_infer_request()); - //ASSERT_NO_THROW(req.SetShape(tensor_name, {1, 4, 20, 20})); - ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); - ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20})); + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + //OV_ASSERT_NO_THROW(req.SetShape(tensor_name, {1, 4, 20, 20})); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); + OV_ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20})); ASSERT_EQ(tensor.get_shape(), refShape); - ASSERT_NO_THROW(otensor = req.get_tensor(outputname)); + 
OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputname)); ASSERT_EQ(0, otensor.get_size()); // output tensor is not allocated ASSERT_EQ(function->output().get_element_type(), otensor.get_element_type()); // by it has type - ASSERT_NO_THROW(req.infer()); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); EXPECT_NE(0, otensor.get_size()); // output tensor is allocated after infer - ASSERT_NO_THROW(otensor = req.get_tensor(outputname)); + OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputname)); ASSERT_EQ(otensor.get_shape(), refOutShape); } -TEST_P(InferRequestDynamicTests, InferUpperBoundNetworkWithGetTensor) { +TEST_P(OVInferRequestDynamicTests, InferUpperBoundNetworkWithGetTensor) { const std::string tensor_name = "input_tensor"; const ov::Shape refShape = inOutShapes[0].first; const ov::Shape refOutShape = inOutShapes[0].second; std::map shapes; shapes[tensor_name] = {ov::Dimension(0, 19), 4, 20, 20}; - ASSERT_NO_THROW(function->reshape(shapes)); + OV_ASSERT_NO_THROW(function->reshape(shapes)); // Load ov::Function to target plugins auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest ov::runtime::InferRequest req; ov::runtime::Tensor tensor, otensor; const std::string outputname = function->outputs().back().get_any_name(); - ASSERT_NO_THROW(req = execNet.create_infer_request()); - //ASSERT_NO_THROW(req.SetShape(tensor_name, {1, 4, 20, 20})); - ASSERT_NO_THROW(otensor = req.get_tensor(outputname)); + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + //OV_ASSERT_NO_THROW(req.SetShape(tensor_name, {1, 4, 20, 20})); + OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputname)); ASSERT_EQ(0, otensor.get_size()); // output tensor is not allocated ASSERT_EQ(function->output().get_element_type(), otensor.get_element_type()); // by it has type - ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); - ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20})); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); + OV_ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20})); ASSERT_EQ(tensor.get_shape(), refShape); - ASSERT_NO_THROW(req.infer()); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); ASSERT_EQ(otensor.get_shape(), refOutShape); } -TEST_P(InferRequestDynamicTests, InferFullyDynamicNetworkWithGetTensor) { +TEST_P(OVInferRequestDynamicTests, InferFullyDynamicNetworkWithGetTensor) { const std::string tensor_name = "input_tensor"; const ov::Shape refShape = inOutShapes[0].first; const ov::Shape refOutShape = inOutShapes[0].second; std::map shapes; shapes[tensor_name] = ov::PartialShape::dynamic(); - ASSERT_NO_THROW(function->reshape(shapes)); + OV_ASSERT_NO_THROW(function->reshape(shapes)); // Load ov::Function to target plugins auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest ov::runtime::InferRequest req; ov::runtime::Tensor tensor, otensor; const std::string outputName = function->outputs().back().get_any_name(); - ASSERT_NO_THROW(req = execNet.create_infer_request()); - //ASSERT_NO_THROW(req.SetShape(tensor_name, {1, 4, 20, 20})); - ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); - ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20})); + 
OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + //OV_ASSERT_NO_THROW(req.SetShape(tensor_name, {1, 4, 20, 20})); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); + OV_ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20})); ASSERT_EQ(tensor.get_shape(), refShape); - ASSERT_NO_THROW(otensor = req.get_tensor(outputName)); + OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputName)); ASSERT_EQ(0, otensor.get_size()); // output tensor is not allocated ASSERT_EQ(function->output().get_element_type(), otensor.get_element_type()); // by it has type - ASSERT_NO_THROW(req.infer()); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(req.wait()); - ASSERT_NO_THROW(otensor = req.get_tensor(outputName)); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputName)); ASSERT_EQ(otensor.get_shape(), refOutShape); } -TEST_P(InferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetTensorLower) { +TEST_P(OVInferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetTensorLower) { const std::string tensor_name = "input_tensor"; const ov::Shape refShape = inOutShapes[0].first; const ov::Shape refOutShape = inOutShapes[0].second; std::map shapes; shapes[tensor_name] = {ov::Dimension(2, 3), 4, 20, 20}; - ASSERT_NO_THROW(function->reshape(shapes)); + OV_ASSERT_NO_THROW(function->reshape(shapes)); // Load ov::Function to target plugins auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest ov::runtime::InferRequest req; ov::runtime::Tensor tensor; - ASSERT_NO_THROW(req = execNet.create_infer_request()); - ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); - ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20})); + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); + OV_ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20})); // Plugin may or may not throw in case if input tensor has dimensions that are out of bounds //ASSERT_THROW(req.infer(), ov::Exception); } -TEST_P(InferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetTensorUpper) { +TEST_P(OVInferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetTensorUpper) { const std::string tensor_name = "input_tensor"; const ov::Shape refShape = inOutShapes[0].first; const ov::Shape refOutShape = inOutShapes[0].second; std::map shapes; shapes[tensor_name] = {ov::Dimension(1, 2), 4, 20, 20}; - ASSERT_NO_THROW(function->reshape(shapes)); + OV_ASSERT_NO_THROW(function->reshape(shapes)); // Load ov::Function to target plugins auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest ov::runtime::InferRequest req; ov::runtime::Tensor tensor; - ASSERT_NO_THROW(req = execNet.create_infer_request()); - ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); - ASSERT_NO_THROW(tensor.set_shape({3, 4, 20, 20})); + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); + OV_ASSERT_NO_THROW(tensor.set_shape({3, 4, 20, 20})); // Plugin may or may not throw in case if input tensor has dimensions that are out of bounds // ASSERT_THROW(req.infer(), ov::Exception); } -TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetTensor2times) { +TEST_P(OVInferRequestDynamicTests, 
InferDynamicNetworkWithGetTensor2times) { const std::string tensor_name = "input_tensor"; const ov::Shape refShape = inOutShapes[0].first; const ov::Shape refShape2 = inOutShapes[1].first; @@ -243,106 +225,106 @@ TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetTensor2times) { const ov::Shape refOutShape2 = inOutShapes[1].second; std::map shapes; shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20}; - ASSERT_NO_THROW(function->reshape(shapes)); + OV_ASSERT_NO_THROW(function->reshape(shapes)); // Load ov::Function to target plugins auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest ov::runtime::InferRequest req; ov::runtime::Tensor tensor; - ASSERT_NO_THROW(req = execNet.create_infer_request()); - ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); - ASSERT_NO_THROW(tensor.set_shape(refShape)); + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); + OV_ASSERT_NO_THROW(tensor.set_shape(refShape)); ASSERT_EQ(tensor.get_shape(), refShape); - ASSERT_NO_THROW(req.infer()); - ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); req.wait(); const std::string outputName = function->outputs().back().get_any_name(); - ASSERT_NO_THROW(tensor = req.get_tensor(outputName)); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName)); ASSERT_EQ(tensor.get_shape(), refOutShape); - ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); - ASSERT_NO_THROW(tensor.set_shape(refShape2)); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); + OV_ASSERT_NO_THROW(tensor.set_shape(refShape2)); ASSERT_EQ(tensor.get_shape(), refShape2); - ASSERT_NO_THROW(req.infer()); - ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); req.wait(); - ASSERT_NO_THROW(tensor = req.get_tensor(outputName)); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName)); ASSERT_EQ(tensor.get_shape(), refOutShape2); } -TEST_P(InferRequestDynamicTests, GetSameTensor2times) { +TEST_P(OVInferRequestDynamicTests, GetSameTensor2times) { const std::string tensor_name = "input_tensor"; const ov::Shape refShape = inOutShapes[0].first; std::map shapes; shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20}; - ASSERT_NO_THROW(function->reshape(shapes)); + OV_ASSERT_NO_THROW(function->reshape(shapes)); // Load ov::Function to target plugins auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest ov::runtime::InferRequest req; ov::runtime::Tensor tensor; - ASSERT_NO_THROW(req = execNet.create_infer_request()); - ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); - ASSERT_NO_THROW(tensor.set_shape(refShape)); + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); + OV_ASSERT_NO_THROW(tensor.set_shape(refShape)); ASSERT_EQ(tensor.get_shape(), refShape); - ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name())); ASSERT_EQ(tensor.get_shape(), refShape); } -TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetTensor) { +TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithSetTensor) { const std::string 
tensor_name = "input_tensor"; const ov::Shape refShape = inOutShapes[0].first; const ov::Shape refOutShape = inOutShapes[0].second; std::map shapes; shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20}; - ASSERT_NO_THROW(function->reshape(shapes)); + OV_ASSERT_NO_THROW(function->reshape(shapes)); // Load ov::Function to target plugins auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest ov::runtime::InferRequest req; ov::runtime::Tensor tensor(ov::element::f32, refShape); - ASSERT_NO_THROW(req = execNet.create_infer_request()); - ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor)); + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor)); ASSERT_EQ(tensor.get_shape(), refShape); - ASSERT_NO_THROW(req.infer()); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); const std::string outputName = function->outputs().back().get_any_name(); - ASSERT_NO_THROW(tensor = req.get_tensor(outputName)); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName)); ASSERT_EQ(tensor.get_shape(), refOutShape); } -TEST_P(InferRequestDynamicTests, InferFullyDynamicNetworkWithSetTensor) { +TEST_P(OVInferRequestDynamicTests, InferFullyDynamicNetworkWithSetTensor) { const std::string tensor_name = "input_tensor"; const ov::Shape refShape = inOutShapes[0].first; const ov::Shape refOutShape = inOutShapes[0].second; std::map shapes; shapes[tensor_name] = ov::PartialShape::dynamic(); - ASSERT_NO_THROW(function->reshape(shapes)); + OV_ASSERT_NO_THROW(function->reshape(shapes)); // Load ov::Function to target plugins auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest ov::runtime::InferRequest req; ov::runtime::Tensor tensor(ov::element::f32, refShape), otensor; const std::string outputName = function->outputs().back().get_any_name(); - ASSERT_NO_THROW(req = execNet.create_infer_request()); - ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor)); + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor)); ASSERT_EQ(tensor.get_shape(), refShape); - ASSERT_NO_THROW(otensor = req.get_tensor(outputName)); + OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputName)); ASSERT_EQ(0, otensor.get_size()); // output tensor is not allocated ASSERT_EQ(function->output().get_element_type(), otensor.get_element_type()); // by it has type - ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.infer()); ASSERT_EQ(otensor.get_shape(), refOutShape); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); ASSERT_EQ(otensor.get_shape(), refOutShape); - ASSERT_NO_THROW(tensor = req.get_tensor(outputName)); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName)); ASSERT_EQ(tensor.get_shape(), refOutShape); ASSERT_EQ(otensor.get_shape(), refOutShape); } -TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) { +TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) { const std::string tensor_name = "input_tensor"; const ov::Shape refShape = inOutShapes[0].first; const ov::Shape refShape2 = inOutShapes[1].first; @@ -350,7 +332,7 @@ TEST_P(InferRequestDynamicTests, 
InferDynamicNetworkWithSetTensor2times) { const ov::Shape refOutShape2 = inOutShapes[1].second; std::map shapes; shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20}; - ASSERT_NO_THROW(function->reshape(shapes)); + OV_ASSERT_NO_THROW(function->reshape(shapes)); const std::string outputName = function->outputs().back().get_any_name(); // Load ov::Function to target plugins auto execNet = ie->compile_model(function, targetDevice, configuration); @@ -358,23 +340,25 @@ TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) { ov::runtime::InferRequest req; ov::runtime::Tensor tensor(ov::element::f32, refShape); - ASSERT_NO_THROW(req = execNet.create_infer_request()); - ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor)); + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor)); ASSERT_EQ(tensor.get_shape(), refShape); - ASSERT_NO_THROW(req.infer()); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(req.wait()); - ASSERT_NO_THROW(tensor = req.get_tensor(outputName)); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName)); ASSERT_EQ(tensor.get_shape(), refOutShape); tensor = ov::runtime::Tensor(ov::element::f32, refShape2); - ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor)); + OV_ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor)); ASSERT_EQ(tensor.get_shape(), refShape2); - ASSERT_NO_THROW(req.infer()); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(req.wait()); - ASSERT_NO_THROW(tensor = req.get_tensor(outputName)); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName)); ASSERT_EQ(tensor.get_shape(), refOutShape2); } -} // namespace BehaviorTestsDefinitions +} // namespace behavior +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference_chaining.cpp b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference_chaining.cpp new file mode 100644 index 00000000000..3b8d9b7c53f --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference_chaining.cpp @@ -0,0 +1,188 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include +#include +#include +#include + +#include "base/ov_behavior_test_utils.hpp" +#include "openvino/core/attribute_visitor.hpp" +#include "openvino/core/node.hpp" +#include "openvino/core/partial_shape.hpp" +#include "openvino/core/rank.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/type/element_type.hpp" +#include "openvino/core/type/element_type_traits.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/core/function.hpp" +#include "ngraph_functions/builders.hpp" +#include "openvino/runtime/infer_request.hpp" +#include "openvino/runtime/tensor.hpp" +#include "behavior/ov_infer_request/inference_chaining.hpp" + +namespace ov { +namespace test { +namespace behavior { + +std::string OVInferenceChaining::getTestCaseName(const testing::TestParamInfo& obj) { + return OVInferRequestTests::getTestCaseName(obj); +} + +std::shared_ptr 
OVInferenceChaining::getFirstStaticFunction(const ov::PartialShape &shape) { + auto params = ngraph::builder::makeDynamicParams(element::Type_t::f32, {shape, shape, shape}); + params[0]->get_output_tensor(0).set_names({"input_tensor_0"}); + params[0]->set_friendly_name("param_0"); + params[1]->get_output_tensor(0).set_names({"input_tensor_1"}); + params[1]->set_friendly_name("param_1"); + params[2]->get_output_tensor(0).set_names({"input_tensor_2"}); + params[2]->set_friendly_name("param_2"); + auto eltwise = ngraph::builder::makeEltwise(params[0], params[1], ngraph::helpers::EltwiseTypes::ADD); + auto eltwise2 = ngraph::builder::makeEltwise(eltwise, params[2], ngraph::helpers::EltwiseTypes::ADD); + eltwise2->get_output_tensor(0).set_names({"result_tensor_0"}); + eltwise2->set_friendly_name("result_0"); + + return std::make_shared(eltwise2, ov::ParameterVector(params)); +} + +std::shared_ptr OVInferenceChaining::getSecondStaticFunction(const ov::PartialShape &shape) { + auto params = ngraph::builder::makeDynamicParams(element::Type_t::f32, {shape, shape}); + params[0]->get_output_tensor(0).set_names({"input_tensor_0"}); + params[0]->set_friendly_name("param_0"); + params[1]->get_output_tensor(0).set_names({"input_tensor_1"}); + params[1]->set_friendly_name("param_1"); + auto eltwise = ngraph::builder::makeEltwise(params[0], params[1], ngraph::helpers::EltwiseTypes::MULTIPLY); + eltwise->get_output_tensor(0).set_names({"result_tensor_0"}); + eltwise->set_friendly_name("result_0"); + + return std::make_shared(eltwise, ov::ParameterVector(params)); +} + +std::shared_ptr OVInferenceChaining::getThirdStaticFunction(const ov::PartialShape &shape) { + auto params = ngraph::builder::makeDynamicParams(element::Type_t::f32, {shape, shape, shape, shape}); + params[0]->get_output_tensor(0).set_names({"input_tensor_0"}); + params[0]->set_friendly_name("param_0"); + params[1]->get_output_tensor(0).set_names({"input_tensor_1"}); + params[1]->set_friendly_name("param_1"); + params[2]->get_output_tensor(0).set_names({"input_tensor_2"}); + params[2]->set_friendly_name("param_2"); + params[3]->get_output_tensor(0).set_names({"input_tensor_3"}); + params[3]->set_friendly_name("param_3"); + auto eltwise = ngraph::builder::makeEltwise(params[0], params[1], ngraph::helpers::EltwiseTypes::ADD); + auto eltwise2 = ngraph::builder::makeEltwise(eltwise, params[2], ngraph::helpers::EltwiseTypes::ADD); + auto eltwise3 = ngraph::builder::makeEltwise(eltwise2, params[3], ngraph::helpers::EltwiseTypes::MULTIPLY); + eltwise3->get_output_tensor(0).set_names({"result_tensor_0"}); + eltwise3->set_friendly_name("result_0"); + + return std::make_shared(eltwise3, ov::ParameterVector(params)); +} + +void OVInferenceChaining::Run() { + ov::runtime::ExecutableNetwork execNet0, execNet1, execNet2; + OV_ASSERT_NO_THROW(execNet0 = core->compile_model(function0, targetDevice, configuration)); + OV_ASSERT_NO_THROW(execNet1 = core->compile_model(function1, targetDevice, configuration)); + OV_ASSERT_NO_THROW(execNet2 = core->compile_model(function2, targetDevice, configuration)); + + ov::runtime::InferRequest r0, r1, r2; + OV_ASSERT_NO_THROW(r0 = execNet0.create_infer_request()); + OV_ASSERT_NO_THROW(r1 = execNet1.create_infer_request()); + OV_ASSERT_NO_THROW(r2 = execNet2.create_infer_request()); + + // perform inference chaining + if (outputToInput) { + OV_ASSERT_NO_THROW(r1.set_tensor("input_tensor_0", r0.get_tensor("result_tensor_0"))); + } else { + OV_ASSERT_NO_THROW(r0.set_tensor("result_tensor_0", r1.get_tensor("input_tensor_0"))); 
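+        // In either chaining mode the two requests share one tensor: r1 reads r0's
+        // output in place (output-to-input), or r0 writes its result directly into
+        // r1's input (input-to-output), so no copy is made between the inferences.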
+ } + + // create input tensors + ov::runtime::Tensor t0 = tensor(std::vector{1.0f, 2.0f, 3.0f}); + ov::runtime::Tensor t1 = tensor(std::vector{4.0f, 5.0f, 6.0f}); + ov::runtime::Tensor t2 = tensor(std::vector{7.0f, 8.0f, 9.0f}); + ov::runtime::Tensor t3 = tensor(std::vector{2.0f, 3.0f, 2.0f}); + + OV_ASSERT_NO_THROW(r0.set_tensor("input_tensor_0", t0)); + OV_ASSERT_NO_THROW(r0.set_tensor("input_tensor_1", t1)); + OV_ASSERT_NO_THROW(r0.set_tensor("input_tensor_2", t2)); + OV_ASSERT_NO_THROW(r1.set_tensor("input_tensor_1", t3)); + + OV_ASSERT_NO_THROW(r2.set_tensor("input_tensor_0", t0)); + OV_ASSERT_NO_THROW(r2.set_tensor("input_tensor_1", t1)); + OV_ASSERT_NO_THROW(r2.set_tensor("input_tensor_2", t2)); + OV_ASSERT_NO_THROW(r2.set_tensor("input_tensor_3", t3)); + + OV_ASSERT_NO_THROW(r0.infer()); + OV_ASSERT_NO_THROW(r1.infer()); + OV_ASSERT_NO_THROW(r2.infer()); + + // check results + std::vector reference1 = {12.0f, 15.0f, 18.0f}; + std::vector reference2 = {24.0f, 45.0f, 36.0f}; + + auto rti = r0.get_tensor("result_tensor_0"); + auto rt0 = r1.get_tensor("result_tensor_0"); + auto rt1 = r2.get_tensor("result_tensor_0"); + + for (size_t i = 0; i < reference1.size(); ++i) { + EXPECT_EQ(reference1[i], rti.data()[i]); + EXPECT_EQ(reference2[i], rt0.data()[i]); + EXPECT_EQ(reference2[i], rt1.data()[i]); + } +} + +TEST_P(OVInferenceChaining, StaticOutputToStaticInput) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + function0 = getFirstStaticFunction(); + function1 = getSecondStaticFunction(); + function2 = getThirdStaticFunction(); + + Run(); +} + +TEST_P(OVInferenceChaining, StaticOutputToDynamicInput) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + const auto dynamic = ov::PartialShape::dynamic(ov::Rank(1)); + function0 = getFirstStaticFunction(); + function1 = getSecondStaticFunction(dynamic); + function2 = getThirdStaticFunction(dynamic); + + Run(); +} + +TEST_P(OVInferenceChaining, DynamicOutputToDynamicInput) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + const auto dynamic = ov::PartialShape::dynamic(); + function0 = getFirstStaticFunction(dynamic); + function1 = getSecondStaticFunction(dynamic); + function2 = getThirdStaticFunction(dynamic); + + Run(); +} + +TEST_P(OVInferenceChaining, DynamicInputToDynamicOutput) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + this->outputToInput = false; + + const auto dynamic = ov::PartialShape::dynamic(); + function0 = getFirstStaticFunction(dynamic); + function1 = getSecondStaticFunction(dynamic); + function2 = getThirdStaticFunction(dynamic); + + Run(); +} + +} // namespace behavior +} // namespace test +} // namespace ov diff --git a/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp new file mode 100644 index 00000000000..ead494de8b0 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp @@ -0,0 +1,231 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +#include "shared_test_classes/subgraph/basic_lstm.hpp" +#include "behavior/ov_infer_request/io_tensor.hpp" +#include "functional_test_utils/ov_tensor_utils.hpp" 
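+// ov_tensor_utils.hpp (added by this patch) provides create_and_fill_tensor(),
+// which allocates a tensor of the given type/shape and fills it with
+// pseudo-random data, e.g.:
+//     auto t = ov::test::create_and_fill_tensor(ov::element::f32, {1, 3, 4, 4});
+// using the defaults declared in the header (range = 10, start_from = 0,
+// resolution = 1, seed = 1).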
+ +namespace ov { +namespace test { +namespace behavior { + +std::string OVInferRequestIOTensorTest::getTestCaseName(const testing::TestParamInfo& obj) { + return OVInferRequestTests::getTestCaseName(obj); +} + +void OVInferRequestIOTensorTest::SetUp() { + OVInferRequestTests::SetUp(); + req = execNet.create_infer_request(); + input = execNet.input(); + output = execNet.output(); +} + +void OVInferRequestIOTensorTest::TearDown() { + req = {}; + input = {}; + output = {}; + OVInferRequestTests::TearDown(); +} + +TEST_P(OVInferRequestIOTensorTest, Cancreate_infer_request) { + runtime::InferRequest req; + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); +} + +TEST_P(OVInferRequestIOTensorTest, failToSetNullptrForInput) { + ASSERT_THROW(req.set_tensor(input, {}), ov::Exception); +} + +TEST_P(OVInferRequestIOTensorTest, failToSetNullptrForOutput) { + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + ASSERT_THROW(req.set_tensor(output, {}), ov::Exception); +} + +TEST_P(OVInferRequestIOTensorTest, getAfterSetInputDoNotChangeInput) { + auto tensor = create_and_fill_tensor(input.get_element_type(), input.get_shape()); + OV_ASSERT_NO_THROW(req.set_tensor(input, tensor)); + runtime::Tensor actual_tensor; + OV_ASSERT_NO_THROW(actual_tensor = req.get_tensor(input)); + + ASSERT_TRUE(actual_tensor); + ASSERT_NE(nullptr, actual_tensor.data()); + ASSERT_EQ(tensor.data(), actual_tensor.data()); + ASSERT_EQ(input.get_element_type(), actual_tensor.get_element_type()); + ASSERT_EQ(input.get_shape(), actual_tensor.get_shape()); +} + +TEST_P(OVInferRequestIOTensorTest, getAfterSetInputDoNotChangeOutput) { + auto tensor = create_and_fill_tensor(output.get_element_type(), output.get_shape()); + req.set_tensor(output, tensor); + auto actual_tensor = req.get_tensor(output); + + ASSERT_TRUE(actual_tensor); + ASSERT_FALSE(actual_tensor.data() == nullptr); + ASSERT_EQ(actual_tensor.data(), tensor.data()); + ASSERT_EQ(output.get_element_type(), actual_tensor.get_element_type()); + ASSERT_EQ(output.get_shape(), actual_tensor.get_shape()); +} + +TEST_P(OVInferRequestIOTensorTest, failToSetTensorWithIncorrectName) { + auto tensor = create_and_fill_tensor(input.get_element_type(), input.get_shape()); + ASSERT_THROW(req.set_tensor("incorrect_input", tensor), ov::Exception); +} + +TEST_P(OVInferRequestIOTensorTest, failToSetInputWithIncorrectSizes) { + auto shape = input.get_shape(); + shape[0] *= 2; + auto tensor = create_and_fill_tensor(input.get_element_type(), shape); + ASSERT_THROW(req.set_tensor(input, tensor), ov::Exception); +} + +TEST_P(OVInferRequestIOTensorTest, failToSetOutputWithIncorrectSizes) { + auto shape = output.get_shape(); + shape[0] *= 2; + auto tensor = create_and_fill_tensor(output.get_element_type(), shape); + ASSERT_THROW(req.set_tensor(output, tensor), ov::Exception); +} + +TEST_P(OVInferRequestIOTensorTest, canInferWithoutSetAndGetInOutSync) { + OV_ASSERT_NO_THROW(req.infer()); +} + +TEST_P(OVInferRequestIOTensorTest, canInferWithoutSetAndGetInOutAsync) { + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); +} + +TEST_P(OVInferRequestIOTensorTest, secondCallGetInputDoNotReAllocateData) { + runtime::Tensor tensor1, tensor2; + OV_ASSERT_NO_THROW(tensor1 = req.get_tensor(input)); + OV_ASSERT_NO_THROW(tensor2 = req.get_tensor(input)); + ASSERT_EQ(tensor1.data(), tensor2.data()); +} + +TEST_P(OVInferRequestIOTensorTest, secondCallGetOutputDoNotReAllocateData) { + runtime::Tensor tensor1, tensor2; + OV_ASSERT_NO_THROW(tensor1 = req.get_tensor(output)); + 
OV_ASSERT_NO_THROW(tensor2 = req.get_tensor(output)); + ASSERT_EQ(tensor1.data(), tensor2.data()); +} + +TEST_P(OVInferRequestIOTensorTest, secondCallGetInputAfterInferSync) { + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + runtime::Tensor tensor1, tensor2; + OV_ASSERT_NO_THROW(tensor1 = req.get_tensor(input)); + OV_ASSERT_NO_THROW(tensor2 = req.get_tensor(input)); + ASSERT_EQ(tensor1.data(), tensor2.data()); +} + +TEST_P(OVInferRequestIOTensorTest, secondCallGetOutputAfterInferSync) { + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + runtime::Tensor tensor1, tensor2; + OV_ASSERT_NO_THROW(tensor1 = req.get_tensor(output)); + OV_ASSERT_NO_THROW(tensor2 = req.get_tensor(output)); + ASSERT_EQ(tensor1.data(), tensor2.data()); +} + +TEST_P(OVInferRequestIOTensorTest, canSetInputTensorForInferRequest) { + auto input_tensor = create_and_fill_tensor(input.get_element_type(), input.get_shape()); + OV_ASSERT_NO_THROW(req.set_tensor(input, input_tensor)); + runtime::Tensor actual_tensor; + OV_ASSERT_NO_THROW(actual_tensor = req.get_tensor(input)); + ASSERT_EQ(input_tensor.data(), actual_tensor.data()); +} + +TEST_P(OVInferRequestIOTensorTest, canSetOutputBlobForInferRequest) { + auto output_tensor = create_and_fill_tensor(output.get_element_type(), output.get_shape()); + OV_ASSERT_NO_THROW(req.set_tensor(output, output_tensor)); + runtime::Tensor actual_tensor; + OV_ASSERT_NO_THROW(actual_tensor = req.get_tensor(output)); + ASSERT_EQ(output_tensor.data(), actual_tensor.data()); +} + +TEST_P(OVInferRequestIOTensorTest, canInferWithSetInOutBlobs) { + auto input_tensor = create_and_fill_tensor(input.get_element_type(), input.get_shape()); + OV_ASSERT_NO_THROW(req.set_tensor(input, input_tensor)); + auto output_tensor = create_and_fill_tensor(output.get_element_type(), output.get_shape()); + OV_ASSERT_NO_THROW(req.set_tensor(output, output_tensor)); + OV_ASSERT_NO_THROW(req.infer()); +} + +TEST_P(OVInferRequestIOTensorTest, canInferWithGetIn) { + runtime::Tensor input_tensor; + OV_ASSERT_NO_THROW(input_tensor = req.get_tensor(input)); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.get_tensor(output)); +} + +TEST_P(OVInferRequestIOTensorTest, canInferWithGetOut) { + runtime::Tensor output_tensor; + OV_ASSERT_NO_THROW(output_tensor = req.get_tensor(output)); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.get_tensor(output)); +} + +std::string OVInferRequestIOTensorSetPrecisionTest::getTestCaseName(const testing::TestParamInfo& obj) { + element::Type type; + std::string targetDevice; + std::map configuration; + std::tie(type, targetDevice, configuration) = obj.param; + std::ostringstream result; + result << "type=" << type << "_"; + result << "targetDevice=" << targetDevice << "_"; + if (!configuration.empty()) { + using namespace CommonTestUtils; + for (auto &configItem : configuration) { + result << "configItem=" << configItem.first << "_" << configItem.second << "_"; + } + } + return result.str(); +} + +void OVInferRequestIOTensorSetPrecisionTest::SetUp() { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + std::tie(element_type, target_device, config) = this->GetParam(); + function = ngraph::builder::subgraph::makeConvPoolRelu(); + execNet = core->compile_model(function, target_device, config); + req = 
execNet.create_infer_request();
+}
+
+void OVInferRequestIOTensorSetPrecisionTest::TearDown() {
+    execNet = {};
+    req = {};
+}
+
+TEST_P(OVInferRequestIOTensorSetPrecisionTest, CanSetOutBlobWithDifferentPrecision) {
+    for (auto&& output : execNet.outputs()) {
+        auto output_tensor = create_and_fill_tensor(element_type, output.get_shape());
+        if (output.get_element_type() == element_type) {
+            OV_ASSERT_NO_THROW(req.set_tensor(output, output_tensor));
+        } else {
+            ASSERT_THROW(req.set_tensor(output, output_tensor), ov::Exception);
+        }
+    }
+}
+
+TEST_P(OVInferRequestIOTensorSetPrecisionTest, CanSetInBlobWithDifferentPrecision) {
+    for (auto&& input : execNet.inputs()) {
+        auto input_tensor = create_and_fill_tensor(element_type, input.get_shape());
+        if (input.get_element_type() == element_type) {
+            OV_ASSERT_NO_THROW(req.set_tensor(input, input_tensor));
+        } else {
+            ASSERT_THROW(req.set_tensor(input, input_tensor), ov::Exception);
+        }
+    }
+}
+
+} // namespace behavior
+} // namespace test
+} // namespace ov
diff --git a/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/multithreading.cpp b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/multithreading.cpp
new file mode 100644
index 00000000000..a7b8ea2f7ac
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/multithreading.cpp
@@ -0,0 +1,92 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <future>
+
+#include "behavior/ov_infer_request/multithreading.hpp"
+
+namespace ov {
+namespace test {
+namespace behavior {
+
+std::string OVInferRequestMultithreadingTests::getTestCaseName(const testing::TestParamInfo& obj) {
+    return OVInferRequestTests::getTestCaseName(obj);
+}
+
+TEST_P(OVInferRequestMultithreadingTests, canRun3SyncRequestsConsistentlyFromThreads) {
+    runtime::InferRequest req1, req2, req3;
+    OV_ASSERT_NO_THROW(req1 = execNet.create_infer_request());
+    OV_ASSERT_NO_THROW(req2 = execNet.create_infer_request());
+    OV_ASSERT_NO_THROW(req3 = execNet.create_infer_request());
+
+    auto f1 = std::async(std::launch::async, [&] { req1.infer(); });
+    auto f2 = std::async(std::launch::async, [&] { req2.infer(); });
+    auto f3 = std::async(std::launch::async, [&] { req3.infer(); });
+
+    f1.wait();
+    f2.wait();
+    f3.wait();
+
+    OV_ASSERT_NO_THROW(f1.get());
+    OV_ASSERT_NO_THROW(f2.get());
+    OV_ASSERT_NO_THROW(f3.get());
+}
+
+TEST_P(OVInferRequestMultithreadingTests, canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait) {
+    runtime::InferRequest req1, req2, req3;
+    OV_ASSERT_NO_THROW(req1 = execNet.create_infer_request());
+    OV_ASSERT_NO_THROW(req2 = execNet.create_infer_request());
+    OV_ASSERT_NO_THROW(req3 = execNet.create_infer_request());
+
+    OV_ASSERT_NO_THROW(req1.infer());
+    OV_ASSERT_NO_THROW(req2.infer());
+    OV_ASSERT_NO_THROW(req3.infer());
+
+    auto f1 = std::async(std::launch::async, [&] { req1.start_async(); });
+    auto f2 = std::async(std::launch::async, [&] { req2.start_async(); });
+    auto f3 = std::async(std::launch::async, [&] { req3.start_async(); });
+
+    f1.wait();
+    f2.wait();
+    f3.wait();
+
+    OV_ASSERT_NO_THROW(f1.get());
+    OV_ASSERT_NO_THROW(f2.get());
+    OV_ASSERT_NO_THROW(f3.get());
+}
+
+TEST_P(OVInferRequestMultithreadingTests, canRun3AsyncRequestsConsistentlyWithWait) {
+    runtime::InferRequest req1, req2, req3;
+    OV_ASSERT_NO_THROW(req1 = execNet.create_infer_request());
+    OV_ASSERT_NO_THROW(req2 = execNet.create_infer_request());
+    OV_ASSERT_NO_THROW(req3 =
execNet.create_infer_request()); + + req1.start_async(); + OV_ASSERT_NO_THROW(req1.wait()); + + req2.start_async(); + OV_ASSERT_NO_THROW(req2.wait()); + + req3.start_async(); + OV_ASSERT_NO_THROW(req3.wait()); +} + +TEST_P(OVInferRequestMultithreadingTests, canRun3AsyncRequestsParallelWithWait) { + runtime::InferRequest req1, req2, req3; + OV_ASSERT_NO_THROW(req1 = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(req2 = execNet.create_infer_request()); + OV_ASSERT_NO_THROW(req3 = execNet.create_infer_request()); + + req1.start_async(); + req2.start_async(); + req3.start_async(); + + OV_ASSERT_NO_THROW(req2.wait()); + OV_ASSERT_NO_THROW(req1.wait()); + OV_ASSERT_NO_THROW(req3.wait()); +} + +} // namespace behavior +} // namespace test +} // namespace ov diff --git a/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/perf_counters.cpp b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/perf_counters.cpp new file mode 100644 index 00000000000..b92c516bf5a --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/perf_counters.cpp @@ -0,0 +1,42 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + + +#include "behavior/ov_infer_request/perf_counters.hpp" +#include "openvino/runtime/profiling_info.hpp" + +namespace ov { +namespace test { +namespace behavior { + +std::string OVInferRequestPerfCountersTest::getTestCaseName(const testing::TestParamInfo& obj) { + return OVInferRequestTests::getTestCaseName(obj); +} + +void OVInferRequestPerfCountersTest::SetUp() { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + std::tie(targetDevice, configuration) = this->GetParam(); + function = ngraph::builder::subgraph::makeConvPoolRelu(); + configuration.insert({ InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES }); + execNet = core->compile_model(function, targetDevice, configuration); + req = execNet.create_infer_request(); +} + +TEST_P(OVInferRequestPerfCountersTest, NotEmptyAfterAsyncInfer) { + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + std::vector perf; + OV_ASSERT_NO_THROW(perf = req.get_profiling_info()); + ASSERT_FALSE(perf.empty()); +} + +TEST_P(OVInferRequestPerfCountersTest, NotEmptyAfterSyncInfer) { + OV_ASSERT_NO_THROW(req.infer()); + std::vector perf; + OV_ASSERT_NO_THROW(perf = req.get_profiling_info()); + ASSERT_FALSE(perf.empty()); +} +} // namespace behavior +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp new file mode 100644 index 00000000000..7764b0fbf76 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp @@ -0,0 +1,84 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/wait.hpp" +#include "openvino/runtime/exception.hpp" + +namespace ov { +namespace test { +namespace behavior { + +std::string OVInferRequestWaitTests::getTestCaseName(const testing::TestParamInfo& obj) { + return OVInferRequestTests::getTestCaseName(obj); +} + +void OVInferRequestWaitTests::SetUp() { + OVInferRequestTests::SetUp(); + req = execNet.create_infer_request(); + input = execNet.input(); + output = execNet.output(); +} + +void OVInferRequestWaitTests::TearDown() { + req = {}; + 
input = {};
+    output = {};
+    OVInferRequestTests::TearDown();
+}
+
+TEST_P(OVInferRequestWaitTests, CorrectOneAsyncInferWithGetInOutWithInfWait) {
+    runtime::Tensor tensor;
+    OV_ASSERT_NO_THROW(tensor = req.get_tensor(input));
+    OV_ASSERT_NO_THROW(req.infer());
+    OV_ASSERT_NO_THROW(req.start_async());
+    OV_ASSERT_NO_THROW(req.wait());
+    OV_ASSERT_NO_THROW(tensor = req.get_tensor(output));
+}
+
+// The plugin should correctly handle an infer request that allocates its input
+// and result tensors inside the plugin
+TEST_P(OVInferRequestWaitTests, canStartAsyncInferWithGetInOutWithStatusOnlyWait) {
+    runtime::Tensor tensor;
+    OV_ASSERT_NO_THROW(tensor = req.get_tensor(input));
+    OV_ASSERT_NO_THROW(req.infer());
+    OV_ASSERT_NO_THROW(req.start_async());
+    OV_ASSERT_NO_THROW(req.wait_for({}));
+}
+
+TEST_P(OVInferRequestWaitTests, canWaitWithoutStartAsync) {
+    OV_ASSERT_NO_THROW(req.wait());
+    OV_ASSERT_NO_THROW(req.wait_for({}));
+    OV_ASSERT_NO_THROW(req.wait_for(std::chrono::milliseconds{1}));
+}
+
+TEST_P(OVInferRequestWaitTests, throwExceptionOnSetTensorAfterAsyncInfer) {
+    auto&& config = configuration;
+    auto itConfig = config.find(CONFIG_KEY(CPU_THROUGHPUT_STREAMS));
+    if (itConfig != config.end()) {
+        if (itConfig->second != "CPU_THROUGHPUT_AUTO") {
+            if (std::stoi(itConfig->second) == 0) {
+                GTEST_SKIP() << "Not applicable with disabled streams";
+            }
+        }
+    }
+    auto output_tensor = req.get_tensor(input);
+    OV_ASSERT_NO_THROW(req.wait_for({}));
+    OV_ASSERT_NO_THROW(req.start_async());
+    OV_ASSERT_NO_THROW(try {
+        req.set_tensor(input, output_tensor);
+    } catch (const ov::runtime::Busy&) {});
+    OV_ASSERT_NO_THROW(req.wait_for({}));
+    OV_ASSERT_NO_THROW(req.wait());
+}
+
+TEST_P(OVInferRequestWaitTests, throwExceptionOnGetTensorAfterAsyncInfer) {
+    OV_ASSERT_NO_THROW(req.start_async());
+    OV_ASSERT_NO_THROW(try {
+        req.get_tensor(input);
+    } catch (const ov::runtime::Busy&) {});
+    OV_ASSERT_NO_THROW(req.wait());
+}
+
+} // namespace behavior
+} // namespace test
+} // namespace ov
diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/test_assertions.hpp b/inference-engine/tests/ie_test_utils/common_test_utils/test_assertions.hpp
index 30c5bdd8fd2..27a5a805aa2 100644
--- a/inference-engine/tests/ie_test_utils/common_test_utils/test_assertions.hpp
+++ b/inference-engine/tests/ie_test_utils/common_test_utils/test_assertions.hpp
@@ -55,6 +55,23 @@ inline bool strDoesnotContain(const std::string & str, const std::string & subst
 #define ASSERT_STRINGEQ(lhs, rhs) \
     compare_cpp_strings(lhs, rhs)
 
+#define OV_ASSERT_NO_THROW(statement) \
+    OV_ASSERT_NO_THROW_(statement, GTEST_FATAL_FAILURE_)
+
+#define OV_ASSERT_NO_THROW_(statement, fail) \
+    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+    if (::testing::internal::AlwaysTrue()) { \
+        try { \
+            GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+        } catch (const std::exception& e) { \
+            fail("Expected: " #statement " doesn't throw an exception.\n" \
+                 "  Actual: it throws.") << e.what(); \
+        } catch (...)
{ \
+            fail("Expected: " #statement " doesn't throw an exception.\n" \
+                 "  Actual: it throws."); \
+        } \
+    }
+
 inline void compare_blob(InferenceEngine::Blob::Ptr lhs, InferenceEngine::Blob::Ptr rhs) {
     ASSERT_EQ(lhs.get(), rhs.get());
     //TODO: add blob specific comparison for general case
diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/test_common.cpp b/inference-engine/tests/ie_test_utils/common_test_utils/test_common.cpp
index 02f5f5dca9e..979122e8919 100644
--- a/inference-engine/tests/ie_test_utils/common_test_utils/test_common.cpp
+++ b/inference-engine/tests/ie_test_utils/common_test_utils/test_common.cpp
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 
 #ifdef _WIN32
 #ifndef NOMINMAX
diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/test_common.hpp b/inference-engine/tests/ie_test_utils/common_test_utils/test_common.hpp
index 2c4dcf49067..df975cf0fce 100644
--- a/inference-engine/tests/ie_test_utils/common_test_utils/test_common.hpp
+++ b/inference-engine/tests/ie_test_utils/common_test_utils/test_common.hpp
@@ -7,6 +7,7 @@
 #include
 #include
+#include "test_assertions.hpp"
 
 namespace CommonTestUtils {
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/ov_tensor_utils.hpp b/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/ov_tensor_utils.hpp
new file mode 100644
index 00000000000..c8b158badaa
--- /dev/null
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/ov_tensor_utils.hpp
@@ -0,0 +1,19 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <openvino/runtime/tensor.hpp>
+
+namespace ov {
+namespace test {
+ov::runtime::Tensor create_and_fill_tensor(
+    const ov::element::Type element_type,
+    const ov::Shape& shape,
+    const uint32_t range = 10,
+    const int32_t start_from = 0,
+    const int32_t resolution = 1,
+    const int seed = 1);
+} // namespace test
+} // namespace ov
\ No newline at end of file
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/src/ov_tensor_utils.cpp b/inference-engine/tests/ie_test_utils/functional_test_utils/src/ov_tensor_utils.cpp
new file mode 100644
index 00000000000..2b08fcb8397
--- /dev/null
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/src/ov_tensor_utils.cpp
@@ -0,0 +1,53 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+
+#include "functional_test_utils/ov_tensor_utils.hpp"
+#include "common_test_utils/data_utils.hpp"
+#include "openvino/core/type/element_type_traits.hpp"
+#include "ngraph/coordinate_transform.hpp"
+#include
+
+namespace ov {
+namespace test {
+ov::runtime::Tensor create_and_fill_tensor(
+    const ov::element::Type element_type,
+    const ov::Shape& shape,
+    const uint32_t range,
+    const int32_t start_from,
+    const int32_t resolution,
+    const int seed) {
+    auto tensor = ov::runtime::Tensor{element_type, shape};
+#define CASE(X) case X: ::CommonTestUtils::fill_data_random( \
+    tensor.data<element_type_traits<X>::value_type>(), \
+    shape_size(shape), \
+    range, start_from, resolution, seed); break;
+    switch (element_type) {
+        CASE(ov::element::Type_t::boolean)
+        CASE(ov::element::Type_t::bf16)
+        CASE(ov::element::Type_t::f16)
+        CASE(ov::element::Type_t::f32)
+        CASE(ov::element::Type_t::f64)
+        CASE(ov::element::Type_t::i8)
+        CASE(ov::element::Type_t::i16)
+        CASE(ov::element::Type_t::i32)
+        CASE(ov::element::Type_t::i64)
+        CASE(ov::element::Type_t::u8)
+        CASE(ov::element::Type_t::u16)
+        CASE(ov::element::Type_t::u32)
+        CASE(ov::element::Type_t::u64)
+        case ov::element::Type_t::u1:
+        case ov::element::Type_t::i4:
+        case ov::element::Type_t::u4:
+            ::CommonTestUtils::fill_data_random(
+                static_cast<uint8_t*>(tensor.data()),
+                tensor.get_byte_size(),
+                range, start_from, resolution, seed); break;
+        default: OPENVINO_UNREACHABLE("Unsupported element type: ", element_type);
+    }
+#undef CASE
+    return tensor;
+}
+} // namespace test
+} // namespace ov
\ No newline at end of file
diff --git a/ngraph/core/include/openvino/runtime/tensor.hpp b/ngraph/core/include/openvino/runtime/tensor.hpp
index 512cbaba1b5..3de0e3c47d2 100644
--- a/ngraph/core/include/openvino/runtime/tensor.hpp
+++ b/ngraph/core/include/openvino/runtime/tensor.hpp
@@ -166,8 +166,7 @@ public:
      * @return true if this object can be dynamically cast to the type const T*. Otherwise, false
      */
     template <typename T>
-    bool is() const noexcept {
-        static_assert(std::is_base_of<Tensor, T>::value, "Could not check type that is not inherited from Tensor");
+    typename std::enable_if<std::is_base_of<Tensor, T>::value, bool>::type is() const noexcept {
         try {
             T::type_check(*this);
         } catch (...) {
@@ -183,8 +182,7 @@ public:
      * @return T object
      */
     template <typename T>
-    const T as() const {
-        static_assert(std::is_base_of<Tensor, T>::value, "Could not check type that is not inherited from Tensor");
+    const typename std::enable_if<std::is_base_of<Tensor, T>::value, T>::type as() const {
         T::type_check(*this);
         return *static_cast<const T*>(this);
     }
@@ -195,7 +193,7 @@ public:
      * @tparam T Type to cast to. Must represent a class derived from the Tensor
      * @return T object
      */
-    template <typename T>
+    template <typename T, typename = typename std::enable_if<std::is_base_of<Tensor, T>::value>::type>
     operator T() const {
         return as<T>();
     }
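With the enable_if form above, is<T>(), as<T>(), and the implicit conversion operator drop out of overload resolution for types not derived from Tensor, instead of tripping a static_assert, so generic code can probe convertibility via SFINAE. A minimal sketch of what this enables; "MyTensor" and its no-op type_check hook are hypothetical and used only for illustration:

// Illustrative only, not part of this patch.
#include <openvino/runtime/tensor.hpp>

struct MyTensor : public ov::runtime::Tensor {
    // is()/as() invoke T::type_check(*this); a real subclass would validate
    // the underlying implementation here and throw on mismatch.
    static void type_check(const Tensor&) {}
};

void example(const ov::runtime::Tensor& t) {
    if (t.is<MyTensor>()) {  // well-formed only because MyTensor derives from Tensor
        MyTensor mt = t.as<MyTensor>();
        (void)mt;
    }
    // t.is<int>() still fails to compile, but now because no such overload
    // exists (substitution failure) rather than via a hard static_assert,
    // which template machinery can detect gracefully.
}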