diff --git a/inference-engine/tests/functional/inference_engine/ov_infer_request_test.cpp b/inference-engine/tests/functional/inference_engine/ov_infer_request_test.cpp
index e757bfaed01..714f075b5f6 100644
--- a/inference-engine/tests/functional/inference_engine/ov_infer_request_test.cpp
+++ b/inference-engine/tests/functional/inference_engine/ov_infer_request_test.cpp
@@ -8,6 +8,7 @@
 #include <openvino/core/except.hpp>
 #include <openvino/runtime/infer_request.hpp>
 #include <openvino/runtime/remote_tensor.hpp>
+#include <openvino/runtime/compiled_model.hpp>
 
 using namespace ::testing;
 using namespace std;
@@ -83,3 +84,9 @@ TEST(InferRequestOVTests, throwsOnUninitializedSetRemoteTensor) {
     ov::runtime::RemoteTensor remote_tensor;
     ASSERT_THROW(req.set_tensor(ov::Output<const ov::Node>(), remote_tensor), ov::Exception);
 }
+
+
+TEST(InferRequestOVTests, throwsOnGetCompiledModel) {
+    ov::runtime::InferRequest req;
+    ASSERT_THROW(req.get_compiled_model(), ov::Exception);
+}
diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp
index bf6c4d1dec8..611325ceb6b 100644
--- a/inference-engine/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp
+++ b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp
@@ -633,6 +633,23 @@ TEST_P(OVExecutableNetworkBaseTest, precisionsAsInOriginalIR) {
     EXPECT_EQ(ref_result->get_shape(), actual_result->get_shape());
     EXPECT_EQ(ref_result->get_friendly_name(), actual_result->get_friendly_name());
 }
+
+TEST_P(OVExecutableNetworkBaseTest, getCompiledModelFromInferRequest) {
+    ov::runtime::InferRequest req;
+    {
+        ov::runtime::CompiledModel compiled_model;
+        ASSERT_NO_THROW(compiled_model = core->compile_model(function, targetDevice, configuration));
+        ASSERT_NO_THROW(req = compiled_model.create_infer_request());
+        ASSERT_NO_THROW(req.infer());
+    }
+    {
+        ov::runtime::CompiledModel restored_compiled_model;
+        ov::runtime::InferRequest another_req;
+        ASSERT_NO_THROW(restored_compiled_model = req.get_compiled_model());
+        ASSERT_NO_THROW(another_req = restored_compiled_model.create_infer_request());
+        ASSERT_NO_THROW(another_req.infer());
+    }
+}
 }  // namespace behavior
 }  // namespace test
 }  // namespace ov
diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp
index 9200c31920b..ea7e117677c 100644
--- a/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp
+++ b/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp
@@ -185,6 +185,12 @@ public:
      */
     void setPointerToExecutableNetworkInternal(const std::shared_ptr<IExecutableNetworkInternal>& exeNetwork);
 
+    /**
+     * @brief Returns the pointer to executable network internal.
+     * @returns The executable network
+     */
+    std::shared_ptr<IExecutableNetworkInternal> getPointerToExecutableNetworkInternal() const;
+
     /**
      * @brief Gets the pointer to userData.
      * @return Pointer to user data
diff --git a/src/inference/include/openvino/runtime/compiled_model.hpp b/src/inference/include/openvino/runtime/compiled_model.hpp
index 9f096fdc16a..1f1f8febe14 100644
--- a/src/inference/include/openvino/runtime/compiled_model.hpp
+++ b/src/inference/include/openvino/runtime/compiled_model.hpp
@@ -28,6 +28,7 @@ namespace ov {
 namespace runtime {
 
 class Core;
+class InferRequest;
 
 /**
  * @brief This is an interface of an executable network
@@ -45,6 +46,7 @@ class OPENVINO_RUNTIME_API CompiledModel {
     CompiledModel(const std::shared_ptr<void>& so, const std::shared_ptr<ie::IExecutableNetworkInternal>& impl);
 
     friend class ov::runtime::Core;
+    friend class ov::runtime::InferRequest;
 
 public:
     /**
diff --git a/src/inference/include/openvino/runtime/infer_request.hpp b/src/inference/include/openvino/runtime/infer_request.hpp
index d6852b76efd..bef4de7037d 100644
--- a/src/inference/include/openvino/runtime/infer_request.hpp
+++ b/src/inference/include/openvino/runtime/infer_request.hpp
@@ -222,6 +222,13 @@ public:
      */
     std::vector<VariableState> query_state();
 
+    /**
+     * @brief Returns compiled model that creates this inference request
+     *
+     * @return Compiled model object
+     */
+    CompiledModel get_compiled_model();
+
     /**
      * @brief Checks if current InferRequest object is not initialized
      * @return true if current InferRequest object is not initialized, false - otherwise
diff --git a/src/inference/src/cpp/ie_infer_request.cpp b/src/inference/src/cpp/ie_infer_request.cpp
index 5f9fbe6e283..428888c6f19 100644
--- a/src/inference/src/cpp/ie_infer_request.cpp
+++ b/src/inference/src/cpp/ie_infer_request.cpp
@@ -13,6 +13,7 @@
 #include "ie_infer_async_request_base.hpp"
 #include "ie_ngraph_utils.hpp"
 #include "ie_remote_context.hpp"
+#include "openvino/runtime/compiled_model.hpp"
 #include "openvino/runtime/exception.hpp"
 #include "openvino/runtime/infer_request.hpp"
 #include "transformations/utils/utils.hpp"
@@ -446,6 +447,10 @@ std::vector<VariableState> InferRequest::query_state() {
     return variable_states;
 }
 
+CompiledModel InferRequest::get_compiled_model() {
+    OV_INFER_REQ_CALL_STATEMENT(return {_so, _impl->getPointerToExecutableNetworkInternal()});
+}
+
 bool InferRequest::operator!() const noexcept {
     return !_impl;
 }
diff --git a/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp
index dbd74f4fd6e..da3299b2302 100644
--- a/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp
+++ b/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp
@@ -355,6 +355,10 @@ void IInferRequestInternal::setPointerToExecutableNetworkInternal(
     _exeNetwork = exeNetwork;
 }
 
+std::shared_ptr<IExecutableNetworkInternal> IInferRequestInternal::getPointerToExecutableNetworkInternal() const {
+    return _exeNetwork;
+}
+
 bool IInferRequestInternal::preProcessingRequired(const InputInfo::Ptr& info,
                                                   const Blob::Ptr& userBlob,
                                                   const Blob::Ptr& deviceBlob) {