Change plugin api (#18441)
* Remove vector of so objects from Tensor, Any, VariableState and RemoteContext
* Change IRemoteContext
* Fixed build without Proxy
* Moved API to ITensor
* Remove friends from Tensor class
* Changed query_state API
* Return SoPtr for some returned values
* Fixed auto plugin
* Add so to converted objects
* Fixed build all on macOS
* Check that tensor pointers != nullptr
* Add SO to converter
* Added new constructors for SO ptrs
* Changed IVariableState API
* Remove proxy friend
* Remove friends and nullptrs from auto
* Fixed build
* Fixed HETERO plugin
* Fixed code style
Parent: 38913f2184
Commit: 93e30468b1
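At a glance, the unifying change: dev-API entry points stop passing the public wrappers (ov::Tensor, ov::RemoteContext) across the plugin boundary and pass ov::SoPtr<...> instead, so the implementation pointer always travels together with the shared-object handle of the plugin library that created it. A minimal sketch of why the pair matters — an assumption mirroring ov::SoPtr, not the actual header:

#include <memory>

// The deleter of a plugin-created object lives inside the plugin's shared
// library, so the library handle must outlive the object pointer.
template <class T>
struct so_ptr_sketch {
    std::shared_ptr<T> _ptr;    // object implemented inside the plugin
    std::shared_ptr<void> _so;  // handle keeping the plugin .so/.dll loaded
};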
@@ -69,6 +69,13 @@ public:
         return static_cast<T*>(data(element::from<datatype>()));
     }
 
+    /**
+     * @brief Reports whether the tensor is continuous or not
+     *
+     * @return true if tensor is continuous
+     */
+    bool is_continuous() const;
+
 protected:
     virtual ~ITensor();
 };

@@ -17,32 +17,20 @@
 #include "openvino/core/type/element_type.hpp"
 #include "openvino/runtime/allocator.hpp"
 
-namespace InferenceEngine {
-class IAsyncInferRequestWrapper;
-class IVariableStateWrapper;
-}  // namespace InferenceEngine
-
 namespace ov {
 
-class Core;
-class CoreImpl;
-class InferRequest;
-class RemoteContext;
-class VariableState;
-class ISyncInferRequest;
-class IInferRequestInternalWrapper;
-class IVariableStateInternalWrapper;
+class Tensor;
 class ITensor;
 class RemoteTensor;
 
-namespace proxy {
-class RemoteContext;
-}
+namespace util {
+ov::Tensor make_tensor(const std::shared_ptr<ov::ITensor>& tensor, const std::shared_ptr<void>& so);
+void get_tensor_impl(const ov::Tensor& tensor, std::shared_ptr<ov::ITensor>& tensor_impl, std::shared_ptr<void>& so);
+}  // namespace util
 
 namespace op {
 namespace util {
 class VariableValue;
 }
 }  // namespace util
 }  // namespace op
 
 /**

@@ -63,19 +51,12 @@ protected:
      */
     Tensor(const std::shared_ptr<ITensor>& impl, const std::shared_ptr<void>& so);
 
-    friend class ov::Core;
-    friend class ov::CoreImpl;
-    friend class ov::InferRequest;
-    friend class ov::RemoteTensor;
-    friend class ov::RemoteContext;
-    friend class ov::VariableState;
-    friend class ov::ISyncInferRequest;
-    friend class ov::IInferRequestInternalWrapper;
-    friend class ov::IVariableStateInternalWrapper;
-    friend class ov::proxy::RemoteContext;
-    friend class InferenceEngine::IAsyncInferRequestWrapper;
-    friend class InferenceEngine::IVariableStateWrapper;
-    friend class ov::op::util::VariableValue;
+    friend ov::Tensor ov::util::make_tensor(const std::shared_ptr<ov::ITensor>& tensor,
+                                            const std::shared_ptr<void>& so);
+    friend void ov::util::get_tensor_impl(const ov::Tensor& tensor,
+                                          std::shared_ptr<ov::ITensor>& tensor_impl,
+                                          std::shared_ptr<void>& so);
 
 public:
     /// @brief Default constructor

@@ -4,7 +4,6 @@
 
 #include "openvino/runtime/itensor.hpp"
 
-#include "dev/make_tensor.hpp"
 #include "openvino/core/except.hpp"
 #include "openvino/runtime/allocator.hpp"
 #include "openvino/runtime/properties.hpp"

@@ -21,4 +20,25 @@ size_t ITensor::get_byte_size() const {
     return (get_size() * get_element_type().bitwidth() + 8 - 1) / 8;
 }
 
+bool ITensor::is_continuous() const {
+    if (get_element_type().bitwidth() < 8)
+        // OpenVINO doesn't support strides for lp types
+        return true;
+    const auto& shape = get_shape();
+    const auto& type = get_element_type();
+    std::vector<size_t> strides(shape.size());
+    if (!shape.empty()) {
+        strides[shape.size() - 1] = 1;
+    }
+    auto size = shape.size();
+    for (size_t i = 1; i < size; i++) {
+        strides[size - i - 1] = strides[size - i] * shape[size - i];
+    }
+
+    ov::Strides byte_strides(strides.size());
+    for (size_t i = 0; i < strides.size(); ++i)
+        byte_strides[i] = strides[i] * type.size();
+    return byte_strides == get_strides();
+}
+
 }  // namespace ov
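The implementation above compares the tensor's reported strides with the default dense, row-major byte strides. A standalone sketch of that computation (plain C++; the helper name is hypothetical):

#include <cstddef>
#include <vector>

// Default (dense, row-major) byte strides for a shape: the innermost
// dimension is contiguous, and each outer stride is the product of all
// inner extents, scaled by the element size.
std::vector<std::size_t> default_byte_strides(const std::vector<std::size_t>& shape, std::size_t element_size) {
    std::vector<std::size_t> strides(shape.size());
    if (!shape.empty())
        strides.back() = 1;
    for (std::size_t i = 1; i < shape.size(); i++)
        strides[shape.size() - i - 1] = strides[shape.size() - i] * shape[shape.size() - i];
    for (auto& s : strides)
        s *= element_size;
    return strides;
}

// Example: a 2x3x4 float32 tensor is continuous iff get_strides() == {48, 16, 4}.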
@@ -4,12 +4,12 @@
 
 #include <numeric>
 
-#include "dev/make_tensor.hpp"
 #include "openvino/core/except.hpp"
 #include "openvino/core/node_output.hpp"
 #include "openvino/core/shape.hpp"
 #include "openvino/core/strides.hpp"
 #include "openvino/runtime/itensor.hpp"
+#include "openvino/runtime/make_tensor.hpp"
 #include "openvino/runtime/remote_tensor.hpp"
 #include "openvino/runtime/tensor.hpp"
 #include "shape_util.hpp"

@@ -230,26 +230,7 @@ Tensor::operator bool() const noexcept {
 }
 
 bool Tensor::is_continuous() const {
-    OV_TENSOR_STATEMENT({
-        if (get_element_type().bitwidth() < 8)
-            // OpenVINO doesn't support strides for lp types
-            return true;
-        const auto& shape = get_shape();
-        const auto& type = get_element_type();
-        std::vector<size_t> strides(shape.size());
-        if (!shape.empty()) {
-            strides[shape.size() - 1] = 1;
-        }
-        auto size = shape.size();
-        for (size_t i = 1; i < size; i++) {
-            strides[size - i - 1] = strides[size - i] * shape[size - i];
-        }
-
-        ov::Strides byte_strides(strides.size());
-        for (size_t i = 0; i < strides.size(); ++i)
-            byte_strides[i] = strides[i] * type.size();
-        return byte_strides == get_strides();
-    });
+    OV_TENSOR_STATEMENT(return _impl->is_continuous());
 }
 
 }  // namespace ov
@@ -27,6 +27,7 @@ class Result;
 namespace InferenceEngine {
 
 class IInferencePlugin;
+class IPluginWrapper;
 class IInferRequestInternal;
 class RemoteContext;
 class IVariableStateInternal;

@@ -216,6 +217,7 @@ protected:
     bool _loadedFromCache = false;
 
     friend InferenceEngine::ICompiledModelWrapper;
+    friend InferenceEngine::IPluginWrapper;
 };
 
 /**
@@ -90,7 +90,7 @@ public:
      * @param port Port of the tensor to get.
      * @return Tensor for the port @p port.
      */
-    ov::Tensor get_tensor(const ov::Output<const ov::Node>& port) const override;
+    ov::SoPtr<ov::ITensor> get_tensor(const ov::Output<const ov::Node>& port) const override;
 
     /**
      * @brief Sets an input/output tensor to infer.

@@ -98,7 +98,7 @@ public:
      * @param tensor Reference to a tensor. The element_type and shape of a tensor must match
      * the model's input/output element_type and size.
      */
-    void set_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) override;
+    void set_tensor(const ov::Output<const ov::Node>& port, const ov::SoPtr<ov::ITensor>& tensor) override;
 
     /**
      * @brief Gets a batch of tensors for input data to infer by input port.

@@ -111,7 +111,7 @@ public:
      * input element type and shape (except batch dimension). Total size of tensors must match the input size.
      * @return vector of tensors
      */
-    std::vector<ov::Tensor> get_tensors(const ov::Output<const ov::Node>& port) const override;
+    std::vector<ov::SoPtr<ov::ITensor>> get_tensors(const ov::Output<const ov::Node>& port) const override;
     /**
      * @brief Sets a batch of tensors for input data to infer by input port.
      * Model input must have batch dimension, and the number of @p tensors must match the batch size.

@@ -122,7 +122,8 @@ public:
      * @param tensors Input tensors for batched infer request. The type of each tensor must match the model
      * input element type and shape (except batch dimension). Total size of tensors must match the input size.
      */
-    void set_tensors(const ov::Output<const ov::Node>& port, const std::vector<ov::Tensor>& tensors) override;
+    void set_tensors(const ov::Output<const ov::Node>& port,
+                     const std::vector<ov::SoPtr<ov::ITensor>>& tensors) override;
 
     /**
      * @brief Gets state control interface for the given infer request.

@@ -130,7 +131,7 @@ public:
      * State control essential for recurrent models.
      * @return Vector of Variable State objects.
      */
-    std::vector<std::shared_ptr<ov::IVariableState>> query_state() const override;
+    std::vector<ov::SoPtr<ov::IVariableState>> query_state() const override;
 
     /**
      * @brief Gets pointer to compiled model (usually synchronous request holds the compiled model)
@@ -15,8 +15,10 @@
 
 #include "openvino/core/node_output.hpp"
 #include "openvino/runtime/common.hpp"
+#include "openvino/runtime/iremote_context.hpp"
 #include "openvino/runtime/isync_infer_request.hpp"
 #include "openvino/runtime/remote_context.hpp"
+#include "openvino/runtime/so_ptr.hpp"
 #include "openvino/runtime/threading/cpu_streams_executor.hpp"
 #include "openvino/runtime/threading/itask_executor.hpp"
 

@@ -72,7 +74,7 @@ public:
     ICompiledModel(
         const std::shared_ptr<const ov::Model>& model,
         const std::shared_ptr<const ov::IPlugin>& plugin,
-        const ov::RemoteContext& context,
+        const ov::SoPtr<ov::IRemoteContext>& context,
         const std::shared_ptr<ov::threading::ITaskExecutor>& task_executor =
             std::make_shared<ov::threading::CPUStreamsExecutor>(ov::threading::IStreamsExecutor::Config{"Default"}),
         const std::shared_ptr<ov::threading::ITaskExecutor>& callback_executor =

@@ -134,7 +136,7 @@ public:
      *
      * @return OpenVINO RemoteContext
      */
-    std::shared_ptr<ov::IRemoteContext> get_context() const;
+    ov::SoPtr<ov::IRemoteContext> get_context() const;
 
     virtual ~ICompiledModel() = default;
 

@@ -142,7 +144,7 @@ private:
     std::shared_ptr<const ov::IPlugin> m_plugin;
     std::vector<ov::Output<const ov::Node>> m_inputs;
     std::vector<ov::Output<const ov::Node>> m_outputs;
-    ov::RemoteContext m_context;
+    ov::SoPtr<IRemoteContext> m_context;
 
     std::shared_ptr<ov::threading::ITaskExecutor> m_task_executor = nullptr;      //!< Holds a task executor
     std::shared_ptr<ov::threading::ITaskExecutor> m_callback_executor = nullptr;  //!< Holds a callback executor
@@ -80,7 +80,7 @@ public:
      * @return A pointer to compiled model
      */
     virtual ov::SoPtr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
-                                                        const ov::RemoteContext& context,
+                                                        const ov::SoPtr<ov::IRemoteContext>& context,
                                                         const ov::AnyMap& config = {}) const = 0;
 
     /**

@@ -138,7 +138,7 @@ public:
      * @return A pointer to compiled model
      */
     virtual ov::SoPtr<ov::ICompiledModel> import_model(std::istream& modelStream,
-                                                       const ov::RemoteContext& context,
+                                                       const ov::SoPtr<ov::IRemoteContext>& context,
                                                        const ov::AnyMap& config = {}) const = 0;
 
     /**

@@ -168,7 +168,7 @@ public:
      * @param params Map of device-specific shared context parameters.
      * @return A shared pointer to a created remote context.
      */
-    virtual ov::RemoteContext create_context(const std::string& device_name, const AnyMap& args) const = 0;
+    virtual ov::SoPtr<ov::IRemoteContext> create_context(const std::string& device_name, const AnyMap& args) const = 0;
 
     virtual bool is_new_api() const = 0;
 

@@ -177,7 +177,7 @@ public:
      * @param device_name - A name of a device to get create shared context from.
      * @return A shared pointer to a default remote context.
      */
-    virtual ov::RemoteContext get_default_context(const std::string& device_name) const = 0;
+    virtual ov::SoPtr<ov::IRemoteContext> get_default_context(const std::string& device_name) const = 0;
 
     /**
      * @brief Gets properties related to device behaviour.
@@ -17,6 +17,7 @@
 #include "openvino/runtime/common.hpp"
 #include "openvino/runtime/ivariable_state.hpp"
 #include "openvino/runtime/profiling_info.hpp"
+#include "openvino/runtime/so_ptr.hpp"
 #include "openvino/runtime/tensor.hpp"
 
 namespace ov {

@@ -51,7 +52,7 @@ public:
      * @param port Port of the tensor to get.
      * @return Tensor for the port @p port.
      */
-    virtual ov::Tensor get_tensor(const ov::Output<const ov::Node>& port) const = 0;
+    virtual ov::SoPtr<ov::ITensor> get_tensor(const ov::Output<const ov::Node>& port) const = 0;
 
     /**
      * @brief Sets an input/output tensor to infer.

@@ -59,7 +60,7 @@ public:
      * @param tensor Reference to a tensor. The element_type and shape of a tensor must match
      * the model's input/output element_type and size.
      */
-    virtual void set_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) = 0;
+    virtual void set_tensor(const ov::Output<const ov::Node>& port, const ov::SoPtr<ov::ITensor>& tensor) = 0;
 
     /**
      * @brief Gets a batch of tensors for input data to infer by input port.

@@ -72,7 +73,7 @@ public:
      * input element type and shape (except batch dimension). Total size of tensors must match the input size.
      * @return vector of tensors
      */
-    virtual std::vector<ov::Tensor> get_tensors(const ov::Output<const ov::Node>& port) const = 0;
+    virtual std::vector<ov::SoPtr<ov::ITensor>> get_tensors(const ov::Output<const ov::Node>& port) const = 0;
 
     /**
      * @brief Sets a batch of tensors for input data to infer by input port.

@@ -84,7 +85,8 @@ public:
      * @param tensors Input tensors for batched infer request. The type of each tensor must match the model
      * input element type and shape (except batch dimension). Total size of tensors must match the input size.
      */
-    virtual void set_tensors(const ov::Output<const ov::Node>& port, const std::vector<ov::Tensor>& tensors) = 0;
+    virtual void set_tensors(const ov::Output<const ov::Node>& port,
+                             const std::vector<ov::SoPtr<ov::ITensor>>& tensors) = 0;
 
     /**
      * @brief Gets state control interface for the given infer request.

@@ -92,7 +94,7 @@ public:
      * State control essential for recurrent models.
      * @return Vector of Variable State objects.
      */
-    virtual std::vector<std::shared_ptr<ov::IVariableState>> query_state() const = 0;
+    virtual std::vector<ov::SoPtr<ov::IVariableState>> query_state() const = 0;
 
     /**
      * @brief Gets pointer to compiled model (usually synchronous request holds the compiled model)
@@ -133,7 +133,7 @@ public:
      */
     virtual std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                               const ov::AnyMap& properties,
-                                                              const ov::RemoteContext& context) const = 0;
+                                                              const ov::SoPtr<ov::IRemoteContext>& context) const = 0;
 
     /**
      * @brief Sets properties for plugin, acceptable keys can be found in openvino/runtime/properties.hpp

@@ -157,7 +157,7 @@ public:
      *
      * @return A remote context object
      */
-    virtual std::shared_ptr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const = 0;
+    virtual ov::SoPtr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const = 0;
 
     /**
      * @brief Provides a default remote context instance if supported by a plugin

@@ -165,7 +165,7 @@ public:
      *
      * @return The default context.
      */
-    virtual std::shared_ptr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const = 0;
+    virtual ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const = 0;
 
     /**
      * @brief Creates an compiled model from an previously exported model using plugin implementation

@@ -187,7 +187,7 @@ public:
      * @return An Compiled model
      */
     virtual std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
-                                                             const ov::RemoteContext& context,
+                                                             const ov::SoPtr<ov::IRemoteContext>& context,
                                                              const ov::AnyMap& properties) const = 0;
 
     /**
@@ -16,6 +16,7 @@
 #include "openvino/core/type/element_type.hpp"
 #include "openvino/runtime/common.hpp"
 #include "openvino/runtime/iremote_tensor.hpp"
+#include "openvino/runtime/so_ptr.hpp"
 
 namespace ov {
 

@@ -50,7 +51,7 @@ public:
      * @param params Map of the low-level tensor object parameters.
      * @return Pointer to a plugin object that implements the RemoteTensor interface.
      */
-    virtual std::shared_ptr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
+    virtual ov::SoPtr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
                                                        const ov::Shape& shape,
                                                        const ov::AnyMap& params = {}) = 0;
 

@@ -62,7 +63,7 @@ public:
      * @param shape Tensor shape.
      * @return A tensor instance with device friendly memory.
      */
-    virtual std::shared_ptr<ov::ITensor> create_host_tensor(const ov::element::Type type, const ov::Shape& shape);
+    virtual ov::SoPtr<ov::ITensor> create_host_tensor(const ov::element::Type type, const ov::Shape& shape);
 };
 
 }  // namespace ov
@@ -41,7 +41,7 @@ public:
      * @param port Port of the tensor to get.
      * @return Tensor for the port @p port.
      */
-    ov::Tensor get_tensor(const ov::Output<const ov::Node>& port) const override;
+    ov::SoPtr<ov::ITensor> get_tensor(const ov::Output<const ov::Node>& port) const override;
 
     /**
      * @brief Sets an input/output tensor to infer.

@@ -49,7 +49,7 @@ public:
      * @param tensor Reference to a tensor. The element_type and shape of a tensor must match
      * the model's input/output element_type and size.
      */
-    void set_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) override;
+    void set_tensor(const ov::Output<const ov::Node>& port, const ov::SoPtr<ov::ITensor>& tensor) override;
 
     /**
      * @brief Gets a batch of tensors for input data to infer by input port.

@@ -62,7 +62,7 @@ public:
      * input element type and shape (except batch dimension). Total size of tensors must match the input size.
      * @return vector of tensors
      */
-    std::vector<ov::Tensor> get_tensors(const ov::Output<const ov::Node>& port) const override;
+    std::vector<ov::SoPtr<ov::ITensor>> get_tensors(const ov::Output<const ov::Node>& port) const override;
 
     /**
      * @brief Sets a batch of tensors for input data to infer by input port.

@@ -74,7 +74,8 @@ public:
      * @param tensors Input tensors for batched infer request. The type of each tensor must match the model
      * input element type and shape (except batch dimension). Total size of tensors must match the input size.
      */
-    void set_tensors(const ov::Output<const ov::Node>& port, const std::vector<ov::Tensor>& tensors) override;
+    void set_tensors(const ov::Output<const ov::Node>& port,
+                     const std::vector<ov::SoPtr<ov::ITensor>>& tensors) override;
 
     /**
      * @brief Plugin implementation for set tensors

@@ -83,7 +84,8 @@ public:
      * @param tensors Input tensors for batched infer request. The type of each tensor must match the model
      * input element type and shape (except batch dimension). Total size of tensors must match the input size.
      */
-    virtual void set_tensors_impl(const ov::Output<const ov::Node> port, const std::vector<ov::Tensor>& tensors);
+    virtual void set_tensors_impl(const ov::Output<const ov::Node> port,
+                                  const std::vector<ov::SoPtr<ov::ITensor>>& tensors);
 
     /**
      * @brief Gets inputs for infer request

@@ -132,7 +134,7 @@ protected:
      * @param port Input/Output port
      * @param tensor Input/Output tensor
      */
-    void check_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) const;
+    void check_tensor(const ov::Output<const ov::Node>& port, const ov::SoPtr<ov::ITensor>& tensor) const;
 
     /**
      * @brief Check that all tensors are valid. Throws an exception if it's not.

@@ -146,15 +148,15 @@ protected:
      * @param allocate_callback function which allocates the tensor
      */
     void allocate_tensor(const ov::Output<const ov::Node>& port,
-                         const std::function<void(ov::Tensor& tensor)>& allocate_callback);
+                         const std::function<void(ov::SoPtr<ov::ITensor>& tensor)>& allocate_callback);
 
-    std::unordered_map<std::shared_ptr<ov::descriptor::Tensor>, std::vector<ov::Tensor>> m_batched_tensors;
+    std::unordered_map<std::shared_ptr<ov::descriptor::Tensor>, std::vector<ov::SoPtr<ov::ITensor>>> m_batched_tensors;
 
 private:
     std::shared_ptr<const ov::ICompiledModel> m_compiled_model;
     // Mutable to return reference to ov::Tensor
-    mutable std::unordered_map<std::shared_ptr<ov::descriptor::Tensor>, ov::Tensor> m_tensors;
-    ov::Tensor& get_ref_tensor(const ov::Output<const ov::Node>& port) const;
+    mutable std::unordered_map<std::shared_ptr<ov::descriptor::Tensor>, ov::SoPtr<ov::ITensor>> m_tensors;
+    ov::SoPtr<ov::ITensor>& get_tensor_ptr(const ov::Output<const ov::Node>& port) const;
 
     /**
      * @brief Finds input or output port
@@ -13,6 +13,7 @@
 #include <string>
 
 #include "openvino/runtime/common.hpp"
+#include "openvino/runtime/so_ptr.hpp"
 #include "openvino/runtime/tensor.hpp"
 
 namespace ov {

@@ -42,13 +43,13 @@ public:
      * @brief Sets the new state for the next inference
      * @param newState A new state
      */
-    virtual void set_state(const ov::Tensor& state);
+    virtual void set_state(const ov::SoPtr<ov::ITensor>& state);
 
     /**
      * @brief Returns the value of the variable state.
      * @return The value of the variable state
      */
-    virtual const ov::Tensor& get_state() const;
+    virtual const ov::SoPtr<ov::ITensor>& get_state() const;
 
 protected:
     /**

@@ -57,7 +58,7 @@ protected:
     virtual ~IVariableState();
 
     std::string m_name;
-    ov::Tensor m_state;
+    ov::SoPtr<ov::ITensor> m_state;
 };
 
 }  // namespace ov
@@ -5,7 +5,9 @@
 #pragma once
 
 #include "ie_blob.h"
+#include "openvino/runtime/common.hpp"
 #include "openvino/runtime/itensor.hpp"
+#include "openvino/runtime/so_ptr.hpp"
 
 namespace ov {
 

@@ -15,7 +17,9 @@ namespace ov {
  * @param shape Tensor shape
  * @param allocator allocates memory for internal tensor storage
  */
-std::shared_ptr<ITensor> make_tensor(const element::Type type, const Shape& shape, const Allocator& allocator = {});
+OPENVINO_RUNTIME_API std::shared_ptr<ITensor> make_tensor(const element::Type type,
+                                                          const Shape& shape,
+                                                          const Allocator& allocator = {});
 
 /**
  * @brief Constructs Tensor using element type and shape. Wraps allocated host memory.

@@ -26,7 +30,7 @@ std::shared_ptr<ITensor> make_tensor(const element::Type type, const Shape& shap
  * @param strides Optional strides parameters in bytes. Strides are supposed to be computed automatically based
  * on shape and element size
  */
-std::shared_ptr<ITensor> make_tensor(const element::Type type,
+OPENVINO_RUNTIME_API std::shared_ptr<ITensor> make_tensor(const element::Type type,
                                                           const Shape& shape,
                                                           void* host_ptr,
                                                           const Strides& strides = {});

@@ -39,17 +43,35 @@ std::shared_ptr<ITensor> make_tensor(const element::Type type,
  * @param end end coordinate of ROI object inside of the original object.
  * @note A Number of dimensions in `begin` and `end` must match number of dimensions in `other.get_shape()`
 */
-std::shared_ptr<ITensor> make_tensor(const std::shared_ptr<ITensor>& other,
+OPENVINO_RUNTIME_API std::shared_ptr<ITensor> make_tensor(const std::shared_ptr<ITensor>& other,
                                                           const Coordinate& begin,
                                                           const Coordinate& end);
 
+/**
+ * @brief Constructs public ov::Tensor class
+ *
+ * @param tensor Tensor implementation
+ *
+ * @return OpenVINO Tensor
+ */
+OPENVINO_RUNTIME_API ov::Tensor make_tensor(const ov::SoPtr<ITensor>& tensor);
+
+/**
+ * @brief Returns tensor implementation
+ *
+ * @param tensor OpenVINO Tensor
+ *
+ * @return SoPtr to ITensor
+ */
+OPENVINO_RUNTIME_API ov::SoPtr<ov::ITensor> get_tensor_impl(const ov::Tensor& tensor);
+
 IE_SUPPRESS_DEPRECATED_START
 /** @cond INTERNAL */
-std::shared_ptr<ITensor> make_tensor(const std::shared_ptr<InferenceEngine::Blob>& tensor);
+ov::SoPtr<ITensor> make_tensor(const std::shared_ptr<InferenceEngine::Blob>& tensor);
 const InferenceEngine::Blob* get_hardware_blob(const InferenceEngine::Blob* blob);
 InferenceEngine::Blob* get_hardware_blob(InferenceEngine::Blob* blob);
 
-std::shared_ptr<InferenceEngine::Blob> tensor_to_blob(const std::shared_ptr<ITensor>& tensor, bool unwrap = true);
+std::shared_ptr<InferenceEngine::Blob> tensor_to_blob(const ov::SoPtr<ITensor>& tensor, bool unwrap = true);
 /** @endcond */
 
 IE_SUPPRESS_DEPRECATED_END
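make_tensor(const SoPtr<ITensor>&) and get_tensor_impl(const Tensor&) declared above become the sanctioned bridge between the public ov::Tensor and the dev-API ITensor. A short round-trip sketch, assuming only those declarations:

#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/tensor.hpp"

ov::Tensor wrap_and_unwrap(const ov::SoPtr<ov::ITensor>& impl) {
    // implementation + so handle -> public API object
    ov::Tensor public_tensor = ov::make_tensor(impl);
    // public API object -> implementation plus the shared-object handle that
    // keeps the plugin library loaded while the tensor is referenced
    ov::SoPtr<ov::ITensor> round_trip = ov::get_tensor_impl(public_tensor);
    return public_tensor;
}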
@@ -43,6 +43,20 @@ struct SoPtr {
      */
     SoPtr(const std::shared_ptr<T>& ptr, const std::shared_ptr<void>& so) : _ptr{ptr}, _so{so} {}
 
+    /**
+     * @brief Constructs an object with existing shared object reference
+     * @param ptr pointer to the loaded object
+     */
+    SoPtr(const std::shared_ptr<T>& ptr) : _ptr{ptr}, _so{nullptr} {}
+
+    /**
+     * @brief Constructs an object with existing shared object reference
+     * @param ptr pointer to the loaded object
+     */
+    template <class U, typename std::enable_if<std::is_base_of<T, U>::value, bool>::type = true>
+    SoPtr(const std::shared_ptr<U>& ptr) : _ptr{std::dynamic_pointer_cast<T>(ptr)},
+                                           _so{nullptr} {}
+
     /**
      * @brief The copy-like constructor, can create So Pointer that dereferenced into child type if T is derived of U
      * @param that copied SoPtr object
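The two added constructors let callers hand a plain shared_ptr to any API that now expects a SoPtr, leaving the _so handle empty. A small usage sketch with hypothetical types:

#include <memory>
#include "openvino/runtime/so_ptr.hpp"

struct Base {
    virtual ~Base() = default;
};
struct Derived : Base {};

void example() {
    // Implicit conversion from shared_ptr<T>; _so stays null, which is fine
    // for objects that do not come from a dynamically loaded plugin.
    ov::SoPtr<Base> a = std::make_shared<Base>();

    // The templated overload also accepts a shared_ptr to a derived type.
    ov::SoPtr<Base> b = std::make_shared<Derived>();

    // When the object does come from a plugin, the two-argument form binds
    // the library handle so the library outlives the object.
    std::shared_ptr<void> so;  // e.g. obtained from ov::util::load_shared_object
    ov::SoPtr<Base> c{std::make_shared<Derived>(), so};
}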
@@ -18,29 +18,12 @@
 #include "openvino/runtime/properties.hpp"
 #include "openvino/runtime/remote_tensor.hpp"
 
-namespace InferenceEngine {
-class IPluginWrapper;
-class ICompiledModelWrapper;
-class Core;
-}  // namespace InferenceEngine
-
 namespace ov {
 
 class Core;
-class CoreImpl;
-class Plugin;
-class IPlugin;
 class IRemoteContext;
-class ISyncInferRequest;
-class IInferencePluginWrapper;
-class IExecutableNetworkWrapper;
-class ICompiledModel;
 class CompiledModel;
 
-namespace proxy {
-class RemoteContext;
-}
-
 /**
  * @brief This class represents an abstraction
  * @ingroup ov_runtime_cpp_api

@@ -60,19 +43,8 @@ protected:
      * object is destroyed.
      */
     RemoteContext(const std::shared_ptr<IRemoteContext>& impl, const std::shared_ptr<void>& so);
-    friend class InferenceEngine::Core;
-    friend class InferenceEngine::IPluginWrapper;
-    friend class InferenceEngine::ICompiledModelWrapper;
     friend class ov::Core;
-    friend class ov::CoreImpl;
-    friend class ov::Plugin;
-    friend class ov::IPlugin;
-    friend class ov::ISyncInferRequest;
-    friend class ov::IInferencePluginWrapper;
-    friend class ov::IExecutableNetworkWrapper;
-    friend class ov::ICompiledModel;
     friend class ov::CompiledModel;
-    friend class ov::proxy::RemoteContext;
 
 public:
     /**
@@ -14,10 +14,6 @@
 
 namespace ov {
 
-namespace proxy {
-class RemoteTensor;
-}
-
 class RemoteContext;
 
 /**

@@ -26,8 +22,6 @@ class RemoteContext;
  */
 class OPENVINO_RUNTIME_API RemoteTensor : public Tensor {
     using Tensor::Tensor;
-    friend class ov::RemoteContext;
-    friend class ov::proxy::RemoteTensor;
 
 public:
     /**
@@ -121,7 +121,9 @@ Any CompiledModel::get_property(const std::string& name) const {
 RemoteContext CompiledModel::get_context() const {
     OV_COMPILED_MODEL_CALL_STATEMENT({
         auto ctx = _impl->get_context();
-        return {ctx, {_so}};
+        if (!ctx._so)
+            ctx._so = _so;
+        return {ctx._ptr, ctx._so};
    });
 }
 
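The fallback above is the ownership pattern this commit repeats in several places: if the implementation arrived without its own shared-object handle, the caller's handle is attached before the pair is exposed, so the plugin library cannot be unloaded while the returned object is alive. A hedged sketch of the pattern in isolation (hypothetical helper name):

#include <memory>
#include "openvino/runtime/so_ptr.hpp"

template <class T>
ov::SoPtr<T> with_fallback_so(ov::SoPtr<T> obj, const std::shared_ptr<void>& parent_so) {
    if (!obj._so)             // implementation came without a library handle
        obj._so = parent_so;  // borrow the parent's handle to pin the library
    return obj;
}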
@@ -11,6 +11,7 @@
 #include "ie_itt.hpp"
 #include "openvino/core/so_extension.hpp"
 #include "openvino/runtime/device_id_parser.hpp"
+#include "openvino/runtime/iremote_context.hpp"
 
 namespace {
 std::string resolve_extension_path(const std::string& path) {

@@ -156,7 +157,7 @@ CompiledModel Core::compile_model(const std::shared_ptr<const ov::Model>& model,
                                   const RemoteContext& context,
                                   const AnyMap& config) {
     OV_CORE_CALL_STATEMENT({
-        auto exec = _impl->compile_model(model, context, config);
+        auto exec = _impl->compile_model(model, ov::SoPtr<ov::IRemoteContext>{context._impl, context._so}, config);
         return {exec._ptr, exec._so};
     });
 }

@@ -222,7 +223,7 @@ CompiledModel Core::import_model(std::istream& modelStream, const RemoteContext&
     OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model");
 
     OV_CORE_CALL_STATEMENT({
-        auto exec = _impl->import_model(modelStream, context, config);
+        auto exec = _impl->import_model(modelStream, ov::SoPtr<ov::IRemoteContext>{context._impl, context._so}, config);
         return {exec._ptr, exec._so};
     });
 }

@@ -279,7 +280,7 @@ RemoteContext Core::create_context(const std::string& device_name, const AnyMap&
     OV_CORE_CALL_STATEMENT({
         auto parsed = parseDeviceNameIntoConfig(device_name, params);
         auto remoteContext = _impl->get_plugin(parsed._deviceName).create_context(parsed._config);
-        return {remoteContext._impl, {remoteContext._so}};
+        return {remoteContext._ptr, remoteContext._so};
     });
 }
 

@@ -292,7 +293,7 @@ RemoteContext Core::get_default_context(const std::string& device_name) {
     OV_CORE_CALL_STATEMENT({
         auto parsed = parseDeviceNameIntoConfig(device_name, AnyMap{});
         auto remoteContext = _impl->get_plugin(parsed._deviceName).get_default_context(parsed._config);
-        return {remoteContext._impl, {remoteContext._so}};
+        return {remoteContext._ptr, remoteContext._so};
     });
 }
 
@@ -7,12 +7,12 @@
 #include <exception>
 
 #include "any_copy.hpp"
-#include "dev/make_tensor.hpp"
 #include "ie_ngraph_utils.hpp"
 #include "ie_remote_blob.hpp"
 #include "openvino/core/except.hpp"
 #include "openvino/runtime/iremote_context.hpp"
 #include "openvino/runtime/itensor.hpp"
+#include "openvino/runtime/make_tensor.hpp"
 #include "openvino/runtime/remote_context.hpp"
 
 #define OV_REMOTE_CONTEXT_STATEMENT(...) \

@@ -71,14 +71,14 @@ std::string RemoteContext::get_device_name() const {
 RemoteTensor RemoteContext::create_tensor(const element::Type& type, const Shape& shape, const AnyMap& params) {
     OV_REMOTE_CONTEXT_STATEMENT({
         auto tensor = _impl->create_tensor(type, shape, params);
-        return {tensor, {_so}};
+        return make_tensor(tensor).as<ov::RemoteTensor>();
     });
 }
 
 Tensor RemoteContext::create_host_tensor(const element::Type element_type, const Shape& shape) {
     OV_REMOTE_CONTEXT_STATEMENT({
         auto tensor = _impl->create_host_tensor(element_type, shape);
-        return {tensor, {_so}};
+        return make_tensor(tensor);
     });
 }
 
@@ -6,6 +6,7 @@
 #include "cpp_interfaces/interface/ie_ivariable_state_internal.hpp"
 #include "openvino/core/except.hpp"
 #include "openvino/runtime/ivariable_state.hpp"
+#include "openvino/runtime/make_tensor.hpp"
 #include "openvino/runtime/variable_state.hpp"
 
 IE_SUPPRESS_DEPRECATED_START

@@ -81,11 +82,14 @@ std::string VariableState::get_name() const {
 }
 
 Tensor VariableState::get_state() const {
-    OV_VARIABLE_CALL_STATEMENT(return _impl->get_state());
+    OV_VARIABLE_CALL_STATEMENT({
+        auto tensor = _impl->get_state();
+        return make_tensor(tensor);
+    });
 }
 
 void VariableState::set_state(const Tensor& state) {
-    OV_VARIABLE_CALL_STATEMENT(_impl->set_state(state));
+    OV_VARIABLE_CALL_STATEMENT(_impl->set_state(get_tensor_impl(state)));
 }
 
 }  // namespace ov
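Both accessors now convert at the public/dev boundary through the same two helpers, so plugins only ever see SoPtr<ITensor>. A brief sketch of the resulting flow, assuming the public VariableState API:

#include "openvino/runtime/variable_state.hpp"

void roundtrip_state(ov::VariableState& state) {
    ov::Tensor t = state.get_state();  // SoPtr<ITensor> -> Tensor via make_tensor
    state.set_state(t);                // Tensor -> SoPtr<ITensor> via get_tensor_impl
}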
@@ -13,7 +13,6 @@
 #include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp"
 #include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
 #include "cpp_interfaces/interface/ie_ivariable_state_internal.hpp"
-#include "dev/make_tensor.hpp"
 #include "icompiled_model_wrapper.hpp"
 #include "ie_blob.h"
 #include "ie_common.h"

@@ -31,8 +30,10 @@
 #include "openvino/runtime/icompiled_model.hpp"
 #include "openvino/runtime/iinfer_request.hpp"
 #include "openvino/runtime/iplugin.hpp"
+#include "openvino/runtime/iremote_context.hpp"
 #include "openvino/runtime/itensor.hpp"
 #include "openvino/runtime/ivariable_state.hpp"
+#include "openvino/runtime/make_tensor.hpp"
 #include "openvino/runtime/profiling_info.hpp"
 #include "openvino/runtime/remote_context.hpp"
 #include "openvino/runtime/so_ptr.hpp"

@@ -197,10 +198,10 @@ std::shared_ptr<const ov::Model> ov::legacy_convert::convert_model(const Inferen
 namespace ov {
 
 class IVariableStateInternalWrapper : public InferenceEngine::IVariableStateInternal {
-    std::shared_ptr<ov::IVariableState> m_state;
+    ov::SoPtr<ov::IVariableState> m_state;
 
 public:
-    IVariableStateInternalWrapper(const std::shared_ptr<ov::IVariableState>& state)
+    IVariableStateInternalWrapper(const ov::SoPtr<ov::IVariableState>& state)
         : InferenceEngine::IVariableStateInternal(state->get_name()),
           m_state(state) {}
 

@@ -213,17 +214,17 @@ public:
     }
 
     void SetState(const InferenceEngine::Blob::Ptr& newState) override {
-        m_state->set_state(ov::Tensor(ov::make_tensor(newState), {}));
+        m_state->set_state(ov::make_tensor(newState));
     }
 
     InferenceEngine::Blob::CPtr GetState() const override {
-        return tensor_to_blob(m_state->get_state()._impl);
+        return tensor_to_blob(m_state->get_state());
     }
 };
 
 class IInferencePluginWrapper : public InferenceEngine::IInferencePlugin {
 public:
-    IInferencePluginWrapper(const std::shared_ptr<ov::IPlugin>& plugin) : m_plugin(plugin) {
+    IInferencePluginWrapper(const ov::SoPtr<ov::IPlugin>& plugin) : m_plugin(plugin) {
         auto& ver = plugin->get_version();
         InferenceEngine::Version version;
         version.buildNumber = ver.buildNumber;
@@ -247,8 +248,9 @@ public:
         const InferenceEngine::CNNNetwork& network,
         const std::map<std::string, std::string>& config) override {
         return ov::legacy_convert::convert_compiled_model(
-            m_plugin->compile_model(ov::legacy_convert::convert_model(network, m_plugin->is_new_api()),
-                                    ov::any_copy(config)));
+            {m_plugin->compile_model(ov::legacy_convert::convert_model(network, m_plugin->is_new_api()),
+                                     ov::any_copy(config)),
+             m_plugin._so});
     }
 
     std::shared_ptr<InferenceEngine::IExecutableNetworkInternal> LoadNetwork(

@@ -256,17 +258,19 @@ public:
         const std::map<std::string, std::string>& config,
         const std::shared_ptr<InferenceEngine::RemoteContext>& context) override {
         return ov::legacy_convert::convert_compiled_model(
-            m_plugin->compile_model(ov::legacy_convert::convert_model(network, m_plugin->is_new_api()),
+            {m_plugin->compile_model(ov::legacy_convert::convert_model(network, m_plugin->is_new_api()),
                                     ov::any_copy(config),
-                                    ov::RemoteContext{ov::legacy_convert::convert_remote_context(context), {}}));
+                                     ov::legacy_convert::convert_remote_context(context)),
+             m_plugin._so});
     }
 
     ov::SoPtr<InferenceEngine::IExecutableNetworkInternal> LoadNetwork(
         const std::string& modelPath,
         const std::map<std::string, std::string>& config) override {
         return ov::SoPtr<InferenceEngine::IExecutableNetworkInternal>(
-            ov::legacy_convert::convert_compiled_model(m_plugin->compile_model(modelPath, ov::any_copy(config))),
-            {});
+            ov::legacy_convert::convert_compiled_model(
+                {m_plugin->compile_model(modelPath, ov::any_copy(config)), m_plugin._so}),
+            m_plugin._so);
     }
 
     void AddExtension(const std::shared_ptr<InferenceEngine::IExtension>& extension) override {

@@ -306,13 +310,15 @@ public:
         const std::string& modelFileName,
         const std::map<std::string, std::string>& config) override {
         std::ifstream model(modelFileName, std::ios::binary);
-        return ov::legacy_convert::convert_compiled_model(m_plugin->import_model(model, ov::any_copy(config)));
+        return ov::legacy_convert::convert_compiled_model(
+            {m_plugin->import_model(model, ov::any_copy(config)), m_plugin._so});
     }
 
     std::shared_ptr<InferenceEngine::IExecutableNetworkInternal> ImportNetwork(
         std::istream& networkModel,
         const std::map<std::string, std::string>& config) override {
-        return ov::legacy_convert::convert_compiled_model(m_plugin->import_model(networkModel, ov::any_copy(config)));
+        return ov::legacy_convert::convert_compiled_model(
+            {m_plugin->import_model(networkModel, ov::any_copy(config)), m_plugin._so});
     }
 
     std::shared_ptr<InferenceEngine::IExecutableNetworkInternal> ImportNetwork(

@@ -320,9 +326,10 @@ public:
         const std::shared_ptr<InferenceEngine::RemoteContext>& context,
         const std::map<std::string, std::string>& config) override {
         return ov::legacy_convert::convert_compiled_model(
-            m_plugin->import_model(networkModel,
-                                   ov::RemoteContext{ov::legacy_convert::convert_remote_context(context), {}},
-                                   ov::any_copy(config)));
+            {m_plugin->import_model(networkModel,
+                                    ov::legacy_convert::convert_remote_context(context),
+                                    ov::any_copy(config)),
+             m_plugin._so});
     }
 
     void SetCore(std::weak_ptr<InferenceEngine::ICore> core) override {

@@ -348,19 +355,19 @@ public:
         return ret;
     }
 
-    std::shared_ptr<ov::IPlugin> get_plugin() {
+    ov::SoPtr<ov::IPlugin> get_plugin() {
         return m_plugin;
     }
 
 private:
-    std::shared_ptr<ov::IPlugin> m_plugin;
+    ov::SoPtr<ov::IPlugin> m_plugin;
 };
 
 }  // namespace ov
 
 std::shared_ptr<::InferenceEngine::IInferencePlugin> ov::legacy_convert::convert_plugin(
-    const std::shared_ptr<::ov::IPlugin>& plugin) {
-    if (auto wrapper = std::dynamic_pointer_cast<InferenceEngine::IPluginWrapper>(plugin))
+    const ov::SoPtr<::ov::IPlugin>& plugin) {
+    if (auto wrapper = std::dynamic_pointer_cast<InferenceEngine::IPluginWrapper>(plugin._ptr))
         return wrapper->get_plugin();
     return std::make_shared<ov::IInferencePluginWrapper>(plugin);
 }
@@ -375,7 +382,7 @@ namespace ov {
 
 class IExecutableNetworkWrapper : public InferenceEngine::IExecutableNetworkInternal {
 public:
-    explicit IExecutableNetworkWrapper(const std::shared_ptr<ov::ICompiledModel>& model) : m_model(model) {
+    explicit IExecutableNetworkWrapper(const ov::SoPtr<ov::ICompiledModel>& model) : m_model(model) {
         for (const auto& input : m_model->inputs()) {
             InferenceEngine::InputInfo::Ptr input_info;
             ov::legacy_convert::fill_input_info(input, input_info);

@@ -390,11 +397,13 @@ public:
             _networkOutputs[output_info->getName()] = output_info;
             _results.emplace_back(output.get_node_shared_ptr());
         }
-        _plugin = ov::legacy_convert::convert_plugin(std::const_pointer_cast<ov::IPlugin>(m_model->m_plugin));
+        _plugin =
+            ov::legacy_convert::convert_plugin({std::const_pointer_cast<ov::IPlugin>(m_model->m_plugin), m_model._so});
+        _so = model._so;
     }
 
     std::shared_ptr<InferenceEngine::IInferRequestInternal> CreateInferRequest() override {
-        auto infer_request = legacy_convert::convert_infer_request(m_model->create_infer_request());
+        auto infer_request = legacy_convert::convert_infer_request({m_model->create_infer_request(), m_model._so});
         infer_request->setPointerToExecutableNetworkInternal(shared_from_this());
         return infer_request;
     }

@@ -428,29 +437,29 @@ public:
         return ov::legacy_convert::convert_remote_context(m_model->get_context());
     }
 
-    std::shared_ptr<ov::ICompiledModel> get_compiled_model() {
+    ov::SoPtr<ov::ICompiledModel> get_compiled_model() {
         return m_model;
     }
 
 private:
-    std::shared_ptr<ov::ICompiledModel> m_model;
+    ov::SoPtr<ov::ICompiledModel> m_model;
 };
 }  // namespace ov
 
 std::shared_ptr<InferenceEngine::IExecutableNetworkInternal> ov::legacy_convert::convert_compiled_model(
-    const std::shared_ptr<ov::ICompiledModel>& model) {
-    if (auto comp_model = std::dynamic_pointer_cast<InferenceEngine::ICompiledModelWrapper>(model)) {
+    const ov::SoPtr<ov::ICompiledModel>& model) {
+    if (auto comp_model = std::dynamic_pointer_cast<InferenceEngine::ICompiledModelWrapper>(model._ptr)) {
         return comp_model->get_executable_network();
     }
     return std::make_shared<ov::IExecutableNetworkWrapper>(model);
 }
 
-std::shared_ptr<ov::ICompiledModel> ov::legacy_convert::convert_compiled_model(
+ov::SoPtr<ov::ICompiledModel> ov::legacy_convert::convert_compiled_model(
     const std::shared_ptr<InferenceEngine::IExecutableNetworkInternal>& model) {
     if (auto comp_model = std::dynamic_pointer_cast<ov::IExecutableNetworkWrapper>(model)) {
         return comp_model->get_compiled_model();
     }
-    return std::make_shared<InferenceEngine::ICompiledModelWrapper>(model);
+    return {std::make_shared<InferenceEngine::ICompiledModelWrapper>(model), model->GetPointerToSo()};
 }
 
 namespace ov {
@@ -469,8 +478,9 @@ class IInferRequestInternalWrapper : public InferenceEngine::IInferRequestIntern
     }
 
 public:
-    explicit IInferRequestInternalWrapper(const std::shared_ptr<ov::IAsyncInferRequest>& request)
-        : m_request(request) {}
+    explicit IInferRequestInternalWrapper(const ov::SoPtr<ov::IAsyncInferRequest>& request) : m_request(request) {
+        _so = request._so;
+    }
 
     void Infer() override {
         m_request->infer();

@@ -511,7 +521,7 @@ public:
 
     void SetBlob(const std::string& name, const InferenceEngine::Blob::Ptr& data) override {
         try {
-            m_request->set_tensor(find_port(name), ov::Tensor{ov::make_tensor(data), {}});
+            m_request->set_tensor(find_port(name), ov::make_tensor(data));
         } catch (const ov::Exception& ex) {
             const std::string what = ex.what();
             if (what.find("Failed to set tensor") != std::string::npos) {

@@ -523,9 +533,9 @@ public:
 
     void SetBlobs(const std::string& name, const std::vector<InferenceEngine::Blob::Ptr>& blobs) override {
         try {
-            std::vector<ov::Tensor> tensors;
+            std::vector<ov::SoPtr<ov::ITensor>> tensors;
             for (const auto& blob : blobs) {
-                tensors.emplace_back(ov::Tensor{ov::make_tensor(blob), {}});
+                tensors.emplace_back(ov::make_tensor(blob));
             }
             m_request->set_tensors(find_port(name), tensors);
         } catch (const ov::Exception& ex) {

@@ -534,22 +544,22 @@ public:
     }
 
     InferenceEngine::Blob::Ptr GetBlob(const std::string& name) override {
-        return tensor_to_blob(m_request->get_tensor(find_port(name))._impl);
+        return tensor_to_blob(m_request->get_tensor(find_port(name)));
     }
 
     InferenceEngine::BatchedBlob::Ptr GetBlobs(const std::string& name) override {
         auto tensors = m_request->get_tensors(find_port(name));
         std::vector<InferenceEngine::Blob::Ptr> blobs;
         for (const auto& tensor : tensors) {
-            blobs.emplace_back(tensor_to_blob(tensor._impl));
+            blobs.emplace_back(tensor_to_blob(tensor));
         }
         return std::make_shared<InferenceEngine::BatchedBlob>(blobs);
     }
 
     const InferenceEngine::PreProcessInfo& GetPreProcess(const std::string& name) const override {
 #ifdef PROXY_PLUGIN_ENABLED
-        if (auto proxy_request = std::dynamic_pointer_cast<ov::proxy::InferRequest>(m_request)) {
-            return ov::legacy_convert::convert_infer_request(proxy_request->get_hardware_request()._ptr)
+        if (auto proxy_request = std::dynamic_pointer_cast<ov::proxy::InferRequest>(m_request._ptr)) {
+            return ov::legacy_convert::convert_infer_request(proxy_request->get_hardware_request())
                 ->GetPreProcess(name);
         }
 #endif

@@ -591,12 +601,12 @@ public:
         m_request->set_callback(std::move(callback));
     }
 
-    std::shared_ptr<ov::IAsyncInferRequest> get_infer_request() {
+    ov::SoPtr<ov::IAsyncInferRequest> get_infer_request() {
         return m_request;
     }
 
 private:
-    std::shared_ptr<ov::IAsyncInferRequest> m_request;
+    ov::SoPtr<ov::IAsyncInferRequest> m_request;
 };
 
 }  // namespace ov
@@ -606,7 +616,7 @@ namespace InferenceEngine {
 class IVariableStateWrapper : public ov::IVariableState {
 private:
     std::shared_ptr<InferenceEngine::IVariableStateInternal> m_state;
-    mutable ov::Tensor m_converted_state;
+    mutable ov::SoPtr<ov::ITensor> m_converted_state;
 
 public:
     explicit IVariableStateWrapper(const std::shared_ptr<InferenceEngine::IVariableStateInternal>& state)

@@ -617,13 +627,13 @@ public:
         m_state->Reset();
     }
 
-    void set_state(const ov::Tensor& state) override {
-        m_state->SetState(ov::tensor_to_blob(state._impl));
+    void set_state(const ov::SoPtr<ov::ITensor>& state) override {
+        m_state->SetState(ov::tensor_to_blob(state));
     }
 
-    const ov::Tensor& get_state() const override {
-        m_converted_state =
-            ov::Tensor(ov::make_tensor(std::const_pointer_cast<InferenceEngine::Blob>(m_state->GetState())), {});
+    const ov::SoPtr<ov::ITensor>& get_state() const override {
+        m_converted_state = ov::make_tensor(std::const_pointer_cast<InferenceEngine::Blob>(m_state->GetState()));
 
         return m_converted_state;
     }
 };
@@ -716,7 +726,7 @@ public:
         return infos;
     }
 
-    ov::Tensor get_tensor(const ov::Output<const ov::Node>& port) const override {
+    ov::SoPtr<ov::ITensor> get_tensor(const ov::Output<const ov::Node>& port) const override {
         const auto& name = get_legacy_name_from_port(port);
         OPENVINO_ASSERT(!m_request->GetBlobs(name),
                         "get_tensor shall not be used together with batched "

@@ -724,35 +734,42 @@ public:
                         name,
                         "'");
         auto blob = m_request->GetBlob(name);
-        ov::Tensor tensor = {ov::make_tensor(blob), {m_request->getPointerToSo()}};
+        ov::SoPtr<ov::ITensor> tensor = ov::make_tensor(blob);
+        if (!tensor._so)
+            tensor._so = m_request->getPointerToSo();
        return tensor;
     }
-    void set_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) override {
-        m_request->SetBlob(get_legacy_name_from_port(port), ov::tensor_to_blob(tensor._impl, m_unwrap_tensor));
+    void set_tensor(const ov::Output<const ov::Node>& port, const ov::SoPtr<ov::ITensor>& tensor) override {
+        m_request->SetBlob(get_legacy_name_from_port(port), ov::tensor_to_blob(tensor, m_unwrap_tensor));
     }
 
-    std::vector<ov::Tensor> get_tensors(const ov::Output<const ov::Node>& port) const override {
+    std::vector<ov::SoPtr<ov::ITensor>> get_tensors(const ov::Output<const ov::Node>& port) const override {
         auto blobs = m_request->GetBlobs(get_legacy_name_from_port(port));
-        std::vector<ov::Tensor> ret;
+        std::vector<ov::SoPtr<ov::ITensor>> ret;
         if (!blobs)
             return ret;
         for (size_t i = 0; i < blobs->size(); i++) {
-            ret.emplace_back(ov::Tensor{ov::make_tensor(blobs->getBlob(i)), {m_request->getPointerToSo()}});
+            ov::SoPtr<ov::ITensor> tensor = ov::make_tensor(blobs->getBlob(i));
+            if (!tensor._so)
+                tensor._so = m_request->getPointerToSo();
+            ret.emplace_back(tensor);
        }
         return ret;
     }
-    void set_tensors(const ov::Output<const ov::Node>& port, const std::vector<ov::Tensor>& tensors) override {
+    void set_tensors(const ov::Output<const ov::Node>& port,
+                     const std::vector<ov::SoPtr<ov::ITensor>>& tensors) override {
         std::vector<InferenceEngine::Blob::Ptr> blobs;
         for (const auto& tensor : tensors) {
-            blobs.emplace_back(ov::tensor_to_blob(tensor._impl, m_unwrap_tensor));
+            blobs.emplace_back(ov::tensor_to_blob(tensor, m_unwrap_tensor));
         }
         m_request->SetBlobs(get_legacy_name_from_port(port), blobs);
     }
 
-    std::vector<std::shared_ptr<ov::IVariableState>> query_state() const override {
-        std::vector<std::shared_ptr<ov::IVariableState>> variable_states;
+    std::vector<ov::SoPtr<ov::IVariableState>> query_state() const override {
+        std::vector<ov::SoPtr<ov::IVariableState>> variable_states;
         for (auto&& state : m_request->QueryState()) {
-            variable_states.emplace_back(std::make_shared<InferenceEngine::IVariableStateWrapper>(state));
+            variable_states.push_back(
+                {std::make_shared<InferenceEngine::IVariableStateWrapper>(state), m_request->getPointerToSo()});
        }
         return variable_states;
     }

@@ -771,7 +788,7 @@ public:
             }
         }
         OPENVINO_ASSERT(m_compiled_model);
-        return m_compiled_model;
+        return m_compiled_model._ptr;
     }
 
     const std::vector<ov::Output<const ov::Node>>& get_inputs() const override {

@@ -783,7 +800,7 @@ public:
 
 private:
     std::shared_ptr<InferenceEngine::IInferRequestInternal> m_request;
-    mutable std::shared_ptr<const ov::ICompiledModel> m_compiled_model;
+    mutable ov::SoPtr<const ov::ICompiledModel> m_compiled_model;
     mutable std::mutex m_mutex;
     const bool m_unwrap_tensor;
 };

@@ -791,19 +808,20 @@ private:
 }  // namespace InferenceEngine
 
 std::shared_ptr<::InferenceEngine::IInferRequestInternal> ov::legacy_convert::convert_infer_request(
-    const std::shared_ptr<::ov::IAsyncInferRequest>& request) {
-    if (auto comp_model = std::dynamic_pointer_cast<InferenceEngine::IAsyncInferRequestWrapper>(request)) {
+    const ov::SoPtr<::ov::IAsyncInferRequest>& request) {
+    if (auto comp_model = std::dynamic_pointer_cast<InferenceEngine::IAsyncInferRequestWrapper>(request._ptr)) {
         return comp_model->get_infer_request();
     }
     return std::make_shared<ov::IInferRequestInternalWrapper>(request);
 }
 
-std::shared_ptr<::ov::IAsyncInferRequest> ov::legacy_convert::convert_infer_request(
+ov::SoPtr<::ov::IAsyncInferRequest> ov::legacy_convert::convert_infer_request(
     const std::shared_ptr<::InferenceEngine::IInferRequestInternal>& request,
     const std::string& plugin_name) {
     if (auto comp_model = std::dynamic_pointer_cast<ov::IInferRequestInternalWrapper>(request)) {
         return comp_model->get_infer_request();
     }
-    return std::make_shared<InferenceEngine::IAsyncInferRequestWrapper>(request, plugin_name);
+    return {std::make_shared<InferenceEngine::IAsyncInferRequestWrapper>(request, plugin_name),
+            request->getPointerToSo()};
 }
 
 namespace InferenceEngine {
@@ -830,7 +848,7 @@ public:
         return m_params;
     }
 
-    std::shared_ptr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
+    ov::SoPtr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
                                                const ov::Shape& shape,
                                                const ov::AnyMap& params = {}) override {
         InferenceEngine::TensorDesc desc(InferenceEngine::details::convertPrecision(type),

@@ -838,10 +856,11 @@ public:
                                          InferenceEngine::TensorDesc::getLayoutByDims(shape));
         auto blob = m_context->CreateBlob(desc, params);
         blob->allocate();
-        return std::dynamic_pointer_cast<ov::IRemoteTensor>(ov::make_tensor(blob));
+        auto tensor = ov::make_tensor(blob);
+        return {std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr), tensor._so};
     }
 
-    std::shared_ptr<ov::ITensor> create_host_tensor(const ov::element::Type type, const ov::Shape& shape) override {
+    ov::SoPtr<ov::ITensor> create_host_tensor(const ov::element::Type type, const ov::Shape& shape) override {
         InferenceEngine::TensorDesc desc(InferenceEngine::details::convertPrecision(type),
                                          shape,
                                          InferenceEngine::TensorDesc::getLayoutByDims(shape));

@@ -854,16 +873,17 @@ public:
 }  // namespace InferenceEngine
 
 std::shared_ptr<InferenceEngine::RemoteContext> ov::legacy_convert::convert_remote_context(
-    const std::shared_ptr<ov::IRemoteContext>& context) {
-    if (auto ctx = std::dynamic_pointer_cast<InferenceEngine::IRemoteContextWrapper>(context)) {
+    const ov::SoPtr<ov::IRemoteContext>& context) {
+    if (auto ctx = std::dynamic_pointer_cast<InferenceEngine::IRemoteContextWrapper>(context._ptr)) {
         return ctx->get_context();
     }
     return std::make_shared<ov::RemoteContextWrapper>(context);
 }
-std::shared_ptr<ov::IRemoteContext> ov::legacy_convert::convert_remote_context(
+
+ov::SoPtr<ov::IRemoteContext> ov::legacy_convert::convert_remote_context(
     const std::shared_ptr<InferenceEngine::RemoteContext>& context) {
     if (auto ctx = std::dynamic_pointer_cast<ov::RemoteContextWrapper>(context)) {
         return ctx->get_context();
     }
-    return std::make_shared<InferenceEngine::IRemoteContextWrapper>(context);
+    return {std::make_shared<InferenceEngine::IRemoteContextWrapper>(context)};
 }
@@ -23,24 +23,22 @@ void fill_output_info(const ov::Output<const ov::Node>& output, InferenceEngine:
 InferenceEngine::CNNNetwork convert_model(const std::shared_ptr<const ov::Model>& model, bool is_new_api);
 std::shared_ptr<const ov::Model> convert_model(const InferenceEngine::CNNNetwork& model, bool is_new_api);
 
-std::shared_ptr<::InferenceEngine::IInferencePlugin> convert_plugin(const std::shared_ptr<::ov::IPlugin>& plugin);
+std::shared_ptr<::InferenceEngine::IInferencePlugin> convert_plugin(const ov::SoPtr<::ov::IPlugin>& plugin);
 std::shared_ptr<::ov::IPlugin> convert_plugin(const std::shared_ptr<::InferenceEngine::IInferencePlugin>& plugin);
 
 std::shared_ptr<::InferenceEngine::IExecutableNetworkInternal> convert_compiled_model(
-    const std::shared_ptr<::ov::ICompiledModel>& model);
-std::shared_ptr<::ov::ICompiledModel> convert_compiled_model(
+    const ov::SoPtr<::ov::ICompiledModel>& model);
+ov::SoPtr<::ov::ICompiledModel> convert_compiled_model(
     const std::shared_ptr<::InferenceEngine::IExecutableNetworkInternal>& model);
 
 std::shared_ptr<::InferenceEngine::IInferRequestInternal> convert_infer_request(
-    const std::shared_ptr<::ov::IAsyncInferRequest>& request);
-std::shared_ptr<::ov::IAsyncInferRequest> convert_infer_request(
+    const ov::SoPtr<::ov::IAsyncInferRequest>& request);
+ov::SoPtr<::ov::IAsyncInferRequest> convert_infer_request(
     const std::shared_ptr<::InferenceEngine::IInferRequestInternal>& request,
     const std::string& plugin_name = "");
 
-std::shared_ptr<InferenceEngine::RemoteContext> convert_remote_context(
-    const std::shared_ptr<ov::IRemoteContext>& context);
-std::shared_ptr<ov::IRemoteContext> convert_remote_context(
-    const std::shared_ptr<InferenceEngine::RemoteContext>& context);
+std::shared_ptr<InferenceEngine::RemoteContext> convert_remote_context(const ov::SoPtr<ov::IRemoteContext>& context);
+ov::SoPtr<ov::IRemoteContext> convert_remote_context(const std::shared_ptr<InferenceEngine::RemoteContext>& context);
 
 }  // namespace legacy_convert
 }  // namespace ov
@ -14,7 +14,6 @@
#include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
#include "dev/converter_utils.hpp"
#include "dev/icompiled_model_wrapper.hpp"
#include "dev/make_tensor.hpp"
#include "file_utils.h"
#include "ie_itt.hpp"
#include "ie_network_reader.hpp"
@ -32,6 +31,7 @@
#include "openvino/runtime/device_id_parser.hpp"
#include "openvino/runtime/icompiled_model.hpp"
#include "openvino/runtime/itensor.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/remote_context.hpp"
#include "openvino/runtime/threading/executor_manager.hpp"
#include "openvino/util/common_util.hpp"
@ -573,6 +573,8 @@ ov::Plugin ov::CoreImpl::get_plugin(const std::string& pluginName) const {
so = ov::util::load_shared_object(desc.libraryLocation.c_str());
std::shared_ptr<ov::IPlugin> plugin_impl;
reinterpret_cast<ov::CreatePluginFunc*>(ov::util::get_symbol(so, ov::create_plugin_function))(plugin_impl);
if (auto wrapper = std::dynamic_pointer_cast<InferenceEngine::IPluginWrapper>(plugin_impl))
wrapper->set_shared_object(so);
plugin = Plugin{plugin_impl, so};
}
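The hunk above is where the shared-object handle first enters the picture: CoreImpl loads the plugin library, lets a factory symbol inside it create the IPlugin, and then stores the handle both in the Plugin front-end and, for wrapped 1.0 plugins, in the IPluginWrapper itself. A hedged sketch of the lifetime rule this establishes, using a hypothetical struct rather than the real ov::Plugin:

// Sketch only: the handle returned by load_shared_object must outlive every
// object created by code inside that library. LoadedPlugin is a hypothetical
// stand-in for ov::Plugin's {m_ptr, m_so} pair.
#include <memory>
#include "openvino/runtime/iplugin.hpp"

struct LoadedPlugin {
    std::shared_ptr<ov::IPlugin> impl;  // built by a factory symbol in `so`
    std::shared_ptr<void> so;           // handle to the mapped plugin library

    ~LoadedPlugin() {
        impl.reset();  // destroy plugin-created objects first...
        so.reset();    // ...then let the .so be unmapped
    }
};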
@ -706,23 +708,27 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model(const std::shared_ptr<
// Proxy plugin fallback to lowlevel device
if (!is_proxy_device(plugin))
lock = cacheGuard.get_hash_lock(cacheContent.blobId);
res = load_model_from_cache(cacheContent, plugin, parsed._config, ov::RemoteContext{}, [&]() {
return compile_model_and_cache(model, plugin, parsed._config, ov::RemoteContext{}, cacheContent);
res = load_model_from_cache(cacheContent, plugin, parsed._config, ov::SoPtr<ov::IRemoteContext>{}, [&]() {
return compile_model_and_cache(model,
plugin,
parsed._config,
ov::SoPtr<ov::IRemoteContext>{},
cacheContent);
});
} else {
res = compile_model_with_preprocess(plugin, model, ov::RemoteContext{}, parsed._config);
res = compile_model_with_preprocess(plugin, model, ov::SoPtr<ov::IRemoteContext>{}, parsed._config);
}
return res;
}

ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model(const std::shared_ptr<const ov::Model>& model_,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const ov::AnyMap& config) const {
OV_ITT_SCOPE(FIRST_INFERENCE, ie::itt::domains::IE_LT, "Core::compile_model::RemoteContext");
if (!context) {
IE_THROW() << "Remote context is null";
}
std::string deviceName = context.get_device_name();
std::string deviceName = context->get_device_name();
ov::AnyMap config_with_batch = config;
// if auto-batching is applicable, the below function will patch the device name and config accordingly:
auto model = apply_auto_batching(model_, deviceName, config_with_batch);
@ -749,7 +755,7 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model(const std::shared_ptr<

ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model_with_preprocess(ov::Plugin& plugin,
const std::shared_ptr<const ov::Model>& model,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const ov::AnyMap& config) const {
std::shared_ptr<const ov::Model> preprocessed_model = model;

@ -785,7 +791,8 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model(const std::string& mod
// Proxy plugin fallback to lowlevel device
if (!is_proxy_device(plugin))
lock = cacheGuard.get_hash_lock(cacheContent.blobId);
compiled_model = load_model_from_cache(cacheContent, plugin, parsed._config, ov::RemoteContext{}, [&]() {
compiled_model =
load_model_from_cache(cacheContent, plugin, parsed._config, ov::SoPtr<ov::IRemoteContext>{}, [&]() {
auto cnnNetwork = ReadNetwork(model_path, std::string());
return compile_model_and_cache(cnnNetwork.getFunction(), plugin, parsed._config, {}, cacheContent);
});
@ -795,8 +802,10 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model(const std::string& mod
compiled_model = plugin.compile_model(model_path, parsed._config);
} else {
auto cnnNetwork = ReadNetwork(model_path, std::string());
compiled_model =
compile_model_with_preprocess(plugin, cnnNetwork.getFunction(), ov::RemoteContext{}, parsed._config);
compiled_model = compile_model_with_preprocess(plugin,
cnnNetwork.getFunction(),
ov::SoPtr<ov::IRemoteContext>{},
parsed._config);
}
return compiled_model;
}
@ -820,13 +829,18 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model(const std::string& mod
// Proxy plugin fallback to lowlevel device
if (!is_proxy_device(plugin))
lock = cacheGuard.get_hash_lock(cacheContent.blobId);
compiled_model = load_model_from_cache(cacheContent, plugin, parsed._config, ov::RemoteContext{}, [&]() {
compiled_model =
load_model_from_cache(cacheContent, plugin, parsed._config, ov::SoPtr<ov::IRemoteContext>{}, [&]() {
auto cnnNetwork = read_model(model_str, weights);
return compile_model_and_cache(cnnNetwork, plugin, parsed._config, ov::RemoteContext{}, cacheContent);
return compile_model_and_cache(cnnNetwork,
plugin,
parsed._config,
ov::SoPtr<ov::IRemoteContext>{},
cacheContent);
});
} else {
auto model = read_model(model_str, weights);
compiled_model = compile_model_with_preprocess(plugin, model, ov::RemoteContext{}, parsed._config);
compiled_model = compile_model_with_preprocess(plugin, model, ov::SoPtr<ov::IRemoteContext>{}, parsed._config);
}
return compiled_model;
}
@ -845,10 +859,10 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::import_model(std::istream& model,
}

ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::import_model(std::istream& modelStream,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const ov::AnyMap& config) const {
OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model");
auto parsed = parseDeviceNameIntoConfig(context.get_device_name(), config);
auto parsed = parseDeviceNameIntoConfig(context->get_device_name(), config);
auto compiled_model = get_plugin(parsed._deviceName).import_model(modelStream, context, parsed._config);
if (auto wrapper = std::dynamic_pointer_cast<InferenceEngine::ICompiledModelWrapper>(compiled_model._ptr)) {
wrapper->get_executable_network()->loadedFromCache();
@ -924,7 +938,7 @@ std::vector<std::string> ov::CoreImpl::get_available_devices() const {
return devices;
}

ov::RemoteContext ov::CoreImpl::create_context(const std::string& device_name, const AnyMap& params) const {
ov::SoPtr<ov::IRemoteContext> ov::CoreImpl::create_context(const std::string& device_name, const AnyMap& params) const {
auto parsed = ov::parseDeviceNameIntoConfig(device_name, params);
return get_plugin(parsed._deviceName).create_context(parsed._config);
}
@ -1002,7 +1016,7 @@ bool ov::CoreImpl::is_new_api() const {
return m_new_api;
}

ov::RemoteContext ov::CoreImpl::get_default_context(const std::string& device_name) const {
ov::SoPtr<ov::IRemoteContext> ov::CoreImpl::get_default_context(const std::string& device_name) const {
auto parsed = ov::parseDeviceNameIntoConfig(device_name);
return get_plugin(parsed._deviceName).get_default_context(parsed._config);
}
@ -1357,7 +1371,7 @@ bool ov::CoreImpl::device_supports_cache_dir(const ov::Plugin& plugin) const {
ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model_and_cache(const std::shared_ptr<const ov::Model>& model,
ov::Plugin& plugin,
const ov::AnyMap& parsedConfig,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const CacheContent& cacheContent) const {
OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "CoreImpl::compile_model_and_cache");
ov::SoPtr<ov::ICompiledModel> execNetwork;
@ -1383,7 +1397,7 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::load_model_from_cache(
const CacheContent& cacheContent,
ov::Plugin& plugin,
const ov::AnyMap& config,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
std::function<ov::SoPtr<ov::ICompiledModel>()> compile_model_lambda) {
ov::SoPtr<ov::ICompiledModel> compiled_model;
struct HeaderException {};
@ -1589,7 +1603,7 @@ std::shared_ptr<ov::Model> ov::CoreImpl::read_model(const std::string& model,
bool frontendMode) const {
InferenceEngine::Blob::Ptr blob;
if (weights) {
blob = tensor_to_blob(weights._impl);
blob = tensor_to_blob(get_tensor_impl(weights));
}
OV_ITT_SCOPE(FIRST_INFERENCE, ov::itt::domains::IE_RT, "CoreImpl::read_model from memory");
return ReadNetwork(model, blob, frontendMode).getFunction();
@ -158,14 +158,14 @@ private:
ov::SoPtr<ov::ICompiledModel> compile_model_and_cache(const std::shared_ptr<const ov::Model>& model,
ov::Plugin& plugin,
const ov::AnyMap& parsedConfig,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const CacheContent& cacheContent) const;

static ov::SoPtr<ov::ICompiledModel> load_model_from_cache(
const CacheContent& cacheContent,
ov::Plugin& plugin,
const ov::AnyMap& config,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
std::function<ov::SoPtr<ov::ICompiledModel>()> compile_model_lambda);

bool device_supports_model_caching(const ov::Plugin& plugin) const;
@ -178,7 +178,7 @@ private:

ov::SoPtr<ov::ICompiledModel> compile_model_with_preprocess(ov::Plugin& plugin,
const std::shared_ptr<const ov::Model>& model,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const ov::AnyMap& config) const;

ov::AnyMap create_compile_config(const ov::Plugin& plugin, const ov::AnyMap& origConfig) const;
@ -377,7 +377,7 @@ public:
const ov::AnyMap& config = {}) const override;

ov::SoPtr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const ov::AnyMap& config = {}) const override;

ov::SoPtr<ov::ICompiledModel> compile_model(const std::string& model_path,
@ -394,7 +394,7 @@ public:
const ov::AnyMap& config = {}) const override;

ov::SoPtr<ov::ICompiledModel> import_model(std::istream& modelStream,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const ov::AnyMap& config) const override;

ov::SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model,
@ -403,13 +403,13 @@ public:

std::vector<std::string> get_available_devices() const override;

ov::RemoteContext create_context(const std::string& device_name, const AnyMap& args) const override;
ov::SoPtr<ov::IRemoteContext> create_context(const std::string& device_name, const AnyMap& args) const override;

ov::AnyMap get_supported_property(const std::string& device_name, const ov::AnyMap& config) const override;

bool is_new_api() const override;

ov::RemoteContext get_default_context(const std::string& device_name) const override;
ov::SoPtr<ov::IRemoteContext> get_default_context(const std::string& device_name) const override;

/**
* @brief Sets properties for a device, acceptable keys can be found in openvino/runtime/properties.hpp.
@ -11,7 +11,6 @@
#include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
#include "dev/converter_utils.hpp"
#include "dev/icompiled_model_wrapper.hpp"
#include "dev/make_tensor.hpp"
#include "ie_itt.hpp"
#include "ie_network_reader.hpp"
#include "iplugin_wrapper.hpp"
@ -20,7 +19,9 @@
#include "openvino/itt.hpp"
#include "openvino/runtime/device_id_parser.hpp"
#include "openvino/runtime/icompiled_model.hpp"
#include "openvino/runtime/iplugin.hpp"
#include "openvino/runtime/itensor.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "openvino/util/common_util.hpp"

bool ov::CoreImpl::isNewAPI() const {
@ -44,7 +45,7 @@ ov::SoPtr<InferenceEngine::IExecutableNetworkInternal> ov::CoreImpl::LoadNetwork
}

InferenceEngine::RemoteContext::Ptr ov::CoreImpl::GetDefaultContext(const std::string& deviceName) {
return ov::legacy_convert::convert_remote_context(get_default_context(deviceName)._impl);
return ov::legacy_convert::convert_remote_context(get_default_context(deviceName));
}

InferenceEngine::CNNNetwork ov::CoreImpl::ReadNetwork(const std::string& modelPath, const std::string& binPath) const {
@ -70,10 +71,10 @@ ov::SoPtr<InferenceEngine::IExecutableNetworkInternal> ov::CoreImpl::LoadNetwork
const std::map<std::string, std::string>& config) {
OV_ITT_SCOPE(FIRST_INFERENCE, InferenceEngine::itt::domains::IE_LT, "Core::LoadNetwork::RemoteContext");
if (network.getFunction()) {
ov::RemoteContext ctx{ov::legacy_convert::convert_remote_context(context), {nullptr}};
auto ctx = ov::legacy_convert::convert_remote_context(context);
auto compiled_model =
compile_model(ov::legacy_convert::convert_model(network, isNewAPI()), ctx, any_copy(config));
return {ov::legacy_convert::convert_compiled_model(compiled_model._ptr), compiled_model._so};
return {ov::legacy_convert::convert_compiled_model(compiled_model), compiled_model._so};
}
if (context == nullptr) {
IE_THROW() << "Remote context is null";
@ -93,7 +94,7 @@ InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::LoadNetwork(
if (network.getFunction()) {
auto compiled_model =
compile_model(ov::legacy_convert::convert_model(network, isNewAPI()), deviceName, any_copy(config));
return {ov::legacy_convert::convert_compiled_model(compiled_model._ptr), compiled_model._so};
return {ov::legacy_convert::convert_compiled_model(compiled_model), compiled_model._so};
}
auto parsed = parseDeviceNameIntoConfig(deviceName, any_copy(config));
auto plugin = get_plugin(parsed._deviceName);
@ -109,7 +110,7 @@ InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::LoadNetwork(
OV_ITT_SCOPE(FIRST_INFERENCE, ie::itt::domains::IE_LT, "Core::LoadNetwork::Path");

auto compiled_model = compile_model(modelPath, deviceName, any_copy(config));
return {ov::legacy_convert::convert_compiled_model(compiled_model._ptr), compiled_model._so};
return {ov::legacy_convert::convert_compiled_model(compiled_model), compiled_model._so};
}

InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::LoadNetwork(
@ -122,10 +123,10 @@ InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::LoadNetwork(

auto compiled_model =
compile_model(modelStr,
ov::Tensor{ov::make_tensor(std::const_pointer_cast<InferenceEngine::Blob>(weights)), {}},
ov::make_tensor(ov::make_tensor(std::const_pointer_cast<InferenceEngine::Blob>(weights))),
deviceName,
ov::any_copy(config));
return {ov::legacy_convert::convert_compiled_model(compiled_model._ptr), compiled_model._so};
return {ov::legacy_convert::convert_compiled_model(compiled_model), compiled_model._so};
}

InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::ImportNetwork(
@ -136,7 +137,7 @@ InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::ImportNetwork(
if (auto wrapper = std::dynamic_pointer_cast<InferenceEngine::ICompiledModelWrapper>(compiled_model._ptr)) {
wrapper->get_executable_network()->loadedFromCache();
}
return {ov::legacy_convert::convert_compiled_model(compiled_model._ptr), compiled_model._so};
return {ov::legacy_convert::convert_compiled_model(compiled_model), compiled_model._so};
}

InferenceEngine::QueryNetworkResult ov::CoreImpl::QueryNetwork(const InferenceEngine::CNNNetwork& network,
@ -207,7 +208,7 @@ std::vector<std::string> ov::CoreImpl::GetAvailableDevices() const {

InferenceEngine::RemoteContext::Ptr ov::CoreImpl::CreateContext(const std::string& deviceName,
const InferenceEngine::ParamMap& params) {
return ov::legacy_convert::convert_remote_context(create_context(deviceName, params)._impl);
return ov::legacy_convert::convert_remote_context(create_context(deviceName, params));
}

/**
@ -269,7 +270,8 @@ std::map<std::string, InferenceEngine::Version> ov::CoreImpl::GetVersions(const

ov::Plugin cppPlugin = get_plugin(deviceNameLocal);

versions[deviceNameLocal] = ov::legacy_convert::convert_plugin(cppPlugin.m_ptr)->GetVersion();
versions[deviceNameLocal] =
ov::legacy_convert::convert_plugin(ov::SoPtr<ov::IPlugin>{cppPlugin.m_ptr, cppPlugin.m_so})->GetVersion();
}

return versions;
@ -102,7 +102,7 @@ void ov::IAsyncInferRequest::set_callback(std::function<void(std::exception_ptr)
m_callback = std::move(callback);
}

std::vector<std::shared_ptr<ov::IVariableState>> ov::IAsyncInferRequest::query_state() const {
std::vector<ov::SoPtr<ov::IVariableState>> ov::IAsyncInferRequest::query_state() const {
check_state();
return m_sync_request->query_state();
}
@ -206,23 +206,23 @@ std::vector<ov::ProfilingInfo> ov::IAsyncInferRequest::get_profiling_info() cons
return m_sync_request->get_profiling_info();
}

ov::Tensor ov::IAsyncInferRequest::get_tensor(const ov::Output<const ov::Node>& port) const {
ov::SoPtr<ov::ITensor> ov::IAsyncInferRequest::get_tensor(const ov::Output<const ov::Node>& port) const {
check_state();
return m_sync_request->get_tensor(port);
}

void ov::IAsyncInferRequest::set_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) {
void ov::IAsyncInferRequest::set_tensor(const ov::Output<const ov::Node>& port, const ov::SoPtr<ov::ITensor>& tensor) {
check_state();
return m_sync_request->set_tensor(port, tensor);
}

std::vector<ov::Tensor> ov::IAsyncInferRequest::get_tensors(const ov::Output<const ov::Node>& port) const {
std::vector<ov::SoPtr<ov::ITensor>> ov::IAsyncInferRequest::get_tensors(const ov::Output<const ov::Node>& port) const {
check_state();
return m_sync_request->get_tensors(port);
}

void ov::IAsyncInferRequest::set_tensors(const ov::Output<const ov::Node>& port,
const std::vector<ov::Tensor>& tensors) {
const std::vector<ov::SoPtr<ov::ITensor>>& tensors) {
check_state();
return m_sync_request->set_tensors(port, tensors);
}
@ -18,7 +18,7 @@ ov::ICompiledModel::ICompiledModel(const std::shared_ptr<const ov::Model>& model

ov::ICompiledModel::ICompiledModel(const std::shared_ptr<const ov::Model>& model,
const std::shared_ptr<const ov::IPlugin>& plugin,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const std::shared_ptr<ov::threading::ITaskExecutor>& task_executor,
const std::shared_ptr<ov::threading::ITaskExecutor>& callback_executor)
: m_plugin(plugin),
@ -136,11 +136,11 @@ void ov::ICompiledModel::set_callback_executor(const std::shared_ptr<ov::threadi
m_callback_executor = callback_executor;
}

std::shared_ptr<ov::IRemoteContext> ov::ICompiledModel::get_context() const {
ov::SoPtr<ov::IRemoteContext> ov::ICompiledModel::get_context() const {
if (auto wrapper = dynamic_cast<const InferenceEngine::ICompiledModelWrapper*>(this)) {
return ov::legacy_convert::convert_remote_context(wrapper->get_executable_network()->GetContext());
}
if (m_context)
return m_context._impl;
return m_context;
return m_plugin->get_default_context({});
}
@ -23,7 +23,9 @@ InferenceEngine::ICompiledModelWrapper::ICompiledModelWrapper(
}

std::shared_ptr<ov::IAsyncInferRequest> InferenceEngine::ICompiledModelWrapper::create_infer_request() const {
return ov::legacy_convert::convert_infer_request(m_model->CreateInferRequest(), m_model->_plugin->GetName());
auto infer_request = m_model->CreateInferRequest();
infer_request->setPointerToSo(m_model->GetPointerToSo());
return ov::legacy_convert::convert_infer_request(infer_request, m_model->_plugin->GetName())._ptr;
}

void InferenceEngine::ICompiledModelWrapper::export_model(std::ostream& model) const {
@ -9,6 +9,7 @@
#include "any_copy.hpp"
#include "dev/converter_utils.hpp"
#include "ie_icore.hpp"
#include "openvino/runtime/iremote_context.hpp"
#include "threading/ie_executor_manager.hpp"

namespace InferenceEngine {
@ -27,6 +28,9 @@ IPluginWrapper::IPluginWrapper(const std::shared_ptr<InferenceEngine::IInference
const std::shared_ptr<InferenceEngine::IExecutableNetworkInternal>& IPluginWrapper::update_exec_network(
const std::shared_ptr<InferenceEngine::IExecutableNetworkInternal>& network) const {
network->SetPointerToPlugin(m_old_plugin);
if (!network->GetPointerToSo())
network->_so = m_so;

return network;
}

@ -34,22 +38,23 @@ std::shared_ptr<ov::ICompiledModel> IPluginWrapper::compile_model(const std::sha
const ov::AnyMap& properties) const {
auto exec_network =
m_old_plugin->LoadNetwork(ov::legacy_convert::convert_model(model, is_new_api()), ov::any_copy(properties));
return ov::legacy_convert::convert_compiled_model(update_exec_network(exec_network));
return ov::legacy_convert::convert_compiled_model(update_exec_network(exec_network))._ptr;
}

std::shared_ptr<ov::ICompiledModel> IPluginWrapper::compile_model(const std::string& model_path,
const ov::AnyMap& properties) const {
auto exec_network = m_old_plugin->LoadNetwork(model_path, any_copy(properties));
return ov::legacy_convert::convert_compiled_model(update_exec_network(exec_network._ptr));
return ov::legacy_convert::convert_compiled_model(update_exec_network(exec_network._ptr))._ptr;
}

std::shared_ptr<ov::ICompiledModel> IPluginWrapper::compile_model(const std::shared_ptr<const ov::Model>& model,
const ov::AnyMap& properties,
const ov::RemoteContext& context) const {
const ov::SoPtr<ov::IRemoteContext>& context) const {
return ov::legacy_convert::convert_compiled_model(
update_exec_network(m_old_plugin->LoadNetwork(ov::legacy_convert::convert_model(model, is_new_api()),
any_copy(properties),
ov::legacy_convert::convert_remote_context(context._impl))));
ov::legacy_convert::convert_remote_context(context))))
._ptr;
}

void IPluginWrapper::set_property(const ov::AnyMap& properties) {
@ -64,27 +69,29 @@ ov::Any IPluginWrapper::get_property(const std::string& name, const ov::AnyMap&
}
}

std::shared_ptr<ov::IRemoteContext> IPluginWrapper::create_context(const ov::AnyMap& remote_properties) const {
ov::SoPtr<ov::IRemoteContext> IPluginWrapper::create_context(const ov::AnyMap& remote_properties) const {
return ov::legacy_convert::convert_remote_context(m_old_plugin->CreateContext(remote_properties));
}

std::shared_ptr<ov::IRemoteContext> IPluginWrapper::get_default_context(const ov::AnyMap& remote_properties) const {
ov::SoPtr<ov::IRemoteContext> IPluginWrapper::get_default_context(const ov::AnyMap& remote_properties) const {
return ov::legacy_convert::convert_remote_context(m_old_plugin->GetDefaultContext(remote_properties));
}

std::shared_ptr<ov::ICompiledModel> IPluginWrapper::import_model(std::istream& model,
const ov::AnyMap& properties) const {
return ov::legacy_convert::convert_compiled_model(
update_exec_network(m_old_plugin->ImportNetwork(model, any_copy(properties))));
update_exec_network(m_old_plugin->ImportNetwork(model, any_copy(properties))))
._ptr;
}

std::shared_ptr<ov::ICompiledModel> IPluginWrapper::import_model(std::istream& model,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const ov::AnyMap& properties) const {
return ov::legacy_convert::convert_compiled_model(
update_exec_network(m_old_plugin->ImportNetwork(model,
ov::legacy_convert::convert_remote_context(context._impl),
any_copy(properties))));
ov::legacy_convert::convert_remote_context(context),
any_copy(properties))))
._ptr;
}

ov::SupportedOpsMap IPluginWrapper::query_model(const std::shared_ptr<const ov::Model>& model,
@ -115,4 +122,8 @@ void IPluginWrapper::set_device_name(const std::string& device_name) {
m_old_plugin->SetName(device_name);
}

void IPluginWrapper::set_shared_object(const std::shared_ptr<void>& so) {
m_so = so;
}

} // namespace InferenceEngine
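Two details in IPluginWrapper are worth calling out: update_exec_network stamps the plugin's library handle onto each legacy executable network only when the network does not already carry one, and the converters that now return SoPtr are immediately unwrapped with ._ptr because the wrapper's own signatures still traffic in std::shared_ptr. A small sketch of the stamping rule, as a hypothetical helper (names follow the diff):

// Hypothetical helper restating update_exec_network's shared-object rule.
template <typename Network>
const std::shared_ptr<Network>& attach_so_if_missing(const std::shared_ptr<Network>& network,
                                                     const std::shared_ptr<void>& so) {
    if (!network->GetPointerToSo())  // respect a handle someone else already set
        network->_so = so;           // otherwise bind to this plugin's library
    return network;
}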
@ -59,7 +59,7 @@ public:
*/
std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
const ov::AnyMap& properties,
const ov::RemoteContext& context) const override;
const ov::SoPtr<ov::IRemoteContext>& context) const override;

/**
* @brief Specifies some plugin properties
@ -85,7 +85,7 @@ public:
*
* @return Remote context
*/
std::shared_ptr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override;
ov::SoPtr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override;

/**
* @brief Create default remote context
@ -94,7 +94,7 @@ public:
*
* @return Remote context
*/
std::shared_ptr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override;
ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override;

/**
* @brief Import model to the plugin
@ -116,7 +116,7 @@ public:
* @return shared pointer to compiled model interface
*/
std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const ov::AnyMap& properties) const override;

/**
@ -160,8 +160,11 @@ public:
*/
void set_device_name(const std::string& device_name);

void set_shared_object(const std::shared_ptr<void>& so);

private:
std::shared_ptr<InferenceEngine::IInferencePlugin> m_old_plugin;
std::shared_ptr<void> m_so;

const std::shared_ptr<InferenceEngine::IExecutableNetworkInternal>& update_exec_network(
const std::shared_ptr<InferenceEngine::IExecutableNetworkInternal>& network) const;
@ -4,9 +4,8 @@

#include "openvino/runtime/iremote_context.hpp"

#include "dev/make_tensor.hpp"
#include "openvino/runtime/make_tensor.hpp"

std::shared_ptr<ov::ITensor> ov::IRemoteContext::create_host_tensor(const ov::element::Type type,
const ov::Shape& shape) {
return ov::make_tensor(type, shape);
ov::SoPtr<ov::ITensor> ov::IRemoteContext::create_host_tensor(const ov::element::Type type, const ov::Shape& shape) {
return ov::SoPtr<ov::ITensor>(ov::make_tensor(type, shape), nullptr);
}
@ -6,7 +6,6 @@

#include <memory>

#include "dev/make_tensor.hpp"
#include "ie_blob.h"
#include "ie_ngraph_utils.hpp"
#include "ie_remote_blob.hpp"
@ -4,6 +4,7 @@

#include "openvino/runtime/isync_infer_request.hpp"

#include <memory>
#include <unordered_map>

#include "cpp_interfaces/plugin_itt.hpp"
@ -14,10 +15,12 @@
#include "openvino/runtime/icompiled_model.hpp"
#include "openvino/runtime/iinfer_request.hpp"
#include "openvino/runtime/iremote_context.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/tensor.hpp"

namespace {
void check_batched_tensors(const ov::Output<const ov::Node>& input, const std::vector<ov::Tensor>& tensors) {
void check_batched_tensors(const ov::Output<const ov::Node>& input,
const std::vector<ov::SoPtr<ov::ITensor>>& tensors) {
OPENVINO_ASSERT(!tensors.empty(), "set_input_tensors/set_tensors can't be called with empty tensors");
OPENVINO_ASSERT(
tensors.size() != 1,
@ -31,16 +34,17 @@ void check_batched_tensors(const ov::Output<const ov::Node>& input, const std::v
auto batch_idx = ov::layout::batch_idx(layout);
if (batch_idx < 0) {
// TODO: Do we need this logic?
batch_idx += static_cast<int64_t>(tensors[0].get_shape().size());
batch_idx += static_cast<int64_t>(tensors[0]->get_shape().size());
}
OPENVINO_ASSERT(batch_idx == 0,
"set_input_tensors/set_tensors is not currently supported for batch dimension index ",
batch_idx,
" != 0");
std::for_each(tensors.begin(), tensors.end(), [&batch_idx](const ov::Tensor& item) {
OPENVINO_ASSERT(item.get_shape()[batch_idx] == 1,
std::for_each(tensors.begin(), tensors.end(), [&batch_idx](const ov::SoPtr<ov::ITensor>& item) {
OPENVINO_ASSERT(item, "Unintialized tensor is provided!");
OPENVINO_ASSERT(item->get_shape()[batch_idx] == 1,
"set_input_tensors/set_tensors. Tensors shall represent one item in a batch, ",
item.get_shape()[batch_idx],
item->get_shape()[batch_idx],
" provided");
});
auto tensors_size = static_cast<int>(tensors.size());
@ -62,22 +66,23 @@ void check_batched_tensors(const ov::Output<const ov::Node>& input, const std::v
}

// In future consider checking if blobs point to contiguous range of memory and use single 'SetBlob' instead
auto batched_shape = tensors[0].get_shape();
auto element_type = tensors[0].get_element_type();
auto batched_shape = tensors[0]->get_shape();
auto element_type = tensors[0]->get_element_type();
batched_shape[batch_idx] = tensors_size;
for (const auto& item : tensors) {
auto item_shape = item.get_shape();
OPENVINO_ASSERT(item, "Unintialized tensor is provided!");
auto item_shape = item->get_shape();
item_shape[batch_idx] = batched_shape[batch_idx];
OPENVINO_ASSERT(item_shape == batched_shape && item.get_element_type() == element_type &&
OPENVINO_ASSERT(item_shape == batched_shape && item->get_element_type() == element_type &&
"set_input_tensors/set_tensors error. Tensor with element type ",
item.get_element_type(),
item->get_element_type(),
" and shape ",
item_shape,
" is not compatible with batched tensor with element type ",
element_type,
" and shape ",
batched_shape);
OPENVINO_ASSERT(item.is_continuous(), "Strides for batched tensors should be default.");
OPENVINO_ASSERT(item->is_continuous(), "Strides for batched tensors should be default.");
}
}

@ -91,11 +96,11 @@ ov::ISyncInferRequest::ISyncInferRequest(const std::shared_ptr<const ov::ICompil
// Create map of empty tensors
for (const auto& input : get_inputs()) {
if (m_tensors.find(input.get_tensor_ptr()) == m_tensors.end())
m_tensors[input.get_tensor_ptr()] = ov::Tensor();
m_tensors[input.get_tensor_ptr()] = ov::SoPtr<ov::ITensor>();
}
for (const auto& output : get_outputs()) {
if (m_tensors.find(output.get_tensor_ptr()) == m_tensors.end())
m_tensors[output.get_tensor_ptr()] = ov::Tensor();
m_tensors[output.get_tensor_ptr()] = ov::SoPtr<ov::ITensor>();
}
}

@ -132,13 +137,14 @@ ov::ISyncInferRequest::FoundPort ov::ISyncInferRequest::find_port(const ov::Outp
}

void ov::ISyncInferRequest::convert_batched_tensors() {
std::unordered_map<std::shared_ptr<ov::descriptor::Tensor>, ov::Tensor> prepared_tensors;
std::unordered_map<std::shared_ptr<ov::descriptor::Tensor>, ov::SoPtr<ov::ITensor>> prepared_tensors;
for (const auto& item : m_batched_tensors) {
auto tmp_shape = item.second.at(0).get_shape();
auto tmp_et = item.second.at(0).get_element_type();
OPENVINO_ASSERT(item.second.at(0), "Unintialized tensor is provided!");
auto tmp_shape = item.second.at(0)->get_shape();
auto tmp_et = item.second.at(0)->get_element_type();
tmp_shape[0] = item.second.size();
std::shared_ptr<ov::IRemoteContext> remote_context;
ov::Tensor input_tensor;
ov::SoPtr<ov::IRemoteContext> remote_context;
ov::SoPtr<ov::ITensor> input_tensor;
try {
auto net = get_compiled_model();
if (net) {
@ -147,16 +153,16 @@ void ov::ISyncInferRequest::convert_batched_tensors() {
} catch (const ov::NotImplemented&) {
}
if (remote_context) {
input_tensor = ov::Tensor(remote_context->create_host_tensor(tmp_et, tmp_shape), {});
input_tensor = remote_context->create_host_tensor(tmp_et, tmp_shape);
} else {
input_tensor = ov::Tensor(tmp_et, tmp_shape);
input_tensor = {ov::make_tensor(tmp_et, tmp_shape), nullptr};
}
auto ptr = static_cast<uint8_t*>(input_tensor.data());
auto ptr = static_cast<uint8_t*>(input_tensor->data());

// Perform memory copy
ov::parallel_for(item.second.size(), [&](size_t i) {
const auto& tensor = item.second.at(i);
memcpy(ptr + i * tensor.get_byte_size(), static_cast<uint8_t*>(tensor.data()), tensor.get_byte_size());
memcpy(ptr + i * tensor->get_byte_size(), static_cast<uint8_t*>(tensor->data()), tensor->get_byte_size());
});
prepared_tensors[item.first] = input_tensor;
}
@ -167,7 +173,7 @@ void ov::ISyncInferRequest::convert_batched_tensors() {
}
}

ov::Tensor& ov::ISyncInferRequest::get_ref_tensor(const ov::Output<const ov::Node>& port) const {
ov::SoPtr<ov::ITensor>& ov::ISyncInferRequest::get_tensor_ptr(const ov::Output<const ov::Node>& port) const {
auto found_port = find_port(port);
OPENVINO_ASSERT(found_port.found(), "Cannot find tensor for port ", port);
auto ports = found_port.is_input() ? get_inputs() : get_outputs();
@ -177,12 +183,12 @@ ov::Tensor& ov::ISyncInferRequest::get_ref_tensor(const ov::Output<const ov::Nod
return it->second;
}

ov::Tensor ov::ISyncInferRequest::get_tensor(const ov::Output<const ov::Node>& port) const {
ov::SoPtr<ov::ITensor> ov::ISyncInferRequest::get_tensor(const ov::Output<const ov::Node>& port) const {
OV_ITT_SCOPED_TASK(InferenceEngine::itt::domains::Plugin, "get_tensor");
return get_ref_tensor(port);
return get_tensor_ptr(port);
}

void ov::ISyncInferRequest::set_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) {
void ov::ISyncInferRequest::set_tensor(const ov::Output<const ov::Node>& port, const ov::SoPtr<ov::ITensor>& tensor) {
OV_ITT_SCOPED_TASK(InferenceEngine::itt::domains::Plugin, "set_tensor");
auto found_port = find_port(port);
OPENVINO_ASSERT(found_port.found(), "Cannot find tensor for port ", port);
@ -199,7 +205,7 @@ void ov::ISyncInferRequest::set_tensor(const ov::Output<const ov::Node>& port, c
}
}

std::vector<ov::Tensor> ov::ISyncInferRequest::get_tensors(const ov::Output<const ov::Node>& port) const {
std::vector<ov::SoPtr<ov::ITensor>> ov::ISyncInferRequest::get_tensors(const ov::Output<const ov::Node>& port) const {
OV_ITT_SCOPED_TASK(InferenceEngine::itt::domains::Plugin, "get_tensors");
auto found_port = find_port(port);
OPENVINO_ASSERT(found_port.found(), "Cannot find input tensors for port ", port);
@ -209,7 +215,7 @@ std::vector<ov::Tensor> ov::ISyncInferRequest::get_tensors(const ov::Output<cons
}

void ov::ISyncInferRequest::set_tensors(const ov::Output<const ov::Node>& port,
const std::vector<ov::Tensor>& tensors) {
const std::vector<ov::SoPtr<ov::ITensor>>& tensors) {
OV_ITT_SCOPED_TASK(InferenceEngine::itt::domains::Plugin, "set_tensors");
auto found_port = find_port(port);
OPENVINO_ASSERT(found_port.found() && found_port.is_input(), "Cannot find input tensors for port ", port);
@ -223,7 +229,7 @@ void ov::ISyncInferRequest::set_tensors(const ov::Output<const ov::Node>& port,
}

void ov::ISyncInferRequest::set_tensors_impl(const ov::Output<const ov::Node> port,
const std::vector<ov::Tensor>& tensors) {
const std::vector<ov::SoPtr<ov::ITensor>>& tensors) {
OPENVINO_ASSERT_HELPER(::ov::NotImplemented,
"",
false,
@ -231,42 +237,46 @@ void ov::ISyncInferRequest::set_tensors_impl(const ov::Output<const ov::Node> po
"set_input_tensors/set_tensors are not supported by this plugin");
}

void ov::ISyncInferRequest::check_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) const {
void ov::ISyncInferRequest::check_tensor(const ov::Output<const ov::Node>& port,
const ov::SoPtr<ov::ITensor>& tensor) const {
OPENVINO_ASSERT(tensor);
bool is_input = ov::op::util::is_parameter(port.get_node());
std::string tensor_type = is_input ? "input" : "output";

OPENVINO_ASSERT(port.get_element_type() == tensor.get_element_type(),
OPENVINO_ASSERT(port.get_element_type() == tensor->get_element_type(),
"The tensor element type is not corresponding with output element type (",
tensor.get_element_type(),
tensor->get_element_type(),
" != ",
port.get_element_type());
bool is_dynamic = port.get_partial_shape().is_dynamic();
OPENVINO_ASSERT(is_dynamic || port.get_shape() == tensor.get_shape(),
OPENVINO_ASSERT(is_dynamic || port.get_shape() == tensor->get_shape(),
"The ",
tensor_type,
" tensor size is not equal to the model ",
tensor_type,
" type: got ",
tensor.get_shape(),
tensor->get_shape(),
" expecting ",
port.get_shape(),
".");
OPENVINO_ASSERT(tensor.is<ov::RemoteTensor>() || tensor.data() != nullptr, "Tensor data equal nullptr!");
OPENVINO_ASSERT(std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr) || tensor->data() != nullptr,
"Tensor data equal nullptr!");
}

void ov::ISyncInferRequest::allocate_tensor(const ov::Output<const ov::Node>& port,
const std::function<void(ov::Tensor& tensor)>& allocate_callback) {
auto& tensor = get_ref_tensor(port);
void ov::ISyncInferRequest::allocate_tensor(
const ov::Output<const ov::Node>& port,
const std::function<void(ov::SoPtr<ov::ITensor>& tensor)>& allocate_callback) {
auto& tensor = get_tensor_ptr(port);
allocate_callback(tensor);
}

void ov::ISyncInferRequest::check_tensors() const {
const auto& inputs = m_compiled_model->inputs();
for (size_t i = 0; i < inputs.size(); i++) {
check_tensor(inputs[i], get_ref_tensor(inputs[i]));
check_tensor(inputs[i], get_tensor_ptr(inputs[i]));
}
const auto& outputs = m_compiled_model->outputs();
for (size_t i = 0; i < outputs.size(); i++) {
check_tensor(outputs[i], get_ref_tensor(outputs[i]));
check_tensor(outputs[i], get_tensor_ptr(outputs[i]));
}
}
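convert_batched_tensors above implements set_input_tensors by gathering N single-item tensors into one batched tensor: it allocates a destination of shape [N, ...] (via the remote context's create_host_tensor when one exists, otherwise on the host) and copies each item to its row. The checks in check_batched_tensors (identical shape and element type, batch index 0, default strides) are exactly what make the flat byte copy legal. A standalone sketch of the copy step:

// Sketch of the gather performed by convert_batched_tensors: N items of
// identical byte size, each with default (dense) strides, are packed into
// one contiguous buffer at byte offset i * item_bytes.
#include <cstdint>
#include <cstring>
#include <vector>

void gather_batch(uint8_t* dst, const std::vector<const uint8_t*>& items, size_t item_bytes) {
    for (size_t i = 0; i < items.size(); ++i)
        std::memcpy(dst + i * item_bytes, items[i], item_bytes);  // item i -> row i
}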
@ -18,10 +18,10 @@ void ov::IVariableState::reset() {
OPENVINO_NOT_IMPLEMENTED;
}

void ov::IVariableState::set_state(const ov::Tensor& state) {
void ov::IVariableState::set_state(const ov::SoPtr<ov::ITensor>& state) {
m_state = state;
}

const ov::Tensor& ov::IVariableState::get_state() const {
const ov::SoPtr<ov::ITensor>& ov::IVariableState::get_state() const {
return m_state;
}
@ -2,11 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "dev/make_tensor.hpp"
#include "openvino/runtime/make_tensor.hpp"

#include <memory>

#include "dev/make_tensor.hpp"
#include "ie_blob.h"
#include "ie_ngraph_utils.hpp"
#include "ie_remote_blob.hpp"
@ -442,19 +441,24 @@ public:
*/
class TensorRemoteBlob : public ie::RemoteBlob {
public:
TensorRemoteBlob(const std::shared_ptr<ITensor>& tensor)
TensorRemoteBlob(const ov::SoPtr<ITensor>& tensor)
: ie::RemoteBlob{ie::TensorDesc{ie::details::convertPrecision(tensor->get_element_type()),
tensor->get_shape(),
ie::TensorDesc::getLayoutByRank(tensor->get_shape().size())}},
tensor{std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor)} {
tensor{tensor} {
OPENVINO_ASSERT(this->tensor);
}
std::shared_ptr<ov::IRemoteTensor> cast_tensor() const {
auto remote = std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr);
OPENVINO_ASSERT(remote);
return remote;
}
AnyMap getParams() const override {
return tensor->get_properties();
return cast_tensor()->get_properties();
}
std::string getDeviceName() const noexcept override {
try {
return tensor->get_device_name();
return cast_tensor()->get_device_name();
} catch (...) {
return {};
}
@ -489,7 +493,7 @@ public:
return nullptr;
}

std::shared_ptr<IRemoteTensor> tensor;
ov::SoPtr<ITensor> tensor;

private:
std::shared_ptr<ie::IAllocator> m_allocator;
@ -504,7 +508,7 @@ template <typename T>
class TensorMemoryBlob : public ie::TBlob<T> {
public:
~TensorMemoryBlob() override = default;
explicit TensorMemoryBlob(const std::shared_ptr<ITensor>& tensor_) try : ie
explicit TensorMemoryBlob(const ov::SoPtr<ITensor>& tensor_) try : ie
::TBlob<T>{[&] {
auto element_type = tensor_->get_element_type();
auto shape = tensor_->get_shape();
@ -536,7 +540,7 @@ public:
static_cast<T*>(tensor_->data()),
tensor_->get_byte_size()},
tensor{tensor_} {
OPENVINO_ASSERT(!std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor));
OPENVINO_ASSERT(!std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr));
}
catch (const std::exception& ex) {
OPENVINO_THROW(ex.what());
@ -552,10 +556,10 @@ public:
}
}

std::shared_ptr<ITensor> tensor;
ov::SoPtr<ITensor> tensor;
};

std::shared_ptr<ITensor> make_tensor(const std::shared_ptr<ie::Blob>& blob) {
ov::SoPtr<ITensor> make_tensor(const std::shared_ptr<ie::Blob>& blob) {
#define ELSE_IF(type) \
else if (auto tblob = dynamic_cast<const TensorMemoryBlob<type>*>(blob.get())) { \
return tblob->tensor; \
@ -565,7 +569,7 @@ std::shared_ptr<ITensor> make_tensor(const std::shared_ptr<ie::Blob>& blob) {
} else if (auto remote_blob = std::dynamic_pointer_cast<TensorRemoteBlob>(blob)) {
return remote_blob->tensor;
} else if (auto remote_blob = std::dynamic_pointer_cast<InferenceEngine::RemoteBlob>(blob)) {
return std::make_shared<RemoteBlobTensor>(remote_blob);
return {std::make_shared<RemoteBlobTensor>(remote_blob), nullptr};
}
ELSE_IF(float)
ELSE_IF(double)
@ -581,7 +585,7 @@ std::shared_ptr<ITensor> make_tensor(const std::shared_ptr<ie::Blob>& blob) {
ELSE_IF(uint64_t)
ELSE_IF(int8_t)
ELSE_IF(bool) else {
return std::make_shared<BlobTensor>(blob);
return {std::make_shared<BlobTensor>(blob), nullptr};
}
#undef IF
}
@ -590,9 +594,9 @@ ie::Blob* get_hardware_blob(ie::Blob* blob) {
#ifdef PROXY_PLUGIN_ENABLED
if (auto remote_blob = dynamic_cast<TensorRemoteBlob*>(blob)) {
const auto& tensor = ov::proxy::get_hardware_tensor(remote_blob->tensor);
if (auto blob_tensor = std::dynamic_pointer_cast<BlobTensor>(tensor)) {
if (auto blob_tensor = std::dynamic_pointer_cast<BlobTensor>(tensor._ptr)) {
return blob_tensor->blob.get();
} else if (auto blob_tensor = std::dynamic_pointer_cast<RemoteBlobTensor>(tensor)) {
} else if (auto blob_tensor = std::dynamic_pointer_cast<RemoteBlobTensor>(tensor._ptr)) {
return blob_tensor->blob.get();
}
OPENVINO_NOT_IMPLEMENTED;
@ -605,9 +609,9 @@ const ie::Blob* get_hardware_blob(const ie::Blob* blob) {
#ifdef PROXY_PLUGIN_ENABLED
if (auto remote_blob = dynamic_cast<const TensorRemoteBlob*>(blob)) {
const auto& tensor = ov::proxy::get_hardware_tensor(remote_blob->tensor);
if (auto blob_tensor = std::dynamic_pointer_cast<BlobTensor>(tensor)) {
if (auto blob_tensor = std::dynamic_pointer_cast<BlobTensor>(tensor._ptr)) {
return blob_tensor->blob.get();
} else if (auto blob_tensor = std::dynamic_pointer_cast<RemoteBlobTensor>(tensor)) {
} else if (auto blob_tensor = std::dynamic_pointer_cast<RemoteBlobTensor>(tensor._ptr)) {
return blob_tensor->blob.get();
}
OPENVINO_NOT_IMPLEMENTED;
@ -616,7 +620,7 @@ const ie::Blob* get_hardware_blob(const ie::Blob* blob) {
return blob;
}

ie::Blob::Ptr tensor_to_blob(const std::shared_ptr<ITensor>& orig_tensor, bool unwrap) {
ie::Blob::Ptr tensor_to_blob(const ov::SoPtr<ITensor>& orig_tensor, bool unwrap) {
#ifdef PROXY_PLUGIN_ENABLED
const auto& tensor = unwrap ? ov::proxy::get_hardware_tensor(orig_tensor) : orig_tensor;
#else
@ -624,13 +628,11 @@ ie::Blob::Ptr tensor_to_blob(const std::shared_ptr<ITensor>& orig_tensor, bool u
#endif
if (tensor == nullptr) {
return {};
} else if (auto blob_tensor = std::dynamic_pointer_cast<BlobTensor>(tensor)) {
} else if (auto blob_tensor = std::dynamic_pointer_cast<BlobTensor>(tensor._ptr)) {
return blob_tensor->blob;
} else if (auto blob_tensor = std::dynamic_pointer_cast<RemoteBlobTensor>(tensor)) {
} else if (auto blob_tensor = std::dynamic_pointer_cast<RemoteBlobTensor>(tensor._ptr)) {
return blob_tensor->blob;
} else if (auto blob_tensor = dynamic_cast<const BlobTensor*>(tensor.get())) {
return blob_tensor->blob;
} else if (std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor)) {
} else if (std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr)) {
return std::make_shared<TensorRemoteBlob>(tensor);
} else {
#define CASE(precision, T) \
@ -662,4 +664,29 @@ ie::Blob::Ptr tensor_to_blob(const std::shared_ptr<ITensor>& orig_tensor, bool u
}
OPENVINO_THROW("Cannot convert tensor to blob!");
}

namespace util {

ov::Tensor make_tensor(const std::shared_ptr<ITensor>& tensor, const std::shared_ptr<void>& so) {
return ov::Tensor(tensor, so);
}

void get_tensor_impl(const ov::Tensor& tensor, std::shared_ptr<ITensor>& tensor_impl, std::shared_ptr<void>& so) {
tensor_impl = tensor._impl;
so = tensor._so;
}

} // namespace util

ov::Tensor make_tensor(const ov::SoPtr<ITensor>& tensor) {
return util::make_tensor(tensor._ptr, tensor._so);
}

ov::SoPtr<ov::ITensor> get_tensor_impl(const ov::Tensor& tensor) {
std::shared_ptr<ov::ITensor> tensor_impl;
std::shared_ptr<void> so;
util::get_tensor_impl(tensor, tensor_impl, so);
return ov::SoPtr<ov::ITensor>(tensor_impl, so);
}

} // namespace ov
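The util::make_tensor/get_tensor_impl pair defined above is now the single bridge between the public ov::Tensor and the internal ov::SoPtr<ov::ITensor>, replacing the long friend-class list that Tensor previously carried. A minimal usage sketch of the round trip, using only the two free functions this commit declares in openvino/runtime/make_tensor.hpp:

// Round-trip sketch: unwrap a public ov::Tensor to the internal SoPtr form
// and rewrap it; the _so library handle travels with the implementation in
// both directions, so the backing plugin stays loaded.
#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/tensor.hpp"

ov::Tensor round_trip(const ov::Tensor& t) {
    ov::SoPtr<ov::ITensor> impl = ov::get_tensor_impl(t);  // Tensor -> {_ptr, _so}
    return ov::make_tensor(impl);                          // {_ptr, _so} -> Tensor
}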
@ -70,7 +70,7 @@ ov::SoPtr<ov::ICompiledModel> ov::Plugin::compile_model(const std::string& model
}

ov::SoPtr<ov::ICompiledModel> ov::Plugin::compile_model(const std::shared_ptr<const ov::Model>& model,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const ov::AnyMap& properties) const {
OV_PLUGIN_CALL_STATEMENT(return {m_ptr->compile_model(model, properties, context), m_so});
}
@ -85,22 +85,26 @@ ov::SoPtr<ov::ICompiledModel> ov::Plugin::import_model(std::istream& model, cons
}

ov::SoPtr<ov::ICompiledModel> ov::Plugin::import_model(std::istream& networkModel,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const ov::AnyMap& config) const {
OV_PLUGIN_CALL_STATEMENT(return {m_ptr->import_model(networkModel, context, config), m_so});
}

ov::RemoteContext ov::Plugin::create_context(const AnyMap& params) const {
ov::SoPtr<ov::IRemoteContext> ov::Plugin::create_context(const AnyMap& params) const {
OV_PLUGIN_CALL_STATEMENT({
auto remote = m_ptr->create_context(params);
return {remote, {m_so}};
if (!remote._so)
remote._so = m_so;
return remote;
});
}

ov::RemoteContext ov::Plugin::get_default_context(const AnyMap& params) const {
ov::SoPtr<ov::IRemoteContext> ov::Plugin::get_default_context(const AnyMap& params) const {
OV_PLUGIN_CALL_STATEMENT({
auto remote = m_ptr->get_default_context(params);
return {remote, {m_so}};
if (!remote._so)
remote._so = m_so;
return remote;
});
}
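Plugin::create_context and get_default_context no longer overwrite the returned context's shared object unconditionally: a context that already carries an _so (for example, one a proxy plugin forwarded from a lower-level device) keeps it, and only an empty handle is filled in with this plugin's own m_so. The rule, restated as a hypothetical helper:

// Hypothetical helper for the "fill in the shared object only if missing"
// rule used by create_context and get_default_context above.
#include "openvino/runtime/so_ptr.hpp"

template <typename T>
ov::SoPtr<T> with_default_so(ov::SoPtr<T> obj, const std::shared_ptr<void>& so) {
    if (!obj._so)       // the callee did not attach a library handle itself
        obj._so = so;   // default to the library this Plugin was loaded from
    return obj;
}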
@ -12,6 +12,7 @@
#include "ie_iextension.h"
#include "openvino/runtime/icompiled_model.hpp"
#include "openvino/runtime/iplugin.hpp"
#include "openvino/runtime/so_ptr.hpp"

namespace ov {

@ -50,7 +51,7 @@ public:
SoPtr<ov::ICompiledModel> compile_model(const std::string& model_path, const ov::AnyMap& properties) const;

SoPtr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const ov::AnyMap& properties) const;

ov::SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model, const ov::AnyMap& properties) const;
@ -58,12 +59,12 @@ public:
SoPtr<ov::ICompiledModel> import_model(std::istream& model, const ov::AnyMap& properties) const;

SoPtr<ov::ICompiledModel> import_model(std::istream& networkModel,
const ov::RemoteContext& context,
const ov::SoPtr<ov::IRemoteContext>& context,
const ov::AnyMap& config) const;

ov::RemoteContext create_context(const AnyMap& params) const;
ov::SoPtr<ov::IRemoteContext> create_context(const AnyMap& params) const;

ov::RemoteContext get_default_context(const AnyMap& params) const;
ov::SoPtr<ov::IRemoteContext> get_default_context(const AnyMap& params) const;

Any get_property(const std::string& name, const AnyMap& arguments) const;
@ -4,21 +4,22 @@

#include <memory>

#include "dev/make_tensor.hpp"
#include "ie_ngraph_utils.hpp"
#include "ie_remote_context.hpp"
#include "openvino/runtime/iremote_context.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/so_ptr.hpp"

namespace ov {

class RemoteContextWrapper : public InferenceEngine::RemoteContext {
private:
std::shared_ptr<ov::IRemoteContext> m_context;
ov::SoPtr<ov::IRemoteContext> m_context;

public:
RemoteContextWrapper(const std::shared_ptr<ov::IRemoteContext>& context) : m_context(context) {}
RemoteContextWrapper(const ov::SoPtr<ov::IRemoteContext>& context) : m_context(context) {}

const std::shared_ptr<ov::IRemoteContext>& get_context() const {
const ov::SoPtr<ov::IRemoteContext>& get_context() const {
return m_context;
}
@ -6,8 +6,8 @@
#include <utility>
#include <vector>

#include "dev/make_tensor.hpp"
#include "ie_blob.h"
#include "openvino/runtime/make_tensor.hpp"
#include "system_allocator.hpp"

namespace InferenceEngine {
@ -203,7 +203,7 @@ ExecutableNetwork Core::ImportNetwork(const std::string& modelFileName,
if (!modelStream.is_open())
IE_THROW(NetworkNotRead) << "Model file " << modelFileName << " cannot be opened!";
auto exec = _impl->get_plugin(parsed._deviceName).import_model(modelStream, parsed._config);
return {ov::legacy_convert::convert_compiled_model(exec._ptr), exec._so};
return {ov::legacy_convert::convert_compiled_model(exec), exec._so};
}

ExecutableNetwork Core::ImportNetwork(std::istream& networkModel,
@ -233,7 +233,7 @@ ExecutableNetwork Core::ImportNetwork(std::istream& networkModel) {
networkModel.seekg(currentPos, networkModel.beg);

auto exec = _impl->get_plugin(deviceName).import_model(networkModel, {});
return {ov::legacy_convert::convert_compiled_model(exec._ptr), exec._so};
return {ov::legacy_convert::convert_compiled_model(exec), exec._so};
}

ExecutableNetwork Core::ImportNetwork(std::istream& networkModel,
@ -251,10 +251,8 @@ ExecutableNetwork Core::ImportNetwork(std::istream& networkModel,

auto parsed = ov::parseDeviceNameIntoConfig(deviceName, ov::any_copy(config));
auto exec = _impl->get_plugin(deviceName)
.import_model(networkModel,
ov::RemoteContext{ov::legacy_convert::convert_remote_context(context), {}},
parsed._config);
return {ov::legacy_convert::convert_compiled_model(exec._ptr), exec._so};
.import_model(networkModel, ov::legacy_convert::convert_remote_context(context), parsed._config);
return {ov::legacy_convert::convert_compiled_model(exec), exec._so};
}

QueryNetworkResult Core::QueryNetwork(const CNNNetwork& network,
@ -31,7 +31,7 @@ const std::shared_ptr<InferenceEngine::RemoteContext> RemoteContext::GetHardware
if (auto wrapper = dynamic_cast<ov::RemoteContextWrapper*>(this)) {
auto ov_context = wrapper->get_context();
auto hw_context = ov::proxy::get_hardware_context(ov_context);
return ov::legacy_convert::convert_remote_context(hw_context);
return ov::legacy_convert::convert_remote_context(hw_context._ptr);
}
#endif
return shared_from_this();
@ -42,7 +42,7 @@ const std::shared_ptr<const InferenceEngine::RemoteContext> RemoteContext::GetHa
if (auto wrapper = dynamic_cast<const ov::RemoteContextWrapper*>(this)) {
auto ov_context = wrapper->get_context();
auto hw_context = ov::proxy::get_hardware_context(ov_context);
return ov::legacy_convert::convert_remote_context(hw_context);
return ov::legacy_convert::convert_remote_context(hw_context._ptr);
}
#endif
return shared_from_this();
@ -14,6 +14,8 @@
#include "openvino/runtime/compiled_model.hpp"
#include "openvino/runtime/exception.hpp"
#include "openvino/runtime/iasync_infer_request.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/so_ptr.hpp"
#include "transformations/utils/utils.hpp"

#define OV_INFER_REQ_CALL_STATEMENT(...) \

@ -62,7 +64,7 @@ InferRequest::InferRequest(const std::shared_ptr<ov::IAsyncInferRequest>& impl,
}

void InferRequest::set_tensor(const ov::Output<const ov::Node>& port, const Tensor& tensor) {
    OV_INFER_REQ_CALL_STATEMENT({ _impl->set_tensor(port, tensor); });
    OV_INFER_REQ_CALL_STATEMENT({ _impl->set_tensor(port, get_tensor_impl(tensor)); });
}

void InferRequest::set_tensor(const ov::Output<ov::Node>& port, const Tensor& tensor) {

@ -90,7 +92,12 @@ void InferRequest::set_tensors(const std::string& name, const std::vector<Tensor
}

void InferRequest::set_tensors(const ov::Output<const ov::Node>& port, const std::vector<Tensor>& tensors) {
    OV_INFER_REQ_CALL_STATEMENT({ _impl->set_tensors(port, tensors); })
    std::vector<ov::SoPtr<ov::ITensor>> tensor_ptrs;
    tensor_ptrs.reserve(tensors.size());
    for (const auto& tensor : tensors) {
        tensor_ptrs.emplace_back(get_tensor_impl(tensor));
    }
    OV_INFER_REQ_CALL_STATEMENT({ _impl->set_tensors(port, tensor_ptrs); })
}

void InferRequest::set_input_tensor(size_t idx, const Tensor& tensor) {

@ -170,7 +177,7 @@ Tensor InferRequest::get_tensor(const ov::Output<const ov::Node>& port) {
        if (!tensor._so)
            tensor._so = _so;

        return tensor;
        return make_tensor(tensor);
    });
}
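
Reviewer note: the public/internal boundary is now crossed only through the two helpers from openvino/runtime/make_tensor.hpp that these hunks start using. A hedged sketch of the round trip, with signatures inferred from this diff (round_trip_example itself is illustrative):

    #include "openvino/runtime/make_tensor.hpp"
    #include "openvino/runtime/tensor.hpp"

    void round_trip_example(const ov::Tensor& user_tensor) {
        // Public -> internal: ITensor impl plus the shared object that owns it.
        ov::SoPtr<ov::ITensor> impl = ov::get_tensor_impl(user_tensor);

        // Internal -> public: rewrap without copying the underlying data.
        ov::Tensor back = ov::make_tensor(impl);
        (void)back;
    }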
@ -269,7 +276,9 @@ std::vector<VariableState> InferRequest::query_state() {
    std::vector<VariableState> variable_states;
    OV_INFER_REQ_CALL_STATEMENT({
        for (auto&& state : _impl->query_state()) {
            variable_states.emplace_back(ov::VariableState{state, {_so}});
            if (!state._so)
                state._so = _so;
            variable_states.emplace_back(ov::VariableState{state._ptr, state._so});
        }
    })
    return variable_states;
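
Reviewer note: the "if the plugin did not attach a shared object, borrow it from the owner" pattern above repeats throughout this PR. A hypothetical helper showing the rule it encodes (attach_so is not part of this change, purely illustrative):

    #include <memory>
    #include <vector>
    #include "openvino/runtime/so_ptr.hpp"

    // Every SoPtr handed to the caller must carry a shared object, so the
    // producing plugin cannot be unloaded while the object is still alive.
    template <typename T>
    void attach_so(std::vector<ov::SoPtr<T>>& objects, const std::shared_ptr<void>& so) {
        for (auto&& obj : objects) {
            if (!obj._so)
                obj._so = so;
        }
    }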
@ -8,13 +8,14 @@

#include "openvino/runtime/iremote_tensor.hpp"
#include "openvino/runtime/itensor.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/properties.hpp"

namespace ov {

void RemoteTensor::type_check(const Tensor& tensor, const std::map<std::string, std::vector<std::string>>& type_info) {
    OPENVINO_ASSERT(tensor, "Could not check empty tensor type");
    auto remote_tensor = std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._impl);
    auto remote_tensor = std::dynamic_pointer_cast<ov::IRemoteTensor>(get_tensor_impl(tensor)._ptr);
    OPENVINO_ASSERT(remote_tensor, "Tensor is not remote.");
    if (!type_info.empty()) {
        auto remote_properties = remote_tensor->get_properties();
@ -36,7 +36,7 @@ class MockPlugin : public ov::IPlugin {

    std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                      const ov::AnyMap& properties,
                                                      const ov::RemoteContext& context) const override {
                                                      const ov::SoPtr<ov::IRemoteContext>& context) const override {
        OPENVINO_NOT_IMPLEMENTED;
    }

@ -62,11 +62,11 @@ class MockPlugin : public ov::IPlugin {
        return "";
    }

    std::shared_ptr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override {
    ov::SoPtr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override {
        OPENVINO_NOT_IMPLEMENTED;
    }

    std::shared_ptr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override {
    ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override {
        OPENVINO_NOT_IMPLEMENTED;
    }

@ -75,7 +75,7 @@ class MockPlugin : public ov::IPlugin {
    }

    std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
                                                     const ov::RemoteContext& context,
                                                     const ov::SoPtr<ov::IRemoteContext>& context,
                                                     const ov::AnyMap& properties) const override {
        OPENVINO_NOT_IMPLEMENTED;
    }
@ -6,7 +6,7 @@
#include <gtest/gtest.h>
#include <ie_blob.h>

#include "dev/make_tensor.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "unit_test_utils/mocks/mock_allocator.hpp"

IE_SUPPRESS_DEPRECATED_START

@ -600,7 +600,7 @@ TEST_F(BlobTests, readRangeRoiBlob) {

TEST_F(BlobTests, setBiggerShapeOnPreAllocatedMemory) {
    const auto t = ov::make_tensor(ov::element::i64, ov::Shape{2, 6});
    const auto b = ov::tensor_to_blob(t);
    const auto b = ov::tensor_to_blob({t, nullptr});

    const auto origin_ptr = t->data();
    b->setShape({2, 8});

@ -612,7 +612,7 @@ TEST_F(BlobTests, setBiggerShapeOnPreAllocatedMemory) {

TEST_F(BlobTests, setSmallerShapeOnPreAllocatedMemory) {
    const auto t = ov::make_tensor(ov::element::i64, ov::Shape{2, 6});
    const auto b = ov::tensor_to_blob(t);
    const auto b = ov::tensor_to_blob({t, nullptr});

    const auto origin_ptr = t->data();
    b->setShape({2, 4});
@ -12,6 +12,8 @@

#include "itt.hpp"
#include "openvino/core/except.hpp"
#include "openvino/runtime/iremote_tensor.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/profiling_info.hpp"
#include "openvino/runtime/tensor.hpp"
#include "plugin.hpp"

@ -20,11 +22,11 @@ using Time = std::chrono::high_resolution_clock;

namespace {

void allocate_tensor_impl(ov::Tensor& tensor, const ov::element::Type& element_type, const ov::Shape& shape) {
    if (!tensor || tensor.get_element_type() != element_type) {
        tensor = ov::Tensor(element_type, shape);
void allocate_tensor_impl(ov::SoPtr<ov::ITensor>& tensor, const ov::element::Type& element_type, const ov::Shape& shape) {
    if (!tensor || tensor->get_element_type() != element_type) {
        tensor = ov::make_tensor(element_type, shape);
    } else {
        tensor.set_shape(shape);
        tensor->set_shape(shape);
    }
}
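
Reviewer note: plugin-side allocation switches from the public ov::Tensor constructor to the dev-API factory. A minimal usage sketch, assuming make_tensor(type, shape) converts implicitly to SoPtr via the new constructors this PR adds (allocate is illustrative):

    #include "openvino/runtime/make_tensor.hpp"
    #include "openvino/runtime/so_ptr.hpp"

    ov::SoPtr<ov::ITensor> allocate(const ov::element::Type& type, const ov::Shape& shape) {
        // A freshly allocated host tensor belongs to the current binary,
        // so the returned SoPtr carries no shared-object handle.
        return ov::make_tensor(type, shape);
    }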
@ -37,7 +39,7 @@ ov::auto_plugin::InferRequest::InferRequest(const std::shared_ptr<const ov::auto
    if (!m_shared_request) {
        // Allocate input/output tensors
        for (const auto& input : get_inputs()) {
            allocate_tensor(input, [input](ov::Tensor& tensor) {
            allocate_tensor(input, [input](ov::SoPtr<ov::ITensor>& tensor) {
                // Can add a check to avoid double work in case of shared tensors
                allocate_tensor_impl(tensor,
                                     input.get_element_type(),

@ -45,7 +47,7 @@ ov::auto_plugin::InferRequest::InferRequest(const std::shared_ptr<const ov::auto
            });
        }
        for (const auto& output : get_outputs()) {
            allocate_tensor(output, [output](ov::Tensor& tensor) {
            allocate_tensor(output, [output](ov::SoPtr<ov::ITensor>& tensor) {
                // Can add a check to avoid double work in case of shared tensors
                allocate_tensor_impl(tensor,
                                     output.get_element_type(),

@ -54,10 +56,10 @@ ov::auto_plugin::InferRequest::InferRequest(const std::shared_ptr<const ov::auto
        }
    } else {
        for (const auto& input : get_inputs()) {
            ov::ISyncInferRequest::set_tensor(input, ov::Tensor(m_shared_request->get_tensor(input), m_shared_request._so));
            ov::ISyncInferRequest::set_tensor(input, m_shared_request->get_tensor(input));
        }
        for (const auto& output : get_outputs()) {
            ov::ISyncInferRequest::set_tensor(output, ov::Tensor(m_shared_request->get_tensor(output), m_shared_request._so));
            ov::ISyncInferRequest::set_tensor(output, m_shared_request->get_tensor(output));
        }
    }
}
@ -75,22 +77,24 @@ void ov::auto_plugin::InferRequest::set_tensors_to_another_request(const SoAsync
    for (const auto &it : get_inputs()) {
        // this request is already in BUSY state, so using the internal functions safely
        auto tensor = get_tensor(it);
        auto type = tensor.get_element_type();
        bool is_remote = tensor.is<ov::RemoteTensor>() || req->get_tensor(it).is<ov::RemoteTensor>();
        if (is_remote || req->get_tensor(it).data(type) != tensor.data(type))
        auto type = tensor->get_element_type();
        bool is_remote = std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr) ||
                         std::dynamic_pointer_cast<ov::IRemoteTensor>(req->get_tensor(it)._ptr);
        if (is_remote || req->get_tensor(it)->data(type) != tensor->data(type))
            req->set_tensor(it, tensor);
    }
    for (const auto &it : get_outputs()) {
        // this request is already in BUSY state, so using the internal functions safely
        auto tensor = get_tensor(it);
        auto type = tensor.get_element_type();
        bool is_remote = tensor.is<ov::RemoteTensor>() || req->get_tensor(it).is<ov::RemoteTensor>();
        if (is_remote || req->get_tensor(it).data(type) != tensor.data(type))
        auto type = tensor->get_element_type();
        bool is_remote = std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr) ||
                         std::dynamic_pointer_cast<ov::IRemoteTensor>(req->get_tensor(it)._ptr);
        if (is_remote || req->get_tensor(it)->data(type) != tensor->data(type))
            req->set_tensor(it, tensor);
    }
}

void ov::auto_plugin::InferRequest::set_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) {
void ov::auto_plugin::InferRequest::set_tensor(const ov::Output<const ov::Node>& port, const ov::SoPtr<ov::ITensor>& tensor) {
    if (m_shared_request)
        m_shared_request->set_tensor(port, tensor);
    ov::ISyncInferRequest::set_tensor(port, tensor);
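
Reviewer note: tensor.is<ov::RemoteTensor>() is a public-API convenience that does not exist on ov::SoPtr<ov::ITensor>, hence the cast-based check above. A hedged sketch of the predicate (is_remote here is illustrative):

    #include <memory>
    #include "openvino/runtime/iremote_tensor.hpp"
    #include "openvino/runtime/so_ptr.hpp"

    // A tensor is remote exactly when its ITensor impl also implements
    // IRemoteTensor.
    bool is_remote(const ov::SoPtr<ov::ITensor>& tensor) {
        return std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr) != nullptr;
    }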
@ -111,8 +115,15 @@ std::vector<ov::ProfilingInfo> ov::auto_plugin::InferRequest::get_profiling_info

ov::auto_plugin::InferRequest::~InferRequest() = default;

std::vector<std::shared_ptr<ov::IVariableState>> ov::auto_plugin::InferRequest::query_state() const {
    if (m_shared_request)
        return m_shared_request->query_state();
std::vector<ov::SoPtr<ov::IVariableState>> ov::auto_plugin::InferRequest::query_state() const {
    if (m_shared_request) {
        auto states = m_shared_request->query_state();
        for (auto&& state : states) {
            if (!state._so) {
                state._so = m_shared_request._so;
            }
        }
        return states;
    }
    OPENVINO_NOT_IMPLEMENTED;
}
@ -26,14 +26,14 @@ public:
    ~InferRequest();

    void infer() override;
    std::vector<std::shared_ptr<ov::IVariableState>> query_state() const override;
    std::vector<ov::SoPtr<ov::IVariableState>> query_state() const override;
    std::vector<ov::ProfilingInfo> get_profiling_info() const override;

    const SoAsyncInferRequest& get_shared_request();
    void set_scheduled_request(SoAsyncInferRequest request);
    // Auto-Device impl specific: sets the data (tensors from the device-less requests to the specific device request)
    void set_tensors_to_another_request(const SoAsyncInferRequest& req);
    void set_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) override;
    void set_tensor(const ov::Output<const ov::Node>& port, const ov::SoPtr<ov::ITensor>& tensor) override;

private:
    SoAsyncInferRequest m_shared_request;
@ -17,6 +17,7 @@
#include "openvino/runtime/auto/properties.hpp"
#include "openvino/runtime/device_id_parser.hpp"
#include "openvino/runtime/internal_properties.hpp"
#include "openvino/runtime/iremote_context.hpp"
#include "plugin.hpp"
#include "auto_schedule.hpp"
#include "auto_compiled_model.hpp"

@ -78,11 +79,11 @@ namespace auto_plugin {
std::mutex Plugin::m_mtx;
std::map<unsigned int, std::list<std::string>> Plugin::m_priority_map;

std::shared_ptr<ov::IRemoteContext> Plugin::create_context(const ov::AnyMap& remote_properties) const {
ov::SoPtr<ov::IRemoteContext> Plugin::create_context(const ov::AnyMap& remote_properties) const {
    OPENVINO_NOT_IMPLEMENTED;
}

std::shared_ptr<ov::IRemoteContext> Plugin::get_default_context(const ov::AnyMap& remote_properties) const {
ov::SoPtr<ov::IRemoteContext> Plugin::get_default_context(const ov::AnyMap& remote_properties) const {
    OPENVINO_NOT_IMPLEMENTED;
}

@ -92,7 +93,7 @@ std::shared_ptr<ov::ICompiledModel> Plugin::import_model(std::istream& model,
}

std::shared_ptr<ov::ICompiledModel> Plugin::import_model(std::istream& model,
                                                         const ov::RemoteContext& context,
                                                         const ov::SoPtr<ov::IRemoteContext>& context,
                                                         const ov::AnyMap& properties) const {
    OPENVINO_NOT_IMPLEMENTED;
}

@ -350,7 +351,7 @@ Plugin::Plugin() {

std::shared_ptr<ov::ICompiledModel> Plugin::compile_model(const std::shared_ptr<const ov::Model>& model,
                                                          const ov::AnyMap& properties,
                                                          const ov::RemoteContext& context) const {
                                                          const ov::SoPtr<ov::IRemoteContext>& context) const {
    OPENVINO_NOT_IMPLEMENTED;
}
@ -36,7 +36,7 @@ public:

    std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                      const ov::AnyMap& properties,
                                                      const ov::RemoteContext& context) const override;
                                                      const ov::SoPtr<ov::IRemoteContext>& context) const override;

    std::shared_ptr<ov::ICompiledModel> compile_model(const std::string& model_path,
                                                      const ov::AnyMap& properties) const override;

@ -55,16 +55,16 @@ public:
    void unregister_priority(const unsigned int& priority, const std::string& device_name);
    void register_priority(const unsigned int& priority, const std::string& device_name);

    std::shared_ptr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override;
    ov::SoPtr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override;

    std::shared_ptr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override;
    ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override;

    std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
                                                     const ov::AnyMap& properties) const override;

    std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
                                                     const ov::RemoteContext& context,
                                                     const ov::SoPtr<ov::IRemoteContext>& context,
                                                     const ov::AnyMap& properties) const override;

protected:
@ -206,8 +206,8 @@ Pipeline Schedule::get_async_pipeline(const ISyncInferPtr& infer_request, Worker
    // if any input is remote (e.g. was set with set_tensor), let' use the corresponding device
    for (const auto& it : compiled_model->inputs()) {
        auto tensor = infer_request->get_tensor(it);
        if (tensor.is<ov::RemoteTensor>()) {
            const auto name = tensor.as<ov::RemoteTensor>().get_device_name();
        if (auto remote_tensor = std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr)) {
            const auto name = remote_tensor->get_device_name();
            const auto res = std::find_if(
                m_context->m_device_priorities_initial.cbegin(),
                m_context->m_device_priorities_initial.cend(),
@ -62,7 +62,7 @@ public:
    //MOCK_METHOD(std::vector<Tensor>, get_tensors, (const Output<const Node>&), (const, override));
    //MOCK_METHOD(void, set_tensors, (const Output<const Node>&, const std::vector<Tensor>&), (override));
    MOCK_METHOD(void, infer, (), (override));
    MOCK_METHOD(std::vector<std::shared_ptr<IVariableState>>, query_state, (), (const, override));
    MOCK_METHOD(std::vector<ov::SoPtr<IVariableState>>, query_state, (), (const, override));
    //MOCK_METHOD(const std::shared_ptr<const ICompiledModel>&, get_compiled_model, (), (const, override));
    //MOCK_METHOD(const std::vector<Output<const Node>>&, get_inputs, (), (const, override));
    //MOCK_METHOD(const std::vector<Output<const Node>>&, get_outputs, (), (const, override));

@ -70,6 +70,6 @@ public:
    ~MockSyncInferRequest() = default;

private:
    void allocate_tensor_impl(ov::Tensor& tensor, const ov::element::Type& element_type, const ov::Shape& shape);
    void allocate_tensor_impl(ov::SoPtr<ov::ITensor>& tensor, const ov::element::Type& element_type, const ov::Shape& shape);
};
} // namespace ov
@ -3,6 +3,7 @@
//

#include "include/mock_common.hpp"
#include "openvino/runtime/make_tensor.hpp"

// getMetric will return a fake ov::Any, gmock will call ostreamer << ov::Any
// it will cause core dump, so add this special implemented

@ -29,11 +30,11 @@ MockAsyncInferRequest::MockAsyncInferRequest(const std::shared_ptr<IInferRequest
    } });
}

void MockSyncInferRequest::allocate_tensor_impl(Tensor& tensor, const element::Type& element_type, const Shape& shape) {
    if (!tensor || tensor.get_element_type() != element_type) {
        tensor = ov::Tensor(element_type, shape);
void MockSyncInferRequest::allocate_tensor_impl(ov::SoPtr<ov::ITensor>& tensor, const element::Type& element_type, const Shape& shape) {
    if (!tensor || tensor->get_element_type() != element_type) {
        tensor = ov::make_tensor(element_type, shape);
    } else {
        tensor.set_shape(shape);
        tensor->set_shape(shape);
    }
}

@ -42,7 +43,7 @@ MockSyncInferRequest::MockSyncInferRequest(const std::shared_ptr<const MockCompi
    OPENVINO_ASSERT(compiled_model);
    // Allocate input/output tensors
    for (const auto& input : get_inputs()) {
        allocate_tensor(input, [this, input](ov::Tensor& tensor) {
        allocate_tensor(input, [this, input](ov::SoPtr<ov::ITensor>& tensor) {
            // Can add a check to avoid double work in case of shared tensors
            allocate_tensor_impl(tensor,
                                 input.get_element_type(),

@ -50,7 +51,7 @@ MockSyncInferRequest::MockSyncInferRequest(const std::shared_ptr<const MockCompi
        });
    }
    for (const auto& output : get_outputs()) {
        allocate_tensor(output, [this, output](ov::Tensor& tensor) {
        allocate_tensor(output, [this, output](ov::SoPtr<ov::ITensor>& tensor) {
            // Can add a check to avoid double work in case of shared tensors
            allocate_tensor_impl(tensor,
                                 output.get_element_type(),
@ -34,14 +34,15 @@ std::shared_ptr<ov::ICompiledModel> ov::hetero::Plugin::compile_model(const std:
    return compiled_model;
}

std::shared_ptr<ov::ICompiledModel> ov::hetero::Plugin::compile_model(const std::shared_ptr<const ov::Model>& model,
std::shared_ptr<ov::ICompiledModel> ov::hetero::Plugin::compile_model(
    const std::shared_ptr<const ov::Model>& model,
    const ov::AnyMap& properties,
    const ov::RemoteContext& context) const {
    const ov::SoPtr<ov::IRemoteContext>& context) const {
    OPENVINO_NOT_IMPLEMENTED;
}

std::shared_ptr<ov::ICompiledModel> ov::hetero::Plugin::import_model(std::istream& model,
                                                                     const ov::RemoteContext& context,
                                                                     const ov::SoPtr<ov::IRemoteContext>& context,
                                                                     const ov::AnyMap& properties) const {
    OPENVINO_NOT_IMPLEMENTED;
}

@ -191,11 +192,11 @@ ov::Any ov::hetero::Plugin::caching_device_properties(const std::string& device_
    return ov::Any(result);
}

std::shared_ptr<ov::IRemoteContext> ov::hetero::Plugin::create_context(const ov::AnyMap& remote_properties) const {
ov::SoPtr<ov::IRemoteContext> ov::hetero::Plugin::create_context(const ov::AnyMap& remote_properties) const {
    OPENVINO_NOT_IMPLEMENTED;
}

std::shared_ptr<ov::IRemoteContext> ov::hetero::Plugin::get_default_context(const ov::AnyMap& remote_properties) const {
ov::SoPtr<ov::IRemoteContext> ov::hetero::Plugin::get_default_context(const ov::AnyMap& remote_properties) const {
    OPENVINO_NOT_IMPLEMENTED;
}
@ -32,20 +32,20 @@ public:

    std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                      const ov::AnyMap& properties,
                                                      const ov::RemoteContext& context) const override;
                                                      const ov::SoPtr<IRemoteContext>& context) const override;

    void set_property(const ov::AnyMap& properties) override;

    ov::Any get_property(const std::string& name, const ov::AnyMap& properties) const override;

    std::shared_ptr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override;
    ov::SoPtr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override;

    std::shared_ptr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override;
    ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override;

    std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model, const ov::AnyMap& properties) const override;

    std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
                                                     const ov::RemoteContext& context,
                                                     const ov::SoPtr<ov::IRemoteContext>& context,
                                                     const ov::AnyMap& properties) const override;

    ov::SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model,
@ -14,7 +14,6 @@
#include "itt.hpp"
#include "openvino/core/except.hpp"
#include "plugin.hpp"
#include "variable_state.hpp"

ov::hetero::InferRequest::InferRequest(const std::shared_ptr<const ov::hetero::CompiledModel>& compiled_model)
    : ov::ISyncInferRequest(compiled_model) {

@ -66,20 +65,22 @@ ov::SoPtr<ov::IAsyncInferRequest> ov::hetero::InferRequest::get_request(const ov
    OPENVINO_THROW("Cannot find infer request for port ", port);
}

ov::Tensor ov::hetero::InferRequest::get_tensor(const ov::Output<const ov::Node>& port) const {
ov::SoPtr<ov::ITensor> ov::hetero::InferRequest::get_tensor(const ov::Output<const ov::Node>& port) const {
    return get_request(port)->get_tensor(port);
}

void ov::hetero::InferRequest::set_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) {
void ov::hetero::InferRequest::set_tensor(const ov::Output<const ov::Node>& port,
                                          const ov::SoPtr<ov::ITensor>& tensor) {
    get_request(port)->set_tensor(port, tensor);
}

std::vector<ov::Tensor> ov::hetero::InferRequest::get_tensors(const ov::Output<const ov::Node>& port) const {
std::vector<ov::SoPtr<ov::ITensor>> ov::hetero::InferRequest::get_tensors(
    const ov::Output<const ov::Node>& port) const {
    return get_request(port)->get_tensors(port);
}

void ov::hetero::InferRequest::set_tensors(const ov::Output<const ov::Node>& port,
                                           const std::vector<ov::Tensor>& tensors) {
                                           const std::vector<ov::SoPtr<ov::ITensor>>& tensors) {
    return get_request(port)->set_tensors(port, tensors);
}
@ -89,12 +90,14 @@ void ov::hetero::InferRequest::check_tensors() const {
    return;
}

std::vector<std::shared_ptr<ov::IVariableState>> ov::hetero::InferRequest::query_state() const {
    std::vector<std::shared_ptr<ov::IVariableState>> variable_states = {};
std::vector<ov::SoPtr<ov::IVariableState>> ov::hetero::InferRequest::query_state() const {
    std::vector<ov::SoPtr<ov::IVariableState>> variable_states = {};
    for (const auto& request : m_subrequests) {
        OPENVINO_ASSERT(request);
        for (auto&& state : request->query_state()) {
            variable_states.emplace_back(std::make_shared<VariableState>(state, request._so));
            if (!state._so)
                state._so = request._so;
            variable_states.emplace_back(state);
        }
    }
    return variable_states;
@ -30,17 +30,18 @@ public:

    void infer() override;

    std::vector<std::shared_ptr<ov::IVariableState>> query_state() const override;
    std::vector<ov::SoPtr<ov::IVariableState>> query_state() const override;

    std::vector<ov::ProfilingInfo> get_profiling_info() const override;

    ov::Tensor get_tensor(const ov::Output<const ov::Node>& port) const override;
    ov::SoPtr<ov::ITensor> get_tensor(const ov::Output<const ov::Node>& port) const override;

    void set_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) override;
    void set_tensor(const ov::Output<const ov::Node>& port, const ov::SoPtr<ov::ITensor>& tensor) override;

    std::vector<ov::Tensor> get_tensors(const ov::Output<const ov::Node>& port) const override;
    std::vector<ov::SoPtr<ov::ITensor>> get_tensors(const ov::Output<const ov::Node>& port) const override;

    void set_tensors(const ov::Output<const ov::Node>& port, const std::vector<ov::Tensor>& tensors) override;
    void set_tensors(const ov::Output<const ov::Node>& port,
                     const std::vector<ov::SoPtr<ov::ITensor>>& tensors) override;

    void check_tensors() const override;
@ -1,52 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <memory>

#include "openvino/runtime/ivariable_state.hpp"

namespace ov {
namespace hetero {
/**
 * @brief Simple wrapper for hardware variable states which holds plugin shared object
 */
class VariableState : public ov::IVariableState {
    std::shared_ptr<ov::IVariableState> m_state;
    std::shared_ptr<void> m_so;

public:
    /**
     * @brief Constructor of hetero VariableState
     *
     * @param state hardware state
     * @param so shared object
     */
    VariableState(const std::shared_ptr<ov::IVariableState>& state, const std::shared_ptr<void>& so)
        : IVariableState(""),
          m_state(state),
          m_so(so) {
        OPENVINO_ASSERT(m_state);
    }
    const std::string& get_name() const override {
        return m_state->get_name();
    }

    void reset() override {
        m_state->reset();
    }

    void set_state(const ov::Tensor& state) override {
        m_state->set_state(state);
    }

    const ov::Tensor& get_state() const override {
        return m_state->get_state();
    }
};

} // namespace hetero
} // namespace ov
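
Reviewer note: this wrapper (and its proxy twin deleted further down) existed only to keep the hardware plugin loaded while a state handle was alive; ov::SoPtr<ov::IVariableState> now provides that guarantee by construction, which is why query_state above can return the hardware states directly. A hedged sketch of the lifetime property (reset_all is illustrative):

    #include <vector>
    #include "openvino/runtime/ivariable_state.hpp"
    #include "openvino/runtime/so_ptr.hpp"

    void reset_all(std::vector<ov::SoPtr<ov::IVariableState>> states) {
        for (auto&& state : states) {
            // state._so keeps the producing plugin's library mapped, so the
            // virtual call is safe even after all other handles are gone.
            state->reset();
        }
    }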
@ -34,15 +34,16 @@ public:

    std::vector<ov::ProfilingInfo> get_profiling_info() const override;

    ov::Tensor get_tensor(const ov::Output<const ov::Node>& port) const override;
    ov::SoPtr<ov::ITensor> get_tensor(const ov::Output<const ov::Node>& port) const override;

    void set_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) override;
    void set_tensor(const ov::Output<const ov::Node>& port, const ov::SoPtr<ov::ITensor>& tensor) override;

    std::vector<ov::Tensor> get_tensors(const ov::Output<const ov::Node>& port) const override;
    std::vector<ov::SoPtr<ov::ITensor>> get_tensors(const ov::Output<const ov::Node>& port) const override;

    void set_tensors(const ov::Output<const ov::Node>& port, const std::vector<ov::Tensor>& tensors) override;
    void set_tensors(const ov::Output<const ov::Node>& port,
                     const std::vector<ov::SoPtr<ov::ITensor>>& tensors) override;

    std::vector<std::shared_ptr<ov::IVariableState>> query_state() const override;
    std::vector<ov::SoPtr<ov::IVariableState>> query_state() const override;

    const std::shared_ptr<const ov::ICompiledModel>& get_compiled_model() const override;
@ -26,7 +26,7 @@ void create_plugin(std::shared_ptr<ov::IPlugin>& plugin);
 *
 * @return Original remote context
 */
const std::shared_ptr<ov::IRemoteContext>& get_hardware_context(const std::shared_ptr<ov::IRemoteContext>& context);
const ov::SoPtr<ov::IRemoteContext>& get_hardware_context(const ov::SoPtr<ov::IRemoteContext>& context);

/**
 * @brief Get wrapped remote tensor

@ -35,7 +35,7 @@ const std::shared_ptr<ov::IRemoteContext>& get_hardware_context(const std::share
 *
 * @return Original remote tensor
 */
const std::shared_ptr<ov::ITensor>& get_hardware_tensor(const std::shared_ptr<ov::ITensor>& tensor);
const ov::SoPtr<ov::ITensor>& get_hardware_tensor(const ov::SoPtr<ov::ITensor>& tensor);

} // namespace proxy
} // namespace ov
@ -18,7 +18,7 @@ public:
          m_compiled_model(model) {}
    CompiledModel(const ov::SoPtr<ov::ICompiledModel>& model,
                  const std::shared_ptr<const ov::IPlugin>& plugin,
                  const ov::RemoteContext& context)
                  const ov::SoPtr<ov::IRemoteContext>& context)
        : ov::ICompiledModel(nullptr, plugin, context),
          m_compiled_model(model) {}
    std::shared_ptr<ov::IAsyncInferRequest> create_infer_request() const override {
@ -4,11 +4,14 @@

#include "openvino/proxy/infer_request.hpp"

#include <memory>
#include <openvino/runtime/iremote_tensor.hpp>

#include "openvino/runtime/itensor.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/remote_context.hpp"
#include "openvino/runtime/so_ptr.hpp"
#include "remote_context.hpp"
#include "variable_state.hpp"

ov::proxy::InferRequest::InferRequest(ov::SoPtr<ov::IAsyncInferRequest>&& request,
                                      const std::shared_ptr<const ov::ICompiledModel>& compiled_model)

@ -43,42 +46,43 @@ std::vector<ov::ProfilingInfo> ov::proxy::InferRequest::get_profiling_info() con
    return m_infer_request->get_profiling_info();
}

ov::Tensor ov::proxy::InferRequest::get_tensor(const ov::Output<const ov::Node>& port) const {
ov::SoPtr<ov::ITensor> ov::proxy::InferRequest::get_tensor(const ov::Output<const ov::Node>& port) const {
    auto tensor = m_infer_request->get_tensor(port);
    if (tensor.is<ov::RemoteTensor>()) {
        auto remote_context = std::dynamic_pointer_cast<ov::proxy::RemoteContext>(m_compiled_model->get_context());
    if (std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr)) {
        auto remote_context = std::dynamic_pointer_cast<ov::proxy::RemoteContext>(m_compiled_model->get_context()._ptr);
        OPENVINO_ASSERT(remote_context);
        tensor = remote_context->wrap_tensor(tensor.as<ov::RemoteTensor>());
        tensor = remote_context->wrap_tensor(tensor);
    }
    return ov::Tensor(tensor, m_infer_request._so);
    return tensor;
}

void ov::proxy::InferRequest::set_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) {
void ov::proxy::InferRequest::set_tensor(const ov::Output<const ov::Node>& port, const ov::SoPtr<ov::ITensor>& tensor) {
    m_infer_request->set_tensor(port, tensor);
}

std::vector<ov::Tensor> ov::proxy::InferRequest::get_tensors(const ov::Output<const ov::Node>& port) const {
std::vector<ov::SoPtr<ov::ITensor>> ov::proxy::InferRequest::get_tensors(const ov::Output<const ov::Node>& port) const {
    auto tensors = m_infer_request->get_tensors(port);
    for (auto&& tensor : tensors) {
        if (tensor.is<ov::RemoteTensor>()) {
            auto remote_context = std::dynamic_pointer_cast<ov::proxy::RemoteContext>(m_compiled_model->get_context());
        if (std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr)) {
            auto remote_context =
                std::dynamic_pointer_cast<ov::proxy::RemoteContext>(m_compiled_model->get_context()._ptr);
            OPENVINO_ASSERT(remote_context);
            tensor = remote_context->wrap_tensor(tensor.as<ov::RemoteTensor>());
            tensor = remote_context->wrap_tensor(tensor);
        }
        tensor = ov::Tensor(tensor, m_infer_request._so);
    }
    return tensors;
}

void ov::proxy::InferRequest::set_tensors(const ov::Output<const ov::Node>& port,
                                          const std::vector<ov::Tensor>& tensors) {
                                          const std::vector<ov::SoPtr<ov::ITensor>>& tensors) {
    return m_infer_request->set_tensors(port, tensors);
}

std::vector<std::shared_ptr<ov::IVariableState>> ov::proxy::InferRequest::query_state() const {
std::vector<ov::SoPtr<ov::IVariableState>> ov::proxy::InferRequest::query_state() const {
    auto states = m_infer_request->query_state();
    for (auto&& state : states) {
        state = std::make_shared<ov::proxy::VariableState>(state, m_infer_request._so);
        if (!state._so)
            state._so = m_infer_request._so;
    }
    return states;
}
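
Reviewer note: the proxy request now rewraps remote tensors at the SoPtr<ITensor> level and no longer re-boxes results into ov::Tensor. A hedged sketch of the wrap step above (wrap_remote is illustrative; wrap_tensor and the proxy-internal remote_context.hpp header come from this diff):

    #include <memory>
    #include "openvino/runtime/iremote_tensor.hpp"
    #include "openvino/runtime/so_ptr.hpp"
    #include "remote_context.hpp"  // proxy-internal header, per this diff

    // Only remote tensors are renamed behind the proxy device; host tensors
    // pass through untouched.
    ov::SoPtr<ov::ITensor> wrap_remote(ov::proxy::RemoteContext& ctx, ov::SoPtr<ov::ITensor> tensor) {
        if (std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr))
            tensor = ctx.wrap_tensor(tensor);
        return tensor;
    }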
@ -310,11 +310,12 @@ std::shared_ptr<ov::ICompiledModel> ov::proxy::Plugin::compile_model(const std::
    return std::dynamic_pointer_cast<ov::ICompiledModel>(compiled_model);
}

std::shared_ptr<ov::ICompiledModel> ov::proxy::Plugin::compile_model(const std::shared_ptr<const ov::Model>& model,
std::shared_ptr<ov::ICompiledModel> ov::proxy::Plugin::compile_model(
    const std::shared_ptr<const ov::Model>& model,
    const ov::AnyMap& properties,
    const ov::RemoteContext& context) const {
    const ov::SoPtr<ov::IRemoteContext>& context) const {
    auto ctx = ov::proxy::RemoteContext::get_hardware_context(context);
    auto dev_name = ctx.get_device_name();
    auto dev_name = ctx->get_device_name();
    auto device_config = construct_device_config(dev_name, m_configs, properties);

    std::shared_ptr<const ov::IPlugin> plugin = shared_from_this();

@ -325,7 +326,7 @@ std::shared_ptr<ov::ICompiledModel> ov::proxy::Plugin::compile_model(const std::
    return std::dynamic_pointer_cast<ov::ICompiledModel>(compiled_model);
}

std::shared_ptr<ov::IRemoteContext> ov::proxy::Plugin::create_context(const ov::AnyMap& remote_properties) const {
ov::SoPtr<ov::IRemoteContext> ov::proxy::Plugin::create_context(const ov::AnyMap& remote_properties) const {
    // TODO: if no device id, try to create context for each plugin
    auto dev_name = get_device_name();
    auto dev_idx = get_device_from_config(remote_properties);

@ -342,7 +343,7 @@ std::shared_ptr<ov::IRemoteContext> ov::proxy::Plugin::create_context(const ov::
                                                                dev_idx,
                                                                has_dev_idx,
                                                                is_new_api);
        return std::dynamic_pointer_cast<ov::IRemoteContext>(remote_context);
        return remote_context;
    }
    // Properties doesn't have device id, so try to create context for all devices
    const auto hidden_devices = get_hidden_devices();

@ -355,7 +356,7 @@ std::shared_ptr<ov::IRemoteContext> ov::proxy::Plugin::create_context(const ov::
                                                                    i,
                                                                    has_dev_idx,
                                                                    is_new_api);
            return std::dynamic_pointer_cast<ov::IRemoteContext>(remote_context);
            return remote_context;
        } catch (const ov::Exception&) {
        }
    }

@ -363,7 +364,7 @@ std::shared_ptr<ov::IRemoteContext> ov::proxy::Plugin::create_context(const ov::
                    ov::Any(remote_properties).as<std::string>());
}

std::shared_ptr<ov::IRemoteContext> ov::proxy::Plugin::get_default_context(const ov::AnyMap& remote_properties) const {
ov::SoPtr<ov::IRemoteContext> ov::proxy::Plugin::get_default_context(const ov::AnyMap& remote_properties) const {
    auto dev_name = get_device_name();
    auto dev_idx = get_device_from_config(remote_properties);
    auto has_dev_idx = is_device_in_config(remote_properties);

@ -378,7 +379,7 @@ std::shared_ptr<ov::IRemoteContext> ov::proxy::Plugin::get_default_context(const
                                                            dev_idx,
                                                            has_dev_idx,
                                                            is_new_api);
    return std::dynamic_pointer_cast<ov::IRemoteContext>(remote_context);
    return remote_context;
}

std::shared_ptr<ov::ICompiledModel> ov::proxy::Plugin::import_model(std::istream& model,

@ -391,10 +392,10 @@ std::shared_ptr<ov::ICompiledModel> ov::proxy::Plugin::import_model(std::istream
}

std::shared_ptr<ov::ICompiledModel> ov::proxy::Plugin::import_model(std::istream& model,
                                                                    const ov::RemoteContext& context,
                                                                    const ov::SoPtr<ov::IRemoteContext>& context,
                                                                    const ov::AnyMap& properties) const {
    auto ctx = ov::proxy::RemoteContext::get_hardware_context(context);
    auto dev_name = ctx.get_device_name();
    auto dev_name = ctx->get_device_name();
    auto device_config = construct_device_config(dev_name, m_configs, properties);

    return std::make_shared<ov::proxy::CompiledModel>(get_core()->import_model(model, ctx, device_config),
@ -27,16 +27,16 @@ public:

    std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                      const ov::AnyMap& properties,
                                                      const ov::RemoteContext& context) const override;
                                                      const ov::SoPtr<ov::IRemoteContext>& context) const override;

    std::shared_ptr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override;
    ov::SoPtr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override;

    std::shared_ptr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override;
    ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override;

    std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model, const ov::AnyMap& properties) const override;

    std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
                                                     const ov::RemoteContext& context,
                                                     const ov::SoPtr<ov::IRemoteContext>& context,
                                                     const ov::AnyMap& properties) const override;

private:
@ -8,9 +8,10 @@

#include "openvino/proxy/plugin.hpp"
#include "openvino/runtime/iremote_context.hpp"
#include "openvino/runtime/so_ptr.hpp"
#include "remote_tensor.hpp"

ov::proxy::RemoteContext::RemoteContext(ov::RemoteContext&& ctx,
ov::proxy::RemoteContext::RemoteContext(ov::SoPtr<ov::IRemoteContext>&& ctx,
                                        const std::string& dev_name,
                                        size_t dev_index,
                                        bool has_index,

@ -29,40 +30,35 @@ const std::string& ov::proxy::RemoteContext::get_device_name() const {
}

const ov::AnyMap& ov::proxy::RemoteContext::get_property() const {
    return m_context._impl->get_property();
    return m_context->get_property();
}

ov::Tensor ov::proxy::RemoteContext::wrap_tensor(const ov::RemoteTensor& tensor) {
    return ov::Tensor(std::make_shared<ov::proxy::RemoteTensor>(tensor, m_tensor_name), {});
ov::SoPtr<ov::ITensor> ov::proxy::RemoteContext::wrap_tensor(const ov::SoPtr<ov::ITensor>& tensor) {
    auto proxy_tensor = std::make_shared<ov::proxy::RemoteTensor>(tensor, m_tensor_name);
    return ov::SoPtr<ov::ITensor>(std::dynamic_pointer_cast<ov::ITensor>(proxy_tensor), nullptr);
}

std::shared_ptr<ov::IRemoteTensor> ov::proxy::RemoteContext::create_tensor(const ov::element::Type& type,
ov::SoPtr<ov::IRemoteTensor> ov::proxy::RemoteContext::create_tensor(const ov::element::Type& type,
                                                                     const ov::Shape& shape,
                                                                     const ov::AnyMap& params) {
    return std::make_shared<ov::proxy::RemoteTensor>(m_context.create_tensor(type, shape, params), m_tensor_name);
    auto proxy_tensor =
        std::make_shared<ov::proxy::RemoteTensor>(m_context->create_tensor(type, shape, params), m_tensor_name);
    return ov::SoPtr<ov::IRemoteTensor>(std::dynamic_pointer_cast<ov::IRemoteTensor>(proxy_tensor), nullptr);
}

std::shared_ptr<ov::ITensor> ov::proxy::RemoteContext::create_host_tensor(const ov::element::Type type,
ov::SoPtr<ov::ITensor> ov::proxy::RemoteContext::create_host_tensor(const ov::element::Type type,
                                                                    const ov::Shape& shape) {
    return m_context._impl->create_host_tensor(type, shape);
    return m_context->create_host_tensor(type, shape);
}

const ov::RemoteContext& ov::proxy::RemoteContext::get_hardware_context(const ov::RemoteContext& context) {
    if (auto proxy_context = std::dynamic_pointer_cast<ov::proxy::RemoteContext>(context._impl)) {
const ov::SoPtr<ov::IRemoteContext>& ov::proxy::RemoteContext::get_hardware_context(
    const ov::SoPtr<ov::IRemoteContext>& context) {
    if (auto proxy_context = std::dynamic_pointer_cast<ov::proxy::RemoteContext>(context._ptr)) {
        return proxy_context->m_context;
    }
    return context;
}

const std::shared_ptr<ov::IRemoteContext>& ov::proxy::RemoteContext::get_hardware_context(
    const std::shared_ptr<ov::IRemoteContext>& context) {
    if (auto proxy_context = std::dynamic_pointer_cast<ov::proxy::RemoteContext>(context)) {
        return proxy_context->m_context._impl;
    }
    return context;
}

const std::shared_ptr<ov::IRemoteContext>& ov::proxy::get_hardware_context(
    const std::shared_ptr<ov::IRemoteContext>& context) {
const ov::SoPtr<ov::IRemoteContext>& ov::proxy::get_hardware_context(const ov::SoPtr<ov::IRemoteContext>& context) {
    return ov::proxy::RemoteContext::get_hardware_context(context);
}
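
Reviewer note: the two get_hardware_context overloads collapse into one SoPtr-based overload. The unwrap shape is worth keeping in mind when touching the proxy plugin; a generic sketch under the assumption of a wrapped() accessor (hypothetical, the real code reads m_context directly):

    #include <memory>

    // Pass-through unwrap: proxy wrappers yield the wrapped hardware object,
    // anything else is returned unchanged.
    template <class SoObj, class Wrapper>
    const SoObj& unwrap(const SoObj& obj) {
        if (auto wrapper = std::dynamic_pointer_cast<Wrapper>(obj._ptr))
            return wrapper->wrapped();  // hypothetical accessor for the inner object
        return obj;
    }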
@ -8,6 +8,7 @@
#include "openvino/runtime/iremote_context.hpp"
#include "openvino/runtime/iremote_tensor.hpp"
#include "openvino/runtime/remote_context.hpp"
#include "openvino/runtime/so_ptr.hpp"

namespace ov {
namespace proxy {

@ -31,7 +32,7 @@ public:
     * In legacy API remote context doesn't contain the index in the name but Blob contains.
     * In 2.0 API Tensor and Context always contain device index
     */
    RemoteContext(ov::RemoteContext&& ctx,
    RemoteContext(ov::SoPtr<ov::IRemoteContext>&& ctx,
                  const std::string& dev_name,
                  size_t dev_index,
                  bool has_index,

@ -40,20 +41,18 @@ public:

    const ov::AnyMap& get_property() const override;

    std::shared_ptr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
    ov::SoPtr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
                                               const ov::Shape& shape,
                                               const ov::AnyMap& params = {}) override;

    std::shared_ptr<ov::ITensor> create_host_tensor(const ov::element::Type type, const ov::Shape& shape) override;
    ov::SoPtr<ov::ITensor> create_host_tensor(const ov::element::Type type, const ov::Shape& shape) override;

    ov::Tensor wrap_tensor(const ov::RemoteTensor& tensor);
    ov::SoPtr<ov::ITensor> wrap_tensor(const ov::SoPtr<ov::ITensor>& tensor);

    static const ov::RemoteContext& get_hardware_context(const ov::RemoteContext& context);
    static const std::shared_ptr<ov::IRemoteContext>& get_hardware_context(
        const std::shared_ptr<ov::IRemoteContext>& context);
    static const ov::SoPtr<ov::IRemoteContext>& get_hardware_context(const ov::SoPtr<ov::IRemoteContext>& context);

private:
    ov::RemoteContext m_context;
    ov::SoPtr<ov::IRemoteContext> m_context;
    std::string m_name;
    std::string m_tensor_name;
@ -9,23 +9,26 @@
#include "openvino/proxy/plugin.hpp"

namespace {
std::shared_ptr<ov::IRemoteTensor> cast_tensor(const std::shared_ptr<ov::ITensor>& tensor) {
    auto rem_tensor = std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor);
std::shared_ptr<ov::IRemoteTensor> cast_tensor(const ov::SoPtr<ov::ITensor>& tensor) {
    auto rem_tensor = std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr);
    OPENVINO_ASSERT(rem_tensor);
    return rem_tensor;
}
} // namespace

ov::proxy::RemoteTensor::RemoteTensor(ov::RemoteTensor&& tensor, const std::string& dev_name)
ov::proxy::RemoteTensor::RemoteTensor(const ov::SoPtr<ov::ITensor>& tensor, const std::string& dev_name)
    : m_name(dev_name),
      m_tensor(std::move(tensor)) {}

ov::proxy::RemoteTensor::RemoteTensor(const ov::RemoteTensor& tensor, const std::string& dev_name)
      m_tensor(tensor) {
    OPENVINO_ASSERT(std::dynamic_pointer_cast<ov::IRemoteTensor>(m_tensor._ptr));
}
ov::proxy::RemoteTensor::RemoteTensor(ov::SoPtr<ov::ITensor>&& tensor, const std::string& dev_name)
    : m_name(dev_name),
      m_tensor(tensor) {}
      m_tensor(std::move(tensor)) {
    OPENVINO_ASSERT(m_tensor);
}

const ov::AnyMap& ov::proxy::RemoteTensor::get_properties() const {
    return cast_tensor(m_tensor._impl)->get_properties();
    return cast_tensor(m_tensor)->get_properties();
}

const std::string& ov::proxy::RemoteTensor::get_device_name() const {

@ -33,36 +36,35 @@ const std::string& ov::proxy::RemoteTensor::get_device_name() const {
}

void ov::proxy::RemoteTensor::set_shape(ov::Shape shape) {
    m_tensor.set_shape(shape);
    m_tensor->set_shape(shape);
}

const ov::element::Type& ov::proxy::RemoteTensor::get_element_type() const {
    return m_tensor.get_element_type();
    return m_tensor->get_element_type();
}

const ov::Shape& ov::proxy::RemoteTensor::get_shape() const {
    return m_tensor.get_shape();
    return m_tensor->get_shape();
}

size_t ov::proxy::RemoteTensor::get_size() const {
    return m_tensor.get_size();
    return m_tensor->get_size();
}

size_t ov::proxy::RemoteTensor::get_byte_size() const {
    return m_tensor.get_byte_size();
    return m_tensor->get_byte_size();
}

const ov::Strides& ov::proxy::RemoteTensor::get_strides() const {
    return cast_tensor(m_tensor._impl)->get_strides();
    return m_tensor->get_strides();
}

const std::shared_ptr<ov::ITensor>& ov::proxy::RemoteTensor::get_hardware_tensor(
    const std::shared_ptr<ov::ITensor>& tensor) {
    if (auto remote_tensor = std::dynamic_pointer_cast<ov::proxy::RemoteTensor>(tensor))
        return remote_tensor->m_tensor._impl;
const ov::SoPtr<ov::ITensor>& ov::proxy::RemoteTensor::get_hardware_tensor(const ov::SoPtr<ov::ITensor>& tensor) {
    if (auto remote_tensor = std::dynamic_pointer_cast<ov::proxy::RemoteTensor>(tensor._ptr))
        return remote_tensor->m_tensor;
    return tensor;
}

const std::shared_ptr<ov::ITensor>& ov::proxy::get_hardware_tensor(const std::shared_ptr<ov::ITensor>& tensor) {
const ov::SoPtr<ov::ITensor>& ov::proxy::get_hardware_tensor(const ov::SoPtr<ov::ITensor>& tensor) {
    return ov::proxy::RemoteTensor::get_hardware_tensor(tensor);
}
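
Reviewer note: with m_tensor stored as ov::SoPtr<ov::ITensor>, most accessors become one-line delegations through operator->, and the IRemoteTensor downcast survives only where remote-specific data (get_properties) is needed. A hedged sketch of the delegation shape (ShapeProxy is illustrative):

    #include "openvino/runtime/itensor.hpp"
    #include "openvino/runtime/so_ptr.hpp"

    struct ShapeProxy {
        ov::SoPtr<ov::ITensor> m_tensor;

        // SoPtr::operator-> forwards straight to the wrapped ITensor.
        const ov::Shape& get_shape() const { return m_tensor->get_shape(); }
        size_t get_byte_size() const { return m_tensor->get_byte_size(); }
    };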
@ -4,6 +4,7 @@
#pragma once

#include <memory>
#include <openvino/runtime/so_ptr.hpp>

#include "openvino/runtime/iremote_tensor.hpp"
#include "openvino/runtime/remote_tensor.hpp"

@ -17,8 +18,8 @@ namespace proxy {
 */
class RemoteTensor : public ov::IRemoteTensor {
public:
    RemoteTensor(ov::RemoteTensor&& ctx, const std::string& dev_name);
    RemoteTensor(const ov::RemoteTensor& ctx, const std::string& dev_name);
    RemoteTensor(const ov::SoPtr<ov::ITensor>& ctx, const std::string& dev_name);
    RemoteTensor(ov::SoPtr<ov::ITensor>&& ctx, const std::string& dev_name);

    const AnyMap& get_properties() const override;
    const std::string& get_device_name() const override;

@ -35,11 +36,11 @@ public:

    const ov::Strides& get_strides() const override;

    static const std::shared_ptr<ov::ITensor>& get_hardware_tensor(const std::shared_ptr<ov::ITensor>& tensor);
    static const ov::SoPtr<ov::ITensor>& get_hardware_tensor(const ov::SoPtr<ov::ITensor>& tensor);

private:
    mutable std::string m_name;
    ov::RemoteTensor m_tensor;
    ov::SoPtr<ov::ITensor> m_tensor;
};

} // namespace proxy
@ -1,52 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <memory>

#include "openvino/runtime/ivariable_state.hpp"

namespace ov {
namespace proxy {
/**
 * @brief Simple wrapper for hardware variable states which holds plugin shared object
 */
class VariableState : public ov::IVariableState {
    std::shared_ptr<ov::IVariableState> m_state;
    std::shared_ptr<void> m_so;

public:
    /**
     * @brief Constructor of proxy VariableState
     *
     * @param state hardware state
     * @param so shared object
     */
    VariableState(const std::shared_ptr<ov::IVariableState>& state, const std::shared_ptr<void>& so)
        : IVariableState(""),
          m_state(state),
          m_so(so) {
        OPENVINO_ASSERT(m_state);
    }
    const std::string& get_name() const override {
        return m_state->get_name();
    }

    void reset() override {
        m_state->reset();
    }

    void set_state(const ov::Tensor& state) override {
        m_state->set_state(state);
    }

    const ov::Tensor& get_state() const override {
        return m_state->get_state();
    }
};

} // namespace proxy
} // namespace ov
@ -18,6 +18,7 @@
#include "openvino/runtime/iplugin.hpp"
#include "openvino/runtime/iremote_context.hpp"
#include "openvino/runtime/iremote_tensor.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/properties.hpp"
#include "openvino/util/file_util.hpp"
#include "openvino/util/shared_object.hpp"

@ -158,7 +159,7 @@ public:
    MockCompiledModel(const std::shared_ptr<const ov::Model>& model,
                      const std::shared_ptr<const ov::IPlugin>& plugin,
                      const ov::AnyMap& config,
                      const ov::RemoteContext& context)
                      const ov::SoPtr<ov::IRemoteContext>& context)
        : ov::ICompiledModel(model, plugin),
          m_config(config),
          m_model(model),

@ -189,7 +190,7 @@ public:
        return m_model;
    }

    ov::RemoteContext get_context() const {
    ov::SoPtr<ov::IRemoteContext> get_context() const {
        return m_context;
    }

@ -201,7 +202,7 @@ private:
    ov::AnyMap m_config;
    std::shared_ptr<const ov::Model> m_model;
    bool m_has_context;
    ov::RemoteContext m_context;
    ov::SoPtr<ov::IRemoteContext> m_context;
};

class MockInferRequest : public ov::ISyncInferRequest {

@ -212,7 +213,7 @@ public:
        m_model = compiled_model->get_model();
        // Allocate input/output tensors
        for (const auto& input : get_inputs()) {
            allocate_tensor(input, [this, input, compiled_model](ov::Tensor& tensor) {
            allocate_tensor(input, [this, input, compiled_model](ov::SoPtr<ov::ITensor>& tensor) {
                // Can add a check to avoid double work in case of shared tensors
                allocate_tensor_impl(tensor,
                                     input.get_element_type(),

@ -222,7 +223,7 @@ public:
            });
        }
        for (const auto& output : get_outputs()) {
            allocate_tensor(output, [this, output, compiled_model](ov::Tensor& tensor) {
            allocate_tensor(output, [this, output, compiled_model](ov::SoPtr<ov::ITensor>& tensor) {
                // Can add a check to avoid double work in case of shared tensors
                allocate_tensor_impl(tensor,
                                     output.get_element_type(),

@ -237,15 +238,15 @@ public:
    void infer() override {
        ov::TensorVector input_tensors;
        for (const auto& input : get_inputs()) {
            input_tensors.emplace_back(get_tensor(input));
            input_tensors.emplace_back(ov::make_tensor(get_tensor(input)));
        }
        ov::TensorVector output_tensors;
        for (const auto& output : get_outputs()) {
            output_tensors.emplace_back(get_tensor(output));
            output_tensors.emplace_back(ov::make_tensor(get_tensor(output)));
        }
        m_model->evaluate(output_tensors, input_tensors);
    }
    std::vector<std::shared_ptr<ov::IVariableState>> query_state() const override {
    std::vector<ov::SoPtr<ov::IVariableState>> query_state() const override {
        OPENVINO_NOT_IMPLEMENTED;
    }
    std::vector<ov::ProfilingInfo> get_profiling_info() const override {
@ -253,19 +254,19 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
void allocate_tensor_impl(ov::Tensor& tensor,
|
||||
void allocate_tensor_impl(ov::SoPtr<ov::ITensor>& tensor,
|
||||
const ov::element::Type& element_type,
|
||||
const ov::Shape& shape,
|
||||
bool has_context,
|
||||
ov::RemoteContext context) {
|
||||
if (!tensor || tensor.get_element_type() != element_type) {
|
||||
ov::SoPtr<ov::IRemoteContext> context) {
|
||||
if (!tensor || tensor->get_element_type() != element_type) {
|
||||
if (has_context) {
|
||||
tensor = context.create_tensor(element_type, shape, {});
|
||||
tensor = context->create_tensor(element_type, shape, {});
|
||||
} else {
|
||||
tensor = ov::Tensor(element_type, shape);
|
||||
tensor = ov::SoPtr<ov::ITensor>(ov::make_tensor(element_type, shape), nullptr);
|
||||
}
|
||||
} else {
|
||||
tensor.set_shape(shape);
|
||||
tensor->set_shape(shape);
|
||||
}
|
||||
}
|
||||
std::shared_ptr<const ov::Model> m_model;
|
||||
@ -318,11 +319,11 @@ public:
|
||||
return m_property;
|
||||
}
|
||||
|
||||
std::shared_ptr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
|
||||
ov::SoPtr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
|
||||
const ov::Shape& shape,
|
||||
const ov::AnyMap& params = {}) override {
|
||||
auto remote_tensor = std::make_shared<MockRemoteTensor>(m_dev_name, m_property);
|
||||
return remote_tensor;
|
||||
return {remote_tensor, nullptr};
|
||||
}
|
||||
};
|
||||
|
||||
@ -340,11 +341,11 @@ public:
|
||||
return m_property;
|
||||
}
|
||||
|
||||
std::shared_ptr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
|
||||
ov::SoPtr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
|
||||
const ov::Shape& shape,
|
||||
const ov::AnyMap& params = {}) override {
|
||||
auto remote_tensor = std::make_shared<MockRemoteTensor>(m_dev_name, m_property);
|
||||
return remote_tensor;
|
||||
return {remote_tensor, nullptr};
|
||||
}
|
||||
};
|
||||
|
||||
@@ -368,7 +369,7 @@ public:

     std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                       const ov::AnyMap& properties,
-                                                      const ov::RemoteContext& context) const override {
+                                                      const ov::SoPtr<ov::IRemoteContext>& context) const override {
         if (!support_model(model, query_model(model, properties)))
             OPENVINO_THROW("Unsupported model");

@@ -383,13 +384,13 @@ public:
         OPENVINO_NOT_IMPLEMENTED;
     }

-    std::shared_ptr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override {
+    ov::SoPtr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override {
         if (remote_properties.find("CUSTOM_CTX") == remote_properties.end())
             return std::make_shared<MockRemoteContext>(get_device_name());
         return std::make_shared<MockCustomRemoteContext>(get_device_name());
     }

-    std::shared_ptr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override {
+    ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override {
         return std::make_shared<MockRemoteContext>(get_device_name());
     }

@@ -424,7 +425,7 @@ public:
     }

     std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
-                                                     const ov::RemoteContext& context,
+                                                     const ov::SoPtr<ov::IRemoteContext>& context,
                                                      const ov::AnyMap& properties) const override {
         std::string xmlString, xmlInOutString;
         ov::Tensor weights;

@@ -8,6 +8,7 @@
 #include <memory>

 #include "openvino/runtime/core.hpp"
+#include "openvino/runtime/iplugin.hpp"

 namespace ov {
 namespace proxy {

@@ -20,7 +20,7 @@
 // ! [compiled_model:ctor]
 ov::template_plugin::CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
                                                   const std::shared_ptr<const ov::IPlugin>& plugin,
-                                                  const ov::RemoteContext& context,
+                                                  const ov::SoPtr<ov::IRemoteContext>& context,
                                                   const std::shared_ptr<ov::threading::ITaskExecutor>& task_executor,
                                                   const Configuration& cfg,
                                                   bool loaded_from_cache)

@@ -25,7 +25,7 @@ class CompiledModel : public ov::ICompiledModel {
 public:
     CompiledModel(const std::shared_ptr<ov::Model>& model,
                   const std::shared_ptr<const ov::IPlugin>& plugin,
-                  const ov::RemoteContext& context,
+                  const ov::SoPtr<ov::IRemoteContext>& context,
                   const std::shared_ptr<ov::threading::ITaskExecutor>& task_executor,
                   const Configuration& cfg,
                   bool loaded_from_cache = false);

@@ -47,14 +47,13 @@ ov::template_plugin::Plugin::~Plugin() {
 // ! [plugin:dtor]

 // ! [plugin:create_context]
-std::shared_ptr<ov::IRemoteContext> ov::template_plugin::Plugin::create_context(
-    const ov::AnyMap& remote_properties) const {
+ov::SoPtr<ov::IRemoteContext> ov::template_plugin::Plugin::create_context(const ov::AnyMap& remote_properties) const {
     return std::make_shared<ov::template_plugin::RemoteContext>();
 }
 // ! [plugin:create_context]

 // ! [plugin:get_default_context]
-std::shared_ptr<ov::IRemoteContext> ov::template_plugin::Plugin::get_default_context(
+ov::SoPtr<ov::IRemoteContext> ov::template_plugin::Plugin::get_default_context(
     const ov::AnyMap& remote_properties) const {
     return std::make_shared<ov::template_plugin::RemoteContext>();
 }
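The bodies still return a bare `std::make_shared<...>()`; the converting constructor leaves `_so` empty, and the core side is free to pin the plugin library to the returned context later. A hedged sketch of that wrapping step — `wrap_context` and `plugin_so` are invented names for illustration, not the actual core code:

#include <memory>

#include "openvino/runtime/iremote_context.hpp"
#include "openvino/runtime/so_ptr.hpp"

// Sketch only: attach the plugin's library handle to a context the plugin
// returned with an empty _so, so the .so outlives every context it produced.
ov::SoPtr<ov::IRemoteContext> wrap_context(ov::SoPtr<ov::IRemoteContext> ctx,
                                           const std::shared_ptr<void>& plugin_so) {
    if (!ctx._so)
        ctx._so = plugin_so;
    return ctx;
}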
@@ -97,7 +96,7 @@ std::shared_ptr<ov::ICompiledModel> ov::template_plugin::Plugin::compile_model(
 std::shared_ptr<ov::ICompiledModel> ov::template_plugin::Plugin::compile_model(
     const std::shared_ptr<const ov::Model>& model,
     const ov::AnyMap& properties,
-    const ov::RemoteContext& context) const {
+    const ov::SoPtr<ov::IRemoteContext>& context) const {
     OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::compile_model");

     auto fullConfig = Configuration{properties, m_cfg};
@@ -124,8 +123,9 @@ std::shared_ptr<ov::ICompiledModel> ov::template_plugin::Plugin::import_model(st
 // ! [plugin:import_model]

 // ! [plugin:import_model_with_remote]
-std::shared_ptr<ov::ICompiledModel> ov::template_plugin::Plugin::import_model(std::istream& model,
-                                                                              const ov::RemoteContext& context,
+std::shared_ptr<ov::ICompiledModel> ov::template_plugin::Plugin::import_model(
+    std::istream& model,
+    const ov::SoPtr<ov::IRemoteContext>& context,
     const ov::AnyMap& properties) const {
     OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::import_model");

@@ -25,20 +25,20 @@ public:

     std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                       const ov::AnyMap& properties,
-                                                      const ov::RemoteContext& context) const override;
+                                                      const ov::SoPtr<ov::IRemoteContext>& context) const override;

     void set_property(const ov::AnyMap& properties) override;

     ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override;

-    std::shared_ptr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override;
+    ov::SoPtr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override;

-    std::shared_ptr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override;
+    ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override;

     std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model, const ov::AnyMap& properties) const override;

     std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
-                                                     const ov::RemoteContext& context,
+                                                     const ov::SoPtr<ov::IRemoteContext>& context,
                                                      const ov::AnyMap& properties) const override;

     ov::SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model,

@@ -149,7 +149,7 @@ const ov::AnyMap& ov::template_plugin::RemoteContext::get_property() const {
 // ! [remote_context:get_property]

 // ! [remote_context:create_tensor]
-std::shared_ptr<ov::IRemoteTensor> ov::template_plugin::RemoteContext::create_tensor(const ov::element::Type& type,
+ov::SoPtr<ov::IRemoteTensor> ov::template_plugin::RemoteContext::create_tensor(const ov::element::Type& type,
                                                                                 const ov::Shape& shape,
                                                                                 const ov::AnyMap& params) {
     std::shared_ptr<ov::IRemoteTensor> tensor;
@@ -199,6 +199,6 @@ std::shared_ptr<ov::IRemoteTensor> ov::template_plugin::RemoteContext::create_te
     default:
         OPENVINO_THROW("Cannot create remote tensor for unsupported type: ", type);
     }
-    return std::make_shared<VectorImpl>(tensor);
+    return {std::make_shared<VectorImpl>(tensor), nullptr};
 }
 // ! [remote_context:create_tensor]

@@ -13,7 +13,7 @@ public:
     RemoteContext();
     const std::string& get_device_name() const override;
     const ov::AnyMap& get_property() const override;
-    std::shared_ptr<IRemoteTensor> create_tensor(const ov::element::Type& type,
+    ov::SoPtr<IRemoteTensor> create_tensor(const ov::element::Type& type,
                                            const ov::Shape& shape,
                                            const ov::AnyMap& params = {}) override;

@@ -14,6 +14,7 @@
 #include "openvino/core/except.hpp"
 #include "openvino/op/util/variable_context.hpp"
 #include "openvino/runtime/ivariable_state.hpp"
+#include "openvino/runtime/make_tensor.hpp"
 #include "openvino/runtime/profiling_info.hpp"
 #include "openvino/runtime/tensor.hpp"
 #include "plugin.hpp"
@@ -24,11 +25,13 @@ using Time = std::chrono::high_resolution_clock;

 namespace {

-void allocate_tensor_impl(ov::Tensor& tensor, const ov::element::Type& element_type, const ov::Shape& shape) {
-    if (!tensor || tensor.get_element_type() != element_type) {
-        tensor = ov::Tensor(element_type, shape);
+void allocate_tensor_impl(ov::SoPtr<ov::ITensor>& tensor,
+                          const ov::element::Type& element_type,
+                          const ov::Shape& shape) {
+    if (!tensor || tensor->get_element_type() != element_type) {
+        tensor = ov::make_tensor(element_type, shape);
     } else {
-        tensor.set_shape(shape);
+        tensor->set_shape(shape);
     }
 }

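The helper now works in pointer semantics: an empty `ov::SoPtr<ov::ITensor>` is falsy, members are reached through `->`, and fresh host memory comes from the `ov::make_tensor` factory added to the includes above. A minimal standalone demonstration of those semantics, assuming only the factory overload used in this hunk:

#include <iostream>

#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/so_ptr.hpp"

int main() {
    ov::SoPtr<ov::ITensor> t;  // empty handle: operator bool() is false
    if (!t)
        t = ov::make_tensor(ov::element::f32, ov::Shape{2, 2});  // host allocation
    t->set_shape(ov::Shape{4, 4});                 // members via ->, not .
    std::cout << t->get_byte_size() << std::endl;  // 4 * 4 * sizeof(float) = 64
    return 0;
}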
||||
@ -61,7 +64,7 @@ ov::template_plugin::InferRequest::InferRequest(const std::shared_ptr<const ov::
|
||||
|
||||
// Allocate input/output tensors
|
||||
for (const auto& input : get_inputs()) {
|
||||
allocate_tensor(input, [input](ov::Tensor& tensor) {
|
||||
allocate_tensor(input, [input](ov::SoPtr<ov::ITensor>& tensor) {
|
||||
// Can add a check to avoid double work in case of shared tensors
|
||||
allocate_tensor_impl(tensor,
|
||||
input.get_element_type(),
|
||||
@ -69,7 +72,7 @@ ov::template_plugin::InferRequest::InferRequest(const std::shared_ptr<const ov::
|
||||
});
|
||||
}
|
||||
for (const auto& output : get_outputs()) {
|
||||
allocate_tensor(output, [output](ov::Tensor& tensor) {
|
||||
allocate_tensor(output, [output](ov::SoPtr<ov::ITensor>& tensor) {
|
||||
// Can add a check to avoid double work in case of shared tensors
|
||||
allocate_tensor_impl(tensor,
|
||||
output.get_element_type(),
|
||||
@@ -86,8 +89,9 @@ ov::template_plugin::InferRequest::InferRequest(const std::shared_ptr<const ov::
             ov::Tensor tensor = ov::Tensor(variable->get_info().data_type, shape);
             variable_context.set_variable_value(variable, std::make_shared<ov::op::util::VariableValue>(tensor));
         }
-        auto state = std::make_shared<VariableState>(variable->get_info().variable_id,
-                                                     variable_context.get_variable_value(variable)->get_state());
+        auto state = std::make_shared<VariableState>(
+            variable->get_info().variable_id,
+            get_tensor_impl(variable_context.get_variable_value(variable)->get_state()));
         m_variable_states.emplace_back(state);
     }
     m_eval_context.emplace("VariableContext", variable_context);
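`get_tensor_impl` is the bridge in the opposite direction from `make_tensor`: it unwraps the public `ov::Tensor` stored in `VariableValue` into the `ov::SoPtr<ov::ITensor>` that the new `VariableState` constructor takes. A round-trip sketch, assuming the single-argument overloads used in this diff (taken here to be declared in `openvino/runtime/make_tensor.hpp`):

#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/tensor.hpp"

void round_trip() {
    ov::Tensor public_tensor(ov::element::f32, ov::Shape{1, 8});
    // Unwrap to the internal interface handle...
    ov::SoPtr<ov::ITensor> impl = ov::get_tensor_impl(public_tensor);
    // ...and wrap it back; both views share the same underlying buffer.
    ov::Tensor again = ov::make_tensor(impl);
}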
@@ -100,7 +104,7 @@ ov::template_plugin::InferRequest::~InferRequest() = default;

 // ! [infer_request:set_tensors_impl]
 void ov::template_plugin::InferRequest::set_tensors_impl(const ov::Output<const ov::Node> port,
-                                                         const std::vector<ov::Tensor>& tensors) {
+                                                         const std::vector<ov::SoPtr<ov::ITensor>>& tensors) {
     for (const auto& input : get_inputs()) {
         if (input == port) {
             m_batched_tensors[input.get_tensor_ptr()] = tensors;
@@ -112,7 +116,7 @@ void ov::template_plugin::InferRequest::set_tensors_impl(const ov::Output<const
 // ! [infer_request:set_tensors_impl]

 // ! [infer_request:query_state]
-std::vector<std::shared_ptr<ov::IVariableState>> ov::template_plugin::InferRequest::query_state() const {
+std::vector<ov::SoPtr<ov::IVariableState>> ov::template_plugin::InferRequest::query_state() const {
     return m_variable_states;
 }
 // ! [infer_request:query_state]
@@ -145,7 +149,7 @@ void ov::template_plugin::InferRequest::infer_preprocess() {
     // Allocate backend tensors
     OPENVINO_ASSERT(get_inputs().size() == m_backend_input_tensors.size());
     for (size_t i = 0; i < get_inputs().size(); i++) {
-        auto tensor = get_tensor(get_inputs()[i]);
+        auto tensor = make_tensor(get_tensor(get_inputs()[i]));
         if (tensor.is<ov::RemoteTensor>()) {
             OPENVINO_ASSERT(tensor.is<ov::template_plugin::VectorTensor>(),
                             "Template plugin supports only VectorTensor with remote context.");
@@ -187,7 +191,7 @@ void ov::template_plugin::InferRequest::infer_preprocess() {
             m_backend_output_tensors[i] = get_template_model()->get_template_plugin()->m_backend->create_tensor();
             continue;
         }
-        auto tensor = get_tensor(get_outputs()[i]);
+        auto tensor = make_tensor(get_tensor(get_outputs()[i]));
         if (tensor.is_continuous() && !tensor.is<ov::RemoteTensor>())
             m_backend_output_tensors[i] =
                 get_template_model()->get_template_plugin()->m_backend->create_tensor(tensor.get_element_type(),
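Wrapping the `ov::SoPtr<ov::ITensor>` back into a public `ov::Tensor` is what keeps the `tensor.is<ov::RemoteTensor>()` and `tensor.is_continuous()` checks compiling unchanged: the type queries live on `ov::Tensor`, not on the `ITensor` interface. The shape of the check, extracted from the hunk above (names as in the surrounding code):

// Inside the infer request: regain the public-tensor API for type queries.
auto tensor = make_tensor(get_tensor(get_outputs()[i]));  // ov::Tensor view
if (tensor.is<ov::RemoteTensor>()) {
    // device-side memory: only the remote path may touch it
} else if (tensor.is_continuous()) {
    // dense host buffer: safe to hand to the backend without a copy
}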
@@ -232,12 +236,13 @@ void ov::template_plugin::InferRequest::infer_postprocess() {
     for (size_t i = 0; i < get_outputs().size(); i++) {
         const auto& result = get_template_model()->m_model->get_results()[i];
         auto host_tensor = m_backend_output_tensors[i];
-        auto tensor = get_tensor(get_outputs()[i]);
+        auto tensor = make_tensor(get_tensor(get_outputs()[i]));
         if (result->get_output_partial_shape(0).is_dynamic()) {
             ov::Output<const ov::Node> output{result->output(0).get_node(), result->output(0).get_index()};
-            allocate_tensor(output, [host_tensor](ov::Tensor& tensor) {
+            allocate_tensor(output, [host_tensor](ov::SoPtr<ov::ITensor>& tensor) {
                 allocate_tensor_impl(tensor, host_tensor.get_element_type(), host_tensor.get_shape());
-                host_tensor.copy_to(tensor);
+                auto ov_tensor = make_tensor(tensor);
+                host_tensor.copy_to(ov_tensor);
             });
         } else if (!tensor.is_continuous()) {
             host_tensor.copy_to(tensor);

@@ -31,7 +31,7 @@ public:
     ~InferRequest();

     void infer() override;
-    std::vector<std::shared_ptr<ov::IVariableState>> query_state() const override;
+    std::vector<ov::SoPtr<ov::IVariableState>> query_state() const override;
     std::vector<ov::ProfilingInfo> get_profiling_info() const override;

     // pipeline methods-stages which are used in async infer request implementation and assigned to particular executor
@@ -41,7 +41,8 @@ public:
     void infer_postprocess();
     void cancel();

-    void set_tensors_impl(const ov::Output<const ov::Node> port, const std::vector<ov::Tensor>& tensors) override;
+    void set_tensors_impl(const ov::Output<const ov::Node> port,
+                          const std::vector<ov::SoPtr<ov::ITensor>>& tensors) override;

 private:
     std::shared_ptr<const CompiledModel> get_template_model() const;
@@ -56,7 +57,7 @@ private:
     std::vector<ov::Tensor> m_backend_output_tensors;
     std::shared_ptr<ov::runtime::Executable> m_executable;
     ov::EvaluationContext m_eval_context;
-    std::vector<std::shared_ptr<ov::IVariableState>> m_variable_states;
+    std::vector<ov::SoPtr<ov::IVariableState>> m_variable_states;
 };
 // ! [infer_request:header]

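With `query_state()` returning `std::vector<ov::SoPtr<ov::IVariableState>>`, each state handle now also pins the plugin library that produced it. A hypothetical caller-side loop — `request` stands for any implementation of this header; `get_name()` and `reset()` are the `ov::IVariableState` accessors:

// Hypothetical caller: list and reset every variable state of a request.
for (auto&& state : request.query_state()) {
    std::cout << state->get_name() << std::endl;  // IVariableState::get_name()
    state->reset();                               // back to the default value
}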
@@ -11,18 +11,18 @@ namespace template_plugin {

 class VariableState : public ov::IVariableState {
 public:
-    VariableState(const std::string& name, const ov::Tensor& tensor) : ov::IVariableState(name) {
+    VariableState(const std::string& name, const ov::SoPtr<ov::ITensor>& tensor) : ov::IVariableState(name) {
         m_state = tensor;
     }
-    void set_state(const ov::Tensor& state) override {
-        OPENVINO_ASSERT(state.get_shape() == m_state.get_shape(), "Wrong tensor shape.");
-        OPENVINO_ASSERT(state.get_element_type() == m_state.get_element_type(), "Wrong tensor type.");
-        OPENVINO_ASSERT(state.get_byte_size() == m_state.get_byte_size(), "Blob size of tensors are not equal.");
-        std::memcpy(m_state.data(), state.data(), state.get_byte_size());
+    void set_state(const ov::SoPtr<ov::ITensor>& state) override {
+        OPENVINO_ASSERT(state->get_shape() == m_state->get_shape(), "Wrong tensor shape.");
+        OPENVINO_ASSERT(state->get_element_type() == m_state->get_element_type(), "Wrong tensor type.");
+        OPENVINO_ASSERT(state->get_byte_size() == m_state->get_byte_size(), "Blob size of tensors are not equal.");
+        std::memcpy(m_state->data(), state->data(), state->get_byte_size());
     }

     void reset() override {
-        std::memset(m_state.data(), 0, m_state.get_byte_size());
+        std::memset(m_state->data(), 0, m_state->get_byte_size());
     }

     ~VariableState() override = default;

@@ -67,7 +67,8 @@ public:
     MOCK_METHOD1(GetDefaultContext, InferenceEngine::RemoteContext::Ptr(const std::string&));

     MOCK_CONST_METHOD0(is_new_api, bool());
-    MOCK_CONST_METHOD2(create_context, ov::RemoteContext(const std::string& deviceName, const ov::AnyMap& params));
+    MOCK_CONST_METHOD2(create_context,
+                       ov::SoPtr<ov::IRemoteContext>(const std::string& deviceName, const ov::AnyMap& params));
     MOCK_CONST_METHOD0(get_available_devices, std::vector<std::string>());
     MOCK_CONST_METHOD3(query_model,
                        ov::SupportedOpsMap(const std::shared_ptr<const ov::Model>&,
@@ -81,7 +82,7 @@ public:
                        const ov::AnyMap&));
     MOCK_CONST_METHOD3(compile_model,
                        ov::SoPtr<ov::ICompiledModel>(const std::shared_ptr<const ov::Model>&,
-                                                     const ov::RemoteContext&,
+                                                     const ov::SoPtr<ov::IRemoteContext>&,
                                                      const ov::AnyMap&));
     MOCK_CONST_METHOD3(compile_model,
                        ov::SoPtr<ov::ICompiledModel>(const std::string&, const std::string&, const ov::AnyMap&));
@@ -90,9 +91,11 @@ public:
         ov::SoPtr<ov::ICompiledModel>(const std::string&, const ov::Tensor&, const std::string&, const ov::AnyMap&));
     MOCK_CONST_METHOD3(read_model, std::shared_ptr<ov::Model>(const std::string&, const ov::Tensor&, bool));
     MOCK_CONST_METHOD2(read_model, std::shared_ptr<ov::Model>(const std::string&, const std::string&));
-    MOCK_CONST_METHOD1(get_default_context, ov::RemoteContext(const std::string&));
+    MOCK_CONST_METHOD1(get_default_context, ov::SoPtr<ov::IRemoteContext>(const std::string&));
     MOCK_CONST_METHOD3(import_model,
-                       ov::SoPtr<ov::ICompiledModel>(std::istream&, const ov::RemoteContext&, const ov::AnyMap&));
+                       ov::SoPtr<ov::ICompiledModel>(std::istream&,
+                                                     const ov::SoPtr<ov::IRemoteContext>&,
+                                                     const ov::AnyMap&));
     MOCK_CONST_METHOD1(device_supports_model_caching, bool(const std::string&));
     MOCK_METHOD2(set_property, void(const std::string& device_name, const ov::AnyMap& properties));

|
@ -52,7 +52,7 @@ public:
|
||||
|
||||
std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
|
||||
const ov::AnyMap& properties,
|
||||
const ov::RemoteContext& context) const override {
|
||||
const ov::SoPtr<ov::IRemoteContext>& context) const override {
|
||||
if (m_plugin)
|
||||
return m_plugin->compile_model(model, properties, context);
|
||||
OPENVINO_NOT_IMPLEMENTED;
|
||||
@ -71,13 +71,13 @@ public:
|
||||
OPENVINO_NOT_IMPLEMENTED;
|
||||
}
|
||||
|
||||
std::shared_ptr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override {
|
||||
ov::SoPtr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override {
|
||||
if (m_plugin)
|
||||
return m_plugin->create_context(remote_properties);
|
||||
OPENVINO_NOT_IMPLEMENTED;
|
||||
}
|
||||
|
||||
std::shared_ptr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override {
|
||||
ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override {
|
||||
if (m_plugin)
|
||||
return m_plugin->get_default_context(remote_properties);
|
||||
OPENVINO_NOT_IMPLEMENTED;
|
||||
@ -90,7 +90,7 @@ public:
|
||||
}
|
||||
|
||||
std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
|
||||
const ov::RemoteContext& context,
|
||||
const ov::SoPtr<ov::IRemoteContext>& context,
|
||||
const ov::AnyMap& properties) const override {
|
||||
if (m_plugin)
|
||||
return m_plugin->import_model(model, context, properties);
|
||||
@ -160,17 +160,17 @@ std::shared_ptr<ov::ICompiledModel> MockPlugin::compile_model(const std::string&
|
||||
|
||||
std::shared_ptr<ov::ICompiledModel> MockPlugin::compile_model(const std::shared_ptr<const ov::Model>& model,
|
||||
const ov::AnyMap& properties,
|
||||
const ov::RemoteContext& context) const {
|
||||
const ov::SoPtr<ov::IRemoteContext>& context) const {
|
||||
set_parameters_if_need();
|
||||
return m_plugin->compile_model(model, properties, context);
|
||||
}
|
||||
|
||||
std::shared_ptr<ov::IRemoteContext> MockPlugin::create_context(const ov::AnyMap& remote_properties) const {
|
||||
ov::SoPtr<ov::IRemoteContext> MockPlugin::create_context(const ov::AnyMap& remote_properties) const {
|
||||
set_parameters_if_need();
|
||||
return m_plugin->create_context(remote_properties);
|
||||
}
|
||||
|
||||
std::shared_ptr<ov::IRemoteContext> MockPlugin::get_default_context(const ov::AnyMap& remote_properties) const {
|
||||
ov::SoPtr<ov::IRemoteContext> MockPlugin::get_default_context(const ov::AnyMap& remote_properties) const {
|
||||
set_parameters_if_need();
|
||||
return m_plugin->get_default_context(remote_properties);
|
||||
}
|
||||
@ -180,7 +180,7 @@ std::shared_ptr<ov::ICompiledModel> MockPlugin::import_model(std::istream& model
|
||||
return m_plugin->import_model(model, properties);
|
||||
}
|
||||
std::shared_ptr<ov::ICompiledModel> MockPlugin::import_model(std::istream& model,
|
||||
const ov::RemoteContext& context,
|
||||
const ov::SoPtr<ov::IRemoteContext>& context,
|
||||
const ov::AnyMap& properties) const {
|
||||
set_parameters_if_need();
|
||||
return m_plugin->import_model(model, context, properties);
|
||||
|
@@ -28,19 +28,19 @@ public:

     std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                       const ov::AnyMap& properties,
-                                                      const ov::RemoteContext& context) const override;
+                                                      const ov::SoPtr<ov::IRemoteContext>& context) const override;

     void set_property(const ov::AnyMap& properties) override;

     ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override;

-    std::shared_ptr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override;
+    ov::SoPtr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override;

-    std::shared_ptr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override;
+    ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override;

     std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model, const ov::AnyMap& properties) const override;
     std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
-                                                     const ov::RemoteContext& context,
+                                                     const ov::SoPtr<ov::IRemoteContext>& context,
                                                      const ov::AnyMap& properties) const override;
     ov::SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model,
                                     const ov::AnyMap& properties) const override;
