Port some changes from proxy branch (#17961)

* Port some changes from proxy branch

* Port test changes

* Rewrite approach for compiled model and tensor

* Fixed review comments
Ilya Churaev 2023-06-09 18:08:53 +04:00 committed by GitHub
parent 21c2b513d1
commit c8e331003f
11 changed files with 77 additions and 32 deletions

View File

@@ -76,6 +76,14 @@ public:
/// @brief Default constructor
Tensor() = default;
/**
* @brief Copy constructor that additionally stores a shared object
*
* @param other Original tensor
* @param so Shared object
*/
Tensor(const Tensor& other, const std::shared_ptr<void>& so);
/// @brief Default copy constructor
/// @param other other Tensor object
Tensor(const Tensor& other) = default;

View File

@@ -32,6 +32,11 @@ Tensor::~Tensor() {
_impl = {};
}
Tensor::Tensor(const Tensor& tensor, const std::shared_ptr<void>& so) : _impl{tensor._impl}, _so{tensor._so} {
OPENVINO_ASSERT(_impl != nullptr, "Tensor was not initialized.");
_so.emplace_back(so);
}
Tensor::Tensor(const std::shared_ptr<ITensor>& impl, const std::vector<std::shared_ptr<void>>& so)
: _impl{impl},
_so{so} {
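For context, a minimal sketch of the pattern this constructor enables (the helper name is an illustrative assumption, not part of this diff): a plugin can hand out a tensor bundled with a handle to its own shared library, so the library cannot be unloaded while the tensor is still alive.

#include <memory>

#include "openvino/runtime/tensor.hpp"

// Hypothetical helper: the returned tensor additionally holds plugin_so,
// so the shared library backing the tensor stays loaded for the tensor's
// whole lifetime (the new constructor emplaces it into the tensor's _so list).
ov::Tensor wrap_with_library(const ov::Tensor& tensor,
                             const std::shared_ptr<void>& plugin_so) {
    return ov::Tensor(tensor, plugin_so);
}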

View File

@@ -83,14 +83,14 @@ public:
*
* @return model outputs
*/
const std::vector<ov::Output<const ov::Node>>& outputs() const;
virtual const std::vector<ov::Output<const ov::Node>>& outputs() const;
/**
* @brief Gets all inputs from compiled model
*
* @return model inputs
*/
const std::vector<ov::Output<const ov::Node>>& inputs() const;
virtual const std::vector<ov::Output<const ov::Node>>& inputs() const;
/**
* @brief Create infer request
@@ -136,6 +136,8 @@ public:
*/
std::shared_ptr<ov::IRemoteContext> get_context() const;
virtual ~ICompiledModel() = default;
private:
std::shared_ptr<const ov::IPlugin> m_plugin;
std::vector<ov::Output<const ov::Node>> m_inputs;
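A rough sketch of what the virtual accessors allow (class and member names here are assumptions; the constructor and the remaining pure virtual methods of ov::ICompiledModel are omitted): a proxy compiled model can forward input/output queries to the model it wraps instead of relying on the base-class m_inputs/m_outputs members.

// Illustrative proxy, not from this commit; other required overrides omitted.
class ProxyCompiledModel : public ov::ICompiledModel {
public:
    const std::vector<ov::Output<const ov::Node>>& outputs() const override {
        return m_wrapped->outputs();  // forward to the wrapped model
    }
    const std::vector<ov::Output<const ov::Node>>& inputs() const override {
        return m_wrapped->inputs();  // forward to the wrapped model
    }

private:
    std::shared_ptr<const ov::ICompiledModel> m_wrapped;
};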

View File

@@ -46,65 +46,65 @@ public:
virtual std::shared_ptr<ov::Model> read_model(const std::string& model_path, const std::string& bin_path) const = 0;
/**
* @brief Creates an executable network from a network object.
* @brief Creates a compiled model from a model object.
*
* Users can create as many networks as they need and use
* Users can create as many models as they need and use
* them simultaneously (up to the limitation of the hardware resources)
*
* @param model OpenVINO Model
* @param device_name Name of device to load network to
* @param device_name Name of device to load model to
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
* @return An executable network reference
* @return A pointer to the compiled model
*/
virtual ov::SoPtr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
const std::string& device_name,
const ov::AnyMap& config = {}) const = 0;
/**
* @brief Creates an executable network from a network object.
* @brief Creates a compiled model from a model object.
*
* Users can create as many networks as they need and use
* Users can create as many models as they need and use
* them simultaneously (up to the limitation of the hardware resources)
*
* @param model OpenVINO Model
* @param context "Remote" (non-CPU) accelerator device-specific execution context to use
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
* @return An executable network reference
* @return A pointer to the compiled model
*/
virtual ov::SoPtr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
const ov::RemoteContext& context,
const ov::AnyMap& config = {}) const = 0;
/**
* @brief Creates an executable network from a model file.
* @brief Creates a compiled model from a model file.
*
* Users can create as many networks as they need and use
* Users can create as many models as they need and use
* them simultaneously (up to the limitation of the hardware resources)
*
* @param model_path Path to model
* @param device_name Name of device to load network to
* @param device_name Name of device to load model to
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
* @return An executable network reference
* @return A pointer to the compiled model
*/
virtual ov::SoPtr<ov::ICompiledModel> compile_model(const std::string& model_path,
const std::string& device_name,
const ov::AnyMap& config) const = 0;
/**
* @brief Creates an executable network from a model memory.
* @brief Creates a compiled model from a model memory.
*
* Users can create as many networks as they need and use
* Users can create as many models as they need and use
* them simultaneously (up to the limitation of the hardware resources)
*
* @param model_str String data of model
* @param weights Model's weights
* @param device_name Name of device to load network to
* @param device_name Name of device to load model to
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
* @return An executable network reference
* @return A pointer to the compiled model
*/
virtual ov::SoPtr<ov::ICompiledModel> compile_model(const std::string& model_str,
const ov::Tensor& weights,
@@ -112,17 +112,29 @@ public:
const ov::AnyMap& config) const = 0;
/**
* @brief Creates an executable network from a previously exported network
* @brief Creates a compiled model from a previously exported model
* @param model Model stream
* @param device_name Name of device load executable network on
* @param device_name Name of device to load the compiled model on
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
* @return An executable network reference
* @return A pointer to the compiled model
*/
virtual ov::SoPtr<ov::ICompiledModel> import_model(std::istream& model,
const std::string& device_name,
const ov::AnyMap& config = {}) const = 0;
/**
* @brief Creates a compiled model from a previously exported model
* @param modelStream Model stream
* @param context Remote context
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
* @return A pointer to the compiled model
*/
virtual ov::SoPtr<ov::ICompiledModel> import_model(std::istream& modelStream,
const ov::RemoteContext& context,
const ov::AnyMap& config = {}) const = 0;
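A minimal usage sketch of the new context-aware import path as seen from the public API (the device name and blob path are illustrative assumptions):

#include <fstream>

#include "openvino/runtime/core.hpp"

int main() {
    ov::Core core;
    // Remote context of the target device; "GPU" is only an example.
    auto context = core.get_default_context("GPU");
    // Stream over a previously exported compiled model.
    std::ifstream blob("model.blob", std::ios::binary);
    // Dispatches to the ICore::import_model overload declared above.
    auto compiled = core.import_model(blob, context, {});
    return 0;
}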
/**
* @brief Query device if it supports specified network with specified configuration
*

View File

@@ -234,7 +234,7 @@ public:
*/
const std::shared_ptr<ov::threading::ExecutorManager>& get_executor_manager() const;
~IPlugin() = default;
virtual ~IPlugin() = default;
protected:
IPlugin();
@@ -294,8 +294,6 @@ constexpr static const auto create_plugin_function = OV_PP_TOSTRING(OV_CREATE_PL
try { \
plugin = ::std::make_shared<PluginType>(__VA_ARGS__); \
plugin->set_version(version); \
} catch (const InferenceEngine::Exception& ex) { \
OPENVINO_THROW(ex.what()); \
} catch (const std::exception& ex) { \
OPENVINO_THROW(ex.what()); \
} \
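A short note on the destructor change above: with virtual ~IPlugin(), destroying a concrete plugin through a base-class pointer now runs the derived destructor first. A sketch, where PluginType stands for any class derived from ov::IPlugin:

// Deleting through the base pointer is only well-defined because the
// base destructor is virtual; before this change it was undefined behavior.
ov::IPlugin* plugin = new PluginType();
delete plugin;  // runs ~PluginType(), then ~IPlugin()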

View File

@@ -21,6 +21,8 @@ namespace ov {
class OPENVINO_RUNTIME_API IRemoteContext : public std::enable_shared_from_this<IRemoteContext> {
public:
virtual ~IRemoteContext() = default;
/**
* @brief Returns name of a device on which underlying object is allocated.
* Abstract method.

View File

@@ -221,9 +221,8 @@ CompiledModel Core::import_model(std::istream& modelStream, const std::string& d
CompiledModel Core::import_model(std::istream& modelStream, const RemoteContext& context, const AnyMap& config) {
OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model");
auto parsed = parseDeviceNameIntoConfig(context.get_device_name(), config);
OV_CORE_CALL_STATEMENT({
auto exec = _impl->get_plugin(parsed._deviceName).import_model(modelStream, context, parsed._config);
auto exec = _impl->import_model(modelStream, context, config);
return {exec._ptr, exec._so};
});
}

View File

@@ -597,7 +597,8 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model_with_preprocess(ov::Pl
const ov::AnyMap& config) const {
std::shared_ptr<const ov::Model> preprocessed_model = model;
if (!is_new_api() && !std::dynamic_pointer_cast<InferenceEngine::IPluginWrapper>(plugin.m_ptr)) {
if (!is_new_api() && !std::dynamic_pointer_cast<InferenceEngine::IPluginWrapper>(plugin.m_ptr) &&
!is_virtual_device(plugin.get_name())) {
ov::pass::Manager manager;
manager.register_pass<ov::pass::AddPreprocessing>();
@@ -680,6 +681,19 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::import_model(std::istream& model,
return compiled_model;
}
ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::import_model(std::istream& modelStream,
const ov::RemoteContext& context,
const ov::AnyMap& config) const {
OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model");
auto parsed = parseDeviceNameIntoConfig(context.get_device_name(), config);
auto compiled_model = get_plugin(parsed._deviceName).import_model(modelStream, parsed._config);
if (auto wrapper = std::dynamic_pointer_cast<InferenceEngine::ICompiledModelWrapper>(compiled_model._ptr)) {
wrapper->get_executable_network()->loadedFromCache();
}
return compiled_model;
}
ov::SupportedOpsMap ov::CoreImpl::query_model(const std::shared_ptr<const ov::Model>& model,
const std::string& device_name,
const ov::AnyMap& config) const {

View File

@@ -219,8 +219,8 @@ public:
void register_plugins_in_registry(const std::string& xml_config_file, const bool& by_abs_path = false);
std::shared_ptr<const ov::Model> apply_auto_batching(const std::shared_ptr<const ov::Model>& model,
std::string& deviceName,
ov::AnyMap& config) const;
std::string& deviceName,
ov::AnyMap& config) const;
/*
* @brief Register plugins according to the build configuration
@@ -383,6 +383,10 @@ public:
const std::string& device_name = {},
const ov::AnyMap& config = {}) const override;
ov::SoPtr<ov::ICompiledModel> import_model(std::istream& modelStream,
const ov::RemoteContext& context,
const ov::AnyMap& config) const override;
ov::SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model,
const std::string& device_name,
const ov::AnyMap& config) const override;

View File

@@ -62,8 +62,7 @@ public:
MOCK_CONST_METHOD1(DeviceSupportsModelCaching, bool(const std::string&)); // NOLINT not a cast to bool
MOCK_METHOD2(GetSupportedConfig,
std::map<std::string, std::string>(const std::string&, const std::map<std::string, std::string>&));
MOCK_CONST_METHOD2(get_supported_property,
ov::AnyMap(const std::string&, const ov::AnyMap&));
MOCK_CONST_METHOD2(get_supported_property, ov::AnyMap(const std::string&, const ov::AnyMap&));
MOCK_CONST_METHOD0(isNewAPI, bool());
MOCK_METHOD1(GetDefaultContext, InferenceEngine::RemoteContext::Ptr(const std::string&));
@@ -92,6 +91,8 @@ public:
MOCK_CONST_METHOD3(read_model, std::shared_ptr<ov::Model>(const std::string&, const ov::Tensor&, bool));
MOCK_CONST_METHOD2(read_model, std::shared_ptr<ov::Model>(const std::string&, const std::string&));
MOCK_CONST_METHOD1(get_default_context, ov::RemoteContext(const std::string&));
MOCK_CONST_METHOD3(import_model,
ov::SoPtr<ov::ICompiledModel>(std::istream&, const ov::RemoteContext&, const ov::AnyMap&));
~MockICore() = default;
};

View File

@@ -21,9 +21,9 @@
#include "openvino/runtime/iplugin.hpp"
class MockInternalPlugin : public ov::IPlugin {
ov::IPlugin* m_plugin;
ov::IPlugin* m_plugin = nullptr;
std::shared_ptr<ov::IPlugin> m_converted_plugin;
InferenceEngine::IInferencePlugin* m_old_plugin;
InferenceEngine::IInferencePlugin* m_old_plugin = nullptr;
ov::AnyMap config;
public: