Reviewed header files for new APIs (#9873)

* Reviewed header files for new APIs

* Update compiled_model.hpp

* Resolved conflicts

* Implemented review comments

* Fixed code style issues

* Fixed code style issues

* Fixed code style issues

Co-authored-by: Alexander Zhogov <alexander.zhogov@intel.com>
Anastasiya Ageeva 2022-02-10 17:12:18 +03:00 committed by GitHub
parent 9af8d9339c
commit 87f8ff5918
11 changed files with 460 additions and 461 deletions


@ -3,7 +3,7 @@
//
/**
* @brief This is a header file for the OpenVINO Runtime Components
* @brief A header file for the OpenVINO Runtime Components.
*
* @file openvino/runtime/runtime.hpp
*/


@ -3,7 +3,7 @@
//
/**
* @brief This is a header file for the OpenVINO Runtime common aliases and data types
* @brief A header file for the OpenVINO Runtime common aliases and data types.
*
* @file openvino/runtime/common.hpp
*/
@ -30,7 +30,7 @@
/**
* @def OPENVINO_PLUGIN_API
* @brief Defines OpenVINO Runtime Plugin API method
* @brief Defines the OpenVINO Runtime Plugin API method.
*/
#ifdef IMPLEMENT_INFERENCE_ENGINE_PLUGIN


@ -3,7 +3,7 @@
//
/**
* @brief A header file that provides CompiledModel class
* @brief A header file that provides the CompiledModel class.
*
* @file openvino/runtime/compiled_model.hpp
*/
@ -31,8 +31,8 @@ class Core;
class InferRequest;
/**
* @brief This class represents compiled model
* Model is compiled by a specific device by applying multiple optimization
* @brief This class represents a compiled model.
* A model is compiled by a specific device by applying multiple optimization
* transformations, then mapping to compute kernels.
*/
class OPENVINO_RUNTIME_API CompiledModel {
@ -40,10 +40,10 @@ class OPENVINO_RUNTIME_API CompiledModel {
std::shared_ptr<void> _so;
/**
* @brief Constructs CompiledModel from the initialized std::shared_ptr
* @param impl Initialized shared pointer
* @param so Plugin to use. This is required to ensure that CompiledModel can work properly even if plugin
* object is destroyed.
* @brief Constructs CompiledModel from the initialized std::shared_ptr.
* @param impl Initialized shared pointer.
* @param so Plugin to use. This parameter is required to ensure that CompiledModel can work properly even if a
* plugin object is destroyed.
*/
CompiledModel(const std::shared_ptr<InferenceEngine::IExecutableNetworkInternal>& impl,
const std::shared_ptr<void>& so);
@ -54,99 +54,99 @@ class OPENVINO_RUNTIME_API CompiledModel {
public:
/**
* @brief A default constructor.
* @brief Default constructor.
*/
CompiledModel() = default;
/**
* @brief Destructor preserves unloading order of implementation object and reference to library
* @brief Destructor that preserves the unloading order of the implementation object and the reference to the library.
*/
~CompiledModel();
/**
* @brief Get runtime model information from a device
* This object represents the internal device specific model which is optimized for particular
* accelerator. It contains device specific nodes, runtime information and can be used only
* to understand how the source model is optimized and which kernels, element types and layouts
* @brief Gets runtime model information from a device.
* This object represents an internal device-specific model that is optimized for a particular
* accelerator. It contains device-specific nodes, runtime information and can be used only
* to understand how the source model is optimized and which kernels, element types, and layouts
* are selected for optimal inference.
*
* @return Model containing Executable Graph Info
* @return A model containing Executable Graph Info.
*/
std::shared_ptr<const Model> get_runtime_model() const;
/**
* @brief Gets all inputs of a compiled model
* Inputs are represented as a vector of outputs of ov::op::v0::Parameter operations.
* They contain information about input tensors such as tensor shape, names and element type
* @return std::vector of model inputs
* @brief Gets all inputs of a compiled model.
* Inputs are represented as a vector of outputs of the ov::op::v0::Parameter operations.
* They contain information about input tensors such as tensor shape, names, and element type.
* @return std::vector of model inputs.
*/
std::vector<ov::Output<const ov::Node>> inputs() const;
/**
* @brief Gets a single input of a compiled model
* An input is represented as an output of ov::op::v0::Parameter operation.
* An input contain information about input tensor such as tensor shape, names and element type
* @return A compiled model input
* @note If a model has more than one input, this method throws an ov::Exception
* @brief Gets a single input of a compiled model.
* The input is represented as an output of the ov::op::v0::Parameter operation.
* The input contains information about input tensor such as tensor shape, names, and element type.
* @return Compiled model input.
* @note If a model has more than one input, this method throws ov::Exception.
*/
ov::Output<const ov::Node> input() const;
/**
* @brief Gets input of a compiled model identified by an @p i
* An input contains information about input tensor such as tensor shape, names and element type
* @param i An input index
* @return A compiled model input
* @note The method throws ov::Exception if input with specified index @p i is not found
* @brief Gets input of a compiled model identified by @p i.
* The input contains information about input tensor such as tensor shape, names, and element type.
* @param i Index of input.
* @return Compiled model input.
* @note The method throws ov::Exception if input with the specified index @p i is not found.
*/
ov::Output<const ov::Node> input(size_t i) const;
/**
* @brief Gets input of a compiled model identified by a @p tensor_name
* An input contain information about input tensor such as tensor shape, names and element type
* @param tensor_name The input tensor name
* @return A compiled model input
* @note The method throws ov::Exception if input with specified tensor name @p tensor_name is not found
* @brief Gets input of a compiled model identified by @p tensor_name.
* The input contains information about input tensor such as tensor shape, names, and element type.
* @param tensor_name The input tensor name.
* @return Compiled model input.
* @note The method throws ov::Exception if input with the specified tensor name @p tensor_name is not found.
*/
ov::Output<const ov::Node> input(const std::string& tensor_name) const;
/**
* @brief Get all outputs of a compiled model
* Outputs are represented as a vector of output from ov::op::v0::Result operations.
* Outputs contain information about output tensors such as tensor shape, names and element type
* @return std::vector of model outputs
* @brief Gets all outputs of a compiled model.
* Outputs are represented as a vector of outputs of the ov::op::v0::Result operations.
* Outputs contain information about output tensors such as tensor shape, names, and element type.
* @return std::vector of model outputs.
*/
std::vector<ov::Output<const ov::Node>> outputs() const;
/**
* @brief Gets a single output of a compiled model
* An output is represented as an output from ov::op::v0::Result operation.
* An output contain information about output tensor such as tensor shape, names and element type
* @return A compiled model output
* @note If a model has more than one output, this method throws an ov::Exception
* @brief Gets a single output of a compiled model.
* The output is represented as an output from the ov::op::v0::Result operation.
* The output contains information about output tensor such as tensor shape, names, and element type.
* @return Compiled model output.
* @note If a model has more than one output, this method throws ov::Exception.
*/
ov::Output<const ov::Node> output() const;
/**
* @brief Gets output of a compiled model identified by an @p index
* An output contain information about output tensor such as tensor shape, names and element type
* @param i An output index
* @return A compiled model output
* @note The method throws ov::Exception if output with specified index @p index is not found
* @brief Gets output of a compiled model identified by @p i.
* The output contains information about output tensor such as tensor shape, names, and element type.
* @param i Index of the output.
* @return Compiled model output.
* @note The method throws ov::Exception if output with the specified index @p i is not found.
*/
ov::Output<const ov::Node> output(size_t i) const;
/**
* @brief Gets output of a compiled model identified by a @p tensor_name
* An output contain information about output tensor such as tensor shape, names and element type
* @param tensor_name The output tensor name
* @return A compiled model output
* @note The method throws ov::Exception if output with specified tensor name @p tensor_name is not found
* @brief Gets output of a compiled model identified by @p tensor_name.
* The output contains information about output tensor such as tensor shape, names, and element type.
* @param tensor_name Output tensor name.
* @return Compiled model output.
* @note The method throws ov::Exception if output with the specified tensor name @p tensor_name is not found.
*/
ov::Output<const ov::Node> output(const std::string& tensor_name) const;
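Taken together, these accessors let client code inspect a compiled model's I/O without keeping the source ov::Model around. A minimal sketch, assuming an IR model at `model.xml` and a CPU device (both placeholder values):

```cpp
#include <iostream>
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    ov::CompiledModel compiled = core.compile_model("model.xml", "CPU");

    // Enumerate inputs and outputs with their tensor names and element types.
    for (const auto& in : compiled.inputs())
        std::cout << "in:  " << in.get_any_name() << " : " << in.get_element_type() << "\n";
    for (const auto& out : compiled.outputs())
        std::cout << "out: " << out.get_any_name() << " : " << out.get_element_type() << "\n";
}
```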
/**
* @brief Creates an inference request object used to infer the compiled model.
* The created request has allocated input and output tensors (that can be changed later).
* The created request has allocated input and output tensors (which can be changed later).
*
* @return InferRequest object
*/
@ -154,25 +154,24 @@ public:
/**
* @brief Exports the current compiled model to an output stream `std::ostream`.
* The exported model can also be imported via ov::Core::import_model method
* The exported model can also be imported via the ov::Core::import_model method.
* @see ov::Core::import_model
* @param model_stream Output stream to store the model to
* @param model_stream Output stream to store the model to.
*/
void export_model(std::ostream& model_stream);
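The export/import pair makes it possible to cache a compiled blob and skip recompilation on the next run. A sketch of the round trip; the blob file name, model path, and device are placeholders:

```cpp
#include <fstream>
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    ov::CompiledModel compiled = core.compile_model("model.xml", "CPU");

    {   // Serialize the compiled model to disk.
        std::ofstream blob("model.blob", std::ios::binary);
        compiled.export_model(blob);
    }

    // Later, or in another process: import instead of recompiling.
    std::ifstream blob("model.blob", std::ios::binary);
    ov::CompiledModel restored = core.import_model(blob, "CPU");
}
```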
/**
* @brief Sets properties for current compiled model
* @brief Sets properties for the current compiled model.
*
* @param properties Map of pairs: (property name, property value)
* @param properties Map of pairs: (property name, property value).
*/
void set_property(const AnyMap& properties);
/**
* @brief Sets properties for current compiled model
* @brief Sets properties for the current compiled model.
*
* @tparam Properties Should be the pack of `std::pair<std::string, ov::Any>` types
* @param properties Optional pack of pairs: (property name, property value)
* @return nothing
* @tparam Properties Should be the pack of `std::pair<std::string, ov::Any>` types.
* @param properties Optional pack of pairs: (property name, property value).
*/
template <typename... Properties>
util::EnableIfAllStringAny<void, Properties...> set_property(Properties&&... properties) {
@ -181,24 +180,24 @@ public:
/** @brief Gets properties for the current compiled model
*
* The method is responsible to extract information
* which affects compiled model inference. The list of supported configuration values can be extracted via
* The method is responsible for extracting information
* that affects compiled model inference. The list of supported configuration values can be extracted via
* CompiledModel::get_property with the ov::supported_properties key, but some of these keys cannot be changed
* dynamically, e.g. ov::device::id cannot changed if a compiled model has already been compiled for particular
* device.
* dynamically, for example, ov::device::id cannot be changed if a compiled model has already been compiled for a
* particular device.
*
* @param name property key, can be found in openvino/runtime/properties.hpp
* @return Property value
* @param name Property key, can be found in openvino/runtime/properties.hpp.
* @return Property value.
*/
Any get_property(const std::string& name) const;
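For example, a typical use is sizing an infer-request pool from the compiled model itself. A sketch, assuming the ov::supported_properties and ov::optimal_number_of_infer_requests keys from openvino/runtime/properties.hpp:

```cpp
#include <openvino/runtime/core.hpp>
#include <openvino/runtime/properties.hpp>

// 'compiled' is an already initialized ov::CompiledModel.
uint32_t pool_size(const ov::CompiledModel& compiled) {
    // Keys the plugin reports as supported for this compiled model.
    auto keys = compiled.get_property(ov::supported_properties);
    (void)keys;
    // Typed overload: returns uint32_t directly, no ov::Any casting.
    return compiled.get_property(ov::optimal_number_of_infer_requests);
}
```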
/**
* @brief Gets properties dedicated to device behaviour.
* @brief Gets properties related to device behaviour.
*
* The method is targeted to extract information which can be set via set_property method.
* The method extracts information that can be set via the set_property method.
*
* @tparam T - type of returned value
* @param property - property object.
* @tparam T Type of a returned value.
* @param property Property object.
* @return Value of property.
*/
template <typename T, PropertyMutability mutability>
@ -210,20 +209,20 @@ public:
/**
* @brief Returns pointer to device-specific shared context
* on remote accelerator device that was used to create this CompiledModel
* @return A context
* on a remote accelerator device that was used to create this CompiledModel.
* @return A context.
*/
RemoteContext get_context() const;
/**
* @brief Checks if current CompiledModel object is not initialized
* @return `true` if current CompiledModel object is not initialized, `false` - otherwise
* @brief Checks if the current CompiledModel object is not initialized.
* @return `true` if the current CompiledModel object is not initialized; `false`, otherwise.
*/
bool operator!() const noexcept;
/**
* @brief Checks if current CompiledModel object is initialized
* @return `true` if current CompiledModel object is initialized, `false` - otherwise
* @brief Checks if the current CompiledModel object is initialized.
* @return `true` if the current CompiledModel object is initialized; `false`, otherwise.
*/
explicit operator bool() const noexcept;
};


@ -3,7 +3,7 @@
//
/**
* @brief This is a header file for the OpenVINO Runtime Core class C++ API
* @brief A header file for the OpenVINO Runtime Core class C++ API.
*
* @file openvino/runtime/core.hpp
*/
@ -33,7 +33,7 @@ class IExtension;
namespace ov {
/**
* @brief This class represents OpenVINO runtime Core entity.
* @brief This class represents an OpenVINO runtime Core entity.
* User applications can create several Core class instances, but in this case the underlying plugins
* are created multiple times and not shared between several Core instances. The recommended way is to have
* a single Core instance per application.
@ -45,82 +45,82 @@ class OPENVINO_RUNTIME_API Core {
void get_property(const std::string& device_name, const std::string& name, const AnyMap& arguments, Any& to) const;
public:
/** @brief Constructs OpenVINO Core instance using XML configuration file with
/** @brief Constructs an OpenVINO Core instance using the XML configuration file with
* devices and their plugins description.
*
* See Core::register_plugins for more details.
*
* @param xml_config_file A path to .xml file with plugins to load from. If XML configuration file is not specified,
* then default OpenVINO Runtime plugins are loaded from the default `plugin.xml` file located in the same folder
* as OpenVINO runtime shared library.
* @param xml_config_file Path to the .xml file with plugins to load from. If the XML configuration file is not
* specified, default OpenVINO Runtime plugins are loaded from the default `plugin.xml` file located in the same
* folder as OpenVINO runtime shared library.
*/
explicit Core(const std::string& xml_config_file = {});
/**
* @brief Returns device plugins version information
* Device name can be complex and identify multiple devices at once like `HETERO:CPU,GPU`
* and in this case a std::map contains multiple entries each per device.
* @brief Returns device plugins version information.
* Device name can be complex and identify multiple devices at once like `HETERO:CPU,GPU`;
* in this case, std::map contains multiple entries, one per device.
*
* @param device_name Device name to identify a plugin
* @return A vector of versions
* @param device_name Device name to identify a plugin.
* @return A vector of versions.
*/
std::map<std::string, Version> get_versions(const std::string& device_name) const;
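A short usage sketch; it assumes ov::Version exposes `description` and `buildNumber` fields, as in openvino/core/version.hpp:

```cpp
#include <iostream>
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    // A composite device name yields one map entry per underlying device.
    for (const auto& entry : core.get_versions("HETERO:CPU,GPU"))
        std::cout << entry.first << ": " << entry.second.description << " ("
                  << entry.second.buildNumber << ")\n";
}
```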
#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT
/**
* @brief Reads models from IR / ONNX / PDPD formats
* @param model_path A path to a model
* @param bin_path A path to a data file
* @brief Reads models from IR/ONNX/PDPD formats.
* @param model_path Path to a model.
* @param bin_path Path to a data file.
* For IR format (*.bin):
* * if path is empty, will try to read bin file with the same name as xml and
* * if bin file with the same name was not found, will load IR without weights.
* * if path is empty, will try to read a bin file with the same name as xml and
* * if the bin file with the same name is not found, will load IR without weights.
* For ONNX format (*.onnx):
* * bin_path parameter is not used.
* * the bin_path parameter is not used.
* For PDPD format (*.pdmodel)
* * bin_path parameter is not used.
* @return A model
* * the bin_path parameter is not used.
* @return A model.
*/
std::shared_ptr<ov::Model> read_model(const std::wstring& model_path, const std::wstring& bin_path = {}) const;
#endif
/**
* @brief Reads models from IR / ONNX / PDPD formats
* @param model_path A path to a model
* @param bin_path A path to a data file
* @brief Reads models from IR/ONNX/PDPD formats.
* @param model_path Path to a model.
* @param bin_path Path to a data file.
* For IR format (*.bin):
* * if path is empty, will try to read bin file with the same name as xml and
* * if bin file with the same name was not found, will load IR without weights.
* * if path is empty, will try to read a bin file with the same name as xml and
* * if the bin file with the same name is not found, will load IR without weights.
* For ONNX format (*.onnx):
* * bin_path parameter is not used.
* * the bin_path parameter is not used.
* For PDPD format (*.pdmodel)
* * bin_path parameter is not used.
* @return A model
* * the bin_path parameter is not used.
* @return A model.
*/
std::shared_ptr<ov::Model> read_model(const std::string& model_path, const std::string& bin_path = {}) const;
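A sketch of the path-based overload; `model.xml` and `model.onnx` are placeholder paths:

```cpp
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    // IR: an empty bin_path means "model.bin" next to the xml is picked up, if present.
    std::shared_ptr<ov::Model> ir = core.read_model("model.xml");
    // ONNX: the bin_path argument is not used.
    std::shared_ptr<ov::Model> onnx = core.read_model("model.onnx");
}
```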
/**
* @brief Reads models from IR / ONNX / PDPD formats
* @param model A string with model in IR / ONNX / PDPD format
* @param weights A shared pointer to constant tensor with weights
* Reading ONNX / PDPD models doesn't support loading weights from @p weights tensors.
* @note Created model object shares the weights with @p weights object.
* So, do not create @p weights on temporary data which can be later freed, since the model
* constant data becomes point to an invalid memory.
* @return A model
* @brief Reads models from IR/ONNX/PDPD formats.
* @param model String with a model in IR/ONNX/PDPD format.
* @param weights Shared pointer to a constant tensor with weights.
* Reading ONNX/PDPD models does not support loading weights from the @p weights tensors.
* @note Created model object shares the weights with the @p weights object.
* Thus, do not create @p weights on temporary data that can be freed later, since the model
* constant data will point to invalid memory.
* @return A model.
*/
std::shared_ptr<ov::Model> read_model(const std::string& model, const Tensor& weights) const;
/**
* @brief Creates and loads a compiled model from a source model to the default OpenVINO device selected by AUTO
* @brief Creates and loads a compiled model from a source model to the default OpenVINO device selected by the AUTO
* plugin.
*
* Users can create as many compiled models as they need and use
* them simultaneously (up to the limitation of the hardware resources)
* them simultaneously (up to the limitation of the hardware resources).
*
* @param model Model object acquired from Core::read_model
* @param model Model object acquired from Core::read_model.
* @param properties Optional map of pairs: (property name, property value) relevant only for this load
* operation
* @return A compiled model
* operation.
* @return A compiled model.
*/
CompiledModel compile_model(const std::shared_ptr<const ov::Model>& model, const AnyMap& properties = {});
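A sketch of the device-less overload; the AUTO plugin picks the device, and the load-time property shown (ov::hint::performance_mode) is assumed from openvino/runtime/properties.hpp:

```cpp
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // placeholder path

    // No device name given: the AUTO plugin selects one.
    // The property applies to this load operation only.
    ov::CompiledModel compiled = core.compile_model(
        model, ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));
}
```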
@ -149,13 +149,13 @@ public:
* @brief Creates a compiled model from a source model object.
*
* Users can create as many compiled models as they need and use
* them simultaneously (up to the limitation of the hardware resources)
* them simultaneously (up to the limitation of the hardware resources).
*
* @param model Model object acquired from Core::read_model
* @param device_name Name of device to load model to
* @param model Model object acquired from Core::read_model.
* @param device_name Name of a device to load a model to.
* @param properties Optional map of pairs: (property name, property value) relevant only for this load
* operation
* @return A compiled model
* operation.
* @return A compiled model.
*/
CompiledModel compile_model(const std::shared_ptr<const ov::Model>& model,
const std::string& device_name,
@ -182,17 +182,17 @@ public:
}
/**
* @brief Reads and loads a compiled model from IR / ONNX / PDPD file to the default OpenVINI device selected by
* @brief Reads and loads a compiled model from the IR/ONNX/PDPD file to the default OpenVINO device selected by the
* AUTO plugin.
*
* This can be more efficient than using Core::read_model + Core::compile_model(model_in_memory_object) flow
* especially for cases when caching is enabled and cached model is available
* This can be more efficient than using the Core::read_model + Core::compile_model(model_in_memory_object) flow,
* especially for cases when caching is enabled and a cached model is available.
*
* @param model_path path to model
* @param model_path Path to a model.
* @param properties Optional map of pairs: (property name, property value) relevant only for this load
* operation/
* operation.
*
* @return A compiled model
* @return A compiled model.
*/
CompiledModel compile_model(const std::string& model_path, const AnyMap& properties = {});
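When a cache directory is configured, this path-based overload can load a previously cached blob instead of recompiling. A sketch, assuming ov::cache_dir is available and the plugin supports caching:

```cpp
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    // Subsequent runs may reuse the cached blob rather than compile again.
    core.set_property(ov::cache_dir("model_cache"));
    ov::CompiledModel compiled = core.compile_model("model.xml");  // AUTO device
}
```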
@ -217,35 +217,35 @@ public:
}
/**
* @brief Reads model and creates a compiled model from IR / ONNX / PDPD file
* @brief Reads a model and creates a compiled model from the IR/ONNX/PDPD file.
*
* This can be more efficient than using Core::read_model + Core::compile_model(model_in_memory_object) flow
* especially for cases when caching is enabled and cached model is available
* This can be more efficient than using the Core::read_model + Core::compile_model(model_in_memory_object) flow,
* especially for cases when caching is enabled and a cached model is available.
*
* @param model_path Path to a model
* @param device_name Name of device to load a model to
* @param model_path Path to a model.
* @param device_name Name of a device to load a model to.
* @param properties Optional map of pairs: (property name, property value) relevant only for this load
* operation/
* operation.
*
* @return A compiled model
* @return A compiled model.
*/
CompiledModel compile_model(const std::string& model_path,
const std::string& device_name,
const AnyMap& properties = {});
/**
* @brief Reads model and creates a compiled model from IR / ONNX / PDPD file
* @brief Reads a model and creates a compiled model from the IR/ONNX/PDPD file.
*
* This can be more efficient than using read_model + compile_model(Model) flow
* especially for cases when caching is enabled and cached model is available
* especially for cases when caching is enabled and cached model is available.
*
* @tparam Properties Should be the pack of `std::pair<std::string, ov::Any>` types
* @param model_path path to model
* @param device_name Name of device to load model to
* @tparam Properties Should be a pack of `std::pair<std::string, ov::Any>` types.
* @param model_path Path to a model.
* @param device_name Name of a device to load a model to.
* @param properties Optional pack of pairs: (property name, property value) relevant only for this
* load operation
* load operation.
*
* @return A compiled model
* @return A compiled model.
*/
template <typename... Properties>
util::EnableIfAllStringAny<CompiledModel, Properties...> compile_model(const std::string& model_path,
@ -256,11 +256,11 @@ public:
/**
* @brief Creates a compiled model from a source model within a specified remote context.
* @param model Model object acquired from Core::read_model
* @param context A reference to a RemoteContext object
* @param model Model object acquired from Core::read_model.
* @param context A reference to a RemoteContext object.
* @param properties Optional map of pairs: (property name, property value) relevant only for this load
* operation
* @return A compiled model object
* operation.
* @return A compiled model object.
*/
CompiledModel compile_model(const std::shared_ptr<const ov::Model>& model,
const RemoteContext& context,
@ -284,42 +284,42 @@ public:
}
/**
* @deprecated This method is deprecated. Please use other Core::add_extension methods
* @brief Registers OpenVINO 1.0 extension to a Core object
* @param extension Pointer to already loaded extension
* @deprecated This method is deprecated. Please use other Core::add_extension methods.
* @brief Registers OpenVINO 1.0 extension to a Core object.
* @param extension Pointer to the already loaded extension.
*/
OPENVINO_DEPRECATED("Please use add_extension(ov::Extension) or add_extension(path_to_library) instead.")
void add_extension(const std::shared_ptr<InferenceEngine::IExtension>& extension);
/**
* @brief Registers an extension to a Core object
* @param library_path Path to library with ov::Extension
* @brief Registers an extension to a Core object.
* @param library_path Path to the library with ov::Extension.
*/
void add_extension(const std::string& library_path);
#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT
/**
* @brief Registers an extension to a Core object
* @param library_path Unicode path to library with ov::Extension
* @brief Registers an extension to a Core object.
* @param library_path Unicode path to the library with ov::Extension.
*/
void add_extension(const std::wstring& library_path);
#endif
/**
* @brief Registers an extension to a Core object
* @param extension Pointer to extension
* @brief Registers an extension to a Core object.
* @param extension Pointer to the extension.
*/
void add_extension(const std::shared_ptr<ov::Extension>& extension);
/**
* @brief Registers extensions to a Core object
* @param extensions Vector of loaded extensions
* @brief Registers extensions to a Core object.
* @param extensions Vector of loaded extensions.
*/
void add_extension(const std::vector<std::shared_ptr<ov::Extension>>& extensions);
/**
* @brief Registers an extension to a Core object
* @param extension Extension class which is inherited from ov::Extension class
* @brief Registers an extension to a Core object.
* @param extension Extension class that is inherited from the ov::Extension class.
*/
template <class T, typename std::enable_if<std::is_base_of<ov::Extension, T>::value, bool>::type = true>
void add_extension(const T& extension) {
@ -328,9 +328,9 @@ public:
}
/**
* @brief Registers extensions to a Core object
* @param extension Extension class which is inherited from ov::Extension class
* @param args A list of extensions
* @brief Registers extensions to a Core object.
* @param extension Extension class that is inherited from the ov::Extension class.
* @param args A list of extensions.
*/
template <class T,
class... Targs,
@ -342,7 +342,7 @@ public:
}
/**
* @brief Registers a custom operation inherited from ov::op::Op
* @brief Registers a custom operation inherited from ov::op::Op.
*/
template <class T, typename std::enable_if<std::is_base_of<ov::op::Op, T>::value, bool>::type = true>
void add_extension() {
@ -351,7 +351,7 @@ public:
}
/**
* @brief Registers custom operations inherited from ov::op::Op
* @brief Registers custom operations inherited from ov::op::Op.
*/
template <class T,
class... Targs,
@ -364,28 +364,28 @@ public:
}
/**
* @brief Imports a compiled model from a previously exported one
* @param model_stream std::istream input stream containing a model previously exported using
* ov::CompiledModel::export_model method
* @param device_name Name of device to import compiled model for. Note, if @p device_name device was not used to
* compile the original mode, an exception is thrown
* @brief Imports a compiled model from the previously exported one.
* @param model_stream std::istream input stream containing a model previously exported using the
* ov::CompiledModel::export_model method.
* @param device_name Name of a device to import a compiled model for. Note, if @p device_name device was not used
* to compile the original model, an exception is thrown.
* @param properties Optional map of pairs: (property name, property value) relevant only for this load
* operation*
* @return A compiled model
* operation.
* @return A compiled model.
*/
CompiledModel import_model(std::istream& model_stream,
const std::string& device_name,
const AnyMap& properties = {});
/**
* @brief Imports a compiled model from a previously exported one
* @tparam Properties Should be the pack of `std::pair<std::string, ov::Any>` types
* @param model_stream Model stream
* @param device_name Name of device to import compiled model for. Note, if @p device_name device was not used to
* compile the original mode, an exception is thrown
* @brief Imports a compiled model from the previously exported one.
* @tparam Properties Should be the pack of `std::pair<std::string, ov::Any>` types.
* @param model_stream Model stream.
* @param device_name Name of a device to import a compiled model for. Note, if @p device_name device was not used
* to compile the original model, an exception is thrown.
* @param properties Optional pack of pairs: (property name, property value) relevant only for this
* load operation
* @return A compiled model
* load operation.
* @return A compiled model.
*/
template <typename... Properties>
util::EnableIfAllStringAny<CompiledModel, Properties...> import_model(std::istream& model_stream,
@ -395,25 +395,25 @@ public:
}
/**
* @brief Imports a compiled model from a previously exported one with a specified remote context.
* @brief Imports a compiled model from the previously exported one with the specified remote context.
* @param model_stream std::istream input stream containing a model previously exported from
* ov::CompiledModel::export_model
* @param context A reference to a RemoteContext object. Note, if the device from @p context was not used to compile
* the original mode, an exception is thrown
* the original model, an exception is thrown.
* @param properties Optional map of pairs: (property name, property value) relevant only for this load
* operation
* @return A compiled model
* operation.
* @return A compiled model.
*/
CompiledModel import_model(std::istream& model_stream, const RemoteContext& context, const AnyMap& properties = {});
/**
* @brief Imports a compiled model from a previously exported one with a specified remote context.
* @tparam Properties Should be the pack of `std::pair<std::string, ov::Any>` types
* @param model_stream Model stream
* @param context Pointer to RemoteContext object
* @brief Imports a compiled model from the previously exported one with the specified remote context.
* @tparam Properties Should be the pack of `std::pair<std::string, ov::Any>` types.
* @param model_stream Model stream.
* @param context Pointer to a RemoteContext object.
* @param properties Optional pack of pairs: (property name, property value) relevant only for this
* load operation
* @return A compiled model
* load operation.
* @return A compiled model.
*/
template <typename... Properties>
util::EnableIfAllStringAny<CompiledModel, Properties...> import_model(std::istream& model_stream,
@ -423,26 +423,26 @@ public:
}
/**
* @brief Query device if it supports specified model with specified properties
* @brief Query device if it supports the specified model with specified properties.
*
* @param device_name A name of a device to query
* @param model Model object to query
* @param properties Optional map of pairs: (property name, property value)
* @return An object containing a map of pairs a operation name -> a device name supporting this operation.
* @param device_name Name of a device to query.
* @param model Model object to query.
* @param properties Optional map of pairs: (property name, property value).
* @return An object containing a map of pairs: an operation name -> a device name supporting this operation.
*/
SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model,
const std::string& device_name,
const AnyMap& properties = {}) const;
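A sketch showing how the returned map is typically consumed, for example to check whether a model can run fully on one device:

```cpp
#include <iostream>
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // placeholder path

    // Operation friendly name -> device that supports it.
    ov::SupportedOpsMap supported = core.query_model(model, "CPU");
    for (const auto& entry : supported)
        std::cout << entry.first << " -> " << entry.second << "\n";
}
```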
/**
* @brief Query device if it supports specified model with specified properties
* @brief Queries a device if it supports the specified model with specified properties.
*
* @tparam Properties Should be the pack of `std::pair<std::string, ov::Any>` types
* @param device_name A name of a device to query
* @param model Model object to query
* @tparam Properties Should be the pack of `std::pair<std::string, ov::Any>` types.
* @param device_name Name of a device to query.
* @param model Model object to query.
* @param properties Optional pack of pairs: (property name, property value) relevant only for this
* query operation
* @return An object containing a map of pairs a operation name -> a device name supporting this operation.
* query operation.
* @return An object containing a map of pairs: an operation name -> a device name supporting this operation.
*/
template <typename... Properties>
util::EnableIfAllStringAny<SupportedOpsMap, Properties...> query_model(
@ -454,19 +454,18 @@ public:
/**
* @brief Sets properties for all the
* registered devices, acceptable keys can be found in openvino/runtime/properties.hpp
* registered devices, acceptable keys can be found in openvino/runtime/properties.hpp.
*
* @param properties Map of pairs: (property name, property value)
* @param properties Map of pairs: (property name, property value).
*/
void set_property(const AnyMap& properties);
/**
* @brief Sets properties for all the
* registered devices, acceptable keys can be found in openvino/runtime/properties.hpp
* registered devices, acceptable keys can be found in openvino/runtime/properties.hpp.
*
* @tparam Properties Should be the pack of `std::pair<std::string, ov::Any>` types
* @param properties Optional pack of pairs: (property name, property value)
* @return nothing
* @tparam Properties Should be a pack of `std::pair<std::string, ov::Any>` types.
* @param properties Optional pack of pairs: (property name, property value).
*/
template <typename... Properties>
util::EnableIfAllStringAny<void, Properties...> set_property(Properties&&... properties) {
@ -474,21 +473,20 @@ public:
}
/**
* @brief Sets properties for device, acceptable keys can be found in openvino/runtime/properties.hpp
* @brief Sets properties for a device, acceptable keys can be found in openvino/runtime/properties.hpp.
*
* @param device_name An name of a device.
* @param device_name Name of a device.
*
* @param properties Map of pairs: (property name, property value)
* @param properties Map of pairs: (property name, property value).
*/
void set_property(const std::string& device_name, const AnyMap& properties);
/**
* @brief Sets properties for device, acceptable keys can be found in openvino/runtime/properties.hpp
* @brief Sets properties for a device, acceptable keys can be found in openvino/runtime/properties.hpp.
*
* @tparam Properties Should be the pack of `std::pair<std::string, ov::Any>` types
* @param device_name An name of a device.
* @param properties Optional pack of pairs: (property name, property value)
* @return nothing
* @tparam Properties Should be the pack of `std::pair<std::string, ov::Any>` types.
* @param device_name Name of a device.
* @param properties Optional pack of pairs: (property name, property value).
*/
template <typename... Properties>
util::EnableIfAllStringAny<void, Properties...> set_property(const std::string& device_name,
@ -497,38 +495,38 @@ public:
}
/**
* @brief Gets properties dedicated to device behaviour.
* @brief Gets properties related to device behaviour.
*
* The method is targeted to extract information which can be set via set_property method.
* The method extracts information that can be set via the set_property method.
*
* @param device_name - A name of a device to get a properties value.
* @param name - property name.
* @return Value of property corresponding to property name.
* @param device_name Name of a device to get a property value.
* @param name Property name.
* @return Value of a property corresponding to the property name.
*/
Any get_property(const std::string& device_name, const std::string& name) const;
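A combined set/get sketch; ov::enable_profiling and ov::device::full_name are assumed from openvino/runtime/properties.hpp:

```cpp
#include <iostream>
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    // Mutable property: turn on per-layer profiling for the CPU plugin.
    core.set_property("CPU", ov::enable_profiling(true));

    // Typed read: the property object carries the return type (std::string here).
    std::string full_name = core.get_property("CPU", ov::device::full_name);
    std::cout << full_name << "\n";
}
```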
/**
* @brief Gets properties dedicated to device behaviour.
* @brief Gets properties related to device behaviour.
*
* The method is targeted to extract information which can be set via set_property method.
* The method extracts information that can be set via the set_property method.
*
* @param device_name - A name of a device to get a properties value.
* @param name - property name.
* @param arguments - additional arguments to get property
* @return Value of property corresponding to property name.
* @param device_name Name of a device to get a property value.
* @param name Property name.
* @param arguments Additional arguments to get a property.
* @return Value of a property corresponding to the property name.
*/
Any get_property(const std::string& device_name, const std::string& name, const AnyMap& arguments) const;
/**
* @brief Gets properties dedicated to device behaviour.
* @brief Gets properties related to device behaviour.
*
* The method is needed to request common device or system properties.
* It can be device name, temperature, other devices-specific values.
* It can be device name, temperature, and other device-specific values.
*
* @tparam T - type of returned value
* @tparam M - property mutability
* @param deviceName - A name of a device to get a properties value.
* @param property - property object.
* @tparam T Type of a returned value.
* @tparam M Property mutability.
* @param deviceName Name of a device to get a property value.
* @param property Property object.
* @return Property value.
*/
template <typename T, PropertyMutability M>
@ -539,16 +537,16 @@ public:
}
/**
* @brief Gets properties dedicated to device behaviour.
* @brief Gets properties related to device behaviour.
*
* The method is needed to request common device or system properties.
* It can be device name, temperature, and other device-specific values.
*
* @tparam T - type of returned value
* @tparam M - property mutability
* @param deviceName - A name of a device to get a properties value.
* @param property - property object.
* @param arguments - additional arguments to get property
* @tparam T Type of a returned value.
* @tparam M Property mutability.
* @param deviceName Name of a device to get a property value.
* @param property Property object.
* @param arguments Additional arguments to get a property.
* @return Property value.
*/
template <typename T, PropertyMutability M>
@ -559,17 +557,17 @@ public:
}
/**
* @brief Gets properties dedicated to device behaviour.
* @brief Gets properties related to device behaviour.
*
* The method is needed to request common device or system properties.
* It can be device name, temperature, and other device-specific values.
*
* @tparam T - type of returned value
* @tparam M - property mutability
* @tparam Args - set of additional arguments ended with property object variable
* @param deviceName - A name of a device to get a properties value.
* @param property - property object.
* @param args - Optional pack of pairs: (argument name, argument value) ended with property object
* @tparam T Type of a returned value.
* @tparam M Property mutability.
* @tparam Args Set of additional arguments ended with property object variable.
* @param deviceName Name of a device to get a property value.
* @param property Property object.
* @param args Optional pack of pairs: (argument name, argument value) ended with property object.
* @return Property value.
*/
template <typename T, PropertyMutability M, typename... Args>
@ -582,44 +580,44 @@ public:
}
/**
* @brief Returns devices available for inference
* Core objects goes over all registered plugins and asks about available devices.
* @brief Returns devices available for inference.
* Core objects go over all registered plugins and ask about available devices.
*
* @return A vector of devices. The devices are returned as { CPU, GPU.0, GPU.1, MYRIAD }
* If there more than one device of specific type, they are enumerated with .# suffix.
* @return A vector of devices. The devices are returned as { CPU, GPU.0, GPU.1, MYRIAD }.
* If there is more than one device of a specific type, they are enumerated with the .# suffix.
* Such enumerated device can later be used as a device name in all Core methods like Core::compile_model,
* Core::query_model, Core::set_property and so on.
*/
std::vector<std::string> get_available_devices() const;
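Usage is a one-liner loop:

```cpp
#include <iostream>
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    for (const std::string& device : core.get_available_devices())
        std::cout << device << "\n";  // e.g. CPU, GPU.0, GPU.1
}
```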
/**
* @brief Register a new device and plugin which enable this device inside OpenVINO Runtime.
* @brief Register a new device and plugin that enables this device inside OpenVINO Runtime.
*
* @param plugin_name A name of plugin. Depending on platform `plugin_name` is wrapped with shared library suffix
* and prefix to identify library full name
* E.g. on Linux platform plugin name specified as `plugin_name` will be wrapped as `libplugin_name.so`.
* @param plugin_name Name of a plugin. Depending on platform, `plugin_name` is wrapped with shared library suffix
* and prefix to identify library full name.
* For example, on Linux platform, plugin name specified as `plugin_name` will be wrapped as `libplugin_name.so`.
* Plugin search algorithm:
* - If plugin is located in the same directory as OpenVINO runtime library, it will be used
* - If no, plugin is tried to be loaded from paths pointed by PATH / LD_LIBRARY_PATH / DYLD_LIBRARY_PATH
* - If plugin is located in the same directory as OpenVINO runtime library, it will be used.
* - If not, the plugin is loaded from the paths specified by the PATH/LD_LIBRARY_PATH/DYLD_LIBRARY_PATH
* environment variables depending on the platform.
*
* @param device_name A device name to register plugin for.
* @param device_name Device name to register a plugin for.
*/
void register_plugin(const std::string& plugin_name, const std::string& device_name);
/**
* @brief Unloads the previously loaded plugin identified by @p device_name from OpenVINO Runtime
* @brief Unloads the previously loaded plugin identified by @p device_name from OpenVINO Runtime.
* The method is needed to remove loaded plugin instance and free its resources. If plugin for a
* specified device has not been created before, the method throws an exception.
* @note This method does not remove plugin from the plugins known to OpenVINO Core object.
* @param device_name A device name identifying plugin to remove from OpenVINO Runtime
* @param device_name Device name identifying plugin to remove from OpenVINO Runtime.
*/
void unload_plugin(const std::string& device_name);
/** @brief Registers a device plugin to OpenVINO Runtime Core instance using XML configuration file with
/** @brief Registers a device plugin to the OpenVINO Runtime Core instance using an XML configuration file with
* plugins description.
*
* XML file has the following structure:
* The XML file has the following structure:
*
* ```xml
* <ie>
@ -636,9 +634,9 @@ public:
* </ie>
* ```
*
* - `name` identifies name of device enabled by a plugin
* - `name` identifies name of a device enabled by a plugin.
* - `location` specifies absolute path to dynamic library with a plugin.
* A path can also be relative to inference engine shared library. It allows to have common config
* The path can also be relative to the inference engine shared library. It allows having a common config
* for different systems with different configurations.
* - `properties` are set to a plugin via the ov::Core::set_property method.
* - `extensions` are set to a plugin via the ov::Core::add_extension method.
@ -648,16 +646,16 @@ public:
void register_plugins(const std::string& xml_config_file);
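Registering is then a single call; `my_plugins.xml` is a placeholder for a file with the structure shown above:

```cpp
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    core.register_plugins("my_plugins.xml");
}
```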
/**
* @brief Create a new remote shared context object on specified accelerator device
* using specified plugin-specific low level device API parameters (device handle, pointer, context, etc.)
* @param device_name A name of a device to create a new shared context on.
* @brief Creates a new remote shared context object on the specified accelerator device
* using specified plugin-specific low-level device API parameters (device handle, pointer, context, etc.).
* @param device_name Name of a device to create a new shared context on.
* @param properties Map of device-specific shared context properties.
* @return A reference to a created remote context.
* @return Reference to a created remote context.
*/
RemoteContext create_context(const std::string& device_name, const AnyMap& properties);
/**
* @brief Create a new shared context object on specified accelerator device
* @brief Creates a new shared context object on specified accelerator device
* using specified plugin-specific low level device API properties (device handle, pointer, etc.)
* @tparam Properties Should be the pack of `std::pair<std::string, ov::Any>` types
* @param device_name Name of a device to create new shared context on.
@ -671,9 +669,9 @@ public:
}
/**
* @brief Get a pointer to default (plugin-supplied) shared context object for specified accelerator device.
* @param device_name A name of a device to get a default shared context from.
* @return A reference to a default remote context.
* @brief Gets a pointer to default (plugin-supplied) shared context object for the specified accelerator device.
* @param device_name Name of a device to get a default shared context from.
* @return Reference to a default remote context.
*/
RemoteContext get_default_context(const std::string& device_name);
};


@ -10,14 +10,14 @@
namespace ov {
/**
* @brief Thrown in case of cancel;ed asynchronous operation
* @brief Thrown in case of cancelled asynchronous operation.
*/
class OPENVINO_RUNTIME_API Cancelled : public Exception {
using Exception::Exception;
};
/**
* @brief Thrown in case of calling InferRequest methods while the request is busy with compute operation.
* @brief Thrown in case of calling the InferRequest methods while the request is busy with compute operation.
*/
class OPENVINO_RUNTIME_API Busy : public Exception {
using Exception::Exception;


@ -28,72 +28,72 @@ namespace ov {
class CompiledModel;
/**
* @brief This is a class of infer request which can be run in asynchronous or synchronous manners.
* @brief This is a class of infer request that can be run in asynchronous or synchronous manners.
*/
class OPENVINO_RUNTIME_API InferRequest {
std::shared_ptr<InferenceEngine::IInferRequestInternal> _impl;
std::shared_ptr<void> _so;
/**
* @brief Constructs InferRequest from the initialized std::shared_ptr
* @param impl Initialized shared pointer
* @param so Plugin to use. This is required to ensure that InferRequest can work properly even if plugin object is
* destroyed.
* @brief Constructs InferRequest from the initialized std::shared_ptr.
* @param impl Initialized shared pointer.
* @param so Plugin to use. This is required to ensure that InferRequest can work properly even if a plugin object
* is destroyed.
*/
InferRequest(const std::shared_ptr<InferenceEngine::IInferRequestInternal>& impl, const std::shared_ptr<void>& so);
friend class ov::CompiledModel;
public:
/**
* @brief Default constructor
* @brief Default constructor.
*/
InferRequest() = default;
/**
* @brief Default copy constructor
* @param other other InferRequest object
* @brief Default copy constructor.
* @param other Another InferRequest object.
*/
InferRequest(const InferRequest& other) = default;
/**
* @brief Default copy assignment operator
* @param other Another InferRequest object
* @return A reference to the current object
* @brief Default copy assignment operator.
* @param other Another InferRequest object.
* @return Reference to the current object.
*/
InferRequest& operator=(const InferRequest& other) = default;
/**
* @brief Default move constructor
* @param other other InferRequest object
* @brief Default move constructor.
* @param other Another InferRequest object.
*/
InferRequest(InferRequest&& other) = default;
/**
* @brief Default move assignment operator
* @param other other InferRequest object
* @return reference to the current object
* @brief Default move assignment operator.
* @param other Another InferRequest object.
* @return Reference to the current object.
*/
InferRequest& operator=(InferRequest&& other) = default;
/**
* @brief Destructor preserves unloading order of implementation object and reference to library
* @note To preserve destruction order inside default generated assignment operator we store `_impl` before `_so`.
* And use destructor to remove implementation object before reference to library explicitly
* @brief Destructor that preserves the unloading order of the implementation object and the reference to the library.
* @note To preserve destruction order inside the default generated assignment operator, `_impl` is stored before
* `_so`. The destructor explicitly removes the implementation object before the reference to the library.
*/
~InferRequest();
/**
* @brief Sets input/output tensor to infer on
* @brief Sets an input/output tensor to infer on.
*
* @param tensor_name Name of input or output tensor.
* @param tensor Reference to a tensor. The element_type and shape of a tensor must match
* @param tensor_name Name of the input or output tensor.
* @param tensor Reference to the tensor. The element_type and shape of the tensor must match
* the model's input/output element_type and size.
*/
void set_tensor(const std::string& tensor_name, const Tensor& tensor);
/**
* @brief Sets input/output tensor to infer
* @param port Port of input or output tensor. Note, that the ports get from the following methods can be used:
* @brief Sets an input/output tensor to infer.
* @param port Port of the input or output tensor. Use the following methods to get the ports:
* - ov::Model::input()
* - ov::Model::inputs()
* - ov::Model::outputs()
@ -108,8 +108,8 @@ public:
void set_tensor(const ov::Output<const ov::Node>& port, const Tensor& tensor);
/**
* @brief Sets input/output tensor to infer
* @param port Port of input or output tensor. Note, that the ports get from the following methods can be used:
* @brief Sets an input/output tensor to infer.
* @param port Port of the input or output tensor. Use the following methods to get the ports:
* - ov::Model::input()
* - ov::Model::inputs()
* - ov::Model::outputs()
@ -124,219 +124,220 @@ public:
void set_tensor(const ov::Output<ov::Node>& port, const Tensor& tensor);
/**
* @brief Sets batch of tensors for input data to infer by tensor name
* Model input shall have batch dimension and number of @p tensors shall match with batch size
* Current version supports set tensors to model inputs only. In case if @p tensor_name is associated
* with output (or any other non-input node) - an exception will be thrown
* @brief Sets a batch of tensors for input data to infer by tensor name.
* Model input must have batch dimension, and the number of @p tensors must match the batch size.
* The current version supports setting tensors to model inputs only. If @p tensor_name is associated
* with an output (or any other non-input node), an exception is thrown.
*
* @param tensor_name Name of input tensor.
* @param tensor_name Name of the input tensor.
* @param tensors Input tensors for batched infer request. The type of each tensor must match the model
* input element type and shape (except batch dimension). Total size of tensors shall match with input's size
* input element type and shape (except batch dimension). Total size of tensors must match the input size.
*/
void set_tensors(const std::string& tensor_name, const std::vector<Tensor>& tensors);
/**
* @brief Sets batch of tensors for input data to infer by input port
* Model input shall have batch dimension and number of @p tensors shall match with batch size
* Current version supports set tensors to model inputs only. In case if @p port is associated
* with output (or any other non-input node) - an exception will be thrown
* @brief Sets a batch of tensors for input data to infer by input port.
* Model input must have batch dimension, and the number of @p tensors must match the batch size.
* The current version supports setting tensors to model inputs only. If @p port is associated
* with an output (or any other non-input node), an exception is thrown.
*
* @param port Port of input tensor.
* @param port Port of the input tensor.
* @param tensors Input tensors for batched infer request. The type of each tensor must match the model
* input element type and shape (except batch dimension). Total size of tensors shall match with input's size
* input element type and shape (except batch dimension). Total size of tensors must match the input size.
*/
void set_tensors(const ov::Output<const ov::Node>& port, const std::vector<Tensor>& tensors);
/**
* @brief Sets input tensor to infer
* @brief Sets an input tensor to infer.
*
* @param idx Index of input tensor. If @p idx is greater than number of model inputs, an exception is thrown
* @param tensor Reference to a tensor. The element_type and shape of a tensor must match
* @param idx Index of the input tensor. If @p idx is greater than the number of model inputs, an exception
* is thrown.
* @param tensor Reference to the tensor. The element_type and shape of the tensor must match
* the model's input/output element_type and size.
*/
void set_input_tensor(size_t idx, const Tensor& tensor);
/**
* @brief Sets input tensor to infer models with single input
* @note If model has several inputs, an exception is thrown
* @param tensor Reference to input tensor.
* @brief Sets an input tensor to infer models with single input.
* @note If model has several inputs, an exception is thrown.
* @param tensor Reference to the input tensor.
*/
void set_input_tensor(const Tensor& tensor);
/**
* @brief Sets batch of tensors for single input data
* Model input shall have batch dimension and number of @p tensors shall match with batch size
* @brief Sets a batch of tensors for single input data.
* Model input must have batch dimension, and the number of @p tensors must match the batch size.
*
* @param tensors Input tensors for batched infer request. The type of each tensor must match the model
* input element type and shape (except batch dimension). Total size of tensors shall match with input's size
* input element type and shape (except batch dimension). Total size of tensors must match the input size.
*/
void set_input_tensors(const std::vector<Tensor>& tensors);
/**
* @brief Sets batch of tensors for input data to infer by input name
* Model input shall have batch dimension and number of @p tensors shall match with batch size
* @brief Sets a batch of tensors for input data to infer by input name.
* Model input must have batch dimension, and the number of @p tensors must match the batch size.
*
* @param idx Name of input tensor.
* @param idx Index of the input tensor.
* @param tensors Input tensors for batched infer request. The type of each tensor must match the model
* input element type and shape (except batch dimension). Total size of tensors shall match with input's size
* input element type and shape (except batch dimension). Total size of tensors must match the input size.
*/
void set_input_tensors(size_t idx, const std::vector<Tensor>& tensors);
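A sketch of the batched form, assuming a model whose single input is f32 with shape [4, 3, 224, 224]; each per-sample tensor carries batch dimension 1:

```cpp
#include <vector>
#include <openvino/runtime/core.hpp>

// 'request' was created from a model with input shape [4, 3, 224, 224] (assumed).
void feed_batch(ov::InferRequest& request) {
    std::vector<ov::Tensor> samples;
    for (size_t i = 0; i < 4; ++i)
        samples.emplace_back(ov::element::f32, ov::Shape{1, 3, 224, 224});
    // One tensor per batch element; their total size must match the input size.
    request.set_input_tensors(samples);
}
```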
/**
* @brief Sets output tensor to infer
* @note An index of input preserved accross ov::Model, ov::CompiledModel and ov::InferRequest
* @param idx Index of output tensor.
* @param tensor Reference to output tensor. The type of a tensor must match the model output element type and
* @brief Sets an output tensor to infer.
* @note Index of the input is preserved across ov::Model, ov::CompiledModel, and ov::InferRequest.
* @param idx Index of the output tensor.
* @param tensor Reference to the output tensor. The type of the tensor must match the model output element type and
* shape.
*/
void set_output_tensor(size_t idx, const Tensor& tensor);
/**
* @brief Sets output tensor to infer models with single output
* @brief Sets an output tensor to infer models with single output.
* @note If model has several outputs, an exception is thrown.
* @param tensor Reference to output tensor.
* @param tensor Reference to the output tensor.
*/
void set_output_tensor(const Tensor& tensor);
/**
* @brief Gets input/output tensor for inference by tensor name
* @param tensor_name A name of tensor to get
* @return A Tensor with a name @p tensor_name. If a tensor is not found, an exception is thrown.
* @brief Gets an input/output tensor for inference by tensor name.
* @param tensor_name Name of a tensor to get.
* @return The tensor with name @p tensor_name. If the tensor is not found, an exception is thrown.
*/
Tensor get_tensor(const std::string& tensor_name);
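/**
* A minimal sketch, assuming the model declares a tensor named "data" (the name is a placeholder):
* @code
* ov::Tensor by_name = request.get_tensor("data");  // throws if no tensor is named "data"
* ov::Shape shape = by_name.get_shape();
* @endcode
*/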
/**
* @brief Gets an input/output tensor for inference.
* @note If the tensor with the specified @p port is not found, an exception is thrown.
* @param port Port of the tensor to get.
* @return Tensor for the port @p port.
*/
Tensor get_tensor(const ov::Output<const ov::Node>& port);
/**
* @brief Gets an input/output tensor for inference.
* @note If the tensor with the specified @p port is not found, an exception is thrown.
* @param port Port of the tensor to get.
* @return Tensor for the port @p port.
*/
Tensor get_tensor(const ov::Output<ov::Node>& port);
/**
* @brief Gets an input tensor for inference.
*
* @param idx Index of the tensor to get.
* @return Tensor with the input index @p idx. If the tensor with the specified @p idx is not found, an exception
* is thrown.
*/
Tensor get_input_tensor(size_t idx);
/**
* @brief Gets an input tensor for inference.
*
* @return The input tensor for the model. If the model has several inputs, an exception is thrown.
*/
Tensor get_input_tensor();
/**
* @brief Gets an output tensor for inference.
*
* @param idx Index of the tensor to get.
* @return Tensor with the output index @p idx. If the tensor with the specified @p idx is not found, an exception
* is thrown.
*/
Tensor get_output_tensor(size_t idx);
/**
* @brief Gets an output tensor for inference.
*
* @return The output tensor for the model. If the model has several outputs, an exception is thrown.
*/
Tensor get_output_tensor();
/**
* @brief Infers specified input(s) in synchronous mode.
* @note It blocks all methods of InferRequest while the request is ongoing (running or waiting in a queue).
* Calling any method leads to throwing the ov::Busy exception.
*/
void infer();
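/**
* A synchronous round trip, continuing the single-input sketch above and assuming one model output:
* @code
* request.infer();                                  // blocks until results are ready
* ov::Tensor output = request.get_output_tensor();  // throws if the model has several outputs
* float* scores = output.data<float>();
* @endcode
*/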
/**
* @brief Cancels inference request.
*/
void cancel();
/**
* @brief Queries performance measures per layer to identify the most time-consuming operation.
* @note Not all plugins provide meaningful data.
* @return Vector of profiling information for operations in a model.
*/
std::vector<ProfilingInfo> get_profiling_info() const;
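/**
* A sketch of reading the measurements; it assumes the model was compiled with profiling enabled
* (for example, via the ov::enable_profiling property) and that the plugin reports meaningful data.
* @code
* #include <iostream>
*
* for (const ov::ProfilingInfo& info : request.get_profiling_info()) {
*     if (info.status == ov::ProfilingInfo::Status::EXECUTED)
*         std::cout << info.node_name << ": " << info.real_time.count() << " us\n";
* }
* @endcode
*/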
/**
* @brief Starts inference of specified input(s) in asynchronous mode.
* @note It returns immediately. Inference also starts immediately.
* Calling any method while the request is in a running state leads to throwing the ov::Busy exception.
*/
void start_async();
/**
* @brief Waits for the result to become available. Blocks until the result
* becomes available.
*/
void wait();
/**
* @brief Waits for the result to become available. Blocks until the specified timeout has elapsed or the result
* becomes available, whichever comes first.
*
* @param timeout Maximum duration, in milliseconds, to block for.
* @return True if the inference request is ready; false otherwise.
*/
bool wait_for(const std::chrono::milliseconds timeout);
/**
* @brief Sets a callback std::function that is called on success or failure of an asynchronous request.
* @param callback Callback object that is called when inference is completed.
*/
void set_callback(std::function<void(std::exception_ptr)> callback);
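/**
* An asynchronous flow, continuing the sketches above; the callback and the blocking wait are
* alternative completion paths, and the 50 ms timeout is a placeholder.
* @code
* request.set_callback([](std::exception_ptr error) {
*     if (!error) {
*         // Inference succeeded; output tensors can be read here.
*     }
* });
* request.start_async();  // returns immediately
* // ... do other work on the current thread ...
* if (!request.wait_for(std::chrono::milliseconds(50)))  // poll with a timeout
*     request.wait();                                     // or block until completion
* @endcode
*/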
/**
* @brief Gets state control interface for the given infer request.
*
* State control is essential for recurrent models.
* @return Vector of Variable State objects.
*/
std::vector<VariableState> query_state();
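/**
* A sketch for stateful (for example, recurrent) models: resetting all variable states between
* independent input sequences.
* @code
* for (ov::VariableState& state : request.query_state())
*     state.reset();  // back to the default of the corresponding ReadValue node
* @endcode
*/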
/**
* @brief Returns a compiled model that created this inference request.
* @return Compiled model object.
*/
CompiledModel get_compiled_model();
/**
* @brief Checks if the current InferRequest object is not initialized.
* @return True if the current InferRequest object is not initialized; false otherwise.
*/
bool operator!() const noexcept;
/**
* @brief Checks if the current InferRequest object is initialized.
* @return True if the current InferRequest object is initialized; false otherwise.
*/
explicit operator bool() const noexcept;
/**
* @brief Compares whether this request wraps the same impl underneath.
* @param other Another inference request.
* @return True if the current InferRequest object does not wrap the same impl as the operator's arg.
*/
bool operator!=(const InferRequest& other) const noexcept;
/**
* @brief Compares whether this request wraps the same impl underneath.
* @param other Another inference request.
* @return True if the current InferRequest object wraps the same impl as the operator's arg.
*/
bool operator==(const InferRequest& other) const noexcept;
};

@ -3,8 +3,8 @@
//
/**
* @brief A header file for the ProfilingInfo objects that contain a performance
* metric for a single node.
*
* @file openvino/runtime/profiling_info.hpp
*/
@ -24,40 +24,40 @@ namespace ov {
*/
struct ProfilingInfo {
/**
* @brief Defines the general status of a node.
*/
enum class Status {
NOT_RUN, //!< A node is not executed.
OPTIMIZED_OUT, //!< A node is optimized out during the graph optimization phase.
EXECUTED //!< A node is executed.
};
/**
* @brief Defines the node status.
*/
Status status;
/**
* @brief The absolute time, in microseconds, that the node ran (in total).
*/
std::chrono::microseconds real_time;
/**
* @brief The net host CPU time that the node ran.
*/
std::chrono::microseconds cpu_time;
/**
* @brief Name of a node.
*/
std::string node_name;
/**
* @brief Execution type of a unit.
*/
std::string exec_type;
/**
* @brief Node type.
*/
std::string node_type;
};

@ -3,7 +3,7 @@
//
/**
* @brief A header file for the OpenVINO Runtime RemoteContext class.
* @file openvino/runtime/remote_context.hpp
*/
#pragma once
@ -31,17 +31,17 @@ class CompiledModel;
* @brief This class represents an abstraction
* for remote (non-CPU) accelerator device-specific inference context.
* Such context represents a scope on the device within which compiled
* models and remote memory tensors can exist, function, and exchange data.
*/
class OPENVINO_RUNTIME_API RemoteContext {
protected:
std::shared_ptr<InferenceEngine::RemoteContext> _impl; //!< Pointer to the remote context implementation.
std::shared_ptr<void> _so; //!< Reference to the shared object that loaded implementation.
/**
* @brief Constructs RemoteContext from the initialized std::shared_ptr.
* @param impl Initialized shared pointer.
* @param so Plugin to use. This is required to ensure that RemoteContext can work properly even if a plugin
* object is destroyed.
*/
RemoteContext(const std::shared_ptr<InferenceEngine::RemoteContext>& impl, const std::shared_ptr<void>& so);
@ -50,55 +50,55 @@ protected:
public:
/**
* @brief Default constructor.
*/
RemoteContext() = default;
/**
* @brief Default copy constructor.
* @param other Another RemoteContext object.
*/
RemoteContext(const RemoteContext& other) = default;
/**
* @brief Default copy assignment operator.
* @param other Another RemoteContext object.
* @return Reference to the current object.
*/
RemoteContext& operator=(const RemoteContext& other) = default;
/**
* @brief Default move constructor.
* @param other Another RemoteContext object.
*/
RemoteContext(RemoteContext&& other) = default;
/**
* @brief Default move assignment operator.
* @param other Another RemoteContext object.
* @return Reference to the current object.
*/
RemoteContext& operator=(RemoteContext&& other) = default;
/**
* @brief Destructor that preserves unloading order of implementation object and reference to the library.
*/
~RemoteContext();
/**
* @brief Internal method: checks remote type.
* @param remote_context Remote context which type is checked.
* @param type_info Map with remote object runtime info.
* @throw Exception if type check with the specified parameters failed.
*/
static void type_check(const RemoteContext& remote_context,
const std::map<std::string, std::vector<std::string>>& type_info = {});
/**
* @brief Checks if the RemoteContext object can be cast to the type T.
*
* @tparam T Type to be checked. Must represent a class derived from RemoteContext.
* @return True if this object can be dynamically cast to the type T*; false otherwise.
*/
template <typename T>
bool is() const noexcept {
@ -115,8 +115,8 @@ public:
/**
* @brief Casts this RemoteContext object to the type T.
*
* @tparam T Type to cast to. Must represent a class derived from RemoteContext.
* @return T object.
*/
template <typename T>
const T as() const {
@ -127,7 +127,7 @@ public:
}
/**
* @brief Returns name of a device on which the underlying object is allocated.
* Abstract method.
* @return A device name string in fully specified format `<device_name>[.<device_id>[.<tile_id>]]` (e.g. GPU.0.1).
*/
@ -136,19 +136,19 @@ public:
/**
* @brief Allocates memory tensor in device memory or wraps user-supplied memory handle
* using the specified tensor description and low-level device-specific parameters.
* Returns a pointer to the object that implements the RemoteTensor interface.
* @param type Defines the element type of the tensor.
* @param shape Defines the shape of the tensor.
* @param params Map of the low-level tensor object parameters.
* @return Pointer to a plugin object that implements the RemoteTensor interface.
*/
RemoteTensor create_tensor(const element::Type& type, const Shape& shape, const AnyMap& params = {});
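/**
* A sketch of allocating device memory; it assumes a model compiled for a remote-capable device
* such as GPU, and obtaining the context via CompiledModel::get_context() is illustrative.
* @code
* ov::RemoteContext context = compiled.get_context();
* ov::RemoteTensor device_tensor = context.create_tensor(ov::element::f32, ov::Shape{1, 3, 224, 224});
* @endcode
*/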
/**
* @brief Returns a map of device-specific parameters required for low-level
* operations with the underlying object.
* Parameters include device/context handles, access flags,
* etc. Content of the returned map depends on the remote execution context that is
* currently set on the device (working scenario).
* Abstract method.
* @return A map of name/parameter elements.
@ -156,12 +156,12 @@ public:
AnyMap get_params() const;
/**
* @brief This method is used to create a host tensor object friendly for the device in the current context.
* For example, a GPU context may allocate USM host memory (if the corresponding extension is available),
* which could be more efficient than regular host memory.
* @param type Tensor element type.
* @param shape Tensor shape.
* @return A tensor instance with device-friendly memory.
*/
Tensor create_host_tensor(const element::Type type, const Shape& shape);
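/**
* Continuing the sketch above: a host tensor allocated through the context, so the memory is
* device friendly (for example, USM host memory on GPU, when the extension is available).
* @code
* ov::Tensor host_tensor = context.create_host_tensor(ov::element::f32, ov::Shape{1, 3, 224, 224});
* @endcode
*/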
};

@ -3,7 +3,7 @@
//
/**
* @brief A header file for the OpenVINO Runtime tensor API.
*
* @file openvino/runtime/remote_tensor.hpp
*/
@ -17,7 +17,7 @@ namespace ov {
class RemoteContext;
/**
* @brief Remote memory access and interpretation API.
*/
class OPENVINO_RUNTIME_API RemoteTensor : public Tensor {
using Tensor::Tensor;
@ -25,18 +25,18 @@ class OPENVINO_RUNTIME_API RemoteTensor : public Tensor {
public:
/**
* @brief Checks OpenVINO remote type.
* @param tensor Tensor which type is checked.
* @param type_info Map with remote object runtime info.
* @throw Exception if type check with specified parameters failed.
*/
static void type_check(const Tensor& tensor, const std::map<std::string, std::vector<std::string>>& type_info = {});
/**
* @brief Access to host memory is not available for RemoteTensor.
* To access device-specific memory, cast to a specific RemoteTensor derived object and work with its
* properties or parse device memory properties via RemoteTensor::get_params.
* @return Nothing, throws an exception.
*/
void* data(const element::Type) = delete;
@ -55,7 +55,7 @@ public:
ov::AnyMap get_params() const;
/**
* @brief Returns name of a device on which the underlying object is allocated.
* Abstract method.
* @return A device name string in fully specified format `<device_name>[.<device_id>[.<tile_id>]]`.
*/

@ -3,7 +3,7 @@
//
/**
* @brief A header file for the OpenVINO Runtime Components.
*
* @file openvino/runtime/runtime.hpp
*/

@ -3,7 +3,7 @@
//
/**
* @brief A header file that provides ov::VariableState.
* @file openvino/runtime/variable_state.hpp
*/
@ -31,10 +31,10 @@ class OPENVINO_RUNTIME_API VariableState {
std::shared_ptr<void> _so;
/**
* @brief Constructs VariableState from the initialized std::shared_ptr.
* @param impl Initialized shared pointer.
* @param so Optional: plugin to use. This is required to ensure that VariableState can work properly even if a
* plugin object is destroyed.
*/
VariableState(const std::shared_ptr<InferenceEngine::IVariableStateInternal>& impl,
const std::shared_ptr<void>& so);
@ -43,37 +43,38 @@ class OPENVINO_RUNTIME_API VariableState {
public:
/**
* @brief Default constructor.
*/
VariableState() = default;
/**
* @brief Destructor that preserves unloading order of implementation object and reference to the library.
*/
~VariableState();
/**
* @brief Resets internal variable state for relevant infer request
* to a value specified as default for the corresponding ReadValue node.
*/
void reset();
/**
* @brief Gets the name of the current variable state. If the length of the array is not enough, the name is
* truncated by len, and a null terminator is inserted. `variable_id` from the corresponding `ReadValue` is
* used as the variable state name.
* @return A string representing the state name.
*/
std::string get_name() const;
/**
* @brief Returns the value of the variable state.
* @return A tensor representing a state.
*/
Tensor get_state() const;
/**
* @brief Sets the new state for the next inference.
* @param state The current state to set.
*/
void set_state(const Tensor& state);
};
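/**
* A sketch of seeding a state before the first inference of a sequence; the {1, 128} f32 state
* shape is a placeholder, and `request` comes from the earlier sketches.
* @code
* for (ov::VariableState& state : request.query_state()) {
*     ov::Tensor initial(ov::element::f32, ov::Shape{1, 128});
*     std::fill_n(initial.data<float>(), initial.get_size(), 0.0f);
*     state.set_state(initial);  // becomes the state for the next inference
* }
* ov::Tensor current = request.query_state().front().get_state();  // read a state back
* @endcode
*/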