Remove NV12 and I420 blobs and deprecate some legacy API (#17919)

* Remove NV12 and I420 blobs and deprecate some legacy API

* Fixed some errors

* Remove NV12 blobs

* Remove NV12 conversion

* Fixed other warnings

* Suppress version

* Fix some warnings

* Fixed version

* Try to fix some warnings

* Suppress warnings in C header

* Suppress warnings in C

* Fixed Windows exceptions

* Try to fix warnings

* Try to fix C bindings build

* Suppress InferRequest

* Fixed some build issues

* Fixed some errors
This commit is contained in:
Ilya Churaev
2023-06-12 21:15:02 +04:00
committed by GitHub
parent 90a0e5f81a
commit df44f92a97
49 changed files with 203 additions and 1823 deletions

View File

@@ -16,6 +16,7 @@
#include "ie_common.h"
namespace InferenceEngine {
IE_SUPPRESS_DEPRECATED_START
/**
* @brief A description buffer wrapping StatusCode and ResponseDesc
@@ -119,4 +120,5 @@ private:
}
}
};
IE_SUPPRESS_DEPRECATED_END
} // namespace InferenceEngine

View File

@@ -42,6 +42,7 @@
* Exec order is predefined.
*/
IE_SUPPRESS_DEPRECATED_START
class MemorySolver {
public:
/** @brief Representation of edge (size and live time)*/
@@ -224,3 +225,4 @@ private:
}
}
};
IE_SUPPRESS_DEPRECATED_END

View File

@@ -50,7 +50,7 @@ struct SoPtr {
template <typename U>
SoPtr(const SoPtr<U>& that) : _ptr{std::dynamic_pointer_cast<T>(that._ptr)},
_so{that._so} {
IE_ASSERT(_ptr != nullptr);
OPENVINO_ASSERT(_ptr != nullptr);
}
/**

View File

@@ -29,6 +29,8 @@
namespace InferenceEngine {
IE_SUPPRESS_DEPRECATED_START
class IInferRequestInternal;
namespace details {
@@ -271,7 +273,6 @@ public:
bool operator==(const InferRequest&) const noexcept;
};
IE_SUPPRESS_DEPRECATED_START
/**
* @private
*/

View File

@@ -23,6 +23,7 @@
#include "ie_parameter.hpp"
IE_SUPPRESS_DEPRECATED_START
namespace InferenceEngine {
namespace gpu {
@@ -91,3 +92,4 @@ protected:
} // namespace gpu
} // namespace InferenceEngine
IE_SUPPRESS_DEPRECATED_END

View File

@@ -130,36 +130,6 @@ public:
}
};
/**
* @brief This function is used to obtain a NV12 compound blob object from NV12 DXGI video decoder output.
* The resulting compound contains two remote blobs for Y and UV planes of the surface.
* @param height Height of Y plane
* @param width Width of Y plane
* @param ctx A pointer to remote context
* @param nv12_surf A ID3D11Texture2D instance to create NV12 blob from
* @return NV12 remote blob
*/
OPENVINO_DEPRECATED("This function is deprecated and will be removed in 2023.1 release")
static inline Blob::Ptr make_shared_blob_nv12(size_t height,
size_t width,
RemoteContext::Ptr ctx,
ID3D11Texture2D* nv12_surf) {
// regardless of layout, blob dimensions always follow in N,C,H,W order
TensorDesc desc(Precision::U8, {1, 1, height, width}, Layout::NHWC);
ParamMap blobParams = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)},
{GPU_PARAM_KEY(DEV_OBJECT_HANDLE), static_cast<gpu_handle_param>(nv12_surf)},
{GPU_PARAM_KEY(VA_PLANE), uint32_t(0)}};
Blob::Ptr y_blob = std::dynamic_pointer_cast<Blob>(ctx->CreateBlob(desc, blobParams));
TensorDesc uvdesc(Precision::U8, {1, 2, height / 2, width / 2}, Layout::NHWC);
blobParams[GPU_PARAM_KEY(MEM_HANDLE)] = static_cast<gpu_handle_param>(nv12_surf);
blobParams[GPU_PARAM_KEY(VA_PLANE)] = uint32_t(1);
Blob::Ptr uv_blob = std::dynamic_pointer_cast<Blob>(ctx->CreateBlob(uvdesc, blobParams));
return InferenceEngine::make_shared_blob<NV12Blob>(y_blob, uv_blob);
}
/**
* @brief This function is used to obtain remote context object from ID3D11Device
* @param core Inference Engine Core object instance

View File

@@ -238,41 +238,6 @@ public:
}
};
/**
* @brief This function is used to construct a NV12 compound blob object from two cl::Image2D wrapper objects.
* The resulting compound contains two remote blobs for Y and UV planes of the surface.
* @param ctx RemoteContext plugin object derived from ClContext class.
* @param nv12_image_plane_y cl::Image2D object containing Y plane data.
* @param nv12_image_plane_uv cl::Image2D object containing UV plane data.
* @return A shared remote blob instance
*/
OPENVINO_DEPRECATED("This function is deprecated and will be removed in 2023.1 release")
static inline Blob::Ptr make_shared_blob_nv12(RemoteContext::Ptr ctx,
cl::Image2D& nv12_image_plane_y,
cl::Image2D& nv12_image_plane_uv) {
auto casted = std::dynamic_pointer_cast<ClContext>(ctx);
if (nullptr == casted) {
IE_THROW() << "Invalid remote context passed";
}
size_t width = nv12_image_plane_y.getImageInfo<CL_IMAGE_WIDTH>();
size_t height = nv12_image_plane_y.getImageInfo<CL_IMAGE_HEIGHT>();
// despite of layout, blob dimensions always follow in N,C,H,W order
TensorDesc ydesc(Precision::U8, {1, 1, height, width}, Layout::NHWC);
ParamMap blobParams = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(OCL_IMAGE2D)},
{GPU_PARAM_KEY(MEM_HANDLE), static_cast<gpu_handle_param>(nv12_image_plane_y.get())}};
Blob::Ptr y_blob = std::dynamic_pointer_cast<Blob>(casted->CreateBlob(ydesc, blobParams));
TensorDesc uvdesc(Precision::U8, {1, 2, height / 2, width / 2}, Layout::NHWC);
blobParams[GPU_PARAM_KEY(MEM_HANDLE)] = static_cast<gpu_handle_param>(nv12_image_plane_uv.get());
Blob::Ptr uv_blob = std::dynamic_pointer_cast<Blob>(casted->CreateBlob(uvdesc, blobParams));
Blob::Ptr res = make_shared_blob<NV12Blob>(y_blob, uv_blob);
return res;
}
/**
* @brief This function is used to obtain remote context object from user-supplied OpenCL context handle
* @param core A reference to Inference Engine Core object

View File

@@ -101,34 +101,6 @@ public:
}
};
/**
* @brief This function is used to obtain a NV12 compound blob object from NV12 VA decoder output.
* The resulting compound contains two remote blobs for Y and UV planes of the surface.
* @param height A height of Y plane
* @param width A width of Y plane
* @param ctx A remote context instance
* @param nv12_surf NV12 `VASurfaceID` to create NV12 from
* @return A remote NV12 blob wrapping `VASurfaceID`
*/
OPENVINO_DEPRECATED("This function is deprecated and will be removed in 2023.1 release")
static inline Blob::Ptr make_shared_blob_nv12(size_t height,
size_t width,
RemoteContext::Ptr ctx,
VASurfaceID nv12_surf) {
// despite of layout, blob dimensions always follow in N, C, H, W order
TensorDesc ydesc(Precision::U8, {1, 1, height, width}, Layout::NHWC);
ParamMap blobParams = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)},
{GPU_PARAM_KEY(DEV_OBJECT_HANDLE), nv12_surf},
{GPU_PARAM_KEY(VA_PLANE), uint32_t(0)}};
Blob::Ptr y_blob = std::dynamic_pointer_cast<Blob>(ctx->CreateBlob(ydesc, blobParams));
TensorDesc uvdesc(Precision::U8, {1, 2, height / 2, width / 2}, Layout::NHWC);
blobParams[GPU_PARAM_KEY(VA_PLANE)] = uint32_t(1);
Blob::Ptr uv_blob = std::dynamic_pointer_cast<Blob>(ctx->CreateBlob(uvdesc, blobParams));
return InferenceEngine::make_shared_blob<NV12Blob>(y_blob, uv_blob);
}
/**
* @brief This function is used to obtain remote context object from VA display handle
* @param core Inference Engine Core object

View File

@@ -9,6 +9,16 @@
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#if defined(OPENVINO_STATIC_LIBRARY) || defined(USE_STATIC_IE) || (defined(__GNUC__) && (__GNUC__ < 4))
# define INFERENCE_ENGINE_API(...) extern "C" __VA_ARGS__
# define INFERENCE_ENGINE_API_CPP(...) __VA_ARGS__

View File

@@ -9,6 +9,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <algorithm>
#include <cstdlib>
#include <iterator>
@@ -22,6 +32,7 @@
#include "ie_api.h"
IE_SUPPRESS_DEPRECATED_START
#ifndef NDEBUG
# include <cassert>
#endif
@@ -58,7 +69,7 @@ using DataWeakPtr = std::weak_ptr<Data>;
* @union UserValue
* @brief The method holds the user values to enable binding of data per graph node.
*/
union UserValue {
union INFERENCE_ENGINE_1_0_DEPRECATED UserValue {
int v_int; //!< An integer value
float v_float; //!< A floating point value
void* v_ptr; //!< A pointer to a void
@@ -68,7 +79,7 @@ union UserValue {
* @enum Layout
* @brief Layouts that the inference engine supports
*/
enum Layout : uint8_t {
enum INFERENCE_ENGINE_1_0_DEPRECATED Layout : uint8_t {
ANY = 0, //!< "any" layout
// I/O data layouts
@@ -107,7 +118,7 @@ enum Layout : uint8_t {
* @param p A layout value to print to a stream
* @return A reference to the `out` stream
*/
inline std::ostream& operator<<(std::ostream& out, const Layout& p) {
INFERENCE_ENGINE_1_0_DEPRECATED inline std::ostream& operator<<(std::ostream& out, const Layout& p) {
switch (p) {
#define PRINT_LAYOUT(name) \
case name: \
@@ -143,18 +154,12 @@ inline std::ostream& operator<<(std::ostream& out, const Layout& p) {
* @enum ColorFormat
* @brief Extra information about input color format for preprocessing
*/
enum ColorFormat : uint32_t {
enum INFERENCE_ENGINE_1_0_DEPRECATED ColorFormat : uint32_t {
RAW = 0u, ///< Plain blob (default), no extra color processing required
RGB, ///< RGB color format
BGR, ///< BGR color format, default in OpenVINO
RGBX, ///< RGBX color format with X ignored during inference
BGRX, ///< BGRX color format with X ignored during inference
NV12 INFERENCE_ENGINE_ENUM_DEPRECATED(
"This type is deprecated and will be removed in 2023.1 release"), ///< NV12 color format represented as
///< compound Y+UV blob
I420 INFERENCE_ENGINE_ENUM_DEPRECATED(
"This type is deprecated and will be removed in 2023.1 release"), ///< I420 color format represented as
///< compound Y+U+V blob
};
/**
@@ -163,7 +168,7 @@ enum ColorFormat : uint32_t {
* @param fmt A color format value to print to a stream
* @return A reference to the `out` stream
*/
inline std::ostream& operator<<(std::ostream& out, const ColorFormat& fmt) {
INFERENCE_ENGINE_1_0_DEPRECATED inline std::ostream& operator<<(std::ostream& out, const ColorFormat& fmt) {
switch (fmt) {
#define PRINT_COLOR_FORMAT(name) \
case name: \
@@ -175,10 +180,6 @@ inline std::ostream& operator<<(std::ostream& out, const ColorFormat& fmt) {
PRINT_COLOR_FORMAT(BGR);
PRINT_COLOR_FORMAT(RGBX);
PRINT_COLOR_FORMAT(BGRX);
IE_SUPPRESS_DEPRECATED_START
PRINT_COLOR_FORMAT(NV12);
PRINT_COLOR_FORMAT(I420);
IE_SUPPRESS_DEPRECATED_END
#undef PRINT_COLOR_FORMAT
default:
@@ -195,11 +196,11 @@ inline std::ostream& operator<<(std::ostream& out, const ColorFormat& fmt) {
* If the layer is executed using tiling, the sum time per each tile is indicated as the total execution time.
* Due to parallel execution, the total execution time for all layers might be greater than the total inference time.
*/
struct InferenceEngineProfileInfo {
struct INFERENCE_ENGINE_1_0_DEPRECATED InferenceEngineProfileInfo {
/**
* @brief Defines the general status of the layer
*/
enum LayerStatus {
enum INFERENCE_ENGINE_1_0_DEPRECATED LayerStatus {
NOT_RUN, //!< A layer is not executed
OPTIMIZED_OUT, //!< A layer is optimized out during graph optimization phase
EXECUTED //!< A layer is executed
@@ -239,7 +240,7 @@ struct InferenceEngineProfileInfo {
* @enum StatusCode
* @brief This enum contains codes for all possible return values of the interface functions
*/
enum StatusCode : int {
enum INFERENCE_ENGINE_1_0_DEPRECATED StatusCode : int {
OK = 0,
GENERAL_ERROR = -1,
NOT_IMPLEMENTED = -2,
@@ -263,7 +264,7 @@ enum StatusCode : int {
* @struct ResponseDesc
* @brief Represents detailed information for an error
*/
struct ResponseDesc {
struct INFERENCE_ENGINE_1_0_DEPRECATED ResponseDesc {
/**
* @brief A character buffer that holds the detailed information for an error.
*/
@@ -273,7 +274,7 @@ struct ResponseDesc {
/**
* @brief Response structure encapsulating information about supported layer
*/
struct QueryNetworkResult {
struct INFERENCE_ENGINE_1_0_DEPRECATED QueryNetworkResult {
/**
* @brief A map of supported layers:
* - key - a layer name
@@ -303,7 +304,7 @@ using ConstOutputsDataMap = std::map<std::string, CDataPtr>;
using OutputsDataMap = std::map<std::string, DataPtr>;
namespace details {
struct INFERENCE_ENGINE_DEPRECATED("Use InferRequest::Exception") INFERENCE_ENGINE_API_CLASS(InferenceEngineException)
struct INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(InferenceEngineException)
: public std::runtime_error {
using std::runtime_error::runtime_error;
bool hasStatus() const {
@@ -317,7 +318,8 @@ struct INFERENCE_ENGINE_DEPRECATED("Use InferRequest::Exception") INFERENCE_ENGI
* @brief Base Inference Engine exception class
*/
IE_SUPPRESS_DEPRECATED_START
struct INFERENCE_ENGINE_API_CLASS(Exception) : public details::InferenceEngineException {
struct INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Exception)
: public details::InferenceEngineException {
using InferenceEngineException::InferenceEngineException;
};
IE_SUPPRESS_DEPRECATED_END
@@ -328,17 +330,18 @@ template <typename ExceptionType>
struct ExceptionTraits;
}
#define INFERENCE_ENGINE_DECLARE_EXCEPTION(ExceptionType, statusCode) \
struct INFERENCE_ENGINE_API_CLASS(ExceptionType) final : public InferenceEngine::Exception { \
using Exception::Exception; \
}; \
namespace details { \
template <> \
struct ExceptionTraits<ExceptionType> { \
static const char* string() { \
return "[ " #statusCode " ]"; \
} \
}; \
#define INFERENCE_ENGINE_DECLARE_EXCEPTION(ExceptionType, statusCode) \
struct INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(ExceptionType) final \
: public InferenceEngine::Exception { \
using Exception::Exception; \
}; \
namespace details { \
template <> \
struct INFERENCE_ENGINE_1_0_DEPRECATED ExceptionTraits<ExceptionType> { \
static const char* string() { \
return "[ " #statusCode " ]"; \
} \
}; \
}
/// @endcond
@@ -392,13 +395,13 @@ namespace details {
/**
* @brief Rethrow a copy of exception. Should be used in catch blocks
*/
[[noreturn]] INFERENCE_ENGINE_API_CPP(void) Rethrow();
[[noreturn]] INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CPP(void) Rethrow();
/**
* @brief Tag struct used to throw exception
*/
template <typename ExceptionType>
struct ThrowNow final {
struct INFERENCE_ENGINE_1_0_DEPRECATED ThrowNow final {
[[noreturn]] void operator<<=(const std::ostream& ostream) {
std::ostringstream stream;
stream << ostream.rdbuf();
@@ -510,3 +513,4 @@ struct NullStream {
#else
# define __PRETTY_FUNCTION__ __PRETTY_FUNCTION__
#endif
IE_SUPPRESS_DEPRECATED_END

View File

@@ -9,6 +9,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <initializer_list>
#include <memory>
#include <vector>
@@ -23,7 +33,7 @@ namespace InferenceEngine {
* Compound blob is a wrapper blob over references to underlying blobs. These blobs should share
* some properties and can be grouped into a single entity.
*/
class INFERENCE_ENGINE_API_CLASS(CompoundBlob) : public Blob {
class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(CompoundBlob) : public Blob {
public:
/**
* @brief A smart pointer to the CompoundBlob object
@@ -116,168 +126,12 @@ protected:
const std::shared_ptr<IAllocator>& getAllocator() const noexcept override;
};
/**
* @brief Represents a blob that contains two planes (Y and UV) in NV12 color format
*/
class INFERENCE_ENGINE_DEPRECATED("This class is deprecated and will be removed in 2023.1 release")
INFERENCE_ENGINE_API_CLASS(NV12Blob)
: public CompoundBlob {
public:
/**
* @brief A smart pointer to the NV12Blob object
*/
using Ptr = std::shared_ptr<NV12Blob>;
/**
* @brief A smart pointer to the const NV12Blob object
*/
using CPtr = std::shared_ptr<const NV12Blob>;
/**
* @brief Constructs NV12 blob from two planes Y and UV
*
* @param y Blob object that represents Y plane in NV12 color format
* @param uv Blob object that represents UV plane in NV12 color format
*/
NV12Blob(const Blob::Ptr& y, const Blob::Ptr& uv);
/**
* @brief Constructs NV12 blob from two planes Y and UV
*
* @param y Blob object that represents Y plane in NV12 color format
* @param uv Blob object that represents UV plane in NV12 color format
*/
NV12Blob(Blob::Ptr&& y, Blob::Ptr&& uv);
/**
* @brief Returns a shared pointer to Y plane
* @return Y plane
*/
virtual Blob::Ptr& y() noexcept;
/**
* @brief Returns a shared pointer to Y plane
* @return Y plane
*/
virtual const Blob::Ptr& y() const noexcept;
/**
* @brief Returns a shared pointer to UV plane
* @return UV plane
*/
virtual Blob::Ptr& uv() noexcept;
/**
* @brief Returns a shared pointer to UV plane
* @return UV plane
*/
virtual const Blob::Ptr& uv() const noexcept;
Blob::Ptr createROI(const ROI& roi) const override;
};
/**
* @brief Represents a blob that contains three planes (Y,U and V) in I420 color format
*/
class INFERENCE_ENGINE_DEPRECATED("This class is deprecated and will be removed in 2023.1 release")
INFERENCE_ENGINE_API_CLASS(I420Blob)
: public CompoundBlob {
public:
/**
* @brief A smart pointer to the I420Blob object
*/
using Ptr = std::shared_ptr<I420Blob>;
/**
* @brief A smart pointer to the const I420Blob object
*/
using CPtr = std::shared_ptr<const I420Blob>;
/**
* @brief Constructs I420 blob from three planes Y, U and V
* @param y Blob object that represents Y plane in I420 color format
* @param u Blob object that represents U plane in I420 color format
* @param v Blob object that represents V plane in I420 color format
*/
I420Blob(const Blob::Ptr& y, const Blob::Ptr& u, const Blob::Ptr& v);
/**
* @brief Constructs I420 blob from three planes Y, U and V
* @param y Blob object that represents Y plane in I420 color format
* @param u Blob object that represents U plane in I420 color format
* @param v Blob object that represents V plane in I420 color format
*/
I420Blob(Blob::Ptr&& y, Blob::Ptr&& u, Blob::Ptr&& v);
/**
* @brief Returns a reference to shared pointer to Y plane
*
* Please note that reference to Blob::Ptr is returned. I.e. the reference will be valid until
* the I420Blob object is destroyed.
*
* @return reference to shared pointer object of Y plane
*/
Blob::Ptr& y() noexcept;
/**
* @brief Returns a constant reference to shared pointer to Y plane
*
* Please note that reference to Blob::Ptr is returned. I.e. the reference will be valid until
* the I420Blob object is destroyed.
*
* @return constant reference to shared pointer object of Y plane*
*/
const Blob::Ptr& y() const noexcept;
/**
* @brief Returns a reference to shared pointer to U plane
*
* Please note that reference to Blob::Ptr is returned. I.e. the reference will be valid until
* the I420Blob object is destroyed.
*
* @return reference to shared pointer object of U plane
*/
Blob::Ptr& u() noexcept;
/**
* @brief Returns a constant reference to shared pointer to U plane
*
* Please note that reference to Blob::Ptr is returned. I.e. the reference will be valid until
* the I420Blob object is destroyed.
*
* @return constant reference to shared pointer object of U plane
*/
const Blob::Ptr& u() const noexcept;
/**
* @brief Returns a reference to shared pointer to V plane
*
* Please note that reference to Blob::Ptr is returned. I.e. the reference will be valid until
* the I420Blob object is destroyed.
*
* @return reference to shared pointer object of V plane
*/
Blob::Ptr& v() noexcept;
/**
* @brief Returns a constant reference to shared pointer to V plane
*
* Please note that reference to Blob::Ptr is returned. I.e. the reference will be valid until
* the I420Blob object is destroyed.
*
* @return constant reference to shared pointer object of V plane
*/
const Blob::Ptr& v() const noexcept;
Blob::Ptr createROI(const ROI& roi) const override;
};
/**
* @brief This class represents a blob that contains other blobs - one per batch
* @details Plugin which supports BatchedBlob input should report BATCHED_BLOB
* in the OPTIMIZATION_CAPABILITIES metric.
*/
class INFERENCE_ENGINE_API_CLASS(BatchedBlob) : public CompoundBlob {
class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(BatchedBlob) : public CompoundBlob {
public:
/**
* @brief A smart pointer to the BatchedBlob object

View File

@@ -10,6 +10,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <memory>
#include <vector>
@@ -21,7 +31,7 @@ IE_SUPPRESS_DEPRECATED_START
/**
* @brief This structure stores info about pre-processing of network inputs (scale, mean image, ...)
*/
struct PreProcessChannel {
struct INFERENCE_ENGINE_1_0_DEPRECATED PreProcessChannel {
/** @brief Scale parameter for a channel */
float stdScale = 1;
@@ -38,7 +48,7 @@ struct PreProcessChannel {
/**
* @brief Defines available types of mean
*/
enum MeanVariant {
enum INFERENCE_ENGINE_1_0_DEPRECATED MeanVariant {
MEAN_IMAGE, /**< mean value is specified for each input pixel */
MEAN_VALUE, /**< mean value is specified for each input channel */
NONE, /**< no mean value specified */
@@ -48,12 +58,12 @@ enum MeanVariant {
* @enum ResizeAlgorithm
* @brief Represents the list of supported resize algorithms.
*/
enum ResizeAlgorithm { NO_RESIZE = 0, RESIZE_BILINEAR, RESIZE_AREA };
enum INFERENCE_ENGINE_1_0_DEPRECATED ResizeAlgorithm { NO_RESIZE = 0, RESIZE_BILINEAR, RESIZE_AREA };
/**
* @brief This class stores pre-process information for the input
*/
class PreProcessInfo {
class INFERENCE_ENGINE_1_0_DEPRECATED PreProcessInfo {
// Channel data
std::vector<PreProcessChannel::Ptr> _channelsInfo;
MeanVariant _variant = NONE;

View File

@@ -10,6 +10,16 @@
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include "cpp/ie_cnn_network.h"
#include "ie_api.h"
@@ -42,5 +52,6 @@ namespace InferenceEngine {
If "false, then the transformation leaves existed initializing subgraph for ReadValue operation.
* Loop operation by a given number. Does not affect TensorIterators.
*/
INFERENCE_ENGINE_API_CPP(void) lowLatency2(InferenceEngine::CNNNetwork& network, bool use_const_initializer = true);
INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CPP(void)
lowLatency2(InferenceEngine::CNNNetwork& network, bool use_const_initializer = true);
} // namespace InferenceEngine

View File

@@ -9,6 +9,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
/**
* @def IE_VERSION_MAJOR
* @brief Defines Inference Engine major version
@@ -30,19 +40,20 @@
* @brief Inference Engine C++ API
*/
namespace InferenceEngine {
IE_SUPPRESS_DEPRECATED_START
/**
* @struct Version
* @brief Represents version information that describes plugins and the inference engine runtime library
*/
#pragma pack(push, 1)
struct Version {
struct INFERENCE_ENGINE_1_0_DEPRECATED Version {
IE_SUPPRESS_DEPRECATED_START
/**
* @deprecated Use IE_VERSION_[MAJOR|MINOR|PATCH] definitions, buildNumber property
* @brief An API version reflects the set of supported features
*/
struct ApiVersion {
struct INFERENCE_ENGINE_1_0_DEPRECATED ApiVersion {
INFERENCE_ENGINE_DEPRECATED("Use IE_VERSION_[MAJOR|MINOR|PATCH] definitions, buildNumber property")
int major; //!< A major version
INFERENCE_ENGINE_DEPRECATED("Use IE_VERSION_[MAJOR|MINOR|PATCH] definitions, buildNumber property")
@@ -112,4 +123,5 @@ struct Version {
*/
INFERENCE_ENGINE_API(const Version*) GetInferenceEngineVersion() noexcept;
IE_SUPPRESS_DEPRECATED_END
} // namespace InferenceEngine

View File

@@ -8,6 +8,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include "ie_compound_blob.h"
#include "ie_core.hpp"
#include "ie_transformations.hpp"

View File

@@ -59,6 +59,7 @@ void VariableState::SetState(Blob::Ptr state) {
} // namespace InferenceEngine
IE_SUPPRESS_DEPRECATED_END
namespace ov {
VariableState::~VariableState() {

View File

@@ -4,6 +4,7 @@
#include <cpp_interfaces/interface/ie_ivariable_state_internal.hpp>
IE_SUPPRESS_DEPRECATED_START
namespace InferenceEngine {
IVariableStateInternal::IVariableStateInternal(const std::string& name_) : name{name_} {}

View File

@@ -19,6 +19,7 @@
#include <sys/stat.h>
#include "ie_common.h"
#include "openvino/core/except.hpp"
#include "openvino/util/file_util.hpp"
#ifndef _WIN32
@@ -77,7 +78,7 @@ std::string getIELibraryPathA() {
if (!GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
reinterpret_cast<LPSTR>(getIELibraryPath),
&hm)) {
IE_THROW() << "GetModuleHandle returned " << GetLastError();
OPENVINO_THROW("GetModuleHandle returned ", GetLastError());
}
GetModuleFileNameA(hm, (LPSTR)ie_library_path, sizeof(ie_library_path));
return getPathName(std::string(ie_library_path));
@@ -119,7 +120,7 @@ std::wstring getIELibraryPathW() {
if (!GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
reinterpret_cast<LPCWSTR>(getIELibraryPath),
&hm)) {
IE_THROW() << "GetModuleHandle returned " << GetLastError();
OPENVINO_THROW("GetModuleHandle returned ", GetLastError());
}
GetModuleFileNameW(hm, (LPWSTR)ie_library_path, sizeof(ie_library_path) / sizeof(ie_library_path[0]));
return getPathName(std::wstring(ie_library_path));

View File

@@ -19,196 +19,7 @@ namespace InferenceEngine {
namespace {
TensorDesc verifyNV12BlobInput(const Blob::Ptr& y, const Blob::Ptr& uv) {
// Y and UV must be valid pointers
if (y == nullptr || uv == nullptr) {
IE_THROW() << "Y and UV planes must be valid Blob objects";
}
// both Y and UV must be MemoryBlob objects
if (!y->is<MemoryBlob>() || !uv->is<MemoryBlob>()) {
IE_THROW() << "Y and UV planes must be MemoryBlob objects";
}
// NOTE: having Blob::Ptr (shared_ptr) and checking Blob::is() status above ensures that the
// cast is always successful
auto yMemoryBlob = y->as<MemoryBlob>();
auto uvMemoryBlob = uv->as<MemoryBlob>();
// check Blob element size
if (yMemoryBlob->element_size() != uvMemoryBlob->element_size()) {
IE_THROW() << "Y and UV planes have different element sizes: " << yMemoryBlob->element_size()
<< " != " << uvMemoryBlob->element_size();
}
// check tensor descriptor parameters
const auto& yDesc = yMemoryBlob->getTensorDesc();
const auto& uvDesc = uvMemoryBlob->getTensorDesc();
// check precision
if (yDesc.getPrecision() != Precision::U8) {
IE_THROW() << "Y plane precision must be U8, actual: " << yDesc.getPrecision();
}
if (uvDesc.getPrecision() != Precision::U8) {
IE_THROW() << "UV plane precision must be U8, actual: " << uvDesc.getPrecision();
}
// check layout
if (yDesc.getLayout() != Layout::NHWC) {
IE_THROW() << "Y plane layout must be NHWC, actual: " << yDesc.getLayout();
}
if (uvDesc.getLayout() != Layout::NHWC) {
IE_THROW() << "UV plane layout must be NHWC, actual: " << uvDesc.getLayout();
}
// check dimensions
const auto& yDims = yDesc.getDims();
const auto& uvDims = uvDesc.getDims();
if (yDims.size() != 4 || uvDims.size() != 4) {
IE_THROW() << "Y and UV planes dimension sizes must be 4, actual: " << yDims.size() << "(Y plane) and "
<< uvDims.size() << "(UV plane)";
}
// check batch size
if (yDims[0] != uvDims[0]) {
IE_THROW() << "Y and UV planes must have the same batch size";
}
// check number of channels
if (yDims[1] != 1) {
IE_THROW() << "Y plane must have 1 channel, actual: " << yDims[1];
}
if (uvDims[1] != 2) {
IE_THROW() << "UV plane must have 2 channels, actual: " << uvDims[1];
}
// check height
if (yDims[2] != 2 * uvDims[2]) {
IE_THROW() << "The height of the Y plane must be equal to (2 * the height of the UV plane), actual: "
<< yDims[2] << "(Y plane) and " << uvDims[2] << "(UV plane)";
}
// check width
if (yDims[3] != 2 * uvDims[3]) {
IE_THROW() << "The width of the Y plane must be equal to (2 * the width of the UV plane), actual: " << yDims[3]
<< "(Y plane) and " << uvDims[3] << "(UV plane)";
}
return {Precision::U8, {}, Layout::NCHW};
}
/**
 * @brief Validates the three planes passed to an I420Blob and returns the compound descriptor.
 *
 * All planes must be non-null MemoryBlob objects with U8 precision, NHWC layout and
 * 4D dims {N, C, H, W}; U and V must have the same batch size as Y, a single channel,
 * and half the width and height of the Y plane (4:2:0 chroma subsampling).
 *
 * @param y Y (luma) plane
 * @param u U (chroma) plane
 * @param v V (chroma) plane
 * @return TensorDesc for the compound blob
 * @throws Exception if any constraint above is violated
 */
TensorDesc verifyI420BlobInput(const Blob::Ptr& y, const Blob::Ptr& u, const Blob::Ptr& v) {
    // Y, U and V must be valid pointers
    if (y == nullptr || u == nullptr || v == nullptr) {
        IE_THROW() << "Y, U and V planes must be valid Blob objects";
    }
    // all three planes must be MemoryBlob objects
    if (!y->is<MemoryBlob>() || !u->is<MemoryBlob>() || !v->is<MemoryBlob>()) {
        IE_THROW() << "Y, U and V planes must be MemoryBlob objects";
    }
    // NOTE: having Blob::Ptr (shared_ptr) and checking Blob::is() status above ensures that the
    // cast is always successful
    auto yMemoryBlob = y->as<MemoryBlob>();
    auto uMemoryBlob = u->as<MemoryBlob>();
    auto vMemoryBlob = v->as<MemoryBlob>();
    // check Blob element size
    if (yMemoryBlob->element_size() != uMemoryBlob->element_size() ||
        yMemoryBlob->element_size() != vMemoryBlob->element_size()) {
        IE_THROW() << "Y, U and V planes have different element sizes: " << yMemoryBlob->element_size()
                   << " != " << uMemoryBlob->element_size() << " != " << vMemoryBlob->element_size();
    }
    // check tensor descriptor parameters
    const auto& yDesc = yMemoryBlob->getTensorDesc();
    const auto& uDesc = uMemoryBlob->getTensorDesc();
    const auto& vDesc = vMemoryBlob->getTensorDesc();
    // check precision
    if (yDesc.getPrecision() != Precision::U8) {
        IE_THROW() << "Y plane precision must be U8, actual: " << yDesc.getPrecision();
    }
    if (uDesc.getPrecision() != Precision::U8) {
        IE_THROW() << "U plane precision must be U8, actual: " << uDesc.getPrecision();
    }
    if (vDesc.getPrecision() != Precision::U8) {
        IE_THROW() << "V plane precision must be U8, actual: " << vDesc.getPrecision();
    }
    // check layout
    if (yDesc.getLayout() != Layout::NHWC) {
        IE_THROW() << "Y plane layout must be NHWC, actual: " << yDesc.getLayout();
    }
    if (uDesc.getLayout() != Layout::NHWC) {
        IE_THROW() << "U plane layout must be NHWC, actual: " << uDesc.getLayout();
    }
    // BUGFIX: this previously re-checked uDesc, so a V plane with a non-NHWC
    // layout was silently accepted
    if (vDesc.getLayout() != Layout::NHWC) {
        IE_THROW() << "V plane layout must be NHWC, actual: " << vDesc.getLayout();
    }
    // check dimensions (TensorDesc dims are stored in {N, C, H, W} order regardless of layout)
    const auto& yDims = yDesc.getDims();
    const auto& uDims = uDesc.getDims();
    const auto& vDims = vDesc.getDims();
    if (yDims.size() != 4 || uDims.size() != 4 || vDims.size() != 4) {
        IE_THROW() << "Y,U and V planes dimension sizes must be 4, actual: " << yDims.size() << "(Y plane) and "
                   << uDims.size() << "(U plane) " << vDims.size() << "(V plane)";
    }
    // check batch size
    if (yDims[0] != uDims[0] || yDims[0] != vDims[0]) {
        IE_THROW() << "Y, U and V planes must have the same batch size";
    }
    // check number of channels
    if (yDims[1] != 1) {
        IE_THROW() << "Y plane must have 1 channel, actual: " << yDims[1];
    }
    if (uDims[1] != 1) {
        IE_THROW() << "U plane must have 1 channel, actual: " << uDims[1];
    }
    if (vDims[1] != 1) {
        IE_THROW() << "V plane must have 1 channel, actual: " << vDims[1];
    }
    // check height: chroma planes are subsampled 2x vertically
    if (yDims[2] != 2 * uDims[2]) {
        IE_THROW() << "The height of the Y plane must be equal to (2 * the height of the U plane), actual: " << yDims[2]
                   << "(Y plane) and " << uDims[2] << "(U plane)";
    }
    if (yDims[2] != 2 * vDims[2]) {
        IE_THROW() << "The height of the Y plane must be equal to (2 * the height of the V plane), actual: "
                   << yDims[2] << "(Y plane) and " << vDims[2] << "(V plane)";
    }
    // check width: chroma planes are subsampled 2x horizontally
    if (yDims[3] != 2 * uDims[3]) {
        IE_THROW() << "The width of the Y plane must be equal to (2 * the width of the U plane), actual: " << yDims[3]
                   << "(Y plane) and " << uDims[3] << "(U plane)";
    }
    if (yDims[3] != 2 * vDims[3]) {
        IE_THROW() << "The width of the Y plane must be equal to (2 * the width of the V plane), actual: " << yDims[3]
                   << "(Y plane) and " << vDims[3] << "(V plane)";
    }
    // dims of the compound blob are left empty; they are deduced downstream
    return {Precision::U8, {}, Layout::NCHW};
}
// Returns the effective descriptor of a blob: NV12/I420 compound blobs report
// their Y-plane descriptor widened by the 2 chroma channels, everything else
// reports its own descriptor unchanged.
TensorDesc getBlobTensorDesc(const Blob::Ptr& blob) {
    if (auto* nv12 = dynamic_cast<NV12Blob*>(blob.get())) {
        TensorDesc desc = nv12->y()->getTensorDesc();
        desc.getDims()[1] += 2;  // Y channel + U and V packed in the UV plane
        return desc;
    }
    if (auto* i420 = dynamic_cast<I420Blob*>(blob.get())) {
        TensorDesc desc = i420->y()->getTensorDesc();
        desc.getDims()[1] += 2;  // Y channel + separate U and V planes
        return desc;
    }
    return blob->getTensorDesc();
}
@@ -361,100 +172,6 @@ const std::shared_ptr<IAllocator>& CompoundBlob::getAllocator() const noexcept {
return _allocator;
};
// Builds an NV12 compound blob from Y and interleaved UV planes; the planes
// are validated by verifyNV12BlobInput before being stored.
NV12Blob::NV12Blob(const Blob::Ptr& y, const Blob::Ptr& uv) : CompoundBlob(verifyNV12BlobInput(y, uv)) {
    this->_blobs = {y, uv};
}
// Move overload: validates the planes first (verifyNV12BlobInput only reads
// them), then moves both shared pointers into the compound storage.
NV12Blob::NV12Blob(Blob::Ptr&& y, Blob::Ptr&& uv) : CompoundBlob(verifyNV12BlobInput(y, uv)) {
    this->_blobs = {std::move(y), std::move(uv)};
}
// Mutable access to the Y (luma) plane, stored as the first underlying blob.
Blob::Ptr& NV12Blob::y() noexcept {
    // NOTE: Y plane is a memory blob, which is checked in the constructor
    return _blobs[0];
}
// Read-only access to the Y (luma) plane, stored as the first underlying blob.
const Blob::Ptr& NV12Blob::y() const noexcept {
    // NOTE: Y plane is a memory blob, which is checked in the constructor
    return _blobs[0];
}
// Mutable access to the interleaved UV (chroma) plane, the second underlying blob.
Blob::Ptr& NV12Blob::uv() noexcept {
    // NOTE: UV plane is a memory blob, which is checked in the constructor
    return _blobs[1];
}
// Read-only access to the interleaved UV (chroma) plane, the second underlying blob.
const Blob::Ptr& NV12Blob::uv() const noexcept {
    // NOTE: UV plane is a memory blob, which is checked in the constructor
    return _blobs[1];
}
// Creates an NV12 region-of-interest blob by deriving per-plane ROIs.
Blob::Ptr NV12Blob::createROI(const ROI& roi) const {
    // Round the luma ROI up to even extents so it maps onto whole chroma pixels.
    ROI lumaROI = roi;
    lumaROI.sizeX += lumaROI.sizeX % 2;
    lumaROI.sizeY += lumaROI.sizeY % 2;
    // The UV plane is subsampled 2x in both dimensions.
    const ROI chromaROI(lumaROI.id, lumaROI.posX / 2, lumaROI.posY / 2, lumaROI.sizeX / 2, lumaROI.sizeY / 2);
    auto lumaRoiBlob = y()->createROI(lumaROI);
    auto chromaRoiBlob = uv()->createROI(chromaROI);
    return std::make_shared<NV12Blob>(lumaRoiBlob, chromaRoiBlob);
}
// Builds an I420 compound blob from separate Y, U and V planes; the planes
// are validated by verifyI420BlobInput before being stored.
I420Blob::I420Blob(const Blob::Ptr& y, const Blob::Ptr& u, const Blob::Ptr& v)
    : CompoundBlob(verifyI420BlobInput(y, u, v)) {
    this->_blobs = {y, u, v};
}
// Move overload: validates the planes first (verifyI420BlobInput only reads
// them), then moves all three shared pointers into the compound storage.
I420Blob::I420Blob(Blob::Ptr&& y, Blob::Ptr&& u, Blob::Ptr&& v) : CompoundBlob(verifyI420BlobInput(y, u, v)) {
    this->_blobs = {std::move(y), std::move(u), std::move(v)};
}
// Mutable access to the Y (luma) plane, stored as the first underlying blob.
Blob::Ptr& I420Blob::y() noexcept {
    // NOTE: Y plane is a memory blob, which is checked in the constructor
    return _blobs[0];
}
// Read-only access to the Y (luma) plane, stored as the first underlying blob.
const Blob::Ptr& I420Blob::y() const noexcept {
    // NOTE: Y plane is a memory blob, which is checked in the constructor
    return _blobs[0];
}
// Mutable access to the U (chroma) plane, stored as the second underlying blob.
Blob::Ptr& I420Blob::u() noexcept {
    // NOTE: U plane is a memory blob, which is checked in the constructor
    return _blobs[1];
}
// Read-only access to the U (chroma) plane, stored as the second underlying blob.
const Blob::Ptr& I420Blob::u() const noexcept {
    // NOTE: U plane is a memory blob, which is checked in the constructor
    return _blobs[1];
}
// Mutable access to the V (chroma) plane, stored as the third underlying blob.
Blob::Ptr& I420Blob::v() noexcept {
    // NOTE: V plane is a memory blob, which is checked in the constructor
    return _blobs[2];
}
// Read-only access to the V (chroma) plane, stored as the third underlying blob.
const Blob::Ptr& I420Blob::v() const noexcept {
    // NOTE: V plane is a memory blob, which is checked in the constructor
    return _blobs[2];
}
// Creates an I420 region-of-interest blob by deriving per-plane ROIs.
Blob::Ptr I420Blob::createROI(const ROI& roi) const {
    // Round the luma ROI up to even extents so it maps onto whole chroma pixels.
    ROI lumaROI = roi;
    lumaROI.sizeX += lumaROI.sizeX % 2;
    lumaROI.sizeY += lumaROI.sizeY % 2;
    // U and V planes are subsampled 2x in both dimensions and share one ROI.
    const ROI chromaROI(lumaROI.id, lumaROI.posX / 2, lumaROI.posY / 2, lumaROI.sizeX / 2, lumaROI.sizeY / 2);
    auto lumaRoiBlob = y()->createROI(lumaROI);
    auto uRoiBlob = u()->createROI(chromaROI);
    auto vRoiBlob = v()->createROI(chromaROI);
    return std::make_shared<I420Blob>(lumaRoiBlob, uRoiBlob, vRoiBlob);
}
// Builds a batched compound blob; the per-item blobs are validated by
// verifyBatchedBlobInput before being stored.
BatchedBlob::BatchedBlob(const std::vector<Blob::Ptr>& blobs) : CompoundBlob(verifyBatchedBlobInput(blobs)) {
    this->_blobs = blobs;
}

View File

@@ -4,6 +4,7 @@
#include "ie_version.hpp"
IE_SUPPRESS_DEPRECATED_START
namespace InferenceEngine {
const Version* GetInferenceEngineVersion() noexcept {

View File

@@ -18,6 +18,7 @@
#define OV_INFER_REQ_CALL_STATEMENT(...) \
OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized."); \
OPENVINO_SUPPRESS_DEPRECATED_START \
try { \
__VA_ARGS__; \
} catch (const ::InferenceEngine::RequestBusy& ex) { \
@@ -26,7 +27,8 @@
OPENVINO_THROW(ex.what()); \
} catch (...) { \
OPENVINO_THROW("Unexpected exception"); \
}
} \
OPENVINO_SUPPRESS_DEPRECATED_END
namespace {
@@ -229,6 +231,7 @@ void InferRequest::start_async() {
void InferRequest::wait() {
OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized.");
OPENVINO_SUPPRESS_DEPRECATED_START
try {
_impl->wait();
} catch (const ov::Cancelled&) {
@@ -240,10 +243,12 @@ void InferRequest::wait() {
} catch (...) {
OPENVINO_THROW("Unexpected exception");
}
OPENVINO_SUPPRESS_DEPRECATED_END
}
bool InferRequest::wait_for(const std::chrono::milliseconds timeout) {
OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized.");
OPENVINO_SUPPRESS_DEPRECATED_START
try {
return _impl->wait_for(timeout);
} catch (const ie::InferCancelled& e) {
@@ -253,6 +258,7 @@ bool InferRequest::wait_for(const std::chrono::milliseconds timeout) {
} catch (...) {
OPENVINO_THROW("Unexpected exception");
}
OPENVINO_SUPPRESS_DEPRECATED_END
}
void InferRequest::set_callback(std::function<void(std::exception_ptr)> callback) {

View File

@@ -10,6 +10,7 @@ using namespace std;
using namespace InferenceEngine;
IE_SUPPRESS_DEPRECATED_START
using ResponseBufferTests = ::testing::Test;
TEST_F(ResponseBufferTests, canCreateResponseMessage) {

View File

@@ -48,9 +48,6 @@ public:
}
};
// Test fixture for NV12Blob (two-plane Y + interleaved UV) construction checks.
class NV12BlobTests : public CompoundBlobTests {};
// Test fixture for I420Blob (three-plane Y/U/V) construction checks.
class I420BlobTests : public CompoundBlobTests {};
TEST(BlobConversionTests, canWorkWithMemoryBlob) {
Blob::Ptr blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW));
ASSERT_TRUE(blob->is<MemoryBlob>());
@@ -246,177 +243,3 @@ TEST_F(CompoundBlobTests, compoundBlobHoldsValidDataWhenUnderlyingBlobIsDestroye
ASSERT_NE(nullptr, mb0);
EXPECT_EQ(stored_value, mb0->rmap().as<const uint8_t*>()[0]);
}
IE_SUPPRESS_DEPRECATED_START
// Construction must fail if either plane pointer is null, in any position.
TEST_F(NV12BlobTests, cannotCreateNV12BlobFromNullptrBlobs) {
    Blob::Ptr valid = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 4, 4}, NHWC));
    EXPECT_THROW(make_shared_blob<NV12Blob>(valid, nullptr), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<NV12Blob>(nullptr, valid), InferenceEngine::Exception);
}
// Planes must be MemoryBlob objects; a CompoundBlob in either position is rejected.
TEST_F(NV12BlobTests, cannotCreateNV12BlobFromCompoundBlobs) {
    Blob::Ptr blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 4, 4}, NHWC));
    auto cblob = make_shared_blob<CompoundBlob>(std::vector<Blob::Ptr>({blob}));
    EXPECT_THROW(make_shared_blob<NV12Blob>(cblob, blob), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<NV12Blob>(blob, cblob), InferenceEngine::Exception);
}
// Planes whose element sizes differ (U8 vs FP32) must be rejected.
TEST_F(NV12BlobTests, cannotCreateNV12BlobFromPlanesWithDifferentElementSize) {
    Blob::Ptr blob_u8 = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 4, 4}, NHWC));
    Blob::Ptr blob_float = make_shared_blob<float>(TensorDesc(Precision::FP32, {1, 2, 2, 2}, NHWC));
    EXPECT_THROW(make_shared_blob<NV12Blob>(blob_u8, blob_float), InferenceEngine::Exception);
}
// Both planes have equal element sizes but non-U8 precision; must be rejected.
TEST_F(NV12BlobTests, cannotCreateNV12BlobFromPlanesWithNonU8Precision) {
    Blob::Ptr float_y_blob = make_shared_blob<float>(TensorDesc(Precision::FP32, {1, 1, 4, 4}, NHWC));
    Blob::Ptr float_uv_blob = make_shared_blob<float>(TensorDesc(Precision::FP32, {1, 2, 2, 2}, NHWC));
    EXPECT_THROW(make_shared_blob<NV12Blob>(float_y_blob, float_uv_blob), InferenceEngine::Exception);
}
// Y (batch 1) and UV (batch 2) planes with mismatched batch size must be rejected.
TEST_F(NV12BlobTests, cannotCreateNV12BlobFromPlanesWithInconsistentBatchSize) {
    Blob::Ptr y = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 4, 4}, NHWC));
    Blob::Ptr uv = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {2, 2, 2, 2}, NHWC));
    EXPECT_THROW(make_shared_blob<NV12Blob>(y, uv), InferenceEngine::Exception);
}
// Y must have 1 channel and UV must have 2; every wrong pairing must be rejected.
TEST_F(NV12BlobTests, cannotCreateNV12BlobFromPlanesWithWrongChannelNumber) {
    Blob::Ptr y = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 4, 4}, NHWC));
    Blob::Ptr uv = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 2, 2, 2}, NHWC));
    EXPECT_THROW(make_shared_blob<NV12Blob>(y, y), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<NV12Blob>(uv, uv), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<NV12Blob>(uv, y), InferenceEngine::Exception);
}
// NOTE(review): TensorDesc dims are {N, C, H, W}. These UV planes keep dims[3]
// (width) at the valid value 3 and vary dims[2] (height), so this case actually
// exercises the 2x *height* constraint — the name appears swapped with the
// WrongHeightRatio test below; confirm before renaming.
TEST_F(NV12BlobTests, cannotCreateNV12BlobFromPlanesWithWrongWidthRatio) {
    Blob::Ptr y = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 6, 6}, NHWC));
    Blob::Ptr uv0 = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 2, 1, 3}, NHWC));
    Blob::Ptr uv1 = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 2, 5, 3}, NHWC));
    Blob::Ptr uv2 = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 2, 6, 3}, NHWC));
    Blob::Ptr uv3 = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 2, 8, 3}, NHWC));
    EXPECT_THROW(make_shared_blob<NV12Blob>(y, uv0), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<NV12Blob>(y, uv1), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<NV12Blob>(y, uv2), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<NV12Blob>(y, uv3), InferenceEngine::Exception);
}
// NOTE(review): TensorDesc dims are {N, C, H, W}. These UV planes keep dims[2]
// (height) at the valid value 3 and vary dims[3] (width), so this case actually
// exercises the 2x *width* constraint — the name appears swapped with the
// WrongWidthRatio test above; confirm before renaming.
TEST_F(NV12BlobTests, cannotCreateNV12BlobFromPlanesWithWrongHeightRatio) {
    Blob::Ptr y = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 6, 6}, NHWC));
    Blob::Ptr uv0 = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 2, 3, 1}, NHWC));
    Blob::Ptr uv1 = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 2, 3, 5}, NHWC));
    Blob::Ptr uv2 = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 2, 3, 6}, NHWC));
    Blob::Ptr uv3 = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 2, 3, 8}, NHWC));
    EXPECT_THROW(make_shared_blob<NV12Blob>(y, uv0), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<NV12Blob>(y, uv1), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<NV12Blob>(y, uv2), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<NV12Blob>(y, uv3), InferenceEngine::Exception);
}
// Happy path: valid Y (6x8) and UV (3x4, 2-channel) planes produce an NV12 blob
// whose accessors return exactly the planes passed in.
TEST_F(NV12BlobTests, canCreateNV12BlobFromTwoPlanes) {
    Blob::Ptr y_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 6, 8}, NHWC));
    Blob::Ptr uv_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 2, 3, 4}, NHWC));
    NV12Blob::Ptr nv12_blob = make_shared_blob<NV12Blob>(y_blob, uv_blob);
    verifyCompoundBlob(nv12_blob, {y_blob, uv_blob});
    EXPECT_EQ(y_blob, nv12_blob->y());
    EXPECT_EQ(uv_blob, nv12_blob->uv());
}
// Happy path for the rvalue-plane constructor overload.
TEST_F(NV12BlobTests, canCreateNV12BlobFromTwoMovedPlanes) {
    NV12Blob::Ptr nv12_blob =
        make_shared_blob<NV12Blob>(make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 6, 8}, NHWC)),
                                   make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 2, 3, 4}, NHWC)));
    verifyCompoundBlob(nv12_blob);
}
// Happy path: valid Y (6x8) and half-size U/V (3x4) planes produce an I420 blob
// whose accessors return exactly the planes passed in.
TEST_F(I420BlobTests, canCreateI420BlobFromThreePlanes) {
    Blob::Ptr y_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 6, 8}, NHWC));
    Blob::Ptr u_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 4}, NHWC));
    Blob::Ptr v_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 4}, NHWC));
    I420Blob::Ptr i420_blob = make_shared_blob<I420Blob>(y_blob, u_blob, v_blob);
    verifyCompoundBlob(i420_blob, {y_blob, u_blob, v_blob});
    EXPECT_EQ(y_blob, i420_blob->y());
    EXPECT_EQ(u_blob, i420_blob->u());
    EXPECT_EQ(v_blob, i420_blob->v());
}
// Happy path for the rvalue-plane constructor overload.
TEST_F(I420BlobTests, canCreateI420BlobFromThreeMovedPlanes) {
    I420Blob::Ptr i420_blob =
        make_shared_blob<I420Blob>(make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 6, 8}, NHWC)),
                                   make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 4}, NHWC)),
                                   make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 4}, NHWC)));
    verifyCompoundBlob(i420_blob);
}
// Construction must fail if any plane pointer is null, in any position.
// The nullptr check runs before dimension validation, so passing the same
// valid plane for multiple positions is fine here.
TEST_F(I420BlobTests, cannotCreateI420BlobFromNullptrBlobs) {
    Blob::Ptr valid = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 4, 4}, NHWC));
    EXPECT_THROW(make_shared_blob<I420Blob>(valid, nullptr, nullptr), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<I420Blob>(nullptr, valid, nullptr), InferenceEngine::Exception);
    // Previously the V position was never exercised with nullptr.
    EXPECT_THROW(make_shared_blob<I420Blob>(nullptr, nullptr, valid), InferenceEngine::Exception);
}
// Planes must be MemoryBlob objects; a CompoundBlob in any position is rejected.
TEST_F(I420BlobTests, cannotCreateI420BlobFromCompoundBlobs) {
    Blob::Ptr y_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 6, 8}, NHWC));
    Blob::Ptr u_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 4}, NHWC));
    Blob::Ptr v_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 4}, NHWC));
    // Helper: wraps a single plane into a (non-memory) compound blob.
    auto make_cblob = [](Blob::Ptr const& b) {
        return make_shared_blob<CompoundBlob>(std::vector<Blob::Ptr>({b}));
    };
    auto c_y_blob = make_cblob(y_blob);
    auto c_u_blob = make_cblob(u_blob);
    auto c_v_blob = make_cblob(v_blob);
    using ie_exception_t = InferenceEngine::Exception;
    EXPECT_THROW(make_shared_blob<I420Blob>(c_y_blob, u_blob, v_blob), ie_exception_t);
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob, c_u_blob, v_blob), ie_exception_t);
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob, u_blob, c_v_blob), ie_exception_t);
}
// Planes whose element sizes differ (U8 vs FP32) must be rejected.
TEST_F(I420BlobTests, cannotCreateI420BlobFromPlanesWithDifferentElementSize) {
    Blob::Ptr y_blob_u8 = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 4, 4}, NHWC));
    Blob::Ptr u_blob_float = make_shared_blob<float>(TensorDesc(Precision::FP32, {1, 1, 2, 2}, NHWC));
    Blob::Ptr v_blob_float = make_shared_blob<float>(TensorDesc(Precision::FP32, {1, 1, 2, 2}, NHWC));
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob_u8, u_blob_float, v_blob_float), InferenceEngine::Exception);
}
// All planes have equal element sizes but non-U8 precision; must be rejected.
TEST_F(I420BlobTests, cannotCreateI420BlobFromPlanesWithNonU8Precision) {
    Blob::Ptr y_blob_float = make_shared_blob<float>(TensorDesc(Precision::FP32, {1, 1, 4, 4}, NHWC));
    Blob::Ptr u_blob_float = make_shared_blob<float>(TensorDesc(Precision::FP32, {1, 1, 2, 2}, NHWC));
    Blob::Ptr v_blob_float = make_shared_blob<float>(TensorDesc(Precision::FP32, {1, 1, 2, 2}, NHWC));
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob_float, u_blob_float, v_blob_float), InferenceEngine::Exception);
}
// A plane with batch 2 among batch-1 planes must be rejected in either chroma position.
TEST_F(I420BlobTests, cannotCreateI420BlobFromPlanesWithInconsistentBatchSize) {
    Blob::Ptr y_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 6, 8}, NHWC));
    Blob::Ptr u_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {2, 1, 3, 4}, NHWC));
    Blob::Ptr v_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 4}, NHWC));
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob, u_blob, v_blob), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob, v_blob, u_blob), InferenceEngine::Exception);
}
// A 2-channel chroma plane (each I420 plane must have exactly 1 channel) must
// be rejected in either chroma position.
TEST_F(I420BlobTests, cannotCreateI420BlobFromPlanesWithWrongChannelNumber) {
    Blob::Ptr y_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 6, 8}, NHWC));
    Blob::Ptr u_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 2, 3, 4}, NHWC));
    Blob::Ptr v_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 4}, NHWC));
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob, u_blob, v_blob), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob, v_blob, u_blob), InferenceEngine::Exception);
}
// A chroma plane whose width (dims[3] = 2) is not half of Y's width (8) must be
// rejected in either chroma position.
TEST_F(I420BlobTests, cannotCreateI420BlobFromPlanesWithWrongWidthRatio) {
    Blob::Ptr y_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 6, 8}, NHWC));
    Blob::Ptr u_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 2}, NHWC));
    Blob::Ptr v_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 4}, NHWC));
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob, u_blob, v_blob), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob, v_blob, u_blob), InferenceEngine::Exception);
}
// A chroma plane whose height (dims[2] = 2) is not half of Y's height (6) must
// be rejected in either chroma position.
TEST_F(I420BlobTests, cannotCreateI420BlobFromPlanesWithWrongHeightRatio) {
    Blob::Ptr y_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 6, 8}, NHWC));
    Blob::Ptr u_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 2, 4}, NHWC));
    Blob::Ptr v_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 4}, NHWC));
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob, u_blob, v_blob), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob, v_blob, u_blob), InferenceEngine::Exception);
}

View File

@@ -9,6 +9,8 @@
#include "ie_common.h"
IE_SUPPRESS_DEPRECATED_START
// tests/unit/inference_engine/exception_test.cpp
TEST(ExceptionTests, CanThrowUsingMacro) {

View File

@@ -11,6 +11,8 @@
using Box = MemorySolver::Box;
IE_SUPPRESS_DEPRECATED_START
TEST(MemSolverTest, CanConstruct) {
{ // Empty vector<Box>
MemorySolver ms(std::vector<Box>{});