diff --git a/cmake/developer_package/add_ie_target.cmake b/cmake/developer_package/add_ie_target.cmake
index c9513b4b0e6..0aff25cddc2 100644
--- a/cmake/developer_package/add_ie_target.cmake
+++ b/cmake/developer_package/add_ie_target.cmake
@@ -19,9 +19,10 @@ addIeTarget(
         ${SDL_INCLUDES}
         /some/specific/path
     LINK_LIBRARIES
-        ie::important_plugin
+        link_dependencies
     DEPENDENCIES
         dependencies
+        ie::important_plugin
     OBJECT_FILES
         object libraries
 )
diff --git a/docs/template_plugin/tests/functional/op_reference/acosh.cpp b/docs/template_plugin/tests/functional/op_reference/acosh.cpp
index e854c98b7e0..4bf66df4a14 100644
--- a/docs/template_plugin/tests/functional/op_reference/acosh.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/acosh.cpp
@@ -4,9 +4,6 @@
 
 #include <gtest/gtest.h>
 
-#include <ie_core.hpp>
-#include <ie_ngraph_utils.hpp>
-#include <ngraph/ngraph.hpp>
 #include <shared_test_classes/base/layer_test_utils.hpp>
 #include <tuple>
 
diff --git a/docs/template_plugin/tests/functional/op_reference/atanh.cpp b/docs/template_plugin/tests/functional/op_reference/atanh.cpp
index e80c6b6734f..12fd26970bd 100644
--- a/docs/template_plugin/tests/functional/op_reference/atanh.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/atanh.cpp
@@ -12,6 +12,7 @@
 #include <tuple>
 
 #include "base_reference_test.hpp"
+#include "openvino/runtime/allocator.hpp"
 
 using namespace reference_tests;
 using namespace ngraph;
@@ -43,8 +44,8 @@ struct AtanhParams {
     ngraph::PartialShape pshape;
     ngraph::element::Type inType;
     ngraph::element::Type outType;
-    InferenceEngine::Blob::Ptr inputData;
-    InferenceEngine::Blob::Ptr refData;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
 };
 
 class ReferenceAtanhLayerTest : public testing::TestWithParam<AtanhParams>, public CommonReferenceTest {
diff --git a/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp b/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp
index f2d2cf68aa3..ceb902b7c30 100644
--- a/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp
@@ -2,9 +2,13 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 #include "base_reference_test.hpp"
+#include "functional_test_utils/ov_plugin_cache.hpp"
 
 #include <gtest/gtest.h>
 
+#include "openvino/core/type/element_type.hpp"
+#include "openvino/runtime/allocator.hpp"
+#include "openvino/runtime/tensor.hpp"
 #include "transformations/utils/utils.hpp"
 
 using namespace InferenceEngine;
@@ -12,7 +16,7 @@ using namespace InferenceEngine;
 namespace reference_tests {
 
 CommonReferenceTest::CommonReferenceTest(): targetDevice("TEMPLATE") {
-    core = PluginCache::get().ie(targetDevice);
+    core = ov::test::PluginCache::get().core(targetDevice);
 }
 
 void CommonReferenceTest::Exec() {
@@ -23,73 +27,41 @@ void CommonReferenceTest::Exec() {
     LoadNetwork();
     FillInputs();
     Infer();
     Validate();
 }
 
 void CommonReferenceTest::LoadNetwork() {
-    InferenceEngine::CNNNetwork cnnNetwork(function);
-    auto inputInfo = cnnNetwork.getInputsInfo();
-    auto outputInfo = cnnNetwork.getOutputsInfo();
-    for (const auto& param : function->get_parameters()) {
-        inputInfo[param->get_friendly_name()]->setPrecision(InferenceEngine::details::convertPrecision(param->get_element_type()));
-    }
-    for (const auto& result : function->get_results()) {
-        outputInfo[ngraph::op::util::create_ie_output_name(result->input_value(0))]->setPrecision(
-            InferenceEngine::details::convertPrecision(result->get_element_type()));
-    }
-    executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice);
+    executableNetwork = core->compile_model(function, targetDevice);
 }
 
 void CommonReferenceTest::FillInputs() {
-    const auto& inputInfo = executableNetwork.GetInputsInfo();
-    const auto& params = function->get_parameters();
-    ASSERT_EQ(params.size(), inputData.size());
-    ASSERT_EQ(inputInfo.size(), inputData.size());
+    const auto& functionParams = function->get_parameters();
+    ASSERT_EQ(functionParams.size(), inputData.size());
 
-    for (size_t i = 0; i < params.size(); i++) {
-        const auto& param = params[i];
-        const auto infoIt = inputInfo.find(param->get_friendly_name());
-        GTEST_ASSERT_NE(infoIt, inputInfo.cend());
+    for (size_t i = 0; i < functionParams.size(); i++) {
+        const auto& param = functionParams[i];
 
-        const auto& info = infoIt->second;
-        auto blob = make_blob_with_precision(info->getTensorDesc());
-        blob->allocate();
+        ov::runtime::Tensor blob(param->get_element_type(), param->get_shape());
+        ASSERT_EQ(blob.get_byte_size(), inputData[i].get_byte_size());
 
-        ASSERT_EQ(blob->byteSize(), inputData[i]->byteSize());
-
-        MemoryBlob::Ptr mInputData = as<MemoryBlob>(inputData[i]);
-        ASSERT_NE(mInputData, nullptr);
-        auto minputDataHolder = mInputData->rmap();
-
-        MemoryBlob::Ptr mBlob = as<MemoryBlob>(blob);
-        ASSERT_NE(mBlob, nullptr);
-        auto mBlobHolder = mBlob->wmap();
-
-        std::memcpy(mBlobHolder.as<void*>(), minputDataHolder.as<const void*>(), inputData[i]->byteSize());
+        std::memcpy(blob.data(), inputData[i].data(), inputData[i].get_byte_size());
         inputData[i] = blob;
     }
 }
 
 void CommonReferenceTest::Infer() {
-    inferRequest = executableNetwork.CreateInferRequest();
-
-    const auto& inputsInfo = executableNetwork.GetInputsInfo();
+    inferRequest = executableNetwork.create_infer_request();
     const auto& functionParams = function->get_parameters();
+
     for (size_t i = 0; i < functionParams.size(); ++i) {
         const auto& param = functionParams[i];
-        const auto infoIt = inputsInfo.find(param->get_friendly_name());
-        GTEST_ASSERT_NE(infoIt, inputsInfo.cend());
-
-        const auto& info = infoIt->second;
-        auto blob = inputData[i];
-
-        inferRequest.SetBlob(info->name(), blob);
+        inferRequest.set_tensor(param->get_friendly_name(), inputData[i]);
     }
-    inferRequest.Infer();
+    inferRequest.infer();
 }
 
 void CommonReferenceTest::Validate() {
-    ASSERT_EQ(executableNetwork.GetOutputsInfo().size(), refOutData.size());
-    std::vector<InferenceEngine::Blob::Ptr> outputs;
+    ASSERT_EQ(executableNetwork.get_results().size(), refOutData.size());
+    std::vector<ov::runtime::Tensor> outputs;
     for (const auto& result : function->get_results()) {
         auto name = ngraph::op::util::create_ie_output_name(result->input_value(0));
-        outputs.emplace_back(inferRequest.GetBlob(name));
+        outputs.emplace_back(inferRequest.get_tensor(name));
     }
 
     ASSERT_EQ(refOutData.size(), outputs.size());
@@ -97,80 +69,86 @@ void CommonReferenceTest::Validate() {
         ValidateBlobs(refOutData[i], outputs[i]);
     }
 }
-void CommonReferenceTest::ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob) {
-    ASSERT_TRUE(refBlob != nullptr);
-    ASSERT_TRUE(outBlob != nullptr);
-    ASSERT_EQ(refBlob->getTensorDesc().getPrecision(), outBlob->getTensorDesc().getPrecision());
-    ASSERT_EQ(refBlob->byteSize(), outBlob->byteSize());
-    auto mRef = as<InferenceEngine::MemoryBlob>(refBlob);
-    IE_ASSERT(mRef);
-    const auto refLockMemory = mRef->rmap();
-    const auto refBuffer = refLockMemory.as<const std::uint8_t*>();
+void CommonReferenceTest::ValidateBlobs(const ov::runtime::Tensor& refBlob, const ov::runtime::Tensor& outBlob) {
+    ASSERT_EQ(refBlob.get_element_type(), outBlob.get_element_type());
+    ASSERT_EQ(refBlob.get_byte_size(), outBlob.get_byte_size());
 
-    auto mOut = as<InferenceEngine::MemoryBlob>(outBlob);
-    IE_ASSERT(mOut);
-    const auto outLockMemory = mOut->rmap();
-    const auto outBuffer = outLockMemory.as<const std::uint8_t*>();
-
-    const auto& precision = refBlob->getTensorDesc().getPrecision();
-    switch (precision) {
-    case InferenceEngine::Precision::BF16:
-        LayerTestsUtils::LayerTestsCommon::Compare<ngraph::bfloat16, ngraph::bfloat16>(
-            reinterpret_cast<const ngraph::bfloat16*>(refBuffer), reinterpret_cast<const ngraph::bfloat16*>(outBuffer), refBlob->size(), threshold);
+    const auto& element_type = refBlob.get_element_type();
+    switch (element_type) {
+    case ov::element::bf16:
+        LayerTestsUtils::LayerTestsCommon::Compare<ov::bfloat16, ov::bfloat16>(
+            refBlob.data<const ov::bfloat16>(), outBlob.data<const ov::bfloat16>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::FP16:
-        LayerTestsUtils::LayerTestsCommon::Compare<ngraph::float16, ngraph::float16>(
-            reinterpret_cast<const ngraph::float16*>(refBuffer), reinterpret_cast<const ngraph::float16*>(outBuffer), refBlob->size(), threshold);
+    case ov::element::f16:
+        LayerTestsUtils::LayerTestsCommon::Compare<ov::float16, ov::float16>(
+            refBlob.data<const ov::float16>(), outBlob.data<const ov::float16>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::FP32:
-        LayerTestsUtils::LayerTestsCommon::Compare<float, float>(reinterpret_cast<const float*>(refBuffer), reinterpret_cast<const float*>(outBuffer),
-                                                                 refBlob->size(), threshold);
+    case ov::element::f32:
+        LayerTestsUtils::LayerTestsCommon::Compare<float, float>(
+            refBlob.data<const float>(), outBlob.data<const float>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::I8:
-        LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(reinterpret_cast<const int8_t*>(refBuffer), reinterpret_cast<const int8_t*>(outBuffer),
-                                                                   refBlob->size(), threshold);
+    case ov::element::i8:
+        LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(
+            refBlob.data<const int8_t>(), outBlob.data<const int8_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::I16:
-        LayerTestsUtils::LayerTestsCommon::Compare<int16_t, int16_t>(reinterpret_cast<const int16_t*>(refBuffer), reinterpret_cast<const int16_t*>(outBuffer),
-                                                                     refBlob->size(), threshold);
+    case ov::element::i16:
+        LayerTestsUtils::LayerTestsCommon::Compare<int16_t, int16_t>(
+            refBlob.data<const int16_t>(), outBlob.data<const int16_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::I32:
-        LayerTestsUtils::LayerTestsCommon::Compare<int32_t, int32_t>(reinterpret_cast<const int32_t*>(refBuffer), reinterpret_cast<const int32_t*>(outBuffer),
-                                                                     refBlob->size(), threshold);
+    case ov::element::i32:
+        LayerTestsUtils::LayerTestsCommon::Compare<int32_t, int32_t>(
+            refBlob.data<const int32_t>(), outBlob.data<const int32_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::I64:
-        LayerTestsUtils::LayerTestsCommon::Compare<int64_t, int64_t>(reinterpret_cast<const int64_t*>(refBuffer), reinterpret_cast<const int64_t*>(outBuffer),
-                                                                     refBlob->size(), threshold);
+    case ov::element::i64:
+        LayerTestsUtils::LayerTestsCommon::Compare<int64_t, int64_t>(
+            refBlob.data<const int64_t>(), outBlob.data<const int64_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::BOOL:
-    case InferenceEngine::Precision::U8:
-        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
-                                                                     refBlob->size(), threshold);
+    case ov::element::boolean:
+        LayerTestsUtils::LayerTestsCommon::Compare<char, char>(
+            refBlob.data<const char>(), outBlob.data<const char>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::U16:
-        LayerTestsUtils::LayerTestsCommon::Compare<uint16_t, uint16_t>(reinterpret_cast<const uint16_t*>(refBuffer),
-                                                                       reinterpret_cast<const uint16_t*>(outBuffer), refBlob->size(), threshold);
+    case ov::element::u8:
+        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(
+            refBlob.data<const uint8_t>(), outBlob.data<const uint8_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::U32:
-        LayerTestsUtils::LayerTestsCommon::Compare<uint32_t, uint32_t>(reinterpret_cast<const uint32_t*>(refBuffer),
-                                                                       reinterpret_cast<const uint32_t*>(outBuffer), refBlob->size(), threshold);
+    case ov::element::u16:
+        LayerTestsUtils::LayerTestsCommon::Compare<uint16_t, uint16_t>(
+            refBlob.data<const uint16_t>(), outBlob.data<const uint16_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::U64:
-        LayerTestsUtils::LayerTestsCommon::Compare<uint64_t, uint64_t>(reinterpret_cast<const uint64_t*>(refBuffer),
-                                                                       reinterpret_cast<const uint64_t*>(outBuffer), refBlob->size(), threshold);
+    case ov::element::u32:
+        LayerTestsUtils::LayerTestsCommon::Compare<uint32_t, uint32_t>(
+            refBlob.data<const uint32_t>(), outBlob.data<const uint32_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::I4:
-    case InferenceEngine::Precision::U4:
-        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
-                                                                     refBlob->size() / 2, threshold);
+    case ov::element::u64:
+        LayerTestsUtils::LayerTestsCommon::Compare<uint64_t, uint64_t>(
+            refBlob.data<const uint64_t>(), outBlob.data<const uint64_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::BIN:
-        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
-                                                                     refBlob->size() / 8, threshold);
+    case ov::element::i4:
+    case ov::element::u4:
+        LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(
+            refBlob.data<const int8_t>(), outBlob.data<const int8_t>(),
+            refBlob.get_size() / 2, threshold);
+        break;
+    case ov::element::u1:
+        LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(
+            refBlob.data<const int8_t>(), outBlob.data<const int8_t>(),
+            refBlob.get_size() / 8, threshold);
         break;
     default:
-        FAIL() << "Comparator for " << precision << " precision isn't supported";
+        FAIL() << "Comparator for " << element_type << " element type isn't supported";
     }
 }
diff --git a/docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp b/docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp
index de08533405e..49213892e51 100644
--- a/docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp
+++ b/docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp
@@ -2,11 +2,13 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include <ie_core.hpp>
-#include <ie_ngraph_utils.hpp>
 #include <gtest/gtest.h>
-#include <ngraph/ngraph.hpp>
-#include <ie_blob.h>
+#include "openvino/core/shape.hpp"
+#include "openvino/runtime/allocator.hpp"
+#include "openvino/runtime/tensor.hpp"
+#include "openvino/runtime/core.hpp"
+#include "openvino/core/type/element_type.hpp"
+
 #include <shared_test_classes/base/layer_test_utils.hpp>
 
 namespace reference_tests {
@@ -26,33 +28,27 @@ public:
     void Validate();
 
 private:
-    void ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob);
+    void ValidateBlobs(const ov::runtime::Tensor& refBlob, const ov::runtime::Tensor& outBlob);
 
 protected:
     const std::string targetDevice;
-    std::shared_ptr<InferenceEngine::Core> core;
-    std::shared_ptr<ngraph::Function> function;
+    std::shared_ptr<ov::runtime::Core> core;
+    std::shared_ptr<ov::Function> function;
 
-    InferenceEngine::ExecutableNetwork executableNetwork;
-    InferenceEngine::InferRequest inferRequest;
-    std::vector<InferenceEngine::Blob::Ptr> inputData;
-    std::vector<InferenceEngine::Blob::Ptr> refOutData;
+    ov::runtime::ExecutableNetwork executableNetwork;
+    ov::runtime::InferRequest inferRequest;
+    std::vector<ov::runtime::Tensor> inputData;
+    std::vector<ov::runtime::Tensor> refOutData;
     float threshold = 1e-2f;
 };
 
 template <typename T>
-InferenceEngine::Blob::Ptr CreateBlob(const ngraph::element::Type& element_type, const std::vector<T>& values, size_t size = 0) {
+ov::runtime::Tensor CreateBlob(const ov::element::Type& element_type, const std::vector<T>& values, size_t size = 0) {
     size_t real_size = size ? size : values.size() * sizeof(T) / element_type.size();
-    auto blob = make_blob_with_precision(
-        InferenceEngine::TensorDesc(InferenceEngine::details::convertPrecision(element_type), {real_size}, InferenceEngine::Layout::C));
-    blob->allocate();
-    InferenceEngine::MemoryBlob::Ptr minput = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
-    IE_ASSERT(minput);
-    auto minputHolder = minput->wmap();
+    ov::runtime::Tensor tensor { element_type, {real_size} };
+    std::memcpy(tensor.data(), values.data(), std::min(real_size * element_type.size(), sizeof(T) * values.size()));
 
-    std::memcpy(minputHolder.as<void*>(), values.data(), std::min(real_size * element_type.size(), sizeof(T) * values.size()));
-
-    return blob;
+    return tensor;
 }
 
 ///
@@ -61,15 +57,15 @@ InferenceEngine::Blob::Ptr CreateBlob(const ngraph::element::Type& element_type,
 struct Tensor {
     Tensor() = default;
 
-    Tensor(const ngraph::Shape& shape, ngraph::element::Type type, const InferenceEngine::Blob::Ptr& data): shape {shape}, type {type}, data {data} {}
+    Tensor(const ov::Shape& shape, ov::element::Type type, const ov::runtime::Tensor& data): shape {shape}, type {type}, data {data} {}
 
     template <typename T>
-    Tensor(const ngraph::Shape& shape, ngraph::element::Type type, const std::vector<T>& data_elements)
+    Tensor(const ov::Shape& shape, ov::element::Type type, const std::vector<T>& data_elements)
         : Tensor {shape, type, CreateBlob(type, data_elements)} {}
 
-    ngraph::Shape shape;
-    ngraph::element::Type type;
-    InferenceEngine::Blob::Ptr data;
+    ov::Shape shape;
+    ov::element::Type type;
+    ov::runtime::Tensor data;
 };
 
 ///
diff --git a/docs/template_plugin/tests/functional/op_reference/conversion.hpp b/docs/template_plugin/tests/functional/op_reference/conversion.hpp
index 666060ddb82..22869c08076 100644
--- a/docs/template_plugin/tests/functional/op_reference/conversion.hpp
+++ b/docs/template_plugin/tests/functional/op_reference/conversion.hpp
@@ -31,8 +31,8 @@ struct ConvertParams {
     ngraph::PartialShape pshape;
     ngraph::element::Type inType;
     ngraph::element::Type outType;
-    InferenceEngine::Blob::Ptr inputData;
-    InferenceEngine::Blob::Ptr refData;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
 };
 
 class ReferenceConversionLayerTest : public testing::TestWithParam<ConvertParams>, public CommonReferenceTest {
diff --git a/docs/template_plugin/tests/functional/op_reference/cum_sum.cpp b/docs/template_plugin/tests/functional/op_reference/cum_sum.cpp
index 1539a138c3f..e6a74c88f74 100644
--- a/docs/template_plugin/tests/functional/op_reference/cum_sum.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/cum_sum.cpp
@@ -55,9 +55,9 @@ struct CumSumParams {
     element::Type axisType;
     element::Type inType;
     element::Type outType;
-    Blob::Ptr axisData;
-    Blob::Ptr inputData;
-    Blob::Ptr refData;
+    ov::runtime::Tensor axisData;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
     bool testDefaults = false;
 };
 
diff --git a/docs/template_plugin/tests/functional/op_reference/erf.cpp b/docs/template_plugin/tests/functional/op_reference/erf.cpp
index bd888a8e03c..ffcbcea13a8 100644
--- a/docs/template_plugin/tests/functional/op_reference/erf.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/erf.cpp
@@ -43,8 +43,8 @@ struct ErfParams {
     ngraph::PartialShape pshape;
     ngraph::element::Type inType;
     ngraph::element::Type outType;
-    InferenceEngine::Blob::Ptr inputData;
-    InferenceEngine::Blob::Ptr refData;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
 };
 
 class ReferenceErfLayerTest : public testing::TestWithParam<ErfParams>, public CommonReferenceTest {
diff --git a/docs/template_plugin/tests/functional/op_reference/grn.cpp b/docs/template_plugin/tests/functional/op_reference/grn.cpp
index e7fc0c79f6b..3bf96a9769c 100644
--- a/docs/template_plugin/tests/functional/op_reference/grn.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/grn.cpp
@@ -26,8 +26,8 @@ struct GrnParams {
     PartialShape pshape;
     element::Type inType;
     element::Type outType;
-    Blob::Ptr inputData;
-    Blob::Ptr refData;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
 };
 
 class ReferenceGrnLayerTest : public testing::TestWithParam<GrnParams>, public CommonReferenceTest {
diff --git a/docs/template_plugin/tests/functional/op_reference/max_pool.cpp b/docs/template_plugin/tests/functional/op_reference/max_pool.cpp
index 2cca7c50513..1cbe103b9bc 100644
--- a/docs/template_plugin/tests/functional/op_reference/max_pool.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/max_pool.cpp
@@ -47,9 +47,9 @@ struct MaxPoolParams {
     Shape m_input_shape;
     element::Type m_input_type;
     element::Type m_indices_type;
-    InferenceEngine::Blob::Ptr m_input_data;
-    InferenceEngine::Blob::Ptr m_expected_values;
-    InferenceEngine::Blob::Ptr m_expected_indices;
+    ov::runtime::Tensor m_input_data;
+    ov::runtime::Tensor m_expected_values;
+    ov::runtime::Tensor m_expected_indices;
     Strides m_strides;
     Strides m_dilations;
     Shape m_pads_begin;
diff --git a/docs/template_plugin/tests/functional/op_reference/minimum.cpp b/docs/template_plugin/tests/functional/op_reference/minimum.cpp
index 4dfeb44a532..a42d1eca1e3 100644
--- a/docs/template_plugin/tests/functional/op_reference/minimum.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/minimum.cpp
@@ -31,9 +31,9 @@ struct MinimumParams {
     PartialShape pshape;
     element::Type inType;
     element::Type outType;
-    Blob::Ptr inputData1;
-    Blob::Ptr inputData2;
-    Blob::Ptr refData;
+    ov::runtime::Tensor inputData1;
+    ov::runtime::Tensor inputData2;
+    ov::runtime::Tensor refData;
 };
 
 class ReferenceMinimumLayerTest : public testing::TestWithParam<MinimumParams>, public CommonReferenceTest {
diff --git a/docs/template_plugin/tests/functional/op_reference/mvn.cpp b/docs/template_plugin/tests/functional/op_reference/mvn.cpp
index 5321164807b..7ba1258ab0f 100644
--- a/docs/template_plugin/tests/functional/op_reference/mvn.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/mvn.cpp
@@ -186,10 +186,7 @@ private:
                                                          const ngraph::op::MVNEpsMode epsMode) {
         std::vector<int64_t> dataVector(reductionAxes.shape[0]);
         const auto in = std::make_shared<op::Parameter>(input.type, input.shape);
-        auto mRef = as<InferenceEngine::MemoryBlob>(reductionAxes.data);
-        IE_ASSERT(mRef);
-        const auto refLockMemory = mRef->rmap();
-        const auto refBuffer = refLockMemory.as<const int64_t*>();
+        const auto refBuffer = reductionAxes.data.data<const int64_t>();
         for (size_t i = 0; i < dataVector.size(); ++i) {
             dataVector[i] = refBuffer[i];
         }
diff --git a/docs/template_plugin/tests/functional/op_reference/roi_pooling.cpp b/docs/template_plugin/tests/functional/op_reference/roi_pooling.cpp
index 9baedeb3404..cf92b51fdfb 100644
--- a/docs/template_plugin/tests/functional/op_reference/roi_pooling.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/roi_pooling.cpp
@@ -33,9 +33,9 @@ struct ROIPoolingParams {
     float spatialScale;
     std::string poolingMode;
     ngraph::element::Type dataType;
-    InferenceEngine::Blob::Ptr featureMap;
-    InferenceEngine::Blob::Ptr proposal;
-    InferenceEngine::Blob::Ptr refData;
+    ov::runtime::Tensor featureMap;
+    ov::runtime::Tensor proposal;
+    ov::runtime::Tensor refData;
 
 public:
     template <typename T>
diff --git a/docs/template_plugin/tests/functional/op_reference/select.cpp b/docs/template_plugin/tests/functional/op_reference/select.cpp
index 0cbc242c61b..1f7070df7ca 100644
--- a/docs/template_plugin/tests/functional/op_reference/select.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/select.cpp
@@ -34,12 +34,12 @@ struct SelectParams {
     element::Type data_type;
     op::AutoBroadcastSpec broadcast;
     PartialShape select_input_pshape;
-    Blob::Ptr select_input;
+    ov::runtime::Tensor select_input;
     PartialShape if_input_pshape;
-    Blob::Ptr if_input;
+    ov::runtime::Tensor if_input;
     PartialShape else_input_pshape;
-    Blob::Ptr else_input;
-    Blob::Ptr expected_output;
+    ov::runtime::Tensor else_input;
+    ov::runtime::Tensor expected_output;
 };
 
 class ReferenceSelectLayerTest : public testing::TestWithParam<SelectParams>, public CommonReferenceTest {
diff --git a/docs/template_plugin/tests/functional/op_reference/sign.cpp b/docs/template_plugin/tests/functional/op_reference/sign.cpp
index ca1505cea13..38d01c1e06e 100644
--- a/docs/template_plugin/tests/functional/op_reference/sign.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/sign.cpp
@@ -24,8 +24,8 @@ struct SignParams {
     PartialShape pshape;
     element::Type inType;
     element::Type outType;
-    Blob::Ptr inputData;
-    Blob::Ptr refData;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
 };
 
 class ReferenceSignLayerTest : public testing::TestWithParam<SignParams>, public CommonReferenceTest {
diff --git a/docs/template_plugin/tests/functional/op_reference/tan.cpp b/docs/template_plugin/tests/functional/op_reference/tan.cpp
index 5be7a7ad03c..56118b04ea2 100644
--- a/docs/template_plugin/tests/functional/op_reference/tan.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/tan.cpp
@@ -25,8 +25,8 @@ struct TanParams {
     ngraph::PartialShape pshape;
     ngraph::element::Type inType;
    ngraph::element::Type outType;
-    InferenceEngine::Blob::Ptr inputData;
-    InferenceEngine::Blob::Ptr refData;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
 };
 
 class ReferenceTanLayerTest : public testing::TestWithParam<TanParams>, public CommonReferenceTest {
diff --git a/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.cpp b/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.cpp
index 281ec03419d..8164c619203 100644
--- a/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.cpp
+++ b/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.cpp
@@ -106,21 +106,27 @@ void CNNNetworkNGraphImpl::validateFunctionNames() const {
 }
 
 CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const std::shared_ptr<::ngraph::Function>& nGraph,
-                                           const std::vector<IExtensionPtr>& exts)
+                                           const std::vector<IExtensionPtr>& exts,
+                                           bool newAPI)
     : _ngraph_function(nGraph),
-      _ie_extensions(exts) {
+      _ie_extensions(exts),
+      _new_api(newAPI) {
     // Restore usual attributes for CNNNetwork
-    auto keep_input_info = [](CNNNetworkNGraphImpl& network, const DataPtr& inData) {
+    auto keep_input_info = [=](CNNNetworkNGraphImpl& network, const DataPtr& inData) {
         InputInfo::Ptr info(new InputInfo());
         info->setInputData(inData);
-        Precision prc = info->getPrecision();
-        // Convert precision into native format (keep element size)
-        prc = prc == Precision::Q78
-                  ? Precision::I16
-                  : prc == Precision::FP16 ? Precision::FP32 : static_cast<Precision::ePrecision>(prc);
+        if (!_new_api) {
+            Precision prc = info->getPrecision();
+
+            // Convert precision into native format (keep element size)
+            prc = prc == Precision::Q78
+                      ? Precision::I16
+                      : prc == Precision::FP16 ? Precision::FP32 : static_cast<Precision::ePrecision>(prc);
+
+            info->setPrecision(prc);
+        }
 
-        info->setPrecision(prc);
         network.setInputInfo(info);
     };
 
@@ -141,13 +147,16 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const std::shared_ptr<::ngraph::Function>& nGra
         keep_input_info(*this, ptr);
     }
 
-    for (auto& output : _outputData) {
-        // Convert precision into native format. Be consistent with possible conversion to CNNNetwork later.
-        if (output.second->getPrecision() == Precision::I64) {
-            output.second->setPrecision(Precision::I32);
-        } else if (output.second->getPrecision() != Precision::FP32 &&
-                   output.second->getPrecision() != Precision::I32) {
-            output.second->setPrecision(Precision::FP32);
+
+    if (!_new_api) {
+        for (auto& output : _outputData) {
+            // Convert precision into native format. Be consistent with possible conversion to CNNNetwork later.
+            if (output.second->getPrecision() == Precision::I64) {
+                output.second->setPrecision(Precision::I32);
+            } else if (output.second->getPrecision() != Precision::FP32 &&
+                       output.second->getPrecision() != Precision::I32) {
+                output.second->setPrecision(Precision::FP32);
+            }
         }
     }
 }
diff --git a/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.hpp b/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.hpp
index 856ab398764..f0eae70fa90 100644
--- a/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.hpp
+++ b/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.hpp
@@ -40,7 +40,8 @@ IE_SUPPRESS_DEPRECATED_START
 class INFERENCE_ENGINE_API_CLASS(CNNNetworkNGraphImpl) final : public ICNNNetwork {
 public:
     CNNNetworkNGraphImpl(const std::shared_ptr<::ngraph::Function>& nGraph,
-                         const std::vector<IExtensionPtr>& exts = {});
+                         const std::vector<IExtensionPtr>& exts = {},
+                         bool newAPI = false);
     CNNNetworkNGraphImpl(const CNNNetwork& nGraph);
 
     void getOutputsInfo(std::map<std::string, DataPtr>& out) const noexcept override;
@@ -98,6 +99,7 @@ private:
     std::map<std::string, DataPtr> _outputData;
     const std::vector<IExtensionPtr> _ie_extensions;
     std::unordered_map<std::string, std::string> _tensorNames;
+    bool _new_api = false;
 
     /**
      * @brief Create DataPtr for nGraph operation
diff --git a/inference-engine/src/inference_engine/src/ie_core.cpp b/inference-engine/src/inference_engine/src/ie_core.cpp
index 9f8f2699cf3..8bc2b23b998 100644
--- a/inference-engine/src/inference_engine/src/ie_core.cpp
+++ b/inference-engine/src/inference_engine/src/ie_core.cpp
@@ -12,7 +12,9 @@
 #include <memory>
 #include <mutex>
 
+#include "cnn_network_ngraph_impl.hpp"
 #include "compilation_context.hpp"
+#include "cpp/ie_cnn_network.h"
 #include "cpp/ie_plugin.hpp"
 #include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp"
 #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
@@ -1276,12 +1278,22 @@ std::shared_ptr<ngraph::Function> Core::read_model(const std::string& model, con
     OV_CORE_CALL_STATEMENT(return _impl->ReadNetwork(model, weights).getFunction(););
 }
 
-ExecutableNetwork Core::compile_model(const std::shared_ptr<const ov::Function>& network,
+namespace {
+
+ie::CNNNetwork toCNN(const std::shared_ptr<const ngraph::Function>& model) {
+    return ie::CNNNetwork(
+        std::make_shared<InferenceEngine::details::CNNNetworkNGraphImpl>(std::const_pointer_cast<ngraph::Function>(model),
+                                                                         std::vector<ie::IExtensionPtr>{},
+                                                                         true));
+}
+
+}  // namespace
+
+ExecutableNetwork Core::compile_model(const std::shared_ptr<const ov::Function>& model,
                                       const std::string& deviceName,
                                       const ConfigMap& config) {
     OV_CORE_CALL_STATEMENT({
-        auto exec =
-            _impl->LoadNetwork(ie::CNNNetwork(std::const_pointer_cast<ov::Function>(network)), deviceName, config);
+        auto exec = _impl->LoadNetwork(toCNN(model), deviceName, config);
         return {exec.operator const InferenceEngine::details::SharedObjectLoader&().get(),
                 exec.operator std::shared_ptr<ie::IExecutableNetworkInternal>&()};
     });
@@ -1291,19 +1303,18 @@ ExecutableNetwork Core::compile_model(const std::string& modelPath,
                                       const std::string& deviceName,
                                       const ConfigMap& config) {
     OV_CORE_CALL_STATEMENT({
+        // TODO: need to pass newAPI flag to preserve conversions of precisions
         auto exec = _impl->LoadNetwork(modelPath, deviceName, config);
         return {exec.operator const InferenceEngine::details::SharedObjectLoader&().get(),
                 exec.operator std::shared_ptr<ie::IExecutableNetworkInternal>&()};
     });
 }
 
-ExecutableNetwork Core::compile_model(const std::shared_ptr<const ov::Function>& network,
+ExecutableNetwork Core::compile_model(const std::shared_ptr<const ov::Function>& model,
                                       const RemoteContext& context,
                                       const ConfigMap& config) {
     OV_CORE_CALL_STATEMENT({
-        auto exec = _impl->LoadNetwork(ie::CNNNetwork(std::const_pointer_cast<ov::Function>(network)),
-                                       context._impl,
-                                       config);
+        auto exec = _impl->LoadNetwork(toCNN(model), context._impl, config);
         return {exec._so, exec._ptr};
     });
 }
@@ -1312,20 +1323,18 @@ void Core::add_extension(const ie::IExtensionPtr& extension) {
     OV_CORE_CALL_STATEMENT(_impl->AddExtension(extension););
 }
 
-ExecutableNetwork Core::import_model(std::istream& networkModel,
+ExecutableNetwork Core::import_model(std::istream& modelStream,
                                      const std::string& deviceName,
                                      const ConfigMap& config) {
     OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model");
     OV_CORE_CALL_STATEMENT({
-        auto exec = _impl->ImportNetwork(networkModel, deviceName, config);
+        auto exec = _impl->ImportNetwork(modelStream, deviceName, config);
         return {exec.operator const InferenceEngine::details::SharedObjectLoader&().get(),
                 exec.operator std::shared_ptr<ie::IExecutableNetworkInternal>&()};
     });
 }
 
-ExecutableNetwork Core::import_model(std::istream& networkModel,
-                                     const RemoteContext& context,
-                                     const ConfigMap& config) {
+ExecutableNetwork Core::import_model(std::istream& modelStream, const RemoteContext& context, const ConfigMap& config) {
     OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model");
 
     using ExportMagic = std::array<char, 4>;
@@ -1333,29 +1342,28 @@ ExecutableNetwork Core::import_model(std::istream& networkModel,
     std::string deviceName;
     ExportMagic magic = {};
 
-    auto currentPos = networkModel.tellg();
-    networkModel.read(magic.data(), magic.size());
+    auto currentPos = modelStream.tellg();
+    modelStream.read(magic.data(), magic.size());
     if (exportMagic == magic) {
-        std::getline(networkModel, deviceName);
+        std::getline(modelStream, deviceName);
     } else {
         OPENVINO_ASSERT(false,
                         "Passed compiled stream does not contain device name. "
" "Please, provide device name manually"); } - networkModel.seekg(currentPos, networkModel.beg); + modelStream.seekg(currentPos, modelStream.beg); OV_CORE_CALL_STATEMENT({ - auto exec = _impl->GetCPPPluginByName(deviceName).import_model(networkModel, {}); + auto exec = _impl->GetCPPPluginByName(deviceName).import_model(modelStream, {}); return {exec._so, exec._ptr}; }); } -SupportedOpsMap Core::query_model(const std::shared_ptr& network, +SupportedOpsMap Core::query_model(const std::shared_ptr& model, const std::string& deviceName, const ConfigMap& config) const { OV_CORE_CALL_STATEMENT({ - auto cnnNet = ie::CNNNetwork(std::const_pointer_cast(network)); - auto qnResult = _impl->QueryNetwork(cnnNet, deviceName, config); + auto qnResult = _impl->QueryNetwork(toCNN(model), deviceName, config); return qnResult.supportedLayersMap; }); } diff --git a/inference-engine/tests/functional/inference_engine/cnn_network/cnn_ngraph_impl_tests.cpp b/inference-engine/tests/functional/inference_engine/cnn_network/cnn_ngraph_impl_tests.cpp index 4bdd53d7810..28662d8854c 100644 --- a/inference-engine/tests/functional/inference_engine/cnn_network/cnn_ngraph_impl_tests.cpp +++ b/inference-engine/tests/functional/inference_engine/cnn_network/cnn_ngraph_impl_tests.cpp @@ -36,6 +36,7 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/common_utils.hpp" +#include "ie_precision.hpp" #include "transformations/rt_info/primitives_priority_attribute.hpp" #include "cnn_network_ngraph_impl.hpp" @@ -307,6 +308,37 @@ TEST(CNNNGraphImplTests, TestSetBatchDynamic) { ASSERT_EQ(PARAMETER_MISMATCH, cnnNet.setBatchSize(2, nullptr)); // must not trigger conversion } +TEST(CNNNGraphImplTests, TestDoesChangePrecisionsWithNewAPI) { + std::shared_ptr ngraph; + { + auto param = std::make_shared(ngraph::element::Type_t::f16, ngraph::PartialShape::dynamic()); + auto relu = std::make_shared(param); + auto result = std::make_shared(relu); + ngraph = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param}); + } + + // new OpenVINO 2.0 + { + auto ngraphImpl = std::make_shared(ngraph, + std::vector{}, true); + InferenceEngine::CNNNetwork cnnNet(ngraphImpl); + ASSERT_EQ(InferenceEngine::Precision::FP16, + cnnNet.getInputsInfo().begin()->second->getTensorDesc().getPrecision()); + ASSERT_EQ(InferenceEngine::Precision::FP16, + cnnNet.getOutputsInfo().begin()->second->getTensorDesc().getPrecision()); + } + + // current API + { + auto ngraphImpl = std::make_shared(ngraph); + InferenceEngine::CNNNetwork cnnNet(ngraphImpl); + ASSERT_EQ(InferenceEngine::Precision::FP32, + cnnNet.getInputsInfo().begin()->second->getTensorDesc().getPrecision()); + ASSERT_EQ(InferenceEngine::Precision::FP32, + cnnNet.getOutputsInfo().begin()->second->getTensorDesc().getPrecision()); + } +} + TEST(CNNNGraphImplTests, TestSaveAffinity) { const std::string testAffinity = "testAffinity"; std::shared_ptr ngraph; diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/ov_plugin_cache.hpp b/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/ov_plugin_cache.hpp new file mode 100644 index 00000000000..2c9345dcef9 --- /dev/null +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/ov_plugin_cache.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "openvino/runtime/core.hpp" + +namespace ov { 
+namespace test {
+
+class PluginCache {
+public:
+    std::shared_ptr<ov::runtime::Core> core(const std::string& deviceToCheck = std::string());
+
+    static PluginCache& get();
+
+    void reset();
+
+    PluginCache(const PluginCache&) = delete;
+    PluginCache& operator=(const PluginCache&) = delete;
+
+private:
+    PluginCache();
+    ~PluginCache() = default;
+
+    std::mutex g_mtx;
+    std::shared_ptr<ov::runtime::Core> ov_core;
+};
+}  // namespace test
+}  // namespace ov
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/src/ov_plugin_cache.cpp b/inference-engine/tests/ie_test_utils/functional_test_utils/src/ov_plugin_cache.cpp
new file mode 100644
index 00000000000..0821596a617
--- /dev/null
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/src/ov_plugin_cache.cpp
@@ -0,0 +1,102 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "functional_test_utils/ov_plugin_cache.hpp"
+
+#include <gtest/gtest.h>
+
+#include <algorithm>
+#include <cstdlib>
+#include <iostream>
+
+namespace ov {
+namespace test {
+namespace {
+class TestListener : public testing::EmptyTestEventListener {
+public:
+    void OnTestEnd(const testing::TestInfo& testInfo) override {
+        if (auto testResult = testInfo.result()) {
+            if (testResult->Failed()) {
+                PluginCache::get().reset();
+            }
+        }
+    }
+};
+}  // namespace
+
+PluginCache& PluginCache::get() {
+    static PluginCache instance;
+    return instance;
+}
+
+std::shared_ptr<ov::runtime::Core> PluginCache::core(const std::string& deviceToCheck) {
+    std::lock_guard<std::mutex> lock(g_mtx);
+    if (std::getenv("DISABLE_PLUGIN_CACHE") != nullptr) {
+#ifndef NDEBUG
+        std::cout << "'DISABLE_PLUGIN_CACHE' environment variable is set. New Core object will be created!"
+                  << std::endl;
+#endif
+        return std::make_shared<ov::runtime::Core>();
+    }
+#ifndef NDEBUG
+    std::cout << "Access PluginCache ov core. OV Core use count: " << ov_core.use_count() << std::endl;
+#endif
+
+    if (!ov_core) {
+#ifndef NDEBUG
+        std::cout << "Created ov core." << std::endl;
+#endif
+        ov_core = std::make_shared<ov::runtime::Core>();
+    }
+    assert(0 != ov_core.use_count());
+
+    // register template plugin if it is needed
+    try {
+        std::string pluginName = "templatePlugin";
+        pluginName += IE_BUILD_POSTFIX;
+        ov_core->register_plugin(pluginName, "TEMPLATE");
+    } catch (...) {
+    }
+
+    if (!deviceToCheck.empty()) {
+        std::vector<std::string> metrics = ov_core->get_metric(deviceToCheck, METRIC_KEY(SUPPORTED_METRICS));
+
+        if (std::find(metrics.begin(), metrics.end(), METRIC_KEY(AVAILABLE_DEVICES)) != metrics.end()) {
+            std::vector<std::string> availableDevices =
+                ov_core->get_metric(deviceToCheck, METRIC_KEY(AVAILABLE_DEVICES));
+
+            if (availableDevices.empty()) {
+                std::cerr << "No available devices for " << deviceToCheck << std::endl;
+                std::exit(EXIT_FAILURE);
+            }
+
+#ifndef NDEBUG
+            std::cout << "Available devices for " << deviceToCheck << ":" << std::endl;
+
+            for (const auto& device : availableDevices) {
+                std::cout << "    " << device << std::endl;
+            }
+#endif
+        }
+    }
+    return ov_core;
+}
+
+void PluginCache::reset() {
+    std::lock_guard<std::mutex> lock(g_mtx);
+
+#ifndef NDEBUG
+    std::cout << "Reset PluginCache. OV Core use count: " << ov_core.use_count() << std::endl;
OV Core use count: " << ov_core.use_count() << std::endl; +#endif + + ov_core.reset(); +} + +PluginCache::PluginCache() { + auto& listeners = testing::UnitTest::GetInstance()->listeners(); + listeners.Append(new TestListener); +} + +} // namespace test +} // namespace ov diff --git a/ngraph/core/include/openvino/runtime/tensor.hpp b/ngraph/core/include/openvino/runtime/tensor.hpp index b7701129dc9..4061f528672 100644 --- a/ngraph/core/include/openvino/runtime/tensor.hpp +++ b/ngraph/core/include/openvino/runtime/tensor.hpp @@ -9,6 +9,8 @@ */ #pragma once +#include + #include "openvino/core/coordinate.hpp" #include "openvino/core/shape.hpp" #include "openvino/core/type/element_type.hpp" @@ -136,9 +138,9 @@ public: * @return A host pointer to tensor memory casted to specified type `T`. * @note Throws exception if specified type does not match with tensor element type */ - template + template ::type> T* data() const { - return static_cast(data(element::from())); + return static_cast(data(element::from())); } /** diff --git a/ngraph/core/src/runtime/ov_tensor.cpp b/ngraph/core/src/runtime/ov_tensor.cpp index a003633e1b7..73a6c60a84b 100644 --- a/ngraph/core/src/runtime/ov_tensor.cpp +++ b/ngraph/core/src/runtime/ov_tensor.cpp @@ -117,7 +117,7 @@ void* Tensor::data(const element::Type element_type) const { element::fundamental_type_for(get_element_type()), ", but it casted to ", element_type, - " with fundamental element type", + " with fundamental element type ", element::fundamental_type_for(element_type)); } return _impl->getTensorDesc().getBlockingDesc().getOffsetPadding() * get_element_type().size() +