Reference template plugin tests to OpenVINO 2.0 (#7685)

Author: Ilya Lavrenov · 2021-09-28 10:46:03 +03:00 · committed by GitHub
parent 66bf54898c
commit 476fbee00f
24 changed files with 368 additions and 208 deletions

View File

@@ -19,9 +19,10 @@ addIeTarget(
     ${SDL_INCLUDES}
     /some/specific/path
     LINK_LIBRARIES
-        ie::important_plugin
+        link_dependencies
     DEPENDENCIES
         dependencies
+        ie::important_plugin
     OBJECT_FILES
         object libraries
 )

View File

@@ -4,9 +4,6 @@
 #include <gtest/gtest.h>

-#include <ie_core.hpp>
-#include <ie_ngraph_utils.hpp>
-#include <ngraph/ngraph.hpp>
 #include <shared_test_classes/base/layer_test_utils.hpp>
 #include <vector>

View File

@@ -12,6 +12,7 @@
 #include <shared_test_classes/base/layer_test_utils.hpp>

 #include "base_reference_test.hpp"
+#include "openvino/runtime/allocator.hpp"

 using namespace reference_tests;
 using namespace ngraph;
@@ -43,8 +44,8 @@ struct AtanhParams {
     ngraph::PartialShape pshape;
     ngraph::element::Type inType;
     ngraph::element::Type outType;
-    InferenceEngine::Blob::Ptr inputData;
-    InferenceEngine::Blob::Ptr refData;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
 };

 class ReferenceAtanhLayerTest : public testing::TestWithParam<AtanhParams>, public CommonReferenceTest {

View File

@@ -2,9 +2,13 @@
 // SPDX-License-Identifier: Apache-2.0
 //

 #include "base_reference_test.hpp"
+#include "functional_test_utils/ov_plugin_cache.hpp"

 #include <gtest/gtest.h>

+#include "openvino/core/type/element_type.hpp"
+#include "openvino/runtime/allocator.hpp"
+#include "openvino/runtime/tensor.hpp"
 #include "transformations/utils/utils.hpp"

 using namespace InferenceEngine;
@@ -12,7 +16,7 @@ using namespace InferenceEngine;
 namespace reference_tests {

 CommonReferenceTest::CommonReferenceTest(): targetDevice("TEMPLATE") {
-    core = PluginCache::get().ie(targetDevice);
+    core = ov::test::PluginCache::get().core(targetDevice);
 }

 void CommonReferenceTest::Exec() {
@@ -23,73 +27,41 @@ void CommonReferenceTest::Exec() {
 }

 void CommonReferenceTest::LoadNetwork() {
-    InferenceEngine::CNNNetwork cnnNetwork(function);
-    auto inputInfo = cnnNetwork.getInputsInfo();
-    auto outputInfo = cnnNetwork.getOutputsInfo();
-    for (const auto& param : function->get_parameters()) {
-        inputInfo[param->get_friendly_name()]->setPrecision(InferenceEngine::details::convertPrecision(param->get_element_type()));
-    }
-    for (const auto& result : function->get_results()) {
-        outputInfo[ngraph::op::util::create_ie_output_name(result->input_value(0))]->setPrecision(
-            InferenceEngine::details::convertPrecision(result->get_element_type()));
-    }
-    executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice);
+    executableNetwork = core->compile_model(function, targetDevice);
 }

 void CommonReferenceTest::FillInputs() {
-    const auto& inputInfo = executableNetwork.GetInputsInfo();
-    const auto& params = function->get_parameters();
-    ASSERT_EQ(params.size(), inputData.size());
-    ASSERT_EQ(inputInfo.size(), inputData.size());
+    const auto& functionParams = function->get_parameters();
+    ASSERT_EQ(functionParams.size(), inputData.size());

-    for (size_t i = 0; i < params.size(); i++) {
-        const auto& param = params[i];
-        const auto infoIt = inputInfo.find(param->get_friendly_name());
-        GTEST_ASSERT_NE(infoIt, inputInfo.cend());
+    for (size_t i = 0; i < functionParams.size(); i++) {
+        const auto& param = functionParams[i];

-        const auto& info = infoIt->second;
-        auto blob = make_blob_with_precision(info->getTensorDesc());
-        blob->allocate();
-        ASSERT_EQ(blob->byteSize(), inputData[i]->byteSize());
+        ov::runtime::Tensor blob(param->get_element_type(), param->get_shape());
+        ASSERT_EQ(blob.get_byte_size(), inputData[i].get_byte_size());

-        MemoryBlob::Ptr mInputData = as<MemoryBlob>(inputData[i]);
-        ASSERT_NE(mInputData, nullptr);
-        auto minputDataHolder = mInputData->rmap();
-
-        MemoryBlob::Ptr mBlob = as<MemoryBlob>(blob);
-        ASSERT_NE(mBlob, nullptr);
-        auto mBlobHolder = mBlob->wmap();
-
-        std::memcpy(mBlobHolder.as<void*>(), minputDataHolder.as<const void*>(), inputData[i]->byteSize());
+        std::memcpy(blob.data(), inputData[i].data(), inputData[i].get_byte_size());

         inputData[i] = blob;
     }
 }

 void CommonReferenceTest::Infer() {
-    inferRequest = executableNetwork.CreateInferRequest();
-
-    const auto& inputsInfo = executableNetwork.GetInputsInfo();
+    inferRequest = executableNetwork.create_infer_request();
     const auto& functionParams = function->get_parameters();

     for (size_t i = 0; i < functionParams.size(); ++i) {
         const auto& param = functionParams[i];
-        const auto infoIt = inputsInfo.find(param->get_friendly_name());
-        GTEST_ASSERT_NE(infoIt, inputsInfo.cend());
-
-        const auto& info = infoIt->second;
-        auto blob = inputData[i];
-
-        inferRequest.SetBlob(info->name(), blob);
+        inferRequest.set_tensor(param->get_friendly_name(), inputData[i]);
     }
-    inferRequest.Infer();
+    inferRequest.infer();
 }

 void CommonReferenceTest::Validate() {
-    ASSERT_EQ(executableNetwork.GetOutputsInfo().size(), refOutData.size());
-    std::vector<InferenceEngine::Blob::Ptr> outputs;
+    ASSERT_EQ(executableNetwork.get_results().size(), refOutData.size());
+    std::vector<ov::runtime::Tensor> outputs;
     for (const auto& result : function->get_results()) {
         auto name = ngraph::op::util::create_ie_output_name(result->input_value(0));
-        outputs.emplace_back(inferRequest.GetBlob(name));
+        outputs.emplace_back(inferRequest.get_tensor(name));
     }

     ASSERT_EQ(refOutData.size(), outputs.size());
@@ -97,80 +69,86 @@ void CommonReferenceTest::Validate() {
         ValidateBlobs(refOutData[i], outputs[i]);
     }
 }

-void CommonReferenceTest::ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob) {
-    ASSERT_TRUE(refBlob != nullptr);
-    ASSERT_TRUE(outBlob != nullptr);
-    ASSERT_EQ(refBlob->getTensorDesc().getPrecision(), outBlob->getTensorDesc().getPrecision());
-    ASSERT_EQ(refBlob->byteSize(), outBlob->byteSize());
-
-    auto mRef = as<InferenceEngine::MemoryBlob>(refBlob);
-    IE_ASSERT(mRef);
-    const auto refLockMemory = mRef->rmap();
-    const auto refBuffer = refLockMemory.as<const std::uint8_t*>();
-
-    auto mOut = as<InferenceEngine::MemoryBlob>(outBlob);
-    IE_ASSERT(mOut);
-    const auto outLockMemory = mOut->rmap();
-    const auto outBuffer = outLockMemory.as<const std::uint8_t*>();
-
-    const auto& precision = refBlob->getTensorDesc().getPrecision();
-    switch (precision) {
-    case InferenceEngine::Precision::BF16:
-        LayerTestsUtils::LayerTestsCommon::Compare<ngraph::bfloat16, ngraph::bfloat16>(
-            reinterpret_cast<const ngraph::bfloat16*>(refBuffer), reinterpret_cast<const ngraph::bfloat16*>(outBuffer), refBlob->size(), threshold);
+void CommonReferenceTest::ValidateBlobs(const ov::runtime::Tensor& refBlob, const ov::runtime::Tensor& outBlob) {
+    ASSERT_EQ(refBlob.get_element_type(), outBlob.get_element_type());
+    ASSERT_EQ(refBlob.get_byte_size(), outBlob.get_byte_size());
+
+    const auto& element_type = refBlob.get_element_type();
+    switch (element_type) {
+    case ov::element::bf16:
+        LayerTestsUtils::LayerTestsCommon::Compare<ov::bfloat16, ov::bfloat16>(
+            refBlob.data<const ov::bfloat16>(), outBlob.data<const ov::bfloat16>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::FP16:
-        LayerTestsUtils::LayerTestsCommon::Compare<ngraph::float16, ngraph::float16>(
-            reinterpret_cast<const ngraph::float16*>(refBuffer), reinterpret_cast<const ngraph::float16*>(outBuffer), refBlob->size(), threshold);
+    case ov::element::f16:
+        LayerTestsUtils::LayerTestsCommon::Compare<ov::float16, ov::float16>(
+            refBlob.data<const ov::float16>(), outBlob.data<const ov::float16>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::FP32:
-        LayerTestsUtils::LayerTestsCommon::Compare<float, float>(
-            reinterpret_cast<const float*>(refBuffer), reinterpret_cast<const float*>(outBuffer), refBlob->size(), threshold);
+    case ov::element::f32:
+        LayerTestsUtils::LayerTestsCommon::Compare<float, float>(
+            refBlob.data<const float>(), outBlob.data<const float>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::I8:
-        LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(
-            reinterpret_cast<const int8_t*>(refBuffer), reinterpret_cast<const int8_t*>(outBuffer), refBlob->size(), threshold);
+    case ov::element::i8:
+        LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(
+            refBlob.data<const int8_t>(), outBlob.data<const int8_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::I16:
-        LayerTestsUtils::LayerTestsCommon::Compare<int16_t, int16_t>(
-            reinterpret_cast<const int16_t*>(refBuffer), reinterpret_cast<const int16_t*>(outBuffer), refBlob->size(), threshold);
+    case ov::element::i16:
+        LayerTestsUtils::LayerTestsCommon::Compare<int16_t, int16_t>(
+            refBlob.data<const int16_t>(), outBlob.data<const int16_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::I32:
-        LayerTestsUtils::LayerTestsCommon::Compare<int32_t, int32_t>(
-            reinterpret_cast<const int32_t*>(refBuffer), reinterpret_cast<const int32_t*>(outBuffer), refBlob->size(), threshold);
+    case ov::element::i32:
+        LayerTestsUtils::LayerTestsCommon::Compare<int32_t, int32_t>(
+            refBlob.data<const int32_t>(), outBlob.data<const int32_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::I64:
-        LayerTestsUtils::LayerTestsCommon::Compare<int64_t, int64_t>(
-            reinterpret_cast<const int64_t*>(refBuffer), reinterpret_cast<const int64_t*>(outBuffer), refBlob->size(), threshold);
+    case ov::element::i64:
+        LayerTestsUtils::LayerTestsCommon::Compare<int64_t, int64_t>(
+            refBlob.data<const int64_t>(), outBlob.data<const int64_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::BOOL:
-    case InferenceEngine::Precision::U8:
-        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(
-            reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer), refBlob->size(), threshold);
+    case ov::element::boolean:
+        LayerTestsUtils::LayerTestsCommon::Compare<bool, bool>(
+            refBlob.data<const bool>(), outBlob.data<const bool>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::U16:
-        LayerTestsUtils::LayerTestsCommon::Compare<uint16_t, uint16_t>(
-            reinterpret_cast<const uint16_t*>(refBuffer), reinterpret_cast<const uint16_t*>(outBuffer), refBlob->size(), threshold);
+    case ov::element::u8:
+        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(
+            refBlob.data<const uint8_t>(), outBlob.data<const uint8_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::U32:
-        LayerTestsUtils::LayerTestsCommon::Compare<uint32_t, uint32_t>(
-            reinterpret_cast<const uint32_t*>(refBuffer), reinterpret_cast<const uint32_t*>(outBuffer), refBlob->size(), threshold);
+    case ov::element::u16:
+        LayerTestsUtils::LayerTestsCommon::Compare<uint16_t, uint16_t>(
+            refBlob.data<const uint16_t>(), outBlob.data<const uint16_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::U64:
-        LayerTestsUtils::LayerTestsCommon::Compare<uint64_t, uint64_t>(
-            reinterpret_cast<const uint64_t*>(refBuffer), reinterpret_cast<const uint64_t*>(outBuffer), refBlob->size(), threshold);
+    case ov::element::u32:
+        LayerTestsUtils::LayerTestsCommon::Compare<uint32_t, uint32_t>(
+            refBlob.data<const uint32_t>(), outBlob.data<const uint32_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::I4:
-    case InferenceEngine::Precision::U4:
-        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(
-            reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer), refBlob->size() / 2, threshold);
+    case ov::element::u64:
+        LayerTestsUtils::LayerTestsCommon::Compare<uint64_t, uint64_t>(
+            refBlob.data<const uint64_t>(), outBlob.data<const uint64_t>(),
+            refBlob.get_size(), threshold);
         break;
-    case InferenceEngine::Precision::BIN:
-        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(
-            reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer), refBlob->size() / 8, threshold);
+    case ov::element::i4:
+    case ov::element::u4:
+        LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(
+            refBlob.data<const int8_t>(), outBlob.data<const int8_t>(),
+            refBlob.get_size() / 2, threshold);
+        break;
+    case ov::element::u1:
+        LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(
+            refBlob.data<const int8_t>(), outBlob.data<const int8_t>(),
+            refBlob.get_size() / 8, threshold);
         break;
     default:
-        FAIL() << "Comparator for " << precision << " precision isn't supported";
+        FAIL() << "Comparator for " << element_type << " element type isn't supported";
     }
 }
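
For context, the migrated flow of CommonReferenceTest reduces to the minimal sketch below. The trivial Relu function, the input values and the direct use of the "TEMPLATE" device (registered by PluginCache in the real tests) are illustrative assumptions; only the ov::runtime calls mirror the code above.

// Minimal sketch of the OpenVINO 2.0 reference-test flow (assumptions noted above).
#include <cstring>
#include <ngraph/ngraph.hpp>
#include "openvino/runtime/core.hpp"
#include "openvino/runtime/tensor.hpp"
#include "transformations/utils/utils.hpp"

int main() {
    // out = relu(in), a stand-in for the reference functions built by the tests
    auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{4});
    auto relu = std::make_shared<ngraph::op::Relu>(param);
    auto function = std::make_shared<ngraph::Function>(relu, ngraph::ParameterVector{param});

    ov::runtime::Core core;
    auto executableNetwork = core.compile_model(function, "TEMPLATE");
    auto inferRequest = executableNetwork.create_infer_request();

    // Inputs are plain ov::runtime::Tensor objects bound by the parameter's friendly name
    ov::runtime::Tensor input(ngraph::element::f32, ngraph::Shape{4});
    const float values[] = {-1.0f, 0.0f, 2.0f, 3.0f};
    std::memcpy(input.data(), values, input.get_byte_size());
    inferRequest.set_tensor(param->get_friendly_name(), input);

    inferRequest.infer();

    // Outputs are fetched by the same name the Validate() loop above computes
    auto name = ngraph::op::util::create_ie_output_name(function->get_results()[0]->input_value(0));
    ov::runtime::Tensor output = inferRequest.get_tensor(name);
    return output.get_size() == 4 ? 0 : 1;
}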

View File

@@ -2,11 +2,13 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include <ie_core.hpp>
-#include <ie_ngraph_utils.hpp>
 #include <ngraph/ngraph.hpp>
-#include <ngraph/shape.hpp>
-#include <ngraph/type/element_type.hpp>
+#include "openvino/core/shape.hpp"
+#include "openvino/runtime/allocator.hpp"
+#include "openvino/runtime/tensor.hpp"
+#include "openvino/runtime/core.hpp"
+#include "openvino/core/type/element_type.hpp"
 #include <shared_test_classes/base/layer_test_utils.hpp>

 namespace reference_tests {
@@ -26,33 +28,27 @@ public:
     void Validate();

 private:
-    void ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob);
+    void ValidateBlobs(const ov::runtime::Tensor& refBlob, const ov::runtime::Tensor& outBlob);

 protected:
     const std::string targetDevice;
-    std::shared_ptr<InferenceEngine::Core> core;
-    std::shared_ptr<ngraph::Function> function;
+    std::shared_ptr<ov::runtime::Core> core;
+    std::shared_ptr<ov::Function> function;

-    InferenceEngine::ExecutableNetwork executableNetwork;
-    InferenceEngine::InferRequest inferRequest;
-    std::vector<InferenceEngine::Blob::Ptr> inputData;
-    std::vector<InferenceEngine::Blob::Ptr> refOutData;
+    ov::runtime::ExecutableNetwork executableNetwork;
+    ov::runtime::InferRequest inferRequest;
+    std::vector<ov::runtime::Tensor> inputData;
+    std::vector<ov::runtime::Tensor> refOutData;
     float threshold = 1e-2f;
 };

 template <class T>
-InferenceEngine::Blob::Ptr CreateBlob(const ngraph::element::Type& element_type, const std::vector<T>& values, size_t size = 0) {
+ov::runtime::Tensor CreateBlob(const ov::element::Type& element_type, const std::vector<T>& values, size_t size = 0) {
     size_t real_size = size ? size : values.size() * sizeof(T) / element_type.size();
-    auto blob = make_blob_with_precision(
-        InferenceEngine::TensorDesc(InferenceEngine::details::convertPrecision(element_type), {real_size}, InferenceEngine::Layout::C));
-    blob->allocate();
-    InferenceEngine::MemoryBlob::Ptr minput = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
-    IE_ASSERT(minput);
-    auto minputHolder = minput->wmap();
-
-    std::memcpy(minputHolder.as<void*>(), values.data(), std::min(real_size * element_type.size(), sizeof(T) * values.size()));
-
-    return blob;
+    ov::runtime::Tensor tensor { element_type, {real_size} };
+    std::memcpy(tensor.data(), values.data(), std::min(real_size * element_type.size(), sizeof(T) * values.size()));
+
+    return tensor;
 }

 ///
@@ -61,15 +57,15 @@ InferenceEngine::Blob::Ptr CreateBlob(const ngraph::element::Type& element_type,
 struct Tensor {
     Tensor() = default;

-    Tensor(const ngraph::Shape& shape, ngraph::element::Type type, const InferenceEngine::Blob::Ptr& data): shape {shape}, type {type}, data {data} {}
+    Tensor(const ov::Shape& shape, ov::element::Type type, const ov::runtime::Tensor& data): shape {shape}, type {type}, data {data} {}

     template <typename T>
-    Tensor(const ngraph::Shape& shape, ngraph::element::Type type, const std::vector<T>& data_elements)
+    Tensor(const ov::Shape& shape, ov::element::Type type, const std::vector<T>& data_elements)
         : Tensor {shape, type, CreateBlob(type, data_elements)} {}

-    ngraph::Shape shape;
-    ngraph::element::Type type;
-    InferenceEngine::Blob::Ptr data;
+    ov::Shape shape;
+    ov::element::Type type;
+    ov::runtime::Tensor data;
 };

 ///
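
A small sketch of how test data is built with the updated helpers above; the values and shapes are illustrative only.

#include <cstdint>
#include <vector>
#include "base_reference_test.hpp"

void build_test_data_example() {
    // CreateBlob copies the vector into a freshly allocated ov::runtime::Tensor
    ov::runtime::Tensor input = reference_tests::CreateBlob(ov::element::f32,
                                                            std::vector<float>{-0.5f, 0.0f, 0.5f, 0.9f});

    // The lightweight Tensor wrapper bundles shape, element type and data for a test case
    reference_tests::Tensor axes {ov::Shape{1}, ov::element::i64, std::vector<int64_t>{0}};

    (void)input;
    (void)axes;
}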

View File

@@ -31,8 +31,8 @@ struct ConvertParams {
     ngraph::PartialShape pshape;
     ngraph::element::Type inType;
     ngraph::element::Type outType;
-    InferenceEngine::Blob::Ptr inputData;
-    InferenceEngine::Blob::Ptr refData;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
 };

 class ReferenceConversionLayerTest : public testing::TestWithParam<ConvertParams>, public CommonReferenceTest {

View File

@@ -55,9 +55,9 @@ struct CumSumParams {
     element::Type axisType;
     element::Type inType;
     element::Type outType;
-    Blob::Ptr axisData;
-    Blob::Ptr inputData;
-    Blob::Ptr refData;
+    ov::runtime::Tensor axisData;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
     bool testDefaults = false;
 };

View File

@@ -43,8 +43,8 @@ struct ErfParams {
     ngraph::PartialShape pshape;
     ngraph::element::Type inType;
     ngraph::element::Type outType;
-    InferenceEngine::Blob::Ptr inputData;
-    InferenceEngine::Blob::Ptr refData;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
 };

 class ReferenceErfLayerTest : public testing::TestWithParam<ErfParams>, public CommonReferenceTest {

View File

@@ -26,8 +26,8 @@ struct GrnParams {
     PartialShape pshape;
     element::Type inType;
     element::Type outType;
-    Blob::Ptr inputData;
-    Blob::Ptr refData;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
 };

 class ReferenceGrnLayerTest : public testing::TestWithParam<GrnParams>, public CommonReferenceTest {

View File

@@ -47,9 +47,9 @@ struct MaxPoolParams {
     Shape m_input_shape;
     element::Type m_input_type;
     element::Type m_indices_type;
-    InferenceEngine::Blob::Ptr m_input_data;
-    InferenceEngine::Blob::Ptr m_expected_values;
-    InferenceEngine::Blob::Ptr m_expected_indices;
+    ov::runtime::Tensor m_input_data;
+    ov::runtime::Tensor m_expected_values;
+    ov::runtime::Tensor m_expected_indices;
     Strides m_strides;
     Strides m_dilations;
     Shape m_pads_begin;

View File

@@ -31,9 +31,9 @@ struct MinimumParams {
     PartialShape pshape;
     element::Type inType;
     element::Type outType;
-    Blob::Ptr inputData1;
-    Blob::Ptr inputData2;
-    Blob::Ptr refData;
+    ov::runtime::Tensor inputData1;
+    ov::runtime::Tensor inputData2;
+    ov::runtime::Tensor refData;
 };

 class ReferenceMinimumLayerTest : public testing::TestWithParam<MinimumParams>, public CommonReferenceTest {

View File

@@ -186,10 +186,7 @@ private:
                                                          const ngraph::op::MVNEpsMode epsMode) {
         std::vector<int64_t> dataVector(reductionAxes.shape[0]);
         const auto in = std::make_shared<op::Parameter>(input.type, input.shape);
-        auto mRef = as<InferenceEngine::MemoryBlob>(reductionAxes.data);
-        IE_ASSERT(mRef);
-        const auto refLockMemory = mRef->rmap();
-        const auto refBuffer = refLockMemory.as<const std::uint64_t*>();
+        const auto refBuffer = reductionAxes.data.data<const std::int64_t>();
         for (size_t i = 0; i < dataVector.size(); ++i) {
             dataVector[i] = refBuffer[i];
         }

View File

@@ -33,9 +33,9 @@ struct ROIPoolingParams {
     float spatialScale;
     std::string poolingMode;
     ngraph::element::Type dataType;
-    InferenceEngine::Blob::Ptr featureMap;
-    InferenceEngine::Blob::Ptr proposal;
-    InferenceEngine::Blob::Ptr refData;
+    ov::runtime::Tensor featureMap;
+    ov::runtime::Tensor proposal;
+    ov::runtime::Tensor refData;

 public:
     template<class T>

View File

@@ -34,12 +34,12 @@ struct SelectParams {
     element::Type data_type;
     op::AutoBroadcastSpec broadcast;
     PartialShape select_input_pshape;
-    Blob::Ptr select_input;
+    ov::runtime::Tensor select_input;
     PartialShape if_input_pshape;
-    Blob::Ptr if_input;
+    ov::runtime::Tensor if_input;
     PartialShape else_input_pshape;
-    Blob::Ptr else_input;
-    Blob::Ptr expected_output;
+    ov::runtime::Tensor else_input;
+    ov::runtime::Tensor expected_output;
 };

 class ReferenceSelectLayerTest : public testing::TestWithParam<SelectParams>, public CommonReferenceTest {

View File

@@ -24,8 +24,8 @@ struct SignParams {
     PartialShape pshape;
     element::Type inType;
     element::Type outType;
-    Blob::Ptr inputData;
-    Blob::Ptr refData;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
 };

 class ReferenceSignLayerTest : public testing::TestWithParam<SignParams>, public CommonReferenceTest {

View File

@@ -25,8 +25,8 @@ struct TanParams {
     ngraph::PartialShape pshape;
     ngraph::element::Type inType;
    ngraph::element::Type outType;
-    InferenceEngine::Blob::Ptr inputData;
-    InferenceEngine::Blob::Ptr refData;
+    ov::runtime::Tensor inputData;
+    ov::runtime::Tensor refData;
 };

 class ReferenceTanLayerTest : public testing::TestWithParam<TanParams>, public CommonReferenceTest {

View File

@@ -106,21 +106,27 @@ void CNNNetworkNGraphImpl::validateFunctionNames() const {
 }

 CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const std::shared_ptr<Function>& nGraph,
-                                           const std::vector<IExtensionPtr>& exts)
+                                           const std::vector<IExtensionPtr>& exts,
+                                           bool newAPI)
     : _ngraph_function(nGraph),
-      _ie_extensions(exts) {
+      _ie_extensions(exts),
+      _new_api(newAPI) {
     // Restore usual attributes for CNNNetwork
-    auto keep_input_info = [](CNNNetworkNGraphImpl& network, const DataPtr& inData) {
+    auto keep_input_info = [=](CNNNetworkNGraphImpl& network, const DataPtr& inData) {
         InputInfo::Ptr info(new InputInfo());
         info->setInputData(inData);
-        Precision prc = info->getPrecision();

-        // Convert precision into native format (keep element size)
-        prc = prc == Precision::Q78
-                  ? Precision::I16
-                  : prc == Precision::FP16 ? Precision::FP32 : static_cast<Precision::ePrecision>(prc);
+        if (!_new_api) {
+            Precision prc = info->getPrecision();

-        info->setPrecision(prc);
+            // Convert precision into native format (keep element size)
+            prc = prc == Precision::Q78
+                      ? Precision::I16
+                      : prc == Precision::FP16 ? Precision::FP32 : static_cast<Precision::ePrecision>(prc);
+            info->setPrecision(prc);
+        }
+
         network.setInputInfo(info);
     };
@@ -141,13 +147,16 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const std::shared_ptr<Function>& nGra
         keep_input_info(*this, ptr);
     }

-    for (auto& output : _outputData) {
-        // Convert precision into native format. Be consistent with possible conversion to CNNNetwork later.
-        if (output.second->getPrecision() == Precision::I64) {
-            output.second->setPrecision(Precision::I32);
-        } else if (output.second->getPrecision() != Precision::FP32 &&
-                   output.second->getPrecision() != Precision::I32) {
-            output.second->setPrecision(Precision::FP32);
+    if (!_new_api) {
+        for (auto& output : _outputData) {
+            // Convert precision into native format. Be consistent with possible conversion to CNNNetwork later.
+            if (output.second->getPrecision() == Precision::I64) {
+                output.second->setPrecision(Precision::I32);
+            } else if (output.second->getPrecision() != Precision::FP32 &&
+                       output.second->getPrecision() != Precision::I32) {
+                output.second->setPrecision(Precision::FP32);
+            }
         }
     }
 }

View File

@@ -40,7 +40,8 @@ IE_SUPPRESS_DEPRECATED_START
 class INFERENCE_ENGINE_API_CLASS(CNNNetworkNGraphImpl) final : public ICNNNetwork {
 public:
     CNNNetworkNGraphImpl(const std::shared_ptr<::ngraph::Function>& nGraph,
-                         const std::vector<IExtensionPtr>& exts = {});
+                         const std::vector<IExtensionPtr>& exts = {},
+                         bool newAPI = false);
     CNNNetworkNGraphImpl(const CNNNetwork& nGraph);

     void getOutputsInfo(std::map<std::string, DataPtr>& out) const noexcept override;
@@ -98,6 +99,7 @@ private:
     std::map<std::string, DataPtr> _outputData;
     const std::vector<IExtensionPtr> _ie_extensions;
     std::unordered_map<std::string, std::string> _tensorNames;
+    bool _new_api = false;

     /**
      * @brief Create DataPtr for nGraph operation

View File

@@ -12,7 +12,9 @@
 #include <string>
 #include <vector>

+#include "cnn_network_ngraph_impl.hpp"
 #include "compilation_context.hpp"
+#include "cpp/ie_cnn_network.h"
 #include "cpp/ie_plugin.hpp"
 #include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp"
 #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
@@ -1276,12 +1278,22 @@ std::shared_ptr<ngraph::Function> Core::read_model(const std::string& model, con
     OV_CORE_CALL_STATEMENT(return _impl->ReadNetwork(model, weights).getFunction(););
 }

-ExecutableNetwork Core::compile_model(const std::shared_ptr<const ngraph::Function>& network,
+namespace {
+ie::CNNNetwork toCNN(const std::shared_ptr<const ngraph::Function>& model) {
+    return ie::CNNNetwork(
+        std::make_shared<ie::details::CNNNetworkNGraphImpl>(std::const_pointer_cast<ngraph::Function>(model),
+                                                            std::vector<ie::IExtensionPtr>{},
+                                                            true));
+}
+}  // namespace
+
+ExecutableNetwork Core::compile_model(const std::shared_ptr<const ngraph::Function>& model,
                                       const std::string& deviceName,
                                       const ConfigMap& config) {
     OV_CORE_CALL_STATEMENT({
-        auto exec =
-            _impl->LoadNetwork(ie::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network)), deviceName, config);
+        auto exec = _impl->LoadNetwork(toCNN(model), deviceName, config);
         return {exec.operator const InferenceEngine::details::SharedObjectLoader&().get(),
                 exec.operator std::shared_ptr<InferenceEngine::IExecutableNetworkInternal>&()};
     });
@@ -1291,19 +1303,18 @@ ExecutableNetwork Core::compile_model(const std::string& modelPath,
                                       const std::string& deviceName,
                                       const ConfigMap& config) {
     OV_CORE_CALL_STATEMENT({
+        // TODO: need to pass newAPI flag to preserve conversions of precisions
         auto exec = _impl->LoadNetwork(modelPath, deviceName, config);
         return {exec.operator const InferenceEngine::details::SharedObjectLoader&().get(),
                 exec.operator std::shared_ptr<InferenceEngine::IExecutableNetworkInternal>&()};
     });
 }

-ExecutableNetwork Core::compile_model(const std::shared_ptr<const ngraph::Function>& network,
+ExecutableNetwork Core::compile_model(const std::shared_ptr<const ngraph::Function>& model,
                                       const RemoteContext& context,
                                       const ConfigMap& config) {
     OV_CORE_CALL_STATEMENT({
-        auto exec = _impl->LoadNetwork(ie::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network)),
-                                       context._impl,
-                                       config);
+        auto exec = _impl->LoadNetwork(toCNN(model), context._impl, config);
         return {exec._so, exec._ptr};
     });
 }
@@ -1312,20 +1323,18 @@ void Core::add_extension(const ie::IExtensionPtr& extension) {
     OV_CORE_CALL_STATEMENT(_impl->AddExtension(extension););
 }

-ExecutableNetwork Core::import_model(std::istream& networkModel,
+ExecutableNetwork Core::import_model(std::istream& modelStream,
                                      const std::string& deviceName,
                                      const ConfigMap& config) {
     OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model");
     OV_CORE_CALL_STATEMENT({
-        auto exec = _impl->ImportNetwork(networkModel, deviceName, config);
+        auto exec = _impl->ImportNetwork(modelStream, deviceName, config);
         return {exec.operator const InferenceEngine::details::SharedObjectLoader&().get(),
                 exec.operator std::shared_ptr<InferenceEngine::IExecutableNetworkInternal>&()};
     });
 }

-ExecutableNetwork Core::import_model(std::istream& networkModel,
-                                     const RemoteContext& context,
-                                     const ConfigMap& config) {
+ExecutableNetwork Core::import_model(std::istream& modelStream, const RemoteContext& context, const ConfigMap& config) {
     OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model");

     using ExportMagic = std::array<char, 4>;
@@ -1333,29 +1342,28 @@ ExecutableNetwork Core::import_model(std::istream& networkModel,
     std::string deviceName;
     ExportMagic magic = {};
-    auto currentPos = networkModel.tellg();
-    networkModel.read(magic.data(), magic.size());
+    auto currentPos = modelStream.tellg();
+    modelStream.read(magic.data(), magic.size());
     if (exportMagic == magic) {
-        std::getline(networkModel, deviceName);
+        std::getline(modelStream, deviceName);
     } else {
         OPENVINO_ASSERT(false,
                         "Passed compiled stream does not contain device name. "
                         "Please, provide device name manually");
     }
-    networkModel.seekg(currentPos, networkModel.beg);
+    modelStream.seekg(currentPos, modelStream.beg);

     OV_CORE_CALL_STATEMENT({
-        auto exec = _impl->GetCPPPluginByName(deviceName).import_model(networkModel, {});
+        auto exec = _impl->GetCPPPluginByName(deviceName).import_model(modelStream, {});
         return {exec._so, exec._ptr};
     });
 }

-SupportedOpsMap Core::query_model(const std::shared_ptr<const ngraph::Function>& network,
+SupportedOpsMap Core::query_model(const std::shared_ptr<const ngraph::Function>& model,
                                   const std::string& deviceName,
                                   const ConfigMap& config) const {
     OV_CORE_CALL_STATEMENT({
-        auto cnnNet = ie::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network));
-        auto qnResult = _impl->QueryNetwork(cnnNet, deviceName, config);
+        auto qnResult = _impl->QueryNetwork(toCNN(model), deviceName, config);
         return qnResult.supportedLayersMap;
     });
 }
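
A hedged sketch of how the reworked public entry points compose; the model path and the "CPU" device name are placeholders, not part of this commit.

#include "openvino/runtime/core.hpp"

int main() {
    ov::runtime::Core core;
    auto model = core.read_model("model.xml");  // std::shared_ptr<ngraph::Function>, path is a placeholder

    // compile_model() and query_model() now wrap the function through toCNN(), which builds
    // CNNNetworkNGraphImpl with newAPI = true, so I64/FP16 element types are preserved
    // instead of being folded to I32/FP32.
    auto compiled = core.compile_model(model, "CPU");
    auto supported = core.query_model(model, "CPU");

    auto request = compiled.create_infer_request();
    return supported.empty() ? 1 : 0;
}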

View File

@@ -36,6 +36,7 @@
 #include "common_test_utils/file_utils.hpp"
 #include "common_test_utils/common_utils.hpp"

+#include "ie_precision.hpp"
 #include "transformations/rt_info/primitives_priority_attribute.hpp"
 #include "cnn_network_ngraph_impl.hpp"
@@ -307,6 +308,37 @@ TEST(CNNNGraphImplTests, TestSetBatchDynamic) {
     ASSERT_EQ(PARAMETER_MISMATCH, cnnNet.setBatchSize(2, nullptr));  // must not trigger conversion
 }
TEST(CNNNGraphImplTests, TestDoesChangePrecisionsWithNewAPI) {
std::shared_ptr<ngraph::Function> ngraph;
{
auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::Type_t::f16, ngraph::PartialShape::dynamic());
auto relu = std::make_shared<ngraph::op::Relu>(param);
auto result = std::make_shared<ngraph::op::Result>(relu);
ngraph = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
}
// new OpenVINO 2.0
{
auto ngraphImpl = std::make_shared<InferenceEngine::details::CNNNetworkNGraphImpl>(ngraph,
std::vector<InferenceEngine::IExtensionPtr>{}, true);
InferenceEngine::CNNNetwork cnnNet(ngraphImpl);
ASSERT_EQ(InferenceEngine::Precision::FP16,
cnnNet.getInputsInfo().begin()->second->getTensorDesc().getPrecision());
ASSERT_EQ(InferenceEngine::Precision::FP16,
cnnNet.getOutputsInfo().begin()->second->getTensorDesc().getPrecision());
}
// current API
{
auto ngraphImpl = std::make_shared<InferenceEngine::details::CNNNetworkNGraphImpl>(ngraph);
InferenceEngine::CNNNetwork cnnNet(ngraphImpl);
ASSERT_EQ(InferenceEngine::Precision::FP32,
cnnNet.getInputsInfo().begin()->second->getTensorDesc().getPrecision());
ASSERT_EQ(InferenceEngine::Precision::FP32,
cnnNet.getOutputsInfo().begin()->second->getTensorDesc().getPrecision());
}
}
TEST(CNNNGraphImplTests, TestSaveAffinity) {
    const std::string testAffinity = "testAffinity";
    std::shared_ptr<ngraph::Function> ngraph;

View File

@@ -0,0 +1,35 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include <mutex>
#include <string>
#include "openvino/runtime/core.hpp"
namespace ov {
namespace test {
class PluginCache {
public:
std::shared_ptr<ov::runtime::Core> core(const std::string& deviceToCheck = std::string());
static PluginCache& get();
void reset();
PluginCache(const PluginCache&) = delete;
PluginCache& operator=(const PluginCache&) = delete;
private:
PluginCache();
~PluginCache() = default;
std::mutex g_mtx;
std::shared_ptr<ov::runtime::Core> ov_core;
};
} // namespace test
} // namespace ov

View File

@@ -0,0 +1,102 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "functional_test_utils/ov_plugin_cache.hpp"
#include <gtest/gtest.h>
#include <cstdlib>
#include <ie_plugin_config.hpp>
#include <unordered_map>
namespace ov {
namespace test {
namespace {
class TestListener : public testing::EmptyTestEventListener {
public:
void OnTestEnd(const testing::TestInfo& testInfo) override {
if (auto testResult = testInfo.result()) {
if (testResult->Failed()) {
PluginCache::get().reset();
}
}
}
};
} // namespace
PluginCache& PluginCache::get() {
static PluginCache instance;
return instance;
}
std::shared_ptr<ov::runtime::Core> PluginCache::core(const std::string& deviceToCheck) {
std::lock_guard<std::mutex> lock(g_mtx);
if (std::getenv("DISABLE_PLUGIN_CACHE") != nullptr) {
#ifndef NDEBUG
std::cout << "'DISABLE_PLUGIN_CACHE' environment variable is set. New Core object will be created!"
<< std::endl;
#endif
return std::make_shared<ov::runtime::Core>();
}
#ifndef NDEBUG
std::cout << "Access PluginCache ov core. OV Core use count: " << ov_core.use_count() << std::endl;
#endif
if (!ov_core) {
#ifndef NDEBUG
std::cout << "Created ov core." << std::endl;
#endif
ov_core = std::make_shared<ov::runtime::Core>();
}
assert(0 != ov_core.use_count());
// register template plugin if it is needed
try {
std::string pluginName = "templatePlugin";
pluginName += IE_BUILD_POSTFIX;
ov_core->register_plugin(pluginName, "TEMPLATE");
} catch (...) {
}
if (!deviceToCheck.empty()) {
std::vector<std::string> metrics = ov_core->get_metric(deviceToCheck, METRIC_KEY(SUPPORTED_METRICS));
if (std::find(metrics.begin(), metrics.end(), METRIC_KEY(AVAILABLE_DEVICES)) != metrics.end()) {
std::vector<std::string> availableDevices =
ov_core->get_metric(deviceToCheck, METRIC_KEY(AVAILABLE_DEVICES));
if (availableDevices.empty()) {
std::cerr << "No available devices for " << deviceToCheck << std::endl;
std::exit(EXIT_FAILURE);
}
#ifndef NDEBUG
std::cout << "Available devices for " << deviceToCheck << ":" << std::endl;
for (const auto& device : availableDevices) {
std::cout << " " << device << std::endl;
}
#endif
}
}
return ov_core;
}
void PluginCache::reset() {
std::lock_guard<std::mutex> lock(g_mtx);
#ifndef NDEBUG
std::cout << "Reset PluginCache. OV Core use count: " << ov_core.use_count() << std::endl;
#endif
ov_core.reset();
}
PluginCache::PluginCache() {
auto& listeners = testing::UnitTest::GetInstance()->listeners();
listeners.Append(new TestListener);
}
} // namespace test
} // namespace ov
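
A sketch of how a test obtains the shared Core through the new cache; it mirrors the calls used elsewhere in this commit, with "TEMPLATE" as the device under test.

#include <ie_plugin_config.hpp>
#include <string>
#include <vector>
#include "functional_test_utils/ov_plugin_cache.hpp"

void plugin_cache_usage_example() {
    // Returns the process-wide ov::runtime::Core, creating it on first use and registering
    // the template plugin under the "TEMPLATE" name when the library is present.
    std::shared_ptr<ov::runtime::Core> core = ov::test::PluginCache::get().core("TEMPLATE");

    // The same metric query the cache itself performs; a failed test later resets the cached
    // Core through the gtest listener installed in the PluginCache constructor.
    std::vector<std::string> metrics = core->get_metric("TEMPLATE", METRIC_KEY(SUPPORTED_METRICS));
    (void)metrics;
}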

View File

@@ -9,6 +9,8 @@
  */
 #pragma once

+#include <type_traits>
+
 #include "openvino/core/coordinate.hpp"
 #include "openvino/core/shape.hpp"
 #include "openvino/core/type/element_type.hpp"
@@ -136,9 +138,9 @@ public:
      * @return A host pointer to tensor memory casted to specified type `T`.
      * @note Throws exception if specified type does not match with tensor element type
      */
-    template <typename T>
+    template <typename T, typename datatype = typename std::decay<T>::type>
     T* data() const {
-        return static_cast<T*>(data(element::from<T>()));
+        return static_cast<T*>(data(element::from<datatype>()));
     }

     /**
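
Why the extra template parameter matters: std::decay strips cv-qualifiers before the element::from lookup, so requesting a pointer-to-const now compiles, which is exactly what the reference tests above rely on. A minimal sketch, assuming an f32 tensor:

#include "openvino/runtime/tensor.hpp"

void data_accessor_example() {
    ov::runtime::Tensor t(ov::element::f32, ov::Shape{2, 2});
    float* rw = t.data<float>();              // worked before this change
    const float* ro = t.data<const float>();  // now resolves element::from<float> via std::decay
    (void)rw;
    (void)ro;
}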

View File

@@ -117,7 +117,7 @@ void* Tensor::data(const element::Type element_type) const {
                     element::fundamental_type_for(get_element_type()),
                     ", but it casted to ",
                     element_type,
-                    " with fundamental element type",
+                    " with fundamental element type ",
                     element::fundamental_type_for(element_type));
     }
     return _impl->getTensorDesc().getBlockingDesc().getOffsetPadding() * get_element_type().size() +