Fix code style for inference tests (#14243)

Oleg Pipikin 2022-12-13 03:59:34 +01:00, committed by GitHub
parent 19d9ecd1ba
commit c492c6ada3
39 changed files with 1687 additions and 1466 deletions


@@ -64,6 +64,7 @@ ov_add_test_target(
         funcTestUtils
     INCLUDES
         $<TARGET_PROPERTY:inference_engine_obj,SOURCE_DIR>/src
+    ADD_CLANG_FORMAT
     LABELS
         OV
 )


@@ -11,7 +11,6 @@ using namespace std;
 using namespace InferenceEngine;
 using namespace InferenceEngine::details;
-
 TEST(InferRequestCPPTests, throwsOnUninitializedSetBlob) {
     InferRequest req;
     ASSERT_THROW(req.SetBlob({}, {}), InferenceEngine::NotAllocated);


@@ -3,12 +3,11 @@
 //
 #include <gtest/gtest.h>
-#include <random>
-#include <chrono>
 #include <ie_blob.h>
 #include <blob_transform.hpp>
+#include <chrono>
+#include <random>
 using namespace ::testing;
 using namespace InferenceEngine;
@@ -16,61 +15,64 @@ using namespace InferenceEngine;
 using ChannelNum = size_t;
 using BatchNum = size_t;
 using PrecisionType = InferenceEngine::Precision::ePrecision;
 using IsInterleaved = bool;  // true = interleaved, false = deinterleaved.
-using Dims = std::vector<size_t>;  // dimensions are in the form of (N x C x D1 x D2 ... Dn), so Dims is vector (D1 x D2 ... Dn)
+using Dims =
+    std::vector<size_t>;  // dimensions are in the form of (N x C x D1 x D2 ... Dn), so Dims is vector (D1 x D2 ... Dn)
 namespace {
 InferenceEngine::Layout setLayout(IsInterleaved isInterleaved, int dimsSize) {
     if (dimsSize == 3) {
         return (isInterleaved) ? InferenceEngine::Layout::NDHWC : InferenceEngine::Layout::NCDHW;
     } else if (dimsSize == 2) {
         return (isInterleaved) ? InferenceEngine::Layout::NHWC : InferenceEngine::Layout::NCHW;
     }
     IE_THROW() << "Can't set layout";
 }
 // Support only for 4d and 5d blobs
 SizeVector SetDimVector(BatchNum batchNum, ChannelNum channelNum, Dims dims) {
     if (dims.size() == 2) {
-        return SizeVector{ batchNum, channelNum, dims[0], dims[1] };
+        return SizeVector{batchNum, channelNum, dims[0], dims[1]};
     } else if (dims.size() == 3) {
-        return SizeVector{ batchNum, channelNum, dims[0], dims[1], dims[2] };
+        return SizeVector{batchNum, channelNum, dims[0], dims[1], dims[2]};
     }
     IE_THROW() << "Can't set dimVector";
 }
 // For FP16 and Q78 precision we use int16_t type
-InferenceEngine::Blob::Ptr createBlob(InferenceEngine::Precision precision, SizeVector dimsVector, InferenceEngine::Layout layout) {
+InferenceEngine::Blob::Ptr createBlob(InferenceEngine::Precision precision,
+                                      SizeVector dimsVector,
+                                      InferenceEngine::Layout layout) {
     InferenceEngine::TensorDesc tensorDesc(precision, dimsVector, layout);
     switch (precision) {
     case InferenceEngine::Precision::FP32:
         return make_shared_blob<float>(tensorDesc);
     case InferenceEngine::Precision::FP64:
         return make_shared_blob<double>(tensorDesc);
     case InferenceEngine::Precision::FP16:
     case InferenceEngine::Precision::I16:
     case InferenceEngine::Precision::Q78:
         return make_shared_blob<int16_t>(tensorDesc);
     case InferenceEngine::Precision::I32:
         return make_shared_blob<int32_t>(tensorDesc);
     case InferenceEngine::Precision::U32:
         return make_shared_blob<uint32_t>(tensorDesc);
     case InferenceEngine::Precision::I64:
         return make_shared_blob<int64_t>(tensorDesc);
     case InferenceEngine::Precision::U64:
         return make_shared_blob<uint64_t>(tensorDesc);
     case InferenceEngine::Precision::U16:
         return make_shared_blob<uint16_t>(tensorDesc);
     case InferenceEngine::Precision::I4:
     case InferenceEngine::Precision::I8:
     case InferenceEngine::Precision::BIN:
         return make_shared_blob<int8_t>(tensorDesc);
     case InferenceEngine::Precision::U4:
     case InferenceEngine::Precision::U8:
         return make_shared_blob<uint8_t>(tensorDesc);
     default:
         IE_THROW() << "Unsupported precision";
     }
 }
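
Aside: the createBlob() helper above is a plain precision-to-type dispatch over make_shared_blob. A minimal standalone sketch of the same pattern, assuming only the Inference Engine blob API already included in this file (the helper name and shape below are illustrative, not part of the commit):

#include <ie_blob.h>

// Build and allocate an FP32 NCHW blob of shape 1x3x2x2, as createBlob()
// does for the FP32 case above.
InferenceEngine::Blob::Ptr makeFp32Blob() {
    InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32,
                                     {1, 3, 2, 2},
                                     InferenceEngine::Layout::NCHW);
    auto blob = InferenceEngine::make_shared_blob<float>(desc);
    blob->allocate();  // reserve the underlying buffer before reading/writing
    return blob;
}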
@@ -87,30 +89,32 @@ size_t GenerateRandom(size_t elem) {
 // dims is the blob shape, e.g. {1, 3, 640, 480}
 // random index[i] lays between 0 and dims[i]-1
 SizeVector GenerateRandomVector(SizeVector dims) {
     SizeVector idx(dims.size());
     for (auto i = 0; i < dims.size(); ++i) {
         idx[i] = GenerateRandom(dims[i]);
     }
     return idx;
 }
-void PrintParams(InferenceEngine::Layout layout, SizeVector dims, std::string blobType, InferenceEngine::Precision precision) {
-    std::cout <<blobType <<"Blob params: " << layout << ", precision: "<< precision << ", dims: {";
+void PrintParams(InferenceEngine::Layout layout,
+                 SizeVector dims,
+                 std::string blobType,
+                 InferenceEngine::Precision precision) {
+    std::cout << blobType << "Blob params: " << layout << ", precision: " << precision << ", dims: {";
     for (int i = 0; i < dims.size(); i++) {
-        std::cout << (i > 0 ? ", ": "") << dims[i];
+        std::cout << (i > 0 ? ", " : "") << dims[i];
     }
     std::cout << "}" << std::endl;
 }
 // For FP16 and Q78 precision we use int16_t type
-template<typename T>
+template <typename T>
 void FillBlobRandom(Blob::Ptr& inputBlob) {
     srand(1);
     auto inputBlobData = inputBlob->buffer().as<T*>();
     for (size_t i = 0; i < inputBlob->size(); i++) {
-        inputBlobData[i] = (T) (GenerateRandom(RAND_MAX) / static_cast<float>(RAND_MAX) * 100);
+        inputBlobData[i] = (T)(GenerateRandom(RAND_MAX) / static_cast<float>(RAND_MAX) * 100);
     }
 }
@@ -118,37 +122,36 @@ void FillBlobRandom(Blob::Ptr& inputBlob) {
 void FillBlob(Blob::Ptr& inputBlob) {
     auto precision = inputBlob->getTensorDesc().getPrecision();
     switch (precision) {
     case InferenceEngine::Precision::FP32:
         return FillBlobRandom<float>(inputBlob);
     case InferenceEngine::Precision::FP64:
         return FillBlobRandom<double>(inputBlob);
     case InferenceEngine::Precision::FP16:
     case InferenceEngine::Precision::I16:
     case InferenceEngine::Precision::Q78:
         return FillBlobRandom<int16_t>(inputBlob);
     case InferenceEngine::Precision::I32:
         return FillBlobRandom<int32_t>(inputBlob);
     case InferenceEngine::Precision::U32:
         return FillBlobRandom<uint32_t>(inputBlob);
     case InferenceEngine::Precision::I64:
         return FillBlobRandom<int64_t>(inputBlob);
     case InferenceEngine::Precision::U64:
         return FillBlobRandom<uint64_t>(inputBlob);
     case InferenceEngine::Precision::U16:
         return FillBlobRandom<uint16_t>(inputBlob);
     case InferenceEngine::Precision::I4:
     case InferenceEngine::Precision::I8:
     case InferenceEngine::Precision::BIN:
         return FillBlobRandom<int8_t>(inputBlob);
     case InferenceEngine::Precision::U4:
     case InferenceEngine::Precision::U8:
         return FillBlobRandom<uint8_t>(inputBlob);
     default:
         IE_THROW() << "Cant fill blob with \"" << precision << "\" precision\n";
     }
 }
 template <typename T>
 T GetElem(Blob::Ptr& blob, SizeVector idx) {
     T* src = blob->buffer().as<T*>() + blob->getTensorDesc().getBlockingDesc().getOffsetPadding();
@@ -188,17 +191,17 @@ bool IsCorrectBlobCopy_Impl(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
     EXPECT_TRUE(srcBlob->size() == dstBlob->size());
     int experimentsNum = SetExperimentsNum(srcBlob->size());
     int errorsCount = 0;
-    for ( ; experimentsNum > 0; --experimentsNum) {
+    for (; experimentsNum > 0; --experimentsNum) {
         SizeVector randomElemIdx = GenerateRandomVector(srcBlob->getTensorDesc().getDims());
         auto srcElem = GetElem<T>(srcBlob, randomElemIdx);
         auto dstElem = GetElem<T>(dstBlob, randomElemIdx);
         if (srcElem != dstElem) {
             if (errorsCount < 10) {
                 errorsCount++;
                 std::cout << "ERROR: srcElem = " << srcElem << ", dstElem = " << dstElem << std::endl;
             } else {
                 errorsCount++;
             }
         }
     }
     if (errorsCount > 0) {
@@ -207,42 +210,42 @@ bool IsCorrectBlobCopy_Impl(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
     return errorsCount == 0;
 }
 bool IsCorrectBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
     switch (srcBlob->getTensorDesc().getPrecision()) {
     case InferenceEngine::Precision::FP32:
         return IsCorrectBlobCopy_Impl<float>(srcBlob, dstBlob);
     case InferenceEngine::Precision::FP64:
         return IsCorrectBlobCopy_Impl<double>(srcBlob, dstBlob);
     case InferenceEngine::Precision::FP16:
     case InferenceEngine::Precision::I16:
     case InferenceEngine::Precision::Q78:
         return IsCorrectBlobCopy_Impl<int16_t>(srcBlob, dstBlob);
     case InferenceEngine::Precision::I32:
         return IsCorrectBlobCopy_Impl<int32_t>(srcBlob, dstBlob);
     case InferenceEngine::Precision::U32:
-        return IsCorrectBlobCopy_Impl<uint32_t >(srcBlob, dstBlob);
+        return IsCorrectBlobCopy_Impl<uint32_t>(srcBlob, dstBlob);
     case InferenceEngine::Precision::I64:
-        return IsCorrectBlobCopy_Impl<int64_t >(srcBlob, dstBlob);
+        return IsCorrectBlobCopy_Impl<int64_t>(srcBlob, dstBlob);
     case InferenceEngine::Precision::U64:
-        return IsCorrectBlobCopy_Impl<uint64_t >(srcBlob, dstBlob);
+        return IsCorrectBlobCopy_Impl<uint64_t>(srcBlob, dstBlob);
     case InferenceEngine::Precision::U16:
         return IsCorrectBlobCopy_Impl<uint16_t>(srcBlob, dstBlob);
     case InferenceEngine::Precision::I4:
     case InferenceEngine::Precision::I8:
     case InferenceEngine::Precision::BIN:
         return IsCorrectBlobCopy_Impl<int8_t>(srcBlob, dstBlob);
     case InferenceEngine::Precision::U4:
     case InferenceEngine::Precision::U8:
         return IsCorrectBlobCopy_Impl<uint8_t>(srcBlob, dstBlob);
     default:
         return false;
     }
 }
 }  // namespace
-using BlobCopyTest = ::testing::TestWithParam <std::tuple<IsInterleaved, IsInterleaved, BatchNum, ChannelNum, Dims, PrecisionType >>;
+using BlobCopyTest =
+    ::testing::TestWithParam<std::tuple<IsInterleaved, IsInterleaved, BatchNum, ChannelNum, Dims, PrecisionType>>;
 TEST_P(BlobCopyTest, BlobCopy) {
     IsInterleaved srcIsInterleaved = get<0>(GetParam());
@@ -269,11 +272,13 @@ TEST_P(BlobCopyTest, BlobCopy) {
     FillBlob(srcBlob);
     auto start = std::chrono::high_resolution_clock::now();
     blob_copy(srcBlob, dstBlob);
     auto finish = std::chrono::high_resolution_clock::now();
-    std::cout << "Blob_copy execution time : " << std::chrono::duration_cast<std::chrono::microseconds>(finish - start).count() << " micros" << std::endl;
+    std::cout << "Blob_copy execution time : "
+              << std::chrono::duration_cast<std::chrono::microseconds>(finish - start).count() << " micros"
+              << std::endl;
     ASSERT_TRUE(IsCorrectBlobCopy(srcBlob, dstBlob)) << "'blob_copy' function is not correct";
 }
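
Aside: the timing statement reformatted above is the standard std::chrono idiom. Shown in isolation (a sketch; the measured call is a placeholder, not part of the commit):

#include <chrono>
#include <iostream>

int main() {
    auto start = std::chrono::high_resolution_clock::now();
    // ... code under measurement, e.g. blob_copy(srcBlob, dstBlob) in the test ...
    auto finish = std::chrono::high_resolution_clock::now();
    std::cout << "execution time: "
              << std::chrono::duration_cast<std::chrono::microseconds>(finish - start).count()
              << " micros" << std::endl;
    return 0;
}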
@@ -282,24 +287,28 @@ namespace {
 // is interleaved srcBlob?
 std::vector<IsInterleaved> BlobCopy_srcLayoutParam = {
-    true, false,
+    true,
+    false,
 };
 // is interleaved dstBlob?
 std::vector<IsInterleaved> BlobCopy_dstLayoutParam = {
-    false, true,
+    false,
+    true,
 };
 std::vector<BatchNum> BlobCopy_BatchNum = {
-    1, 3,
+    1,
+    3,
 };
-std::vector<ChannelNum > BlobCopy_ChannelNum = {
-    3, 7,
+std::vector<ChannelNum> BlobCopy_ChannelNum = {
+    3,
+    7,
 };
 std::vector<Dims> BlobCopy_Dims = {
     {{10, 20, 30}},
     {{60, 80}},
 };
 // The 'blob_copy(4/5)_d' function is a template with the parameter-list <InferenceEngine::Precision::ePrecision PRC>
@@ -308,25 +317,26 @@ std::vector<Dims> BlobCopy_Dims = {
 // U8 is used for cases with the following accuracy: U8, I8
 // Cases with other precision are not supported
 std::vector<PrecisionType> BlobCopy_PrecisionParams = {
     InferenceEngine::Precision::FP32,
     InferenceEngine::Precision::FP16,
     InferenceEngine::Precision::U8,
     InferenceEngine::Precision::I8,
     InferenceEngine::Precision::U16,
     InferenceEngine::Precision::I16,
     InferenceEngine::Precision::U32,
     InferenceEngine::Precision::I32,
 };
 }  // namespace
-INSTANTIATE_TEST_SUITE_P(accuracy, BlobCopyTest,
-                         ::testing::Combine(::testing::ValuesIn(BlobCopy_srcLayoutParam),
-                                            ::testing::ValuesIn(BlobCopy_dstLayoutParam),
-                                            ::testing::ValuesIn(BlobCopy_BatchNum),
-                                            ::testing::ValuesIn(BlobCopy_ChannelNum),
-                                            ::testing::ValuesIn(BlobCopy_Dims),
-                                            ::testing::ValuesIn(BlobCopy_PrecisionParams)));
+INSTANTIATE_TEST_SUITE_P(accuracy,
+                         BlobCopyTest,
+                         ::testing::Combine(::testing::ValuesIn(BlobCopy_srcLayoutParam),
+                                            ::testing::ValuesIn(BlobCopy_dstLayoutParam),
+                                            ::testing::ValuesIn(BlobCopy_BatchNum),
+                                            ::testing::ValuesIn(BlobCopy_ChannelNum),
+                                            ::testing::ValuesIn(BlobCopy_Dims),
+                                            ::testing::ValuesIn(BlobCopy_PrecisionParams)));
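
Aside: INSTANTIATE_TEST_SUITE_P with ::testing::Combine instantiates BlobCopyTest once per element of the cartesian product of the six value lists; each instance reads its tuple through GetParam(). A reduced sketch of the same gtest mechanism (the names here are illustrative):

#include <gtest/gtest.h>
#include <tuple>

using DemoParams = std::tuple<bool, int>;  // e.g. (isInterleaved, batchNum)

class DemoCombineTest : public ::testing::TestWithParam<DemoParams> {};

TEST_P(DemoCombineTest, ReadsTuple) {
    bool interleaved = std::get<0>(GetParam());
    int batch = std::get<1>(GetParam());
    (void)interleaved;
    EXPECT_GE(batch, 1);
}

// 2 x 2 = 4 test instances, mirroring how BlobCopyTest multiplies its six lists.
INSTANTIATE_TEST_SUITE_P(demo,
                         DemoCombineTest,
                         ::testing::Combine(::testing::Values(true, false),
                                            ::testing::Values(1, 3)));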
 namespace {
@@ -335,8 +345,9 @@ bool IsEqualBlobCopy_Impl(Blob::Ptr& ref, Blob::Ptr& dst) {
     EXPECT_TRUE(ref->size() == dst->size());
     auto refData = ref->buffer().as<T*>();
     auto dstData = dst->buffer().as<T*>();
-    return (std::equal(dstData, dstData + dst->size(), refData,
-        [](T left, T right) { return left == right; }));
+    return (std::equal(dstData, dstData + dst->size(), refData, [](T left, T right) {
+        return left == right;
+    }));
 }
 bool IsEqualBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
@@ -424,7 +435,6 @@ void copy3DBlobsAllBytesWithReLayoutWrapper(const Blob::Ptr& srcLayoutBlob, Blob
     }
 }
-
 std::vector<Dims> BlobCopySetLayout_Dims = {
     {{1, 10, 10}},
     {{2, 100, 100}},
@@ -468,7 +478,7 @@ TEST_P(BlobCopySetLayoutTest, BlobCopyWithNCHW_To_NHWC_After_setLayout) {
     ASSERT_TRUE(IsEqualBlobCopy(ref, dst)) << "'blob_copy' after setLayout function is not correct";
 }
-INSTANTIATE_TEST_SUITE_P(accuracy, BlobCopySetLayoutTest,
-                         ::testing::Combine(::testing::ValuesIn(BlobCopySetLayout_Dims),
-                                            ::testing::ValuesIn(BlobCopySetLayout_Precisions)));
+INSTANTIATE_TEST_SUITE_P(accuracy,
+                         BlobCopySetLayoutTest,
+                         ::testing::Combine(::testing::ValuesIn(BlobCopySetLayout_Dims),
+                                            ::testing::ValuesIn(BlobCopySetLayout_Precisions)));

File diff suppressed because it is too large


@@ -3,7 +3,9 @@
 //
 #include <gtest/gtest.h>
+
 #include <unordered_map>
+
 #include "caseless.hpp"
 #include "debug.h"
@@ -39,7 +41,7 @@ TEST_F(CaselessTests, canFindCaslessInMap) {
 }
 TEST_F(CaselessTests, canFindCaslessInUnordered) {
-    caseless_unordered_map <string, int> storage = {
+    caseless_unordered_map<string, int> storage = {
         {"Abc", 1},
         {"bC", 2},
         {"AbcD", 3},


@@ -3,11 +3,13 @@
 //
 #include <gtest/gtest.h>
+
+#include <common_test_utils/file_utils.hpp>
+
 #include "cpp/ie_cnn_network.h"
 #include "inference_engine.hpp"
 #include "openvino/opsets/opset.hpp"
 #include "openvino/pass/serialize.hpp"
-#include <common_test_utils/file_utils.hpp>
 #include "openvino/util/file_util.hpp"
 using namespace InferenceEngine;
@@ -82,13 +84,13 @@ static std::shared_ptr<ov::Model> CNNNetworkTests_create_model() {
     auto param1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
     param1->set_friendly_name("p1_friendly");
     param1->output(0).set_names({"p1_1", "p1_2"});
-    auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape {-1, 3, 224, 224});
+    auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{-1, 3, 224, 224});
     param2->set_friendly_name("p2_friendly");
     param2->output(0).set_names({"p2_1", "p2_2"});
-    auto param3 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape {1, 3, 224, 224});
+    auto param3 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 3, 224, 224});
     param3->set_friendly_name("p3_friendly");
     param3->output(0).set_names({"p3_1", "p3_2"});
-    return std::make_shared<ov::Model>(ov::OutputVector {param1, param2, param3},
+    return std::make_shared<ov::Model>(ov::OutputVector{param1, param2, param3},
                                        ov::ParameterVector{param1, param2, param3});
 }
@@ -151,12 +153,15 @@ protected:
     std::string modelName = "CNNNetworkTests_LoadFromFileTest.xml";
     std::string weightsName = "CNNNetworkTests_LoadFromFileTest.bin";
     InferenceEngine::Core core;
 public:
     void SetUp() override {
         std::shared_ptr<ov::Model> model = CNNNetworkTests_create_model();
         ov::pass::Serialize(modelName, weightsName).run_on_model(model);
-        ASSERT_NO_THROW(core.RegisterPlugin(ov::util::make_plugin_library_name(CommonTestUtils::getExecutableDirectory(),
-            std::string("mock_engine") + IE_BUILD_POSTFIX), "mock"));
+        ASSERT_NO_THROW(
+            core.RegisterPlugin(ov::util::make_plugin_library_name(CommonTestUtils::getExecutableDirectory(),
+                                                                   std::string("mock_engine") + IE_BUILD_POSTFIX),
+                                "mock"));
     }
     void TearDown() override {
@@ -179,5 +184,4 @@ TEST_F(CNNNetworkTests_LoadFromFileTest, throwsHasDynamicInputs_fromPath) {
         EXPECT_TRUE(std::string(e.what()).find("p3_2") == std::string::npos) << e.what();
     }
 }
-#endif //defined(ENABLE_OV_IR_FRONTEND)
+#endif  // defined(ENABLE_OV_IR_FRONTEND)


@@ -2,46 +2,44 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <gtest/gtest.h>
-#include <legacy/cnn_network_impl.hpp>
-#include <legacy/details/ie_cnn_network_iterator.hpp>
-#include <string>
-#include <sstream>
-#include <fstream>
-#include <memory>
-#include <map>
 #include <cpp/ie_cnn_network.h>
-#include <legacy/ie_util_internal.hpp>
-#include <ie_parameter.hpp>
-#include <ie_core.hpp>
+#include <gtest/gtest.h>
 #include <legacy/net_pass.h>
-#include <legacy/convert_function_to_cnn_network.hpp>
-#include <legacy/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
-#include <ngraph/pass/manager.hpp>
-#include <common_test_utils/ngraph_test_utils.hpp>
+
+#include <common_test_utils/ngraph_test_utils.hpp>
+#include <fstream>
+#include <ie_core.hpp>
+#include <ie_parameter.hpp>
+#include <legacy/cnn_network_impl.hpp>
+#include <legacy/convert_function_to_cnn_network.hpp>
+#include <legacy/details/ie_cnn_network_iterator.hpp>
+#include <legacy/ie_util_internal.hpp>
+#include <legacy/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
+#include <map>
+#include <memory>
+#include <ngraph/function.hpp>
+#include <ngraph/op/constant.hpp>
+#include <ngraph/op/convert.hpp>
+#include <ngraph/op/maximum.hpp>
+#include <ngraph/op/parameter.hpp>
+#include <ngraph/op/prelu.hpp>
+#include <ngraph/op/relu.hpp>
+#include <ngraph/op/result.hpp>
 #include <ngraph/opsets/opset3.hpp>
 #include <ngraph/opsets/opset5.hpp>
 #include <ngraph/opsets/opset8.hpp>
-#include <ngraph/function.hpp>
+#include <ngraph/pass/manager.hpp>
 #include <ngraph/variant.hpp>
-#include <ngraph/op/maximum.hpp>
-#include <ngraph/op/constant.hpp>
-#include <ngraph/op/convert.hpp>
-#include <ngraph/op/parameter.hpp>
-#include <ngraph/op/relu.hpp>
-#include <ngraph/op/prelu.hpp>
-#include <ngraph/op/result.hpp>
 #include <openvino/core/model.hpp>
 #include <openvino/core/node_vector.hpp>
+#include <sstream>
+#include <string>
+
+#include "cnn_network_ngraph_impl.hpp"
-#include "common_test_utils/file_utils.hpp"
 #include "common_test_utils/common_utils.hpp"
+#include "common_test_utils/file_utils.hpp"
 #include "ie_precision.hpp"
 #include "transformations/rt_info/primitives_priority_attribute.hpp"
-#include "cnn_network_ngraph_impl.hpp"
 using namespace testing;
 using namespace InferenceEngine;
@@ -107,10 +105,17 @@ TEST(CNNNGraphImplTests, TestNMS5OutputNames) {
         auto max_output_boxes_per_class = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
         auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
         auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
-        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold,
-                                                                       ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER, true);
+        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(
+            boxes,
+            scores,
+            max_output_boxes_per_class,
+            iou_threshold,
+            score_threshold,
+            ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
+            true);
         nms->set_friendly_name("nms");
-        f = std::make_shared<ngraph::Function>(ngraph::OutputVector{nms->output(0), nms->output(1), nms->output(2)}, ngraph::ParameterVector{boxes, scores});
+        f = std::make_shared<ngraph::Function>(ngraph::OutputVector{nms->output(0), nms->output(1), nms->output(2)},
+                                               ngraph::ParameterVector{boxes, scores});
     }
     InferenceEngine::CNNNetwork cnnNet(f);
@@ -286,7 +291,8 @@ TEST(CNNNGraphImplTests, TestSetBatchScalar) {
 TEST(CNNNGraphImplTests, TestGetBatchDynamic) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
-        auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::Type_t::f32, ngraph::PartialShape{5, ngraph::Dimension::dynamic()});
+        auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::Type_t::f32,
+                                                             ngraph::PartialShape{5, ngraph::Dimension::dynamic()});
         auto relu = std::make_shared<ngraph::op::Relu>(param);
         auto result = std::make_shared<ngraph::op::Result>(relu);
         ngraph = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
@@ -300,7 +306,8 @@ TEST(CNNNGraphImplTests, TestGetBatchDynamic) {
 TEST(CNNNGraphImplTests, TestSetBatchDynamic) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
-        auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::Type_t::f32, ngraph::PartialShape::dynamic());
+        auto param =
+            std::make_shared<ngraph::op::Parameter>(ngraph::element::Type_t::f32, ngraph::PartialShape::dynamic());
         auto relu = std::make_shared<ngraph::op::Relu>(param);
         auto result = std::make_shared<ngraph::op::Result>(relu);
         ngraph = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
@@ -314,7 +321,8 @@ TEST(CNNNGraphImplTests, TestSetBatchDynamic) {
 TEST(CNNNGraphImplTests, TestDoesChangePrecisionsWithNewAPI) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
-        auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::Type_t::f16, ngraph::PartialShape::dynamic());
+        auto param =
+            std::make_shared<ngraph::op::Parameter>(ngraph::element::Type_t::f16, ngraph::PartialShape::dynamic());
         auto relu = std::make_shared<ngraph::op::Relu>(param);
         auto result = std::make_shared<ngraph::op::Result>(relu);
         ngraph = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
@@ -322,13 +330,15 @@ TEST(CNNNGraphImplTests, TestDoesChangePrecisionsWithNewAPI) {
     // new OpenVINO 2.0
     {
-        auto ngraphImpl = std::make_shared<InferenceEngine::details::CNNNetworkNGraphImpl>(ngraph,
-            std::vector<InferenceEngine::IExtensionPtr>{}, true);
+        auto ngraphImpl = std::make_shared<InferenceEngine::details::CNNNetworkNGraphImpl>(
+            ngraph,
+            std::vector<InferenceEngine::IExtensionPtr>{},
+            true);
         InferenceEngine::CNNNetwork cnnNet(ngraphImpl);
         ASSERT_EQ(InferenceEngine::Precision::FP16,
                   cnnNet.getInputsInfo().begin()->second->getTensorDesc().getPrecision());
         ASSERT_EQ(InferenceEngine::Precision::FP16,
                   cnnNet.getOutputsInfo().begin()->second->getTensorDesc().getPrecision());
     }
     // current API
@@ -336,9 +346,9 @@ TEST(CNNNGraphImplTests, TestDoesChangePrecisionsWithNewAPI) {
         auto ngraphImpl = std::make_shared<InferenceEngine::details::CNNNetworkNGraphImpl>(ngraph);
         InferenceEngine::CNNNetwork cnnNet(ngraphImpl);
         ASSERT_EQ(InferenceEngine::Precision::FP32,
                   cnnNet.getInputsInfo().begin()->second->getTensorDesc().getPrecision());
         ASSERT_EQ(InferenceEngine::Precision::FP32,
                   cnnNet.getOutputsInfo().begin()->second->getTensorDesc().getPrecision());
     }
 }
@@ -737,13 +747,13 @@ TEST(CNNNGraphImplTests, ReadMeanImageFromCNNNetReader) {
 </net>
 )V0G0N";
     InferenceEngine::Core core;
-    size_t hwSize = 22*22;
-    size_t dataSize = hwSize*3;
+    size_t hwSize = 22 * 22;
+    size_t dataSize = hwSize * 3;
     Blob::Ptr weights = make_shared_blob<float>(TensorDesc(Precision::FP32, {dataSize}, Layout::C));
     weights->allocate();
     {
         auto lockData = weights->buffer();
-        float *dataPtr = lockData.as<float*>();
+        float* dataPtr = lockData.as<float*>();
         for (size_t i = 0; i < dataSize; ++i) {
             dataPtr[i] = 1;
@@ -753,7 +763,7 @@ TEST(CNNNGraphImplTests, ReadMeanImageFromCNNNetReader) {
     auto f = network.getFunction();
     std::shared_ptr<ngraph::Function> f_ref;
     auto data = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 22, 22});
     {
         auto mean_image = ngraph::opset1::Constant::create(ngraph::element::f32, ngraph::Shape{3, 22, 22}, {1});
         auto sub = std::make_shared<ngraph::opset1::Subtract>(data, mean_image);
@@ -837,15 +847,16 @@ TEST(CNNNGraphImplTests, ReadMeanValueFromCNNNetReader) {
     std::shared_ptr<ngraph::Function> f_ref;
     {
         auto data = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 22, 22});
-        auto mean_image = ngraph::opset1::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1.1, 2.2, 3.3});
+        auto mean_image =
+            ngraph::opset1::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1.1, 2.2, 3.3});
         auto sub = std::make_shared<ngraph::opset1::Subtract>(data, mean_image);
         auto relu = std::make_shared<ngraph::opset1::Relu>(sub);
         f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{relu}, ngraph::ParameterVector{data});
     }
     const auto fc = FunctionsComparator::with_default()
                         .enable(FunctionsComparator::ATTRIBUTES)
                         .enable(FunctionsComparator::CONST_VALUES);
     const auto res = fc.compare(f, f_ref);
     EXPECT_TRUE(res.valid) << res.message;
 }
@@ -871,8 +882,7 @@ TEST(CNNNGraphImplTests, CanChangeInputPrecision) {
         const auto inputsInfo = cnnNet.getInputsInfo();
-        ASSERT_EQ(inputsInfo.at("input")->getPrecision(), Precision::FP32)
-            << "FP32 is default presision";
+        ASSERT_EQ(inputsInfo.at("input")->getPrecision(), Precision::FP32) << "FP32 is default presision";
     }
     {
         SCOPED_TRACE("Manually set input precision");
@@ -886,8 +896,8 @@ TEST(CNNNGraphImplTests, CanChangeInputPrecision) {
         SCOPED_TRACE("Convert to old format");
         // convert to old representation
-        convertedNetwork = InferenceEngine::CNNNetwork(
-            std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
+        convertedNetwork =
+            InferenceEngine::CNNNetwork(std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
     }
     {
         SCOPED_TRACE("After conversion");
@@ -895,7 +905,7 @@ TEST(CNNNGraphImplTests, CanChangeInputPrecision) {
         const auto inputsInfo = convertedNetwork.getInputsInfo();
         ASSERT_EQ(inputsInfo.at("input")->getPrecision(), Precision::FP16)
             << "Manually set presision should be left unchanged";
     }
 }
@@ -920,8 +930,7 @@ TEST(CNNNGraphImplTests, CanChangeInputLayout) {
         const auto inputsInfo = cnnNet.getInputsInfo();
-        ASSERT_EQ(inputsInfo.at("input")->getLayout(), Layout::NCHW)
-            << "NCHW is default layout";
+        ASSERT_EQ(inputsInfo.at("input")->getLayout(), Layout::NCHW) << "NCHW is default layout";
     }
     {
         SCOPED_TRACE("Manually set input layout");
@@ -935,16 +944,15 @@ TEST(CNNNGraphImplTests, CanChangeInputLayout) {
         SCOPED_TRACE("Convert to old format");
         // convert to old representation
-        convertedNetwork = InferenceEngine::CNNNetwork(
-            std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
+        convertedNetwork =
+            InferenceEngine::CNNNetwork(std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
     }
     {
         SCOPED_TRACE("After conversion");
         const auto inputsInfo = convertedNetwork.getInputsInfo();
-        ASSERT_EQ(inputsInfo.at("input")->getLayout(), Layout::NHWC)
-            << "Manually set layout should be left unchanged";
+        ASSERT_EQ(inputsInfo.at("input")->getLayout(), Layout::NHWC) << "Manually set layout should be left unchanged";
     }
 }
@@ -969,8 +977,7 @@ TEST(CNNNGraphImplTests, CanChangeOutputPrecision) {
         const auto outputsInfo = cnnNet.getOutputsInfo();
-        ASSERT_EQ(outputsInfo.at("output")->getPrecision(), Precision::FP32)
-            << "FP32 is default presision";
+        ASSERT_EQ(outputsInfo.at("output")->getPrecision(), Precision::FP32) << "FP32 is default presision";
     }
     {
         SCOPED_TRACE("Manually set output precision");
@@ -984,8 +991,8 @@ TEST(CNNNGraphImplTests, CanChangeOutputPrecision) {
         SCOPED_TRACE("Convert to old format");
         // convert to old representation
-        convertedNetwork = InferenceEngine::CNNNetwork(
-            std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
+        convertedNetwork =
+            InferenceEngine::CNNNetwork(std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
     }
     {
         SCOPED_TRACE("After conversion");
@@ -993,7 +1000,7 @@ TEST(CNNNGraphImplTests, CanChangeOutputPrecision) {
         const auto outputsInfo = convertedNetwork.getOutputsInfo();
         ASSERT_EQ(outputsInfo.at("output")->getPrecision(), Precision::FP16)
             << "Manually set presision should be left unchanged";
     }
 }
@@ -1018,8 +1025,7 @@ TEST(CNNNGraphImplTests, CanChangeOutputLayout) {
         const auto outputsInfo = cnnNet.getOutputsInfo();
-        ASSERT_EQ(outputsInfo.at("output")->getLayout(), Layout::NCHW)
-            << "NCHW is default layout";
+        ASSERT_EQ(outputsInfo.at("output")->getLayout(), Layout::NCHW) << "NCHW is default layout";
     }
     {
         SCOPED_TRACE("Manually set output layout");
@@ -1033,8 +1039,8 @@ TEST(CNNNGraphImplTests, CanChangeOutputLayout) {
         SCOPED_TRACE("Convert to old format");
         // convert to old representation
-        convertedNetwork = InferenceEngine::CNNNetwork(
-            std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
+        convertedNetwork =
+            InferenceEngine::CNNNetwork(std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
     }
     {
         SCOPED_TRACE("After conversion");
@@ -1042,7 +1048,7 @@ TEST(CNNNGraphImplTests, CanChangeOutputLayout) {
         const auto outputsInfo = convertedNetwork.getOutputsInfo();
         ASSERT_EQ(outputsInfo.at("output")->getLayout(), Layout::NHWC)
             << "Manually set layout should be left unchanged";
     }
 }
@@ -1068,8 +1074,9 @@ TEST(CNNNGraphImplTests, CanSetBatchReadValue) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
         auto input = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, ngraph::Shape{1, 2});
-        auto constant = std::make_shared<ngraph::opset3::Constant>(ngraph::element::f32, ngraph::Shape{1, 2},
-            std::vector<float>{1, 2});
+        auto constant = std::make_shared<ngraph::opset3::Constant>(ngraph::element::f32,
+                                                                   ngraph::Shape{1, 2},
+                                                                   std::vector<float>{1, 2});
         auto read_value = std::make_shared<ngraph::opset3::ReadValue>(constant, "variable_id");
         auto assign = std::make_shared<ngraph::opset3::Assign>(read_value, "variable_id");
@@ -1469,7 +1476,7 @@ TEST(CNNNGraphImplTests, AddOutputToExperimentalOp) {
 }
 TEST(CNNNGraphImplTests, SaveOriginalResultNameForMultiOutputOp) {
     std::string model = R"V0G0N(
 <net name="Activation" version="10">
     <layers>
         <layer id="0" name="in0" type="Parameter" version="opset1">
@@ -1630,7 +1637,7 @@ TEST(CNNNGraphImplTests, SaveOriginalResultNameForMultiOutputOp) {
     data->allocate();
     {
         auto lockData = data->buffer();
-        float *dataPtr = lockData.as<float*>();
+        float* dataPtr = lockData.as<float*>();
         for (size_t i = 0; i < 4; ++i) {
             dataPtr[i] = 0;
@@ -1657,7 +1664,7 @@ TEST(CNNNGraphImplTests, SaveOriginalResultNameForMultiOutputOp) {
 }
 TEST(CNNNGraphImplTests, SaveOriginalResultNameForMultiOutputOpOpset6) {
     std::string model = R"V0G0N(
 <net name="Activation" version="10">
     <layers>
         <layer id="0" name="in0" type="Parameter" version="opset1">
@@ -1818,7 +1825,7 @@ TEST(CNNNGraphImplTests, SaveOriginalResultNameForMultiOutputOpOpset6) {
     data->allocate();
     {
         auto lockData = data->buffer();
-        float *dataPtr = lockData.as<float*>();
+        float* dataPtr = lockData.as<float*>();
         for (size_t i = 0; i < 4; ++i) {
             dataPtr[i] = 0;
@@ -1856,8 +1863,14 @@ TEST(CNNNGraphImplTests, CheckUniqueNames) {
         auto max_output_boxes_per_class = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
         auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
         auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
-        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold,
-                                                                       ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER, true);
+        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(
+            boxes,
+            scores,
+            max_output_boxes_per_class,
+            iou_threshold,
+            score_threshold,
+            ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
+            true);
         auto result1 = std::make_shared<ngraph::opset5::Result>(nms->output(0));
         result1->set_friendly_name("result1");
@@ -1866,7 +1879,8 @@ TEST(CNNNGraphImplTests, CheckUniqueNames) {
         auto result3 = std::make_shared<ngraph::opset5::Result>(nms->output(2));
         result3->set_friendly_name("result3");
         nms->set_friendly_name("nms");
-        f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2, result3}, ngraph::ParameterVector{boxes, scores});
+        f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2, result3},
+                                               ngraph::ParameterVector{boxes, scores});
     }
     ASSERT_NO_THROW(InferenceEngine::CNNNetwork{f});
@@ -1882,8 +1896,14 @@ TEST(CNNNGraphImplTests, CheckNonUniqueParameterName) {
         auto max_output_boxes_per_class = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
         auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
         auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
-        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold,
-                                                                       ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER, true);
+        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(
+            boxes,
+            scores,
+            max_output_boxes_per_class,
+            iou_threshold,
+            score_threshold,
+            ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
+            true);
         auto result1 = std::make_shared<ngraph::opset5::Result>(nms->output(0));
         result1->set_friendly_name("result1");
@@ -1892,7 +1912,8 @@ TEST(CNNNGraphImplTests, CheckNonUniqueParameterName) {
         auto result3 = std::make_shared<ngraph::opset5::Result>(nms->output(2));
         result3->set_friendly_name("result3");
         nms->set_friendly_name("nms");
-        f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2, result3}, ngraph::ParameterVector{boxes, scores});
+        f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2, result3},
+                                               ngraph::ParameterVector{boxes, scores});
     }
     ASSERT_THROW(InferenceEngine::CNNNetwork{f}, InferenceEngine::Exception);
@@ -1908,8 +1929,14 @@ TEST(CNNNGraphImplTests, CheckNonUniqueResultName) {
         auto max_output_boxes_per_class = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
         auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
         auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
-        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold,
-                                                                       ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER, true);
+        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(
+            boxes,
+            scores,
+            max_output_boxes_per_class,
+            iou_threshold,
+            score_threshold,
+            ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
+            true);
         auto result1 = std::make_shared<ngraph::opset5::Result>(nms->output(0));
         result1->set_friendly_name("result1");
@@ -1918,7 +1945,8 @@ TEST(CNNNGraphImplTests, CheckNonUniqueResultName) {
         auto result3 = std::make_shared<ngraph::opset5::Result>(nms->output(2));
         result3->set_friendly_name("result3");
         nms->set_friendly_name("nms");
-        f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2, result3}, ngraph::ParameterVector{boxes, scores});
+        f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2, result3},
+                                               ngraph::ParameterVector{boxes, scores});
     }
     ASSERT_THROW(InferenceEngine::CNNNetwork{f}, InferenceEngine::Exception);
@@ -1934,18 +1962,24 @@ TEST(CNNNGraphImplTests, CheckNonUniqueNewResultName) {
         auto max_output_boxes_per_class = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
         auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
         auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
-        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold,
-                                                                       ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER, true);
+        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(
+            boxes,
+            scores,
+            max_output_boxes_per_class,
+            iou_threshold,
+            score_threshold,
+            ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
+            true);
         auto result1 = std::make_shared<ngraph::opset5::Result>(nms->output(0));
         result1->set_friendly_name("result1");
         auto result3 = std::make_shared<ngraph::opset5::Result>(nms->output(2));
         result3->set_friendly_name("result3");
         nms->set_friendly_name("nms");
-        f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result3}, ngraph::ParameterVector{boxes, scores});
+        f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result3},
+                                               ngraph::ParameterVector{boxes, scores});
     }
     CNNNetwork cnnNet;
     ASSERT_NO_THROW(cnnNet = InferenceEngine::CNNNetwork{f});
     ASSERT_THROW(cnnNet.addOutput("nms", 1), InferenceEngine::Exception);
@@ -1960,7 +1994,8 @@ TEST(CNNNGraphImplTests, RemoveLoopDanglingParametersIfConcatEmptyTensor) {
     auto a = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{2, 2});
     auto ai = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{2, 2});
     auto b = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{2});
-    auto b_broadcast = std::make_shared<ov::opset8::Broadcast>(b, ov::opset8::Constant::create(ngraph::element::i64, {2}, {0, 2}));
+    auto b_broadcast =
+        std::make_shared<ov::opset8::Broadcast>(b, ov::opset8::Constant::create(ngraph::element::i64, {2}, {0, 2}));
     auto bi = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{0, 2});
     {
         auto concat = std::make_shared<ov::opset8::Concat>(ov::NodeVector{ai, bi}, 0);


@@ -2,27 +2,27 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <ie_core.hpp>
-#include <ie_plugin_config.hpp>
+#include <gtest/gtest.h>
 #include <ie_extension.h>
-#include "openvino/util/file_util.hpp"
-#include <ngraph_functions/subgraph_builders.hpp>
-#include <functional_test_utils/test_model/test_model.hpp>
+
+#include <atomic>
+#include <chrono>
 #include <common_test_utils/file_utils.hpp>
 #include <common_test_utils/test_assertions.hpp>
-#include <gtest/gtest.h>
-#include <thread>
-#include <atomic>
-#include <mutex>
-#include <chrono>
 #include <fstream>
+#include <functional_test_utils/test_model/test_model.hpp>
+#include <ie_core.hpp>
+#include <ie_plugin_config.hpp>
+#include <mutex>
+#include <ngraph_functions/subgraph_builders.hpp>
+#include <thread>
+
+#include "openvino/util/file_util.hpp"
 #ifdef __GLIBC__
-#include <gnu/libc-version.h>
-#if __GLIBC_MINOR__ >= 34
-#define OV_TEST_GLIBC_VERSION_GREATER_2_34
-#endif
+#    include <gnu/libc-version.h>
+#    if __GLIBC_MINOR__ >= 34
+#        define OV_TEST_GLIBC_VERSION_GREATER_2_34
+#    endif
 #endif
 class CoreThreadingTests : public ::testing::Test {
@@ -38,7 +38,8 @@ public:
         testName += testInfo->name();
         testName = std::to_string(std::hash<std::string>()(testName));
         std::stringstream ss;
-        auto ts = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now().time_since_epoch());
+        auto ts = std::chrono::duration_cast<std::chrono::microseconds>(
+            std::chrono::high_resolution_clock::now().time_since_epoch());
         ss << testName << "_" << std::this_thread::get_id() << "_" << ts.count();
         testName = ss.str();
         return testName;
@ -59,27 +60,27 @@ public:
const unsigned int threadsNum = 8) { const unsigned int threadsNum = 8) {
std::vector<std::thread> threads(threadsNum); std::vector<std::thread> threads(threadsNum);
for (auto & thread : threads) { for (auto& thread : threads) {
thread = std::thread([&](){ thread = std::thread([&]() {
for (unsigned int i = 0; i < iterations; ++i) { for (unsigned int i = 0; i < iterations; ++i) {
func(); func();
} }
}); });
} }
for (auto & thread : threads) { for (auto& thread : threads) {
if (thread.joinable()) if (thread.joinable())
thread.join(); thread.join();
} }
} }
void safeAddExtension(InferenceEngine::Core & ie) { void safeAddExtension(InferenceEngine::Core& ie) {
try { try {
auto extension = std::make_shared<InferenceEngine::Extension>( auto extension = std::make_shared<InferenceEngine::Extension>(
ov::util::make_plugin_library_name(CommonTestUtils::getExecutableDirectory(), ov::util::make_plugin_library_name(CommonTestUtils::getExecutableDirectory(),
std::string("template_extension") + IE_BUILD_POSTFIX)); std::string("template_extension") + IE_BUILD_POSTFIX));
ie.AddExtension(extension); ie.AddExtension(extension);
} catch (const InferenceEngine::Exception & ex) { } catch (const InferenceEngine::Exception& ex) {
ASSERT_STR_CONTAINS(ex.what(), "name: custom_opset. Opset"); ASSERT_STR_CONTAINS(ex.what(), "name: custom_opset. Opset");
} }
} }
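Alongside the reflowed lambdas, the pass rebinds reference and pointer declarators to the type rather than the name, as in the runParallel and safeAddExtension signatures above. A self-contained sketch of the rule; the function and names are illustrative, not part of the commit:

#include <vector>

// was: static int sum(const std::vector<int> & values)
static int sum(const std::vector<int>& values) {
    int total = 0;
    for (auto& value : values)  // was: for (auto & value : values)
        total += value;
    return total;
}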
@ -89,12 +90,13 @@ public:
TEST_F(CoreThreadingTests, SetConfigPluginDoesNotExist) { TEST_F(CoreThreadingTests, SetConfigPluginDoesNotExist) {
InferenceEngine::Core ie; InferenceEngine::Core ie;
std::map<std::string, std::string> localConfig = { std::map<std::string, std::string> localConfig = {
{ CONFIG_KEY(PERF_COUNT), InferenceEngine::PluginConfigParams::YES } {CONFIG_KEY(PERF_COUNT), InferenceEngine::PluginConfigParams::YES}};
};
runParallel([&] () { runParallel(
ie.SetConfig(localConfig); [&]() {
}, 10000); ie.SetConfig(localConfig);
},
10000);
} }
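The SetConfig test above shows the other recurring change in this file: when a call receives a lambda whose body spans multiple lines, clang-format expands the body and drops any trailing arguments onto their own lines. A stand-alone sketch of the shape, assuming a stand-in helper (runParallelSketch is not the fixture's runParallel):

#include <functional>

static void runParallelSketch(const std::function<void()>& func, unsigned iterations) {
    for (unsigned i = 0; i < iterations; ++i)
        func();
}

static void formatExample() {
    int counter = 0;
    // was: runParallelSketch([&]() { ++counter; }, 10000);
    runParallelSketch(
        [&]() {
            ++counter;
        },
        10000);
}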
// TODO: CVS-68982 // TODO: CVS-68982
@ -104,13 +106,16 @@ TEST_F(CoreThreadingTests, SetConfigPluginDoesNotExist) {
TEST_F(CoreThreadingTests, RegisterPlugin) { TEST_F(CoreThreadingTests, RegisterPlugin) {
InferenceEngine::Core ie; InferenceEngine::Core ie;
std::atomic<int> index{0}; std::atomic<int> index{0};
runParallel([&] () { runParallel(
const std::string deviceName = std::to_string(index++); [&]() {
ie.RegisterPlugin(ov::util::make_plugin_library_name(CommonTestUtils::getExecutableDirectory(), const std::string deviceName = std::to_string(index++);
std::string("mock_engine") + IE_BUILD_POSTFIX), deviceName); ie.RegisterPlugin(ov::util::make_plugin_library_name(CommonTestUtils::getExecutableDirectory(),
ie.GetVersions(deviceName); std::string("mock_engine") + IE_BUILD_POSTFIX),
ie.UnregisterPlugin(deviceName); deviceName);
}, 4000); ie.GetVersions(deviceName);
ie.UnregisterPlugin(deviceName);
},
4000);
} }
// tested function: RegisterPlugins // tested function: RegisterPlugins
@ -118,7 +123,7 @@ TEST_F(CoreThreadingTests, RegisterPlugins) {
InferenceEngine::Core ie; InferenceEngine::Core ie;
std::atomic<unsigned int> index{0}; std::atomic<unsigned int> index{0};
auto getPluginXml = [&] () -> std::tuple<std::string, std::string> { auto getPluginXml = [&]() -> std::tuple<std::string, std::string> {
std::string indexStr = std::to_string(index++); std::string indexStr = std::to_string(index++);
std::string pluginsXML = "test_plugins" + indexStr + ".xml"; std::string pluginsXML = "test_plugins" + indexStr + ".xml";
std::ofstream file(pluginsXML); std::ofstream file(pluginsXML);
@ -140,38 +145,42 @@ TEST_F(CoreThreadingTests, RegisterPlugins) {
return std::tie(pluginsXML, indexStr); return std::tie(pluginsXML, indexStr);
}; };
runParallel([&] () { runParallel(
std::string fileName, deviceName; [&]() {
std::tie(fileName, deviceName) = getPluginXml(); std::string fileName, deviceName;
ie.RegisterPlugins(fileName); std::tie(fileName, deviceName) = getPluginXml();
ie.GetVersions(deviceName); ie.RegisterPlugins(fileName);
ASSERT_EQ(0, std::remove(fileName.c_str())); ie.GetVersions(deviceName);
}, 1000); ASSERT_EQ(0, std::remove(fileName.c_str()));
},
1000);
} }
#endif // !OPENVINO_STATIC_LIBRARY #endif // !OPENVINO_STATIC_LIBRARY
// tested function: GetAvailableDevices, UnregisterPlugin // tested function: GetAvailableDevices, UnregisterPlugin
// TODO: some initialization (e.g. thread/dlopen) sporadically fails during such stress-test scenario // TODO: some initialization (e.g. thread/dlopen) sporadically fails during such stress-test scenario
TEST_F(CoreThreadingTests, GetAvailableDevices) { TEST_F(CoreThreadingTests, GetAvailableDevices) {
#ifndef OV_TEST_GLIBC_VERSION_GREATER_2_34 #ifndef OV_TEST_GLIBC_VERSION_GREATER_2_34
GTEST_SKIP(); GTEST_SKIP();
#endif #endif
InferenceEngine::Core ie; InferenceEngine::Core ie;
runParallel([&] () { runParallel(
std::vector<std::string> devices = ie.GetAvailableDevices(); [&]() {
std::vector<std::string> devices = ie.GetAvailableDevices();
// unregister all the devices // unregister all the devices
for (auto && deviceName : devices) { for (auto&& deviceName : devices) {
try { try {
ie.UnregisterPlugin(deviceName); ie.UnregisterPlugin(deviceName);
} catch (const InferenceEngine::Exception & ex) { } catch (const InferenceEngine::Exception& ex) {
// if several threads unload plugin at once, the first thread does this // if several threads unload plugin at once, the first thread does this
// while all others will throw an exception that plugin is not registered // while all others will throw an exception that plugin is not registered
ASSERT_STR_CONTAINS(ex.what(), "name is not registered in the"); ASSERT_STR_CONTAINS(ex.what(), "name is not registered in the");
}
} }
} },
}, 30); 30);
} }
#if defined(ENABLE_OV_IR_FRONTEND) #if defined(ENABLE_OV_IR_FRONTEND)
@ -180,9 +189,12 @@ TEST_F(CoreThreadingTests, ReadNetwork) {
InferenceEngine::Core ie; InferenceEngine::Core ie;
auto network = ie.ReadNetwork(modelName, weightsName); auto network = ie.ReadNetwork(modelName, weightsName);
runParallel([&] () { runParallel(
safeAddExtension(ie); [&]() {
(void)ie.ReadNetwork(modelName, weightsName); safeAddExtension(ie);
}, 100, 12); (void)ie.ReadNetwork(modelName, weightsName);
},
100,
12);
} }
#endif //defined(ENABLE_OV_IR_FRONTEND) #endif // defined(ENABLE_OV_IR_FRONTEND)
@ -2,9 +2,8 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <gtest/gtest.h>
#include <gmock/gmock-spec-builders.h> #include <gmock/gmock-spec-builders.h>
#include <gtest/gtest.h>
#include <ie_data.h> #include <ie_data.h>
using namespace ::testing; using namespace ::testing;
@ -22,9 +21,9 @@ protected:
class BlockingDescTest : public BlockingDesc { class BlockingDescTest : public BlockingDesc {
public: public:
BlockingDescTest(const SizeVector &blocked_dims, const SizeVector &order) : BlockingDesc(blocked_dims, order) {} BlockingDescTest(const SizeVector& blocked_dims, const SizeVector& order) : BlockingDesc(blocked_dims, order) {}
void fillDescTest(const SizeVector &blocked_dims, const SizeVector &order) { void fillDescTest(const SizeVector& blocked_dims, const SizeVector& order) {
fillDesc(blocked_dims, order); fillDesc(blocked_dims, order);
} }
}; };
@ -93,7 +92,7 @@ TEST_F(DataTests, canSetNotEmptyDimsForBlockingDescNCHW) {
} }
TEST_F(DataTests, setPrecision) { TEST_F(DataTests, setPrecision) {
Data data(data_name, { Precision::FP32, emptyDims, Layout::NCHW }); Data data(data_name, {Precision::FP32, emptyDims, Layout::NCHW});
EXPECT_EQ(Precision::FP32, data.getPrecision()); EXPECT_EQ(Precision::FP32, data.getPrecision());
EXPECT_EQ(Precision::FP32, data.getTensorDesc().getPrecision()); EXPECT_EQ(Precision::FP32, data.getTensorDesc().getPrecision());
@ -3,9 +3,11 @@
// //
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include "debug.h"
#include <string> #include <string>
#include "debug.h"
using DebugTests = ::testing::Test; using DebugTests = ::testing::Test;
TEST_F(DebugTests, tolowerWorksWithEmptyString) { TEST_F(DebugTests, tolowerWorksWithEmptyString) {
@ -3,6 +3,7 @@
// //
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <cpp/ie_executable_network.hpp> #include <cpp/ie_executable_network.hpp>
using namespace ::testing; using namespace ::testing;
@ -2,11 +2,11 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <string>
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <ie_common.h>
#include <ie_precision.hpp> #include <ie_precision.hpp>
#include <ie_common.h> #include <string>
using Precision = InferenceEngine::Precision; using Precision = InferenceEngine::Precision;
@ -30,7 +30,7 @@ TEST_F(PrecisionTests, ShowsCorrectPrecisionNames) {
EXPECT_STREQ(Precision(Precision::U8).name(), "U8"); EXPECT_STREQ(Precision(Precision::U8).name(), "U8");
EXPECT_STREQ(Precision(Precision::MIXED).name(), "MIXED"); EXPECT_STREQ(Precision(Precision::MIXED).name(), "MIXED");
EXPECT_STREQ(Precision(Precision::UNSPECIFIED).name(), "UNSPECIFIED"); EXPECT_STREQ(Precision(Precision::UNSPECIFIED).name(), "UNSPECIFIED");
EXPECT_STREQ(Precision(static_cast<Precision::ePrecision >(-3)).name(), "UNSPECIFIED"); EXPECT_STREQ(Precision(static_cast<Precision::ePrecision>(-3)).name(), "UNSPECIFIED");
EXPECT_STREQ(Precision(1, "Custom Name").name(), "Custom Name"); EXPECT_STREQ(Precision(1, "Custom Name").name(), "Custom Name");
} }
@ -94,7 +94,7 @@ TEST_F(PrecisionTests, is_float) {
EXPECT_FALSE(Precision(Precision::U8).is_float()); EXPECT_FALSE(Precision(Precision::U8).is_float());
EXPECT_FALSE(Precision(Precision::MIXED).is_float()); EXPECT_FALSE(Precision(Precision::MIXED).is_float());
EXPECT_FALSE(Precision(10).is_float()); EXPECT_FALSE(Precision(10).is_float());
EXPECT_FALSE(Precision(static_cast<Precision::ePrecision >(-3)).is_float()); EXPECT_FALSE(Precision(static_cast<Precision::ePrecision>(-3)).is_float());
EXPECT_FALSE(Precision(Precision::UNSPECIFIED).is_float()); EXPECT_FALSE(Precision(Precision::UNSPECIFIED).is_float());
} }
@ -115,7 +115,7 @@ TEST_F(PrecisionTests, constructFromSTR) {
EXPECT_EQ(Precision(Precision::U4), Precision::FromStr("U4")); EXPECT_EQ(Precision(Precision::U4), Precision::FromStr("U4"));
EXPECT_EQ(Precision(Precision::U8), Precision::FromStr("U8")); EXPECT_EQ(Precision(Precision::U8), Precision::FromStr("U8"));
EXPECT_EQ(Precision(Precision::MIXED), Precision::FromStr("MIXED")); EXPECT_EQ(Precision(Precision::MIXED), Precision::FromStr("MIXED"));
EXPECT_EQ(Precision(static_cast<Precision::ePrecision >(-3)), Precision::FromStr("UNSPECIFIED")); EXPECT_EQ(Precision(static_cast<Precision::ePrecision>(-3)), Precision::FromStr("UNSPECIFIED"));
EXPECT_EQ(Precision(Precision::UNSPECIFIED), Precision::FromStr("UNSPECIFIED")); EXPECT_EQ(Precision(Precision::UNSPECIFIED), Precision::FromStr("UNSPECIFIED"));
} }
@ -141,14 +141,13 @@ TEST_F(PrecisionTests, canCompareCustomPrecisions) {
EXPECT_TRUE(p5 == p); EXPECT_TRUE(p5 == p);
} }
TEST_F(PrecisionTests, canUseInIfs) { TEST_F(PrecisionTests, canUseInIfs) {
Precision p; Precision p;
EXPECT_TRUE(!p); EXPECT_TRUE(!p);
p = Precision::FP32; p = Precision::FP32;
EXPECT_FALSE(!p); EXPECT_FALSE(!p);
EXPECT_TRUE(p); EXPECT_TRUE(p);
p = Precision(static_cast<Precision::ePrecision >(-3)); p = Precision(static_cast<Precision::ePrecision>(-3));
EXPECT_TRUE(!p); EXPECT_TRUE(!p);
} }
@ -3,7 +3,9 @@
// //
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <ie_core.hpp> #include <ie_core.hpp>
#include "ngraph/ops.hpp" #include "ngraph/ops.hpp"
using namespace ::testing; using namespace ::testing;
@ -215,9 +217,9 @@ class LocaleTests : public ::testing::Test {
protected: protected:
void SetUp() override { void SetUp() override {
originalLocale = setlocale(LC_ALL, nullptr); originalLocale = setlocale(LC_ALL, nullptr);
} }
void TearDown() override { void TearDown() override {
setlocale(LC_ALL, originalLocale.c_str()); setlocale(LC_ALL, originalLocale.c_str());
} }
@ -231,7 +233,7 @@ protected:
auto funcs = net.getFunction(); auto funcs = net.getFunction();
for (const auto & op : funcs->get_ops()) { for (const auto& op : funcs->get_ops()) {
if (!isLSTM) { if (!isLSTM) {
if (op->get_friendly_name() == "output") { if (op->get_friendly_name() == "output") {
const auto roi = std::dynamic_pointer_cast<ngraph::op::v3::ROIAlign>(op); const auto roi = std::dynamic_pointer_cast<ngraph::op::v3::ROIAlign>(op);
@ -285,4 +287,4 @@ TEST_F(LocaleTests, DISABLED_WithUSLocaleCPP) {
testBody(); testBody();
std::locale::global(prev); std::locale::global(prev);
} }
#endif //defined(ENABLE_OV_IR_FRONTEND) #endif // defined(ENABLE_OV_IR_FRONTEND)
@ -4,14 +4,13 @@
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <string>
#include <memory>
#include <map>
#include <ngraph/opsets/opset4.hpp>
#include <ngraph/function.hpp>
#include <common_test_utils/ngraph_test_utils.hpp> #include <common_test_utils/ngraph_test_utils.hpp>
#include <map>
#include <memory>
#include <ngraph/function.hpp>
#include <ngraph/opsets/opset4.hpp>
#include <ngraph/pass/manager.hpp> #include <ngraph/pass/manager.hpp>
#include <string>
#include <transformations/init_node_info.hpp> #include <transformations/init_node_info.hpp>
#include <transformations/smart_reshape/matmul_sr.hpp> #include <transformations/smart_reshape/matmul_sr.hpp>
@ -32,7 +31,8 @@ struct ReshapeMatMulTestCase {
reshape_map new_shapes; reshape_map new_shapes;
}; };
class SmartReshapeMatMulTests : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<std::tuple<ReshapeMatMulTestCase>> { class SmartReshapeMatMulTests : public CommonTestUtils::TestsCommon,
public testing::WithParamInterface<std::tuple<ReshapeMatMulTestCase>> {
public: public:
void SetUp() override { void SetUp() override {
const auto& test_case = std::get<0>(GetParam()); const auto& test_case = std::get<0>(GetParam());
@ -44,15 +44,21 @@ public:
auto input_B = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, test_case.B_shape); auto input_B = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, test_case.B_shape);
input_B->set_friendly_name("input_B"); input_B->set_friendly_name("input_B");
auto reshape_pattern = std::make_shared<ngraph::opset4::Constant>( auto reshape_pattern =
ngraph::element::i64, ngraph::Shape{test_case.reshape_pattern.size()}, test_case.reshape_pattern); std::make_shared<ngraph::opset4::Constant>(ngraph::element::i64,
ngraph::Shape{test_case.reshape_pattern.size()},
test_case.reshape_pattern);
reshape_pattern->set_friendly_name("reshape_pattern"); reshape_pattern->set_friendly_name("reshape_pattern");
auto reshape = std::make_shared<ngraph::opset4::Reshape>(test_case.reshape_is_A_input ? input_A : input_B, reshape_pattern, true); auto reshape = std::make_shared<ngraph::opset4::Reshape>(test_case.reshape_is_A_input ? input_A : input_B,
reshape_pattern,
true);
reshape->set_friendly_name("reshape"); reshape->set_friendly_name("reshape");
auto mat_mul = std::make_shared<ngraph::opset4::MatMul>(test_case.reshape_is_A_input ? reshape->output(0) : input_A->output(0), auto mat_mul = std::make_shared<ngraph::opset4::MatMul>(
test_case.reshape_is_A_input ? input_B->output(0) : reshape->output(0), test_case.reshape_is_A_input ? reshape->output(0) : input_A->output(0),
test_case.transpose_a, test_case.transpose_b); test_case.reshape_is_A_input ? input_B->output(0) : reshape->output(0),
test_case.transpose_a,
test_case.transpose_b);
reshape->set_friendly_name("matmul"); reshape->set_friendly_name("matmul");
auto result = std::make_shared<ngraph::op::Result>(mat_mul); auto result = std::make_shared<ngraph::op::Result>(mat_mul);
@ -62,15 +68,17 @@ public:
} }
InferenceEngine::details::CNNNetworkNGraphImpl network(ngraph); InferenceEngine::details::CNNNetworkNGraphImpl network(ngraph);
const auto & resp = network.reshape(test_case.new_shapes, nullptr); const auto& resp = network.reshape(test_case.new_shapes, nullptr);
ASSERT_EQ(resp, StatusCode::OK); ASSERT_EQ(resp, StatusCode::OK);
} }
}; };
TEST_P(SmartReshapeMatMulTests, ReshapeMatMul) { TEST_P(SmartReshapeMatMulTests, ReshapeMatMul) {}
}
INSTANTIATE_TEST_SUITE_P(NGraph, SmartReshapeMatMulTests, testing::Values( INSTANTIATE_TEST_SUITE_P(
NGraph,
SmartReshapeMatMulTests,
testing::Values(
ReshapeMatMulTestCase{true, {1, 20, 30}, {30, 40}, {20, -1}, false, false, {{"input_A", {2, 20, 30}}}}, ReshapeMatMulTestCase{true, {1, 20, 30}, {30, 40}, {20, -1}, false, false, {{"input_A", {2, 20, 30}}}},
ReshapeMatMulTestCase{true, {1, 20, 30}, {40, 30}, {20, -1}, false, true, {{"input_A", {2, 20, 30}}}}, ReshapeMatMulTestCase{true, {1, 20, 30}, {40, 30}, {20, -1}, false, true, {{"input_A", {2, 20, 30}}}},
ReshapeMatMulTestCase{true, {1, 30, 20}, {30, 20}, {-1, 20}, true, false, {{"input_A", {2, 30, 20}}}}, ReshapeMatMulTestCase{true, {1, 30, 20}, {30, 20}, {-1, 20}, true, false, {{"input_A", {2, 30, 20}}}},
@ -273,13 +281,15 @@ TEST(SmartReshapeTransposeMatMulTests, TransposeBothMatMulWithAttrFuse) {
std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr); std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
{ {
auto data_A = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::Shape{2, 3, 2}); auto data_A = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::Shape{2, 3, 2});
auto split_A = std::make_shared<ngraph::opset4::VariadicSplit>(data_A, auto split_A = std::make_shared<ngraph::opset4::VariadicSplit>(
ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}), data_A,
ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 1})); ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}),
ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 1}));
auto data_B = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::Shape{2, 3, 5}); auto data_B = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::Shape{2, 3, 5});
auto split_B = std::make_shared<ngraph::opset4::VariadicSplit>(data_B, auto split_B = std::make_shared<ngraph::opset4::VariadicSplit>(
ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}), data_B,
ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 1})); ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}),
ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 1}));
auto order = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {0, 2, 1}); auto order = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {0, 2, 1});
auto transpose_A = std::make_shared<ngraph::opset4::Transpose>(split_A->output(0), order); auto transpose_A = std::make_shared<ngraph::opset4::Transpose>(split_A->output(0), order);
auto transpose_B = std::make_shared<ngraph::opset4::Transpose>(split_B->output(1), order); auto transpose_B = std::make_shared<ngraph::opset4::Transpose>(split_B->output(1), order);
@ -294,13 +304,15 @@ TEST(SmartReshapeTransposeMatMulTests, TransposeBothMatMulWithAttrFuse) {
} }
{ {
auto data_A = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::Shape{2, 3, 2}); auto data_A = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::Shape{2, 3, 2});
auto split_A = std::make_shared<ngraph::opset4::VariadicSplit>(data_A, auto split_A = std::make_shared<ngraph::opset4::VariadicSplit>(
ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}), data_A,
ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 1})); ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}),
ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 1}));
auto data_B = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::Shape{2, 3, 5}); auto data_B = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::Shape{2, 3, 5});
auto split_B = std::make_shared<ngraph::opset4::VariadicSplit>(data_B, auto split_B = std::make_shared<ngraph::opset4::VariadicSplit>(
ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}), data_B,
ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 1})); ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}),
ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 1}));
auto matmul = std::make_shared<ngraph::opset4::MatMul>(split_A->output(0), split_B->output(1), true, false); auto matmul = std::make_shared<ngraph::opset4::MatMul>(split_A->output(0), split_B->output(1), true, false);
f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{matmul}, ngraph::ParameterVector{data_A, data_B}); f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{matmul}, ngraph::ParameterVector{data_A, data_B});
} }
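The VariadicSplit and MatMul constructions above illustrate how over-long factory calls are now broken: one argument per line, aligned under the opening parenthesis, or after a break straight after it when even that does not fit. A compilable sketch with a stand-in type; the class, names, and argument meanings are illustrative only:

#include <memory>
#include <string>

struct OpStub {  // stand-in for an op taking several constructor arguments
    OpStub(const std::string& name, int axis, bool transpose_a, bool transpose_b) {}
};

static std::shared_ptr<OpStub> makeExample() {
    // Pretend the single-line form exceeded the column limit.
    return std::make_shared<OpStub>("matmul",
                                    0,
                                    true,
                                    false);
}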
@ -2,33 +2,31 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <cpp/ie_cnn_network.h>
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <cpp/ie_cnn_network.h>
#include <string>
#include <sstream>
#include <fstream>
#include <algorithm> #include <algorithm>
#include <vector> #include <fstream>
#include <memory> #include <ie_core.hpp>
#include <map> #include <map>
#include <memory>
#include <ngraph/function.hpp> #include <ngraph/function.hpp>
#include <ngraph/op/interpolate.hpp> #include <ngraph/graph_util.hpp>
#include <ngraph/op/constant.hpp> #include <ngraph/op/constant.hpp>
#include <ngraph/op/parameter.hpp> #include <ngraph/op/interpolate.hpp>
#include <ngraph/op/op.hpp> #include <ngraph/op/op.hpp>
#include <ngraph/op/parameter.hpp>
#include <ngraph/op/relu.hpp> #include <ngraph/op/relu.hpp>
#include <ngraph/op/result.hpp> #include <ngraph/op/result.hpp>
#include <ngraph/opsets/opset.hpp> #include <ngraph/opsets/opset.hpp>
#include <ngraph/graph_util.hpp> #include <sstream>
#include <string>
#include <vector>
#include <ie_core.hpp> #include "common_test_utils/common_utils.hpp"
#include "common_test_utils/test_common.hpp"
#include "common_test_utils/data_utils.hpp" #include "common_test_utils/data_utils.hpp"
#include "common_test_utils/file_utils.hpp" #include "common_test_utils/file_utils.hpp"
#include "common_test_utils/common_utils.hpp" #include "common_test_utils/test_common.hpp"
#include "ie_common.h" #include "ie_common.h"
#include "openvino/core/partial_shape.hpp" #include "openvino/core/partial_shape.hpp"
#include "openvino/core/shape.hpp" #include "openvino/core/shape.hpp"
@ -156,13 +154,15 @@ TEST_F(NGraphReshapeTests, CNNReshapeSpatialReLUWithoutCloneFunction) {
ASSERT_EQ(cnnNetwork.getInputsInfo()["data"]->getInputData()->getDims(), (SizeVector{1, 3, 25, 25})); ASSERT_EQ(cnnNetwork.getInputsInfo()["data"]->getInputData()->getDims(), (SizeVector{1, 3, 25, 25}));
} }
class CustomTestOp: public ngraph::op::Op { class CustomTestOp : public ngraph::op::Op {
public: public:
OPENVINO_OP("CustomTestLayer", "test_extension"); OPENVINO_OP("CustomTestLayer", "test_extension");
CustomTestOp() = default; CustomTestOp() = default;
CustomTestOp(const ngraph::Output<ngraph::Node>& arg, bool test1, int64_t test2): CustomTestOp(const ngraph::Output<ngraph::Node>& arg, bool test1, int64_t test2)
Op({arg}), test1(test1), test2(test2) { : Op({arg}),
test1(test1),
test2(test2) {
constructor_validate_and_infer_types(); constructor_validate_and_infer_types();
} }
@ -352,15 +352,15 @@ TEST_F(NGraphReshapeTests, ReshapeNewIRWithNewExtension2) {
SizeVector outDims = output["activation"]->getTensorDesc().getDims(); SizeVector outDims = output["activation"]->getTensorDesc().getDims();
ASSERT_EQ(outDims, refAfterReshape); ASSERT_EQ(outDims, refAfterReshape);
} }
#endif //defined(ENABLE_OV_IR_FRONTEND) #endif // defined(ENABLE_OV_IR_FRONTEND)
class BadExtension : public InferenceEngine::IExtension { class BadExtension : public InferenceEngine::IExtension {
public: public:
BadExtension() {} BadExtension() {}
void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override {}; void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override{};
void Unload() noexcept override {}; void Unload() noexcept override{};
std::map<std::string, ngraph::OpSet> getOpSets() override { std::map<std::string, ngraph::OpSet> getOpSets() override {
static std::map<std::string, ngraph::OpSet> opsets; static std::map<std::string, ngraph::OpSet> opsets;
@ -395,8 +395,8 @@ TEST_F(NGraphReshapeTests, TestInterpParameters) {
auto interp = std::make_shared<ngraph::op::v0::Interpolate>(inp, out_shape, attrs); auto interp = std::make_shared<ngraph::op::v0::Interpolate>(inp, out_shape, attrs);
auto output = std::make_shared<ngraph::op::Result>(interp); auto output = std::make_shared<ngraph::op::Result>(interp);
auto ngraph_function = std::make_shared<ngraph::Function>(ngraph::ResultVector{output}, auto ngraph_function =
ngraph::ParameterVector{inp}); std::make_shared<ngraph::Function>(ngraph::ResultVector{output}, ngraph::ParameterVector{inp});
CNNNetwork cnn(ngraph_function); CNNNetwork cnn(ngraph_function);
std::map<std::string, InferenceEngine::SizeVector> inShape; std::map<std::string, InferenceEngine::SizeVector> inShape;
@ -3,6 +3,7 @@
// //
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <openvino/core/except.hpp> #include <openvino/core/except.hpp>
#include <openvino/runtime/compiled_model.hpp> #include <openvino/runtime/compiled_model.hpp>
@ -353,4 +353,4 @@ TEST_F(OVExtensionTests, load_old_extension) {
TEST_F(OVExtensionTests, load_incorrect_extension) { TEST_F(OVExtensionTests, load_incorrect_extension) {
EXPECT_THROW(core.add_extension(getIncorrectExtensionPath()), ov::Exception); EXPECT_THROW(core.add_extension(getIncorrectExtensionPath()), ov::Exception);
} }
#endif //defined(ENABLE_OV_IR_FRONTEND) #endif // defined(ENABLE_OV_IR_FRONTEND)
@ -6,9 +6,9 @@
#include <cpp/ie_infer_request.hpp> #include <cpp/ie_infer_request.hpp>
#include <openvino/core/except.hpp> #include <openvino/core/except.hpp>
#include <openvino/runtime/compiled_model.hpp>
#include <openvino/runtime/infer_request.hpp> #include <openvino/runtime/infer_request.hpp>
#include <openvino/runtime/remote_tensor.hpp> #include <openvino/runtime/remote_tensor.hpp>
#include <openvino/runtime/compiled_model.hpp>
using namespace ::testing; using namespace ::testing;
using namespace std; using namespace std;
@ -85,7 +85,6 @@ TEST(InferRequestOVTests, throwsOnUninitializedSetRemoteTensor) {
ASSERT_THROW(req.set_tensor(ov::Output<const ov::Node>(), remote_tensor), ov::Exception); ASSERT_THROW(req.set_tensor(ov::Output<const ov::Node>(), remote_tensor), ov::Exception);
} }
TEST(InferRequestOVTests, throwsOnGetCompiledModel) { TEST(InferRequestOVTests, throwsOnGetCompiledModel) {
ov::InferRequest req; ov::InferRequest req;
ASSERT_THROW(req.get_compiled_model(), ov::Exception); ASSERT_THROW(req.get_compiled_model(), ov::Exception);
@ -2,13 +2,14 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <file_utils.h>
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <file_utils.h>
#include "openvino/util/shared_object.hpp"
#include "common_test_utils/file_utils.hpp"
#include <cpp/ie_plugin.hpp> #include <cpp/ie_plugin.hpp>
#include "common_test_utils/file_utils.hpp"
#include "openvino/util/shared_object.hpp"
using namespace ::testing; using namespace ::testing;
using namespace std; using namespace std;
@ -19,7 +20,7 @@ protected:
std::string("mock_engine") + IE_BUILD_POSTFIX); std::string("mock_engine") + IE_BUILD_POSTFIX);
} }
void loadDll(const string &libraryName) { void loadDll(const string& libraryName) {
shared_object = ov::util::load_shared_object(libraryName.c_str()); shared_object = ov::util::load_shared_object(libraryName.c_str());
} }
std::shared_ptr<void> shared_object; std::shared_ptr<void> shared_object;
@ -27,7 +28,8 @@ protected:
using CreateF = void(std::shared_ptr<InferenceEngine::IInferencePlugin>&); using CreateF = void(std::shared_ptr<InferenceEngine::IInferencePlugin>&);
std::function<CreateF> make_std_function(const std::string& functionName) { std::function<CreateF> make_std_function(const std::string& functionName) {
std::function<CreateF> ptr(reinterpret_cast<CreateF*>(ov::util::get_symbol(shared_object, functionName.c_str()))); std::function<CreateF> ptr(
reinterpret_cast<CreateF*>(ov::util::get_symbol(shared_object, functionName.c_str())));
return ptr; return ptr;
} }
}; };
@ -4,16 +4,17 @@
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include "ie_allocator.hpp"
#include "details/ie_pre_allocator.hpp"
#include <vector> #include <vector>
#include "details/ie_pre_allocator.hpp"
#include "ie_allocator.hpp"
using namespace ::testing; using namespace ::testing;
using namespace std; using namespace std;
using namespace InferenceEngine; using namespace InferenceEngine;
class PreallocatorTests: public ::testing::Test { class PreallocatorTests : public ::testing::Test {
protected: protected:
std::vector<float> mybuf; std::vector<float> mybuf;
void SetUp() override { void SetUp() override {
@ -24,10 +25,10 @@ class PreallocatorTests: public ::testing::Test {
}; };
TEST_F(PreallocatorTests, canAccessPreAllocatedMemory) { TEST_F(PreallocatorTests, canAccessPreAllocatedMemory) {
void * handle = allocator->alloc(3); void* handle = allocator->alloc(3);
float * ptr = reinterpret_cast<float*>(allocator->lock(handle)); float* ptr = reinterpret_cast<float*>(allocator->lock(handle));
mybuf = { 1.1f, 2.2f, 3.3f }; mybuf = {1.1f, 2.2f, 3.3f};
ASSERT_EQ(ptr, &*mybuf.begin()); ASSERT_EQ(ptr, &*mybuf.begin());
ASSERT_EQ(ptr[0], 1.1f); ASSERT_EQ(ptr[0], 1.1f);
@ -36,12 +37,12 @@ TEST_F(PreallocatorTests, canAccessPreAllocatedMemory) {
} }
TEST_F(PreallocatorTests, canNotAllocateMoreMemory) { TEST_F(PreallocatorTests, canNotAllocateMoreMemory) {
//large block such as 10k will result in nullptr // large block such as 10k will result in nullptr
EXPECT_EQ(nullptr, allocator->lock(allocator->alloc(10* sizeof(float) + 1))); EXPECT_EQ(nullptr, allocator->lock(allocator->alloc(10 * sizeof(float) + 1)));
EXPECT_NE(nullptr, allocator->lock(allocator->alloc(10* sizeof(float)))); EXPECT_NE(nullptr, allocator->lock(allocator->alloc(10 * sizeof(float))));
} }
TEST_F(PreallocatorTests, canNotLockWrongHandle) { TEST_F(PreallocatorTests, canNotLockWrongHandle) {
void * handle = allocator->alloc(3); void* handle = allocator->alloc(3);
EXPECT_EQ(nullptr, allocator->lock(1 + reinterpret_cast<int*>(handle))); EXPECT_EQ(nullptr, allocator->lock(1 + reinterpret_cast<int*>(handle)));
} }
@ -3,6 +3,7 @@
// //
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <ie_preprocess.hpp> #include <ie_preprocess.hpp>
using namespace std; using namespace std;
@ -12,23 +13,22 @@ using PreProcessTests = ::testing::Test;
TEST_F(PreProcessTests, throwsOnSettingNullMeanImage) { TEST_F(PreProcessTests, throwsOnSettingNullMeanImage) {
InferenceEngine::PreProcessInfo info; InferenceEngine::PreProcessInfo info;
info.init(1); info.init(1);
ASSERT_THROW(info.setMeanImage(InferenceEngine::Blob::Ptr(nullptr)), ASSERT_THROW(info.setMeanImage(InferenceEngine::Blob::Ptr(nullptr)), InferenceEngine::Exception);
InferenceEngine::Exception);
} }
TEST_F(PreProcessTests, throwsOnSetting2DMeanImage) { TEST_F(PreProcessTests, throwsOnSetting2DMeanImage) {
InferenceEngine::PreProcessInfo info; InferenceEngine::PreProcessInfo info;
info.init(1); info.init(1);
InferenceEngine::Blob::Ptr blob(new InferenceEngine::TBlob<float>({ InferenceEngine::Precision::FP32, InferenceEngine::Blob::Ptr blob(
{1, 1}, InferenceEngine::Layout::HW})); new InferenceEngine::TBlob<float>({InferenceEngine::Precision::FP32, {1, 1}, InferenceEngine::Layout::HW}));
ASSERT_THROW(info.setMeanImage(blob), InferenceEngine::Exception); ASSERT_THROW(info.setMeanImage(blob), InferenceEngine::Exception);
} }
TEST_F(PreProcessTests, throwsOnSettingWrongSizeMeanImage) { TEST_F(PreProcessTests, throwsOnSettingWrongSizeMeanImage) {
InferenceEngine::PreProcessInfo info; InferenceEngine::PreProcessInfo info;
info.init(1); info.init(1);
InferenceEngine::TBlob<float>::Ptr blob(new InferenceEngine::TBlob<float>({ InferenceEngine::Precision::FP32, InferenceEngine::TBlob<float>::Ptr blob(
{ 2, 1, 1 }, InferenceEngine::Layout::CHW })); new InferenceEngine::TBlob<float>({InferenceEngine::Precision::FP32, {2, 1, 1}, InferenceEngine::Layout::CHW}));
blob->allocate(); blob->allocate();
ASSERT_THROW(info.setMeanImage(blob), InferenceEngine::Exception); ASSERT_THROW(info.setMeanImage(blob), InferenceEngine::Exception);
} }
@ -36,8 +36,8 @@ TEST_F(PreProcessTests, throwsOnSettingWrongSizeMeanImage) {
TEST_F(PreProcessTests, noThrowWithCorrectSizeMeanImage) { TEST_F(PreProcessTests, noThrowWithCorrectSizeMeanImage) {
InferenceEngine::PreProcessInfo info; InferenceEngine::PreProcessInfo info;
info.init(2); info.init(2);
InferenceEngine::TBlob<float>::Ptr blob(new InferenceEngine::TBlob<float>({ InferenceEngine::Precision::FP32, InferenceEngine::TBlob<float>::Ptr blob(
{ 2, 1, 1 }, InferenceEngine::Layout::CHW })); new InferenceEngine::TBlob<float>({InferenceEngine::Precision::FP32, {2, 1, 1}, InferenceEngine::Layout::CHW}));
blob->allocate(); blob->allocate();
ASSERT_NO_THROW(info.setMeanImage(blob)); ASSERT_NO_THROW(info.setMeanImage(blob));
} }
@ -3,6 +3,7 @@
// //
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include "description_buffer.hpp" #include "description_buffer.hpp"
using namespace std; using namespace std;
@ -2,25 +2,26 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <file_utils.h>
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <file_utils.h>
#include "openvino/util/shared_object.hpp"
#include "common_test_utils/file_utils.hpp"
#include <cpp/ie_plugin.hpp> #include <cpp/ie_plugin.hpp>
#include "common_test_utils/file_utils.hpp"
#include "openvino/util/shared_object.hpp"
using namespace std; using namespace std;
using namespace InferenceEngine; using namespace InferenceEngine;
using namespace InferenceEngine::details; using namespace InferenceEngine::details;
class SharedObjectLoaderTests: public ::testing::Test { class SharedObjectLoaderTests : public ::testing::Test {
protected: protected:
std::string get_mock_engine_name() { std::string get_mock_engine_name() {
return FileUtils::makePluginLibraryName<char>(CommonTestUtils::getExecutableDirectory(), return FileUtils::makePluginLibraryName<char>(CommonTestUtils::getExecutableDirectory(),
std::string("mock_engine") + IE_BUILD_POSTFIX); std::string("mock_engine") + IE_BUILD_POSTFIX);
} }
void loadDll(const string &libraryName) { void loadDll(const string& libraryName) {
sharedObjectLoader = ov::util::load_shared_object(libraryName.c_str()); sharedObjectLoader = ov::util::load_shared_object(libraryName.c_str());
} }
std::shared_ptr<void> sharedObjectLoader; std::shared_ptr<void> sharedObjectLoader;
@ -28,14 +29,14 @@ protected:
using CreateF = void(std::shared_ptr<IInferencePlugin>&); using CreateF = void(std::shared_ptr<IInferencePlugin>&);
std::function<CreateF> make_std_function(const std::string& functionName) { std::function<CreateF> make_std_function(const std::string& functionName) {
std::function<CreateF> ptr(reinterpret_cast<CreateF*>( std::function<CreateF> ptr(
ov::util::get_symbol(sharedObjectLoader, functionName.c_str()))); reinterpret_cast<CreateF*>(ov::util::get_symbol(sharedObjectLoader, functionName.c_str())));
return ptr; return ptr;
} }
}; };
typedef void*(*PluginEngineCreateFunc)(void); typedef void* (*PluginEngineCreateFunc)(void);
typedef void(*PluginEngineDestoryFunc)(void *); typedef void (*PluginEngineDestoryFunc)(void*);
TEST_F(SharedObjectLoaderTests, canLoadExistedPlugin) { TEST_F(SharedObjectLoaderTests, canLoadExistedPlugin) {
loadDll(get_mock_engine_name()); loadDll(get_mock_engine_name());
@ -2,11 +2,11 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <vector>
#include <string>
#include "functional_test_utils/skip_tests_config.hpp" #include "functional_test_utils/skip_tests_config.hpp"
#include <string>
#include <vector>
std::vector<std::string> disabledTestPatterns() { std::vector<std::string> disabledTestPatterns() {
return {}; return {};
} }
@ -2,15 +2,14 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <future>
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <ie_system_conf.h>
#include <future>
#include <ie_parallel.hpp> #include <ie_parallel.hpp>
#include <thread>
#include <threading/ie_cpu_streams_executor.hpp> #include <threading/ie_cpu_streams_executor.hpp>
#include <threading/ie_immediate_executor.hpp> #include <threading/ie_immediate_executor.hpp>
#include <ie_system_conf.h>
#include <thread>
using namespace ::testing; using namespace ::testing;
using namespace std; using namespace std;
@ -28,18 +27,22 @@ TEST_P(TaskExecutorTests, canCreateTaskExecutor) {
EXPECT_NO_THROW(makeExecutor()); EXPECT_NO_THROW(makeExecutor());
} }
template<typename E, typename F> template <typename E, typename F>
static std::future<void> async(E& executor, F&& f) { static std::future<void> async(E& executor, F&& f) {
auto p = std::make_shared<std::packaged_task<void()>>(f); auto p = std::make_shared<std::packaged_task<void()>>(f);
auto future = p->get_future(); auto future = p->get_future();
executor->run([p] {(*p)();}); executor->run([p] {
(*p)();
});
return future; return future;
} }
TEST_P(TaskExecutorTests, canRunCustomFunction) { TEST_P(TaskExecutorTests, canRunCustomFunction) {
auto taskExecutor = GetParam()(); auto taskExecutor = GetParam()();
int i = 0; int i = 0;
auto f = async(taskExecutor, [&i] { i++; }); auto f = async(taskExecutor, [&i] {
i++;
});
f.wait(); f.wait();
ASSERT_NO_THROW(f.get()); ASSERT_NO_THROW(f.get());
} }
@ -48,10 +51,16 @@ TEST_P(TaskExecutorTests, canRun2FunctionsOneByOne) {
auto taskExecutor = GetParam()(); auto taskExecutor = GetParam()();
std::mutex m; std::mutex m;
int i = 0; int i = 0;
auto f1 = async(taskExecutor, [&]() {std::unique_lock<std::mutex> l{m}; i += 1; }); auto f1 = async(taskExecutor, [&]() {
std::unique_lock<std::mutex> l{m};
i += 1;
});
f1.wait(); f1.wait();
ASSERT_NO_THROW(f1.get()); ASSERT_NO_THROW(f1.get());
auto f2 = async(taskExecutor, [&]() {std::unique_lock<std::mutex> l{m}; i *= 2; }); auto f2 = async(taskExecutor, [&]() {
std::unique_lock<std::mutex> l{m};
i *= 2;
});
f2.wait(); f2.wait();
ASSERT_NO_THROW(f2.get()); ASSERT_NO_THROW(f2.get());
@ -73,10 +82,12 @@ TEST_P(TaskExecutorTests, canRunMultipleTasksWithExceptionInside) {
std::vector<std::future<void>> futures; std::vector<std::future<void>> futures;
for (int i = 0; i < MAX_NUMBER_OF_TASKS_IN_QUEUE; i++) { for (int i = 0; i < MAX_NUMBER_OF_TASKS_IN_QUEUE; i++) {
futures.emplace_back(async(taskExecutor, [] { throw std::bad_alloc(); })); futures.emplace_back(async(taskExecutor, [] {
throw std::bad_alloc();
}));
} }
for (auto &f : futures) { for (auto& f : futures) {
f.wait(); f.wait();
EXPECT_THROW(f.get(), std::bad_alloc); EXPECT_THROW(f.get(), std::bad_alloc);
} }
@ -94,16 +105,25 @@ TEST_P(TaskExecutorTests, canRunMultipleTasksFromMultipleThreads) {
auto p = std::make_shared<std::packaged_task<void()>>([&] { auto p = std::make_shared<std::packaged_task<void()>>([&] {
for (int k = 0; k < NUM_INTERNAL_ITERATIONS; k++) { for (int k = 0; k < NUM_INTERNAL_ITERATIONS; k++) {
++sharedVar; ++sharedVar;
}}); }
});
futures.emplace_back(p->get_future()); futures.emplace_back(p->get_future());
auto task = [p] {(*p)();}; auto task = [p] {
threads.emplace_back([task, taskExecutor] {taskExecutor->run(std::move(task));}); (*p)();
};
threads.emplace_back([task, taskExecutor] {
taskExecutor->run(std::move(task));
});
} }
for (auto&& f : futures) f.wait(); for (auto&& f : futures)
for (auto&& f : futures) ASSERT_NO_THROW(f.get()); f.wait();
for (auto&& f : futures)
ASSERT_NO_THROW(f.get());
ASSERT_EQ(THREAD_NUMBER * NUM_INTERNAL_ITERATIONS, sharedVar); ASSERT_EQ(THREAD_NUMBER * NUM_INTERNAL_ITERATIONS, sharedVar);
for (auto&& thread : threads) if (thread.joinable()) thread.join(); for (auto&& thread : threads)
if (thread.joinable())
thread.join();
} }
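The join loops above show one more enforced rule: a loop or if body no longer shares a line with its header. A self-contained sketch of the resulting shape (joinAllSketch is a stand-in helper, not from the commit):

#include <thread>
#include <vector>

static void joinAllSketch(std::vector<std::thread>& threads) {
    // was: for (auto&& thread : threads) if (thread.joinable()) thread.join();
    for (auto&& thread : threads)
        if (thread.joinable())
            thread.join();
}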
TEST_P(TaskExecutorTests, executorNotReleasedUntilTasksAreDone) { TEST_P(TaskExecutorTests, executorNotReleasedUntilTasksAreDone) {
@ -115,15 +135,18 @@ TEST_P(TaskExecutorTests, executorNotReleasedUntilTasksAreDone) {
{ {
auto taskExecutor = GetParam()(); auto taskExecutor = GetParam()();
for (int i = 0; i < MAX_NUMBER_OF_TASKS_IN_QUEUE; i++) { for (int i = 0; i < MAX_NUMBER_OF_TASKS_IN_QUEUE; i++) {
auto p = std::make_shared<std::packaged_task<void()>>( auto p = std::make_shared<std::packaged_task<void()>>([&] {
[&] { // intentionally block task for launching tasks after calling dtor for TaskExecutor
// intentionally block task for launching tasks after calling dtor for TaskExecutor std::unique_lock<std::mutex> lock(mutex_block_emulation);
std::unique_lock<std::mutex> lock(mutex_block_emulation); cv_block_emulation.wait(lock, [&isBlocked] {
cv_block_emulation.wait(lock, [&isBlocked] { return isBlocked; }); return isBlocked;
++sharedVar; });
}); ++sharedVar;
});
futures.emplace_back(p->get_future()); futures.emplace_back(p->get_future());
auto task = [p] {(*p)();}; auto task = [p] {
(*p)();
};
taskExecutor->run(std::move(task)); taskExecutor->run(std::move(task));
} }
} }
@ -132,7 +155,7 @@ TEST_P(TaskExecutorTests, executorNotReleasedUntilTasksAreDone) {
std::lock_guard<std::mutex> lock{mutex_block_emulation}; std::lock_guard<std::mutex> lock{mutex_block_emulation};
isBlocked = false; isBlocked = false;
} }
for (auto &f : futures) { for (auto& f : futures) {
cv_block_emulation.notify_all(); cv_block_emulation.notify_all();
f.wait(); f.wait();
} }
@ -160,12 +183,16 @@ TEST_P(ASyncTaskExecutorTests, startAsyncIsNotBlockedByAnotherTask) {
cv_task_started.notify_all(); cv_task_started.notify_all();
// intentionally block task for test purpose // intentionally block task for test purpose
std::unique_lock<std::mutex> lock(mutex_block_emulation); std::unique_lock<std::mutex> lock(mutex_block_emulation);
cv_block_emulation.wait(lock, [&isBlocked] { return !isBlocked; }); cv_block_emulation.wait(lock, [&isBlocked] {
return !isBlocked;
});
}); });
async(taskExecutor, [&] { async(taskExecutor, [&] {
std::unique_lock<std::mutex> lock(mutex_task_started); std::unique_lock<std::mutex> lock(mutex_task_started);
cv_task_started.wait(lock, [&isStarted] { return isStarted; }); cv_task_started.wait(lock, [&isStarted] {
return isStarted;
});
}); });
{ {
@ -176,12 +203,12 @@ TEST_P(ASyncTaskExecutorTests, startAsyncIsNotBlockedByAnotherTask) {
} }
TEST_P(ASyncTaskExecutorTests, runAndWaitDoesNotOwnTasks) { TEST_P(ASyncTaskExecutorTests, runAndWaitDoesNotOwnTasks) {
std::shared_ptr<void> sharedCounter(this, [] (ASyncTaskExecutorTests*) {}); std::shared_ptr<void> sharedCounter(this, [](ASyncTaskExecutorTests*) {});
auto taskExecutor = GetParam()(); auto taskExecutor = GetParam()();
std::atomic_int useCount = {0}; std::atomic_int useCount = {0};
std::vector<Task> tasks = {[sharedCounter, &useCount] { std::vector<Task> tasks = {[sharedCounter, &useCount] {
useCount = sharedCounter.use_count(); useCount = sharedCounter.use_count();
}}; }};
sharedCounter.reset(); sharedCounter.reset();
taskExecutor->runAndWait(tasks); taskExecutor->runAndWait(tasks);
ASSERT_EQ(1, useCount); ASSERT_EQ(1, useCount);
@ -193,25 +220,33 @@ static auto Executors = ::testing::Values(
[] { [] {
auto streams = getNumberOfCPUCores(); auto streams = getNumberOfCPUCores();
auto threads = parallel_get_max_threads(); auto threads = parallel_get_max_threads();
return std::make_shared<CPUStreamsExecutor>(IStreamsExecutor::Config{"TestCPUStreamsExecutor", return std::make_shared<CPUStreamsExecutor>(
streams, threads/streams, IStreamsExecutor::ThreadBindingType::NONE}); IStreamsExecutor::Config{"TestCPUStreamsExecutor",
streams,
threads / streams,
IStreamsExecutor::ThreadBindingType::NONE});
}, },
[] { [] {
auto streams = getNumberOfLogicalCPUCores(true); auto streams = getNumberOfLogicalCPUCores(true);
auto threads = parallel_get_max_threads(); auto threads = parallel_get_max_threads();
return std::make_shared<CPUStreamsExecutor>(IStreamsExecutor::Config{"TestCPUStreamsExecutor", return std::make_shared<CPUStreamsExecutor>(
streams, threads/streams, IStreamsExecutor::ThreadBindingType::NONE}); IStreamsExecutor::Config{"TestCPUStreamsExecutor",
streams,
threads / streams,
IStreamsExecutor::ThreadBindingType::NONE});
}, },
[] { [] {
auto streams = getNumberOfLogicalCPUCores(false); auto streams = getNumberOfLogicalCPUCores(false);
auto threads = parallel_get_max_threads(); auto threads = parallel_get_max_threads();
return std::make_shared<CPUStreamsExecutor>(IStreamsExecutor::Config{"TestCPUStreamsExecutor", return std::make_shared<CPUStreamsExecutor>(
streams, threads/streams, IStreamsExecutor::ThreadBindingType::NONE}); IStreamsExecutor::Config{"TestCPUStreamsExecutor",
streams,
threads / streams,
IStreamsExecutor::ThreadBindingType::NONE});
}, },
[] { [] {
return std::make_shared<ImmediateExecutor>(); return std::make_shared<ImmediateExecutor>();
} });
);
INSTANTIATE_TEST_SUITE_P(TaskExecutorTests, TaskExecutorTests, Executors); INSTANTIATE_TEST_SUITE_P(TaskExecutorTests, TaskExecutorTests, Executors);
@ -219,24 +254,29 @@ static auto AsyncExecutors = ::testing::Values(
[] { [] {
auto streams = getNumberOfCPUCores(); auto streams = getNumberOfCPUCores();
auto threads = parallel_get_max_threads(); auto threads = parallel_get_max_threads();
return std::make_shared<CPUStreamsExecutor>(IStreamsExecutor::Config{"TestCPUStreamsExecutor", return std::make_shared<CPUStreamsExecutor>(
streams, threads/streams, IStreamsExecutor::ThreadBindingType::NONE}); IStreamsExecutor::Config{"TestCPUStreamsExecutor",
streams,
threads / streams,
IStreamsExecutor::ThreadBindingType::NONE});
}, },
[] { [] {
auto streams = getNumberOfLogicalCPUCores(true); auto streams = getNumberOfLogicalCPUCores(true);
auto threads = parallel_get_max_threads(); auto threads = parallel_get_max_threads();
return std::make_shared<CPUStreamsExecutor>(IStreamsExecutor::Config{"TestCPUStreamsExecutor", return std::make_shared<CPUStreamsExecutor>(
streams, threads/streams, IStreamsExecutor::ThreadBindingType::NONE}); IStreamsExecutor::Config{"TestCPUStreamsExecutor",
streams,
threads / streams,
IStreamsExecutor::ThreadBindingType::NONE});
}, },
[] { [] {
auto streams = getNumberOfLogicalCPUCores(false); auto streams = getNumberOfLogicalCPUCores(false);
auto threads = parallel_get_max_threads(); auto threads = parallel_get_max_threads();
return std::make_shared<CPUStreamsExecutor>(IStreamsExecutor::Config{"TestCPUStreamsExecutor", return std::make_shared<CPUStreamsExecutor>(
streams, threads/streams, IStreamsExecutor::ThreadBindingType::NONE}); IStreamsExecutor::Config{"TestCPUStreamsExecutor",
} streams,
); threads / streams,
IStreamsExecutor::ThreadBindingType::NONE});
});
INSTANTIATE_TEST_SUITE_P(ASyncTaskExecutorTests, ASyncTaskExecutorTests, AsyncExecutors); INSTANTIATE_TEST_SUITE_P(ASyncTaskExecutorTests, ASyncTaskExecutorTests, AsyncExecutors);
@ -3,12 +3,11 @@
// //
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <random>
#include <chrono>
#include <ie_layouts.h>
#include <ie_blob.h> #include <ie_blob.h>
#include <ie_layouts.h>
#include <chrono>
#include <random>
using namespace ::testing; using namespace ::testing;
using namespace std; using namespace std;
@ -17,7 +16,7 @@ using namespace InferenceEngine;
using TensorDescTests = ::testing::Test; using TensorDescTests = ::testing::Test;
TEST_F(TensorDescTests, CreateBlobWithIncorrectLayout) { TEST_F(TensorDescTests, CreateBlobWithIncorrectLayout) {
ASSERT_THROW(make_shared_blob<float>({ Precision::FP32, {1, 3, 32}, Layout::NC }), Exception); ASSERT_THROW(make_shared_blob<float>({Precision::FP32, {1, 3, 32}, Layout::NC}), Exception);
} }
TEST_F(TensorDescTests, CreateBlockedBlobNCHW) { TEST_F(TensorDescTests, CreateBlockedBlobNCHW) {
@ -94,7 +93,7 @@ TEST_F(TensorDescTests, SetLayout) {
TEST_F(TensorDescTests, setDimsForBLOCKED) { TEST_F(TensorDescTests, setDimsForBLOCKED) {
TensorDesc desc(Precision::FP32, {1, 2, 3, 4, 5, 6}, Layout::BLOCKED); TensorDesc desc(Precision::FP32, {1, 2, 3, 4, 5, 6}, Layout::BLOCKED);
SizeVector newDims {7, 7, 7, 7, 7, 7}; SizeVector newDims{7, 7, 7, 7, 7, 7};
desc.setDims(newDims); desc.setDims(newDims);
EXPECT_EQ(desc.getDims(), newDims); EXPECT_EQ(desc.getDims(), newDims);
EXPECT_EQ(desc.getBlockingDesc().getBlockDims(), newDims); EXPECT_EQ(desc.getBlockingDesc().getBlockDims(), newDims);
@ -103,7 +102,7 @@ TEST_F(TensorDescTests, setDimsForBLOCKED) {
TEST_F(TensorDescTests, setDimsForNHWC) { TEST_F(TensorDescTests, setDimsForNHWC) {
TensorDesc desc(Precision::FP32, {1, 2, 3, 4}, Layout::NHWC); TensorDesc desc(Precision::FP32, {1, 2, 3, 4}, Layout::NHWC);
auto refOrder = desc.getBlockingDesc().getOrder(); auto refOrder = desc.getBlockingDesc().getOrder();
SizeVector newDims {7, 7, 7, 7}; SizeVector newDims{7, 7, 7, 7};
desc.setDims(newDims); desc.setDims(newDims);
EXPECT_EQ(desc.getDims(), newDims); EXPECT_EQ(desc.getDims(), newDims);
EXPECT_EQ(desc.getLayout(), Layout::NHWC); EXPECT_EQ(desc.getLayout(), Layout::NHWC);
@ -32,6 +32,7 @@ ov_add_test_target(
LINK_LIBRARIES LINK_LIBRARIES
unitTestUtils unitTestUtils
INCLUDES INCLUDES
ADD_CLANG_FORMAT
LABELS LABELS
OV OV
) )
@ -2,19 +2,19 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <ie_blob.h>
#include <gtest/gtest.h>
#include <gmock/gmock-spec-builders.h> #include <gmock/gmock-spec-builders.h>
#include <gtest/gtest.h>
#include <ie_blob.h>
#include "unit_test_utils/mocks/mock_allocator.hpp" #include "unit_test_utils/mocks/mock_allocator.hpp"
#ifdef _WIN32 #ifdef _WIN32
#define UNUSED # define UNUSED
#else #else
#define UNUSED __attribute__((unused)) # define UNUSED __attribute__((unused))
#endif #endif
class BlobTests: public ::testing::Test { class BlobTests : public ::testing::Test {
protected: protected:
std::shared_ptr<MockAllocator> createMockAllocator() { std::shared_ptr<MockAllocator> createMockAllocator() {
return std::shared_ptr<MockAllocator>(new MockAllocator()); return std::shared_ptr<MockAllocator>(new MockAllocator());
@ -29,21 +29,21 @@ TEST_F(BlobTests, TBlobThrowsIfPtrForPreAllocatorIsNullPtr) {
// Testing TBlob(const TensorDesc& tensorDesc, const std::shared_ptr<IAllocator>& alloc) // Testing TBlob(const TensorDesc& tensorDesc, const std::shared_ptr<IAllocator>& alloc)
TEST_F(BlobTests, TBlobThrowsIfAllocatorIsNullPtr) { TEST_F(BlobTests, TBlobThrowsIfAllocatorIsNullPtr) {
ASSERT_THROW(InferenceEngine::TBlob<float>( ASSERT_THROW(InferenceEngine::TBlob<float>({InferenceEngine::Precision::FP32, {1}, InferenceEngine::C},
{InferenceEngine::Precision::FP32, {1}, InferenceEngine::C}, std::shared_ptr<InferenceEngine::IAllocator>()), std::shared_ptr<InferenceEngine::IAllocator>()),
InferenceEngine::Exception); InferenceEngine::Exception);
} }
TEST_F(BlobTests, canCreateBlobUsingDefaultAllocator) { TEST_F(BlobTests, canCreateBlobUsingDefaultAllocator) {
InferenceEngine::SizeVector v = {1, 2, 3}; InferenceEngine::SizeVector v = {1, 2, 3};
auto allocator = createMockAllocator(); auto allocator = createMockAllocator();
EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float))).WillRepeatedly(testing::Return(reinterpret_cast<void*>(1))); EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float)))
.WillRepeatedly(testing::Return(reinterpret_cast<void*>(1)));
EXPECT_CALL(*allocator.get(), free(::testing::_)).Times(1); EXPECT_CALL(*allocator.get(), free(::testing::_)).Times(1);
{ {
InferenceEngine::TBlob<float> blob({ InferenceEngine::Precision::FP32, v, InferenceEngine::CHW }, InferenceEngine::TBlob<float> blob({InferenceEngine::Precision::FP32, v, InferenceEngine::CHW},
std::dynamic_pointer_cast<InferenceEngine::IAllocator>(allocator)); std::dynamic_pointer_cast<InferenceEngine::IAllocator>(allocator));
blob.allocate(); blob.allocate();
} }
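The expectation chains in this file now wrap before .Times and .WillRepeatedly once they pass the column limit. A self-contained sketch with a minimal mock; MockAllocSketch and the byte count are illustrative, not the repository's MockAllocator:

#include <gmock/gmock.h>

#include <cstddef>

class MockAllocSketch {
public:
    MOCK_METHOD(void*, alloc, (std::size_t));
};

static void expectationSketch(MockAllocSketch& allocator) {
    // was: EXPECT_CALL(allocator, alloc(24)).Times(2).WillRepeatedly(...);
    EXPECT_CALL(allocator, alloc(24))
        .Times(2)
        .WillRepeatedly(testing::Return(reinterpret_cast<void*>(1)));
}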
@ -53,32 +53,32 @@ TEST_F(BlobTests, secondAllocateWontMemLeak) {
InferenceEngine::SizeVector v = {1, 2, 3}; InferenceEngine::SizeVector v = {1, 2, 3};
auto allocator = createMockAllocator(); auto allocator = createMockAllocator();
EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float))).Times(2).WillRepeatedly(testing::Return(reinterpret_cast<void*>(1))); EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float)))
.Times(2)
.WillRepeatedly(testing::Return(reinterpret_cast<void*>(1)));
EXPECT_CALL(*allocator.get(), free(::testing::_)).Times(2).WillRepeatedly(testing::Return(true)); EXPECT_CALL(*allocator.get(), free(::testing::_)).Times(2).WillRepeatedly(testing::Return(true));
{ {
InferenceEngine::TBlob<float> blob({ InferenceEngine::Precision::FP32, v, InferenceEngine::CHW }, InferenceEngine::TBlob<float> blob({InferenceEngine::Precision::FP32, v, InferenceEngine::CHW},
std::dynamic_pointer_cast<InferenceEngine::IAllocator>(allocator)); std::dynamic_pointer_cast<InferenceEngine::IAllocator>(allocator));
blob.allocate(); blob.allocate();
blob.allocate(); blob.allocate();
} }
} }
TEST_F(BlobTests, doesNotUnlockIfLockFailed) { TEST_F(BlobTests, doesNotUnlockIfLockFailed) {
InferenceEngine::SizeVector v = {1, 2, 3}; InferenceEngine::SizeVector v = {1, 2, 3};
auto allocator = createMockAllocator(); auto allocator = createMockAllocator();
EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float))).WillRepeatedly(testing::Return(reinterpret_cast<void*>(1))); EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float)))
.WillRepeatedly(testing::Return(reinterpret_cast<void*>(1)));
EXPECT_CALL(*allocator.get(), lock(reinterpret_cast<void*>(1), InferenceEngine::LOCK_FOR_WRITE)).Times(1); EXPECT_CALL(*allocator.get(), lock(reinterpret_cast<void*>(1), InferenceEngine::LOCK_FOR_WRITE)).Times(1);
EXPECT_CALL(*allocator.get(), free(::testing::_)).Times(1); EXPECT_CALL(*allocator.get(), free(::testing::_)).Times(1);
InferenceEngine::TBlob<float> blob({ InferenceEngine::Precision::FP32, v, InferenceEngine::CHW }, InferenceEngine::TBlob<float> blob({InferenceEngine::Precision::FP32, v, InferenceEngine::CHW},
std::dynamic_pointer_cast<InferenceEngine::IAllocator>(allocator)); std::dynamic_pointer_cast<InferenceEngine::IAllocator>(allocator));
blob.allocate(); blob.allocate();
{ { float UNUSED* ptr = blob.data(); }
float UNUSED *ptr = blob.data();
}
} }
TEST_F(BlobTests, canAccessDataUsingAllocator) {

@@ -87,38 +87,41 @@ TEST_F(BlobTests, canAccessDataUsingAllocator) {
    float data[] = {5.f, 6.f, 7.f};

    EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float)))
        .WillRepeatedly(testing::Return(reinterpret_cast<void*>(1)));
    EXPECT_CALL(*allocator.get(), lock(reinterpret_cast<void*>(1), InferenceEngine::LOCK_FOR_WRITE))
        .WillRepeatedly(testing::Return(data));
    EXPECT_CALL(*allocator.get(), unlock(reinterpret_cast<void*>(1))).Times(1);
    EXPECT_CALL(*allocator.get(), free(::testing::_)).Times(1);

    InferenceEngine::TBlob<float> blob({InferenceEngine::Precision::FP32, v, InferenceEngine::CHW},
                                       std::dynamic_pointer_cast<InferenceEngine::IAllocator>(allocator));
    blob.allocate();
    {
        float* ptr = blob.data();
        ASSERT_EQ(ptr[2], 7);
    }
}

TEST_F(BlobTests, canLockReadOnlyDataForRead) {
    InferenceEngine::SizeVector v = {1, 2, 3};
    auto allocator = createMockAllocator();

    float data[] = {5, 6, 7};

    EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float)))
        .WillRepeatedly(testing::Return(reinterpret_cast<void*>(1)));
    EXPECT_CALL(*allocator.get(), lock(::testing::_, InferenceEngine::LOCK_FOR_READ))
        .WillRepeatedly(testing::Return(data));
    EXPECT_CALL(*allocator.get(), free(::testing::_)).Times(1);
    EXPECT_CALL(*allocator.get(), unlock(reinterpret_cast<void*>(1))).Times(1);

    InferenceEngine::TBlob<float> blob({InferenceEngine::Precision::FP32, v, InferenceEngine::CHW},
                                       std::dynamic_pointer_cast<InferenceEngine::IAllocator>(allocator));
    blob.allocate();
    const float* ptr = blob.readOnly();
    ASSERT_EQ(ptr[2], 7);
}
TEST_F(BlobTests, canAccessDataUsingBufferBaseMethod) {

@@ -127,17 +130,19 @@ TEST_F(BlobTests, canAccessDataUsingBufferBaseMethod) {
    float data[] = {5, 6, 7};

    EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float)))
        .WillRepeatedly(testing::Return(reinterpret_cast<void*>(1)));
    EXPECT_CALL(*allocator.get(), lock(::testing::_, InferenceEngine::LOCK_FOR_WRITE))
        .WillRepeatedly(testing::Return(data));
    EXPECT_CALL(*allocator.get(), unlock(reinterpret_cast<void*>(1))).Times(1);
    EXPECT_CALL(*allocator.get(), free(::testing::_)).Times(1);

    InferenceEngine::TBlob<float> blob({InferenceEngine::Precision::FP32, v, InferenceEngine::CHW},
                                       std::dynamic_pointer_cast<InferenceEngine::IAllocator>(allocator));
    blob.allocate();
    auto buffer = blob.rwmap();
    const float* ptr = buffer.as<const float*>();
    ASSERT_EQ(ptr[2], 7);
}

TEST_F(BlobTests, canMoveFromTBlobWithSameType) {

@@ -146,30 +151,32 @@ TEST_F(BlobTests, canMoveFromTBlobWithSameType) {
    uint8_t data[] = {5, 6};

    EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(uint8_t)))
        .WillRepeatedly(testing::Return(reinterpret_cast<void*>(1)));
    EXPECT_CALL(*allocator.get(), lock(::testing::_, InferenceEngine::LOCK_FOR_WRITE))
        .WillRepeatedly(testing::Return(data));
    EXPECT_CALL(*allocator.get(), unlock(reinterpret_cast<void*>(1))).Times(1);
    EXPECT_CALL(*allocator.get(), free(::testing::_)).Times(1);

    InferenceEngine::TBlob<uint8_t> blob({InferenceEngine::Precision::U8, v, InferenceEngine::CHW},
                                         std::dynamic_pointer_cast<InferenceEngine::IAllocator>(allocator));
    blob.allocate();

    InferenceEngine::TBlob<uint8_t> newBlob(std::move(blob));

    auto buffer = newBlob.rwmap();
    uint8_t* ptr = buffer.as<uint8_t*>();
    ASSERT_EQ(ptr[0], data[0]);
}

TEST_F(BlobTests, saveDimsAndSizeAfterMove) {
    InferenceEngine::SizeVector v = {1, 2, 3};
    auto allocator = createMockAllocator();

    InferenceEngine::TBlob<uint8_t> blob({InferenceEngine::Precision::U8, v, InferenceEngine::CHW},
                                         std::dynamic_pointer_cast<InferenceEngine::IAllocator>(allocator));

    InferenceEngine::TBlob<uint8_t> newBlob(std::move(blob));

    ASSERT_EQ(newBlob.size(), 1 * 2 * 3);
    ASSERT_EQ(newBlob.getTensorDesc().getDims()[0], 1);

@@ -179,7 +186,7 @@ TEST_F(BlobTests, saveDimsAndSizeAfterMove) {

TEST_F(BlobTests, canCopyBlob) {
    InferenceEngine::SizeVector v = {1, 3};
    InferenceEngine::TBlob<uint8_t> blob({InferenceEngine::Precision::U8, v, InferenceEngine::HW});
    blob.allocate();
    blob.data()[0] = 1;
    blob.data()[1] = 2;

@@ -187,20 +194,20 @@ TEST_F(BlobTests, canCopyBlob) {
    InferenceEngine::TBlob<uint8_t> blob2(blob);

    ASSERT_EQ(blob2.getTensorDesc().getDims().size(), blob.getTensorDesc().getDims().size());
    ASSERT_EQ(blob2.getTensorDesc().getDims()[0], blob.getTensorDesc().getDims()[0]);
    ASSERT_EQ(blob2.getTensorDesc().getDims()[1], blob.getTensorDesc().getDims()[1]);
    ASSERT_EQ(blob2.size(), blob.size());
    ASSERT_EQ(blob2.data()[0], blob.data()[0]);
    ASSERT_EQ(blob2.data()[1], blob.data()[1]);
    ASSERT_EQ(blob2.data()[2], blob.data()[2]);
}

TEST_F(BlobTests, canCompareToNullPtrWithoutDereferencing) {
    InferenceEngine::SizeVector v = {1, 2, 3};
    auto allocator = createMockAllocator();

    InferenceEngine::TBlob<uint8_t> blob({InferenceEngine::Precision::U8, v, InferenceEngine::CHW},
                                         std::dynamic_pointer_cast<InferenceEngine::IAllocator>(allocator));

    ASSERT_TRUE(blob.readOnly() == nullptr);

@@ -213,35 +220,36 @@ TEST_F(BlobTests, canCompareToNullPtrWithoutDereferencing) {
}

TEST_F(BlobTests, canCreateBlob) {
    InferenceEngine::SizeVector size = {1, 1, 1};
    InferenceEngine::TBlob<float> blob({InferenceEngine::Precision::FP32, size, InferenceEngine::CHW});
    ASSERT_NE(blob.size(), 0);
    ASSERT_EQ(blob.rwmap(), nullptr);
}

TEST_F(BlobTests, canAllocateBlob) {
    InferenceEngine::SizeVector size = {1, 1, 1};
    InferenceEngine::TBlob<float> blob({InferenceEngine::Precision::FP32, size, InferenceEngine::CHW});
    blob.allocate();
    float* buffer = static_cast<float*>(blob.data());
    ASSERT_NE(buffer, nullptr);
}

TEST_F(BlobTests, canDeallocateBlob) {
    InferenceEngine::SizeVector size = {1, 1, 1};
    InferenceEngine::TBlob<float> blob({InferenceEngine::Precision::FP32, size, InferenceEngine::CHW});
    blob.allocate();
    blob.deallocate();
    ASSERT_EQ(nullptr, blob.data().as<float*>());
}

TEST_F(BlobTests, canCreateBlobWithoutDims) {
    InferenceEngine::TBlob<float> blob(
        InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, InferenceEngine::NCHW));
    ASSERT_EQ(blob.getTensorDesc().getDims().size(), 0);
}

TEST_F(BlobTests, canReadDataFromConstBlob) {
    InferenceEngine::TBlob<float> blob({InferenceEngine::Precision::FP32, {1, 1, 1}, InferenceEngine::CHW});
    blob.allocate();
    blob.data()[0] = 1.0f;
    InferenceEngine::TBlob<float> const blob2 = blob;

@@ -250,15 +258,15 @@ TEST_F(BlobTests, canReadDataFromConstBlob) {
}

TEST_F(BlobTests, canMakeSharedBlob) {
    InferenceEngine::SizeVector size = {1, 1, 1};
    InferenceEngine::TBlob<float>::Ptr blob1 = InferenceEngine::make_shared_blob<float>(
        InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, InferenceEngine::NCHW));
    InferenceEngine::TBlob<float>::Ptr blob2 =
        InferenceEngine::make_shared_blob<float>({InferenceEngine::Precision::FP32, size, InferenceEngine::CHW});
    InferenceEngine::TBlob<float>::Ptr blob3 =
        InferenceEngine::make_shared_blob<float>({InferenceEngine::Precision::FP32, {0}, InferenceEngine::C});
    InferenceEngine::TBlob<float>::Ptr blob4 =
        InferenceEngine::make_shared_blob<float>({InferenceEngine::Precision::FP32, size, InferenceEngine::HWC});
    ASSERT_EQ(blob1->size(), 0);
    ASSERT_EQ(blob2->size(), 1);
    ASSERT_EQ(blob3->size(), 0);

@@ -296,7 +304,9 @@ TEST_F(BlobTests, DISABLED_canUseLockedMemoryAsRvalueReference) {
    std::vector<float> dump;
    std::vector<float> v({1.0f, 2.0f, 3.0f});
    auto blob = InferenceEngine::make_shared_blob<float>(
        InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, InferenceEngine::C),
        &v[0],
        v.size());
    for (auto e : *blob) {
        dump.push_back(e);
    }

@@ -312,7 +322,8 @@ TEST_F(BlobTests, canCreateBlobOnExistedMemory) {
    float input[] = {0.1f, 0.2f, 0.3f};
    {
        auto b = InferenceEngine::make_shared_blob<float>(
            InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {1, 2}, InferenceEngine::HW),
            input);
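        // make_shared_blob over a raw pointer wraps the caller's buffer in place
        // (no copy, and the blob does not take ownership), so reads through the
        // blob observe `input` directly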
        auto i = b->begin();
        ASSERT_NEAR(*i, 0.1, 0.00001);
        i++;

@@ -324,11 +335,10 @@ TEST_F(BlobTests, canCreateBlobOnExistedMemory) {
    }
}

// SetShape
TEST_F(BlobTests, canSetShape) {
    auto b = InferenceEngine::make_shared_blob<float>(
        InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {1, 2, 3}, InferenceEngine::ANY));
    b->allocate();
    ASSERT_NO_THROW(b->setShape({4, 5, 6}));

@@ -340,14 +350,12 @@ TEST_F(BlobTests, canSetShape) {
    ASSERT_EQ(newDims[2], 6);
}

TEST_F(BlobTests, canModifyDataInRangedFor) {
    InferenceEngine::SizeVector v = {1, 2, 3};
    InferenceEngine::TBlob<int> blob({InferenceEngine::Precision::I32, v, InferenceEngine::CHW});
    blob.allocate();

    for (auto& data : blob) {
        data = 5;
    }

@@ -360,11 +368,15 @@ TEST_F(BlobTests, makeRoiBlobNchw) {
    // we create main blob with NCHW layout. We will crop ROI from this blob.
    InferenceEngine::SizeVector dims = {1, 3, 6, 5};  // RGB picture of size (WxH) = 5x6
    InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<uint8_t>(
        InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, dims, InferenceEngine::NCHW));
    blob->allocate();

    // create ROI blob based on the already created blob
    InferenceEngine::ROI roi = {0,
                                2,
                                1,
                                2,
                                4};  // cropped picture with: id = 0, (x,y) = (2,1), sizeX (W) = 2, sizeY (H) = 4
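    // note: InferenceEngine::ROI aggregate fields are {id, posX, posY, sizeX, sizeY}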
    InferenceEngine::Blob::Ptr roiBlob = make_shared_blob(blob, roi);

    // check that BlockingDesc is constructed properly for the ROI blob

@@ -382,11 +394,15 @@ TEST_F(BlobTests, makeRoiBlobNhwc) {
    // we create main blob with NHWC layout. We will crop ROI from this blob.
    InferenceEngine::SizeVector dims = {1, 3, 4, 8};  // RGB picture of size (WxH) = 8x4
    InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<uint8_t>(
        InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, dims, InferenceEngine::NHWC));
    blob->allocate();

    // create ROI blob based on the already created blob
    InferenceEngine::ROI roi = {0,
                                3,
                                2,
                                5,
                                2};  // cropped picture with: id = 0, (x,y) = (3,2), sizeX (W) = 5, sizeY (H) = 2
    InferenceEngine::Blob::Ptr roiBlob = make_shared_blob(blob, roi);

    // check that BlockingDesc is constructed properly for the ROI blob

@@ -404,11 +420,15 @@ TEST_F(BlobTests, makeRoiBlobWrongSize) {
    // we create main blob with NCHW layout. We will crop ROI from this blob.
    InferenceEngine::SizeVector dims = {1, 3, 4, 4};  // RGB picture of size (WxH) = 4x4
    InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<uint8_t>(
        InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, dims, InferenceEngine::NCHW));
    blob->allocate();

    // try to create ROI blob with wrong size
    InferenceEngine::ROI roi = {0,
                                1,
                                1,
                                4,
                                4};  // cropped picture with: id = 0, (x,y) = (1,1), sizeX (W) = 4, sizeY (H) = 4
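    // posX + sizeX = 1 + 4 = 5 exceeds the parent width of 4, so construction must throw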
    ASSERT_THROW(make_shared_blob(blob, roi), InferenceEngine::Exception);
}

@@ -416,13 +436,9 @@ TEST_F(BlobTests, readRoiBlob) {
    // Create original Blob
    const auto origDesc =
        InferenceEngine::TensorDesc(InferenceEngine::Precision::I32, {1, 3, 4, 8}, InferenceEngine::NCHW);
    const auto origBlob = InferenceEngine::make_shared_blob<int32_t>(origDesc);
    origBlob->allocate();

    // Fill the original Blob

@@ -468,14 +484,17 @@ TEST_F(BlobTests, makeRangeRoiBlobNchw) {
    // we create main blob with NCHW layout. We will crop ROI from this blob.
    InferenceEngine::SizeVector dims = {1, 3, 6, 5};  // RGB picture of size (WxH) = 5x6
    InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<uint8_t>(
        InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, dims, InferenceEngine::NCHW));
    blob->allocate();

    // create ROI blob based on the already created blob
    InferenceEngine::ROI roi = {0,
                                2,
                                1,
                                2,
                                4};  // cropped picture with: id = 0, (x,y) = (2,1), sizeX (W) = 2, sizeY (H) = 4
    InferenceEngine::Blob::Ptr roiBlob =
        make_shared_blob(blob, {0, 0, roi.posY, roi.posX}, {1, 3, roi.posY + roi.sizeY, roi.posX + roi.sizeX});
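    // the range overload takes begin/end coordinates in the same {N, C, H, W} order as dims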
    // check that BlockingDesc is constructed properly for the ROI blob
    InferenceEngine::SizeVector refDims = {1, 3, 4, 2};

@@ -492,14 +511,17 @@ TEST_F(BlobTests, makeRangeRoiBlobNhwc) {
    // we create main blob with NHWC layout. We will crop ROI from this blob.
    InferenceEngine::SizeVector dims = {1, 3, 4, 8};  // RGB picture of size (WxH) = 8x4
    InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<uint8_t>(
        InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, dims, InferenceEngine::NHWC));
    blob->allocate();

    // create ROI blob based on the already created blob
    InferenceEngine::ROI roi = {0,
                                3,
                                2,
                                5,
                                2};  // cropped picture with: id = 0, (x,y) = (3,2), sizeX (W) = 5, sizeY (H) = 2
    InferenceEngine::Blob::Ptr roiBlob =
        make_shared_blob(blob, {0, 0, roi.posY, roi.posX}, {1, 3, roi.posY + roi.sizeY, roi.posX + roi.sizeX});

    // check that BlockingDesc is constructed properly for the ROI blob
    InferenceEngine::SizeVector refDims = {1, 2, 5, 3};

@@ -516,27 +538,26 @@ TEST_F(BlobTests, makeRangeRoiBlobWrongSize) {
    // we create main blob with NCHW layout. We will crop ROI from this blob.
    InferenceEngine::SizeVector dims = {1, 3, 4, 4};  // RGB picture of size (WxH) = 4x4
    InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<uint8_t>(
        InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, dims, InferenceEngine::NCHW));
    blob->allocate();

    // try to create ROI blob with wrong size
    InferenceEngine::ROI roi = {0,
                                1,
                                1,
                                4,
                                4};  // cropped picture with: id = 0, (x,y) = (1,1), sizeX (W) = 4, sizeY (H) = 4
    ASSERT_THROW(make_shared_blob(blob, {0, 0, roi.posY, roi.posX}, {1, 3, roi.posY + roi.sizeY, roi.posX + roi.sizeX}),
                 InferenceEngine::Exception);
}

TEST_F(BlobTests, readRangeRoiBlob) {
    // Create original Blob
    const auto origDesc =
        InferenceEngine::TensorDesc(InferenceEngine::Precision::I32, {1, 3, 4, 8}, InferenceEngine::NCHW);
    const auto origBlob = InferenceEngine::make_shared_blob<int32_t>(origDesc);
    origBlob->allocate();

    // Fill the original Blob

@@ -555,9 +576,8 @@ TEST_F(BlobTests, readRangeRoiBlob) {
    const auto roi = InferenceEngine::ROI(0, 4, 2, 4, 2);
    const auto roiBlob = InferenceEngine::as<InferenceEngine::MemoryBlob>(
        origBlob->createROI({0, 0, roi.posY, roi.posX}, {1, 3, roi.posY + roi.sizeY, roi.posX + roi.sizeX}));
    ASSERT_NE(nullptr, roiBlob);

    // Read ROI Blob
======== next file ========
@@ -2,22 +2,22 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <chrono>
#include <fstream>
#include <string>
#include <thread>

#include "common_test_utils/test_constants.hpp"
#include "compilation_context.hpp"
#include "cpp/ie_cnn_network.h"
#include "ngraph/function.hpp"
#include "ngraph/ops.hpp"
#include "ngraph/opsets/opset6.hpp"
#include "ngraph/variant.hpp"
#include "transformations/rt_info/fused_names_attribute.hpp"
#include "transformations/rt_info/primitives_priority_attribute.hpp"

using namespace InferenceEngine;
using namespace ngraph;

@@ -40,9 +40,12 @@ static std::string generateTestFilePrefix() {
class FileGuard {
    std::string m_fileName;

public:
    explicit FileGuard(std::string name) : m_fileName(std::move(name)) {}
    ~FileGuard() {
        std::remove(m_fileName.c_str());
    }
};

class NetworkContext_CalcFileInfoTests : public Test {

@@ -98,9 +101,8 @@ TEST_F(NetworkContext_CalcFileInfoTests, ExistingDiffFiles) {

TEST_F(NetworkContext_CalcFileInfoTests, ExistingFile_sameAbsPath) {
    std::string file1 = m_fileName;
    std::string file2 = std::string(".") + CommonTestUtils::FileSeparator + m_fileName;
    ASSERT_EQ(NetworkCompilationContext::calculateFileInfo(file1), NetworkCompilationContext::calculateFileInfo(file2))
        << "Hash of [" << file1 << "] is not equal to hash of [" << file2 << "]";
}

TEST_F(NetworkContext_CalcFileInfoTests, DateModified) {

@@ -172,35 +174,29 @@ static void checkCustomRt(const std::function<void(Node::RTMap&)>& emptyCb,
                          const std::function<void(Node::RTMap&, const std::string& name)>& nameCb) {
    auto net1 = createNetwork();
    auto net2 = createNetwork();
    auto& op1 = net1.getFunction()->get_ops().front()->get_rt_info();
    auto& op2 = net2.getFunction()->get_ops().front()->get_rt_info();

    emptyCb(op2);
    ASSERT_NE(NetworkCompilationContext::computeHash(net1, {}), NetworkCompilationContext::computeHash(net2, {}));

    emptyCb(op1);
    ASSERT_EQ(NetworkCompilationContext::computeHash(net1, {}), NetworkCompilationContext::computeHash(net2, {}));

    nameCb(op1, "test");
    ASSERT_NE(NetworkCompilationContext::computeHash(net1, {}), NetworkCompilationContext::computeHash(net2, {}));

    nameCb(op2, "test");
    ASSERT_EQ(NetworkCompilationContext::computeHash(net1, {}), NetworkCompilationContext::computeHash(net2, {}));

    nameCb(op1, "test2");
    ASSERT_NE(NetworkCompilationContext::computeHash(net1, {}), NetworkCompilationContext::computeHash(net2, {}));
}

TEST(NetworkContext_CNNNetwork, HashOfSame) {
    auto net1 = createNetwork();
    auto net2 = createNetwork();
    ASSERT_EQ(NetworkCompilationContext::computeHash(net1, {}), NetworkCompilationContext::computeHash(net2, {}));
}
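
// For context, a minimal sketch of how such a hash is typically consumed
// (illustrative only; the real model-cache wiring in the plugins is more involved):
//
//     std::string key = NetworkCompilationContext::computeHash(network, config);
//     // `key` then identifies the cached compiled model on disk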

TEST(NetworkContext_CNNNetwork, HashWithConfig) {

@@ -216,17 +212,15 @@ TEST(NetworkContext_CNNNetwork, HashWithPrimitivesPriority) {
    auto net1 = createNetwork();
    auto net2 = createNetwork();
    auto net3 = createNetwork();
    auto& op2 = net2.getFunction()->get_ops().front()->get_rt_info();
    op2[ov::PrimitivesPriority::get_type_info_static()] = ov::PrimitivesPriority("testPriority");

    auto& op3 = net3.getFunction()->get_ops().front()->get_rt_info();
    op3["PrimitivesPriority"] = "testPriority";

    ASSERT_NE(NetworkCompilationContext::computeHash(net1, {}), NetworkCompilationContext::computeHash(net2, {}));
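    // the typed rt_info attribute (net2) and its legacy string form (net3) must hash identically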
    ASSERT_EQ(NetworkCompilationContext::computeHash(net2, {}), NetworkCompilationContext::computeHash(net3, {}));
}

TEST(NetworkContext_CNNNetwork, HashWithFusedNames) {

@@ -253,17 +247,15 @@ TEST(NetworkContext_CNNNetwork, HashWithAffinity) {
    auto net1 = createNetwork();
    auto net2 = createNetwork();
    auto net3 = createNetwork();
    auto& op2 = net2.getFunction()->get_ops().front()->get_rt_info();
    op2["affinity"] = "testAffinity";

    auto& op3 = net3.getFunction()->get_ops().front()->get_rt_info();
    op3["affinity"] = "testAffinity";

    ASSERT_NE(NetworkCompilationContext::computeHash(net1, {}), NetworkCompilationContext::computeHash(net2, {}));

    ASSERT_EQ(NetworkCompilationContext::computeHash(net2, {}), NetworkCompilationContext::computeHash(net3, {}));
}

TEST(NetworkContext_CNNNetwork, HashWithFutureRt_string) {

@@ -271,20 +263,18 @@ TEST(NetworkContext_CNNNetwork, HashWithFutureRt_string) {
    auto net2 = createNetwork();
    auto net3 = createNetwork();

    auto& op1 = net1.getFunction()->get_ops().front()->get_rt_info();
    op1["someFutureKey"] = "hello";

    auto& op2 = net2.getFunction()->get_ops().front()->get_rt_info();
    op2["someFutureKey"] = "hello";

    auto& op3 = net3.getFunction()->get_ops().front()->get_rt_info();
    op3["someFutureKey"] = "olleh";

    ASSERT_EQ(NetworkCompilationContext::computeHash(net1, {}), NetworkCompilationContext::computeHash(net2, {}));

    ASSERT_NE(NetworkCompilationContext::computeHash(net2, {}), NetworkCompilationContext::computeHash(net3, {}));
}

TEST(NetworkContext_CNNNetwork, HashWithFutureRt_int64) {

@@ -292,20 +282,18 @@ TEST(NetworkContext_CNNNetwork, HashWithFutureRt_int64) {
    auto net2 = createNetwork();
    auto net3 = createNetwork();

    auto& op1 = net1.getFunction()->get_ops().front()->get_rt_info();
    op1["someFutureKey"] = int64_t(42);

    auto& op2 = net2.getFunction()->get_ops().front()->get_rt_info();
    op2["someFutureKey"] = int64_t(42);

    auto& op3 = net3.getFunction()->get_ops().front()->get_rt_info();
    op3["someFutureKey"] = int64_t(43);

    ASSERT_EQ(NetworkCompilationContext::computeHash(net1, {}), NetworkCompilationContext::computeHash(net2, {}));

    ASSERT_NE(NetworkCompilationContext::computeHash(net2, {}), NetworkCompilationContext::computeHash(net3, {}));
}

TEST(NetworkContext_CNNNetwork, HashWithLayout) {

@@ -321,20 +309,15 @@ TEST(NetworkContext_CNNNetwork, HashWithLayout) {
    fun5->get_results()[0]->set_layout(ov::Layout());
    auto net5 = CNNNetwork(fun5);

    EXPECT_EQ(NetworkCompilationContext::computeHash(net1, {}), NetworkCompilationContext::computeHash(net2, {}));
    EXPECT_NE(NetworkCompilationContext::computeHash(net2, {}), NetworkCompilationContext::computeHash(net3, {}));
    EXPECT_NE(NetworkCompilationContext::computeHash(net3, {}), NetworkCompilationContext::computeHash(net3_1, {}));
    EXPECT_NE(NetworkCompilationContext::computeHash(net3, {}), NetworkCompilationContext::computeHash(net4, {}));
    EXPECT_EQ(NetworkCompilationContext::computeHash(net4, {}), NetworkCompilationContext::computeHash(net5, {}));
}

TEST(NetworkContext_CNNNetwork, HashWithTensorNames) {

@@ -361,11 +344,9 @@ TEST(NetworkContext_CNNNetwork, HashWithTensorNames) {
    auto net2 = CNNNetwork(fun2);
    auto net3 = CNNNetwork(fun3);

    ASSERT_EQ(NetworkCompilationContext::computeHash(net1, {}), NetworkCompilationContext::computeHash(net2, {}));

    ASSERT_NE(NetworkCompilationContext::computeHash(net2, {}), NetworkCompilationContext::computeHash(net3, {}));
}

TEST(NetworkContext_CNNNetwork, HashWithDifferentResults) {

@@ -374,15 +355,13 @@ TEST(NetworkContext_CNNNetwork, HashWithDifferentResults) {
    net2.getFunction()->remove_result(net2.getFunction()->get_results().front());
    auto net3 = createNetwork();
    net3.getFunction()->remove_result(net3.getFunction()->get_results().front());
    ASSERT_NE(NetworkCompilationContext::computeHash(net1, {}), NetworkCompilationContext::computeHash(net2, {}));
    ASSERT_EQ(NetworkCompilationContext::computeHash(net2, {}), NetworkCompilationContext::computeHash(net3, {}));
}

TEST(NetworkContext_CNNNetwork, HashWithDifferentMeanValues) {
    auto updatePreprocess = [&](CNNNetwork& cnnNet) {
        auto& preProcess = cnnNet.getInputsInfo().begin()->second->getPreProcess();
        preProcess.init(3);
        preProcess[0]->stdScale = 2;
        preProcess[1]->stdScale = 3;

@@ -397,10 +376,8 @@ TEST(NetworkContext_CNNNetwork, HashWithDifferentMeanValues) {
    updatePreprocess(net2);
    auto net3 = createNetwork();
    updatePreprocess(net3);
    ASSERT_NE(NetworkCompilationContext::computeHash(net1, {}), NetworkCompilationContext::computeHash(net2, {}));
    ASSERT_EQ(NetworkCompilationContext::computeHash(net2, {}), NetworkCompilationContext::computeHash(net3, {}));
}

// Verify all internal hash calculations are thread-safe (like ngraph::function serialization)

@@ -455,11 +432,9 @@ TEST(NetworkContext_ModelName, HashOfExistingFile) {
        std::ofstream os(file1);
        os << "test";
    }
    ASSERT_EQ(NetworkCompilationContext::computeHash(file1, {}), NetworkCompilationContext::computeHash(file1, {}));

    ASSERT_EQ(NetworkCompilationContext::computeHash(file1, {}), NetworkCompilationContext::computeHash(file2, {}));

    ASSERT_NE(NetworkCompilationContext::computeHash(file1, {{"key", "value"}}),
              NetworkCompilationContext::computeHash(file2, {}));
======== next file ========
@@ -2,10 +2,11 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>
#include <ie_compound_blob.h>

#include <chrono>
#include <random>

using namespace ::testing;
using namespace std;

@@ -138,8 +139,7 @@ TEST(BlobConversionTests, blobSharesOwnershipOnCast) {

TEST_F(CompoundBlobTests, cannotCreateCompoundBlobFromNullptr) {
    Blob::Ptr valid = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW));
    EXPECT_THROW(make_shared_blob<CompoundBlob>(std::vector<Blob::Ptr>({valid, nullptr})), InferenceEngine::Exception);
}

TEST_F(CompoundBlobTests, canCreateEmptyCompoundBlob) {

@@ -174,7 +174,7 @@ TEST_F(CompoundBlobTests, cannotCreateCompoundBlobFromCompoundBlob) {
    verifyCompoundBlob(_test_blob);
    EXPECT_THROW(make_shared_blob<CompoundBlob>(std::vector<Blob::Ptr>({blob, _test_blob})),
                 InferenceEngine::Exception);
}

TEST_F(CompoundBlobTests, compoundBlobHoldsCorrectDataInCorrectOrder) {

@@ -202,7 +202,7 @@ TEST_F(CompoundBlobTests, compoundBlobHoldsCorrectDataInCorrectOrder) {
        MemoryBlob::Ptr mb = as<MemoryBlob>(blob);
        ASSERT_NE(nullptr, mb);
        auto lm = mb->rwmap();
        EXPECT_EQ(static_cast<uint8_t>(i + MAGIC_NUMBER), lm.as<uint8_t*>()[0]);
    }
}

@@ -220,9 +220,9 @@ TEST_F(CompoundBlobTests, compoundBlobHoldsReferencesToBlobs) {
    CompoundBlob::Ptr compound_blob = as<CompoundBlob>(_test_blob);
    Blob::Ptr b0 = compound_blob->getBlob(0);
    MemoryBlob::CPtr mb0 = as<MemoryBlob>(b0);
    EXPECT_EQ(12, mb0->rmap().as<const uint8_t*>()[0]);
    blob->rwmap().as<uint8_t*>()[0] = 34;
    EXPECT_EQ(34, mb0->rmap().as<const uint8_t*>()[0]);
}

TEST_F(CompoundBlobTests, compoundBlobHoldsValidDataWhenUnderlyingBlobIsDestroyed) {

@@ -242,38 +242,32 @@ TEST_F(CompoundBlobTests, compoundBlobHoldsValidDataWhenUnderlyingBlobIsDestroyed) {
    ASSERT_NE(nullptr, compound_blob->getBlob(0));
    MemoryBlob::CPtr mb0 = as<MemoryBlob>(compound_blob->getBlob(0));
    ASSERT_NE(nullptr, mb0);
    EXPECT_EQ(stored_value, mb0->rmap().as<const uint8_t*>()[0]);
}

TEST_F(NV12BlobTests, cannotCreateNV12BlobFromNullptrBlobs) {
    Blob::Ptr valid = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 4, 4}, NHWC));
    EXPECT_THROW(make_shared_blob<NV12Blob>(valid, nullptr), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<NV12Blob>(nullptr, valid), InferenceEngine::Exception);
}

TEST_F(NV12BlobTests, cannotCreateNV12BlobFromCompoundBlobs) {
    Blob::Ptr blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 4, 4}, NHWC));
    auto cblob = make_shared_blob<CompoundBlob>(std::vector<Blob::Ptr>({blob}));
    EXPECT_THROW(make_shared_blob<NV12Blob>(cblob, blob), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<NV12Blob>(blob, cblob), InferenceEngine::Exception);
}

TEST_F(NV12BlobTests, cannotCreateNV12BlobFromPlanesWithDifferentElementSize) {
    Blob::Ptr blob_u8 = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 4, 4}, NHWC));
    Blob::Ptr blob_float = make_shared_blob<float>(TensorDesc(Precision::FP32, {1, 2, 2, 2}, NHWC));
    EXPECT_THROW(make_shared_blob<NV12Blob>(blob_u8, blob_float), InferenceEngine::Exception);
}

TEST_F(NV12BlobTests, cannotCreateNV12BlobFromPlanesWithNonU8Precision) {
    Blob::Ptr float_y_blob = make_shared_blob<float>(TensorDesc(Precision::FP32, {1, 1, 4, 4}, NHWC));
    Blob::Ptr float_uv_blob = make_shared_blob<float>(TensorDesc(Precision::FP32, {1, 2, 2, 2}, NHWC));
    EXPECT_THROW(make_shared_blob<NV12Blob>(float_y_blob, float_uv_blob), InferenceEngine::Exception);
}

TEST_F(NV12BlobTests, cannotCreateNV12BlobFromPlanesWithInconsistentBatchSize) {

@@ -324,9 +318,9 @@ TEST_F(NV12BlobTests, canCreateNV12BlobFromTwoPlanes) {
}

TEST_F(NV12BlobTests, canCreateNV12BlobFromTwoMovedPlanes) {
    NV12Blob::Ptr nv12_blob =
        make_shared_blob<NV12Blob>(make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 6, 8}, NHWC)),
                                   make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 2, 3, 4}, NHWC)));
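    // NV12: the UV plane carries two interleaved channels at half the Y plane's
    // spatial resolution (Y is 8x6 WxH, UV is 4x3)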
    verifyCompoundBlob(nv12_blob);
}

@@ -342,10 +336,10 @@ TEST_F(I420BlobTests, canCreateI420BlobFromThreePlanes) {
}

TEST_F(I420BlobTests, canCreateI420BlobFromThreeMovedPlanes) {
    I420Blob::Ptr i420_blob =
        make_shared_blob<I420Blob>(make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 6, 8}, NHWC)),
                                   make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 4}, NHWC)),
                                   make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 4}, NHWC)));
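    // I420: U and V are separate single-channel planes, each half the Y resolution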
    verifyCompoundBlob(i420_blob);
}

@@ -360,7 +354,7 @@ TEST_F(I420BlobTests, cannotCreateI420BlobFromCompoundBlobs) {
    Blob::Ptr u_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 4}, NHWC));
    Blob::Ptr v_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 3, 4}, NHWC));

    auto make_cblob = [](Blob::Ptr const& b) {
        return make_shared_blob<CompoundBlob>(std::vector<Blob::Ptr>({b}));
    };

@@ -369,13 +363,13 @@ TEST_F(I420BlobTests, cannotCreateI420BlobFromCompoundBlobs) {
    auto c_v_blob = make_cblob(v_blob);
    using ie_exception_t = InferenceEngine::Exception;

    EXPECT_THROW(make_shared_blob<I420Blob>(c_y_blob, u_blob, v_blob), ie_exception_t);
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob, c_u_blob, v_blob), ie_exception_t);
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob, u_blob, c_v_blob), ie_exception_t);
}

TEST_F(I420BlobTests, cannotCreateI420BlobFromPlanesWithDifferentElementSize) {
    Blob::Ptr y_blob_u8 = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1, 4, 4}, NHWC));
    Blob::Ptr u_blob_float = make_shared_blob<float>(TensorDesc(Precision::FP32, {1, 1, 2, 2}, NHWC));
    Blob::Ptr v_blob_float = make_shared_blob<float>(TensorDesc(Precision::FP32, {1, 1, 2, 2}, NHWC));

@@ -423,5 +417,3 @@ TEST_F(I420BlobTests, cannotCreateI420BlobFromPlanesWithWrongHeightRatio) {
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob, u_blob, v_blob), InferenceEngine::Exception);
    EXPECT_THROW(make_shared_blob<I420Blob>(y_blob, v_blob, u_blob), InferenceEngine::Exception);
}
======== next file ========
@@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <string>

#include "ie_common.h"

@@ -25,7 +26,6 @@ TEST(ExceptionTests, CanDefineExceptionContent) {
    ASSERT_STREQ(exception.what(), "");
}

#ifndef NDEBUG
TEST(ExceptionTests, ExceptionShowsCorrectMessageDebugVersion) {
    std::string message = "exception";

@@ -33,9 +33,8 @@ TEST(ExceptionTests, ExceptionShowsCorrectMessageDebugVersion) {
    try {
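        // in debug builds Exception::what() is prefixed with "\n<file>:<line> ",
        // so capture the line number of the IE_THROW() below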
        lineNum = __LINE__ + 1;
        IE_THROW() << message;
    } catch (InferenceEngine::Exception& iex) {
        std::string ref_message = std::string{"\n"} + __FILE__ + ":" + std::to_string(lineNum) + " " + message;
        ASSERT_STREQ(iex.what(), ref_message.c_str());
    }
}

@@ -44,8 +43,7 @@ TEST(ExceptionTests, ExceptionShowsCorrectMessageReleaseVersion) {
    std::string message = "exception";
    try {
        IE_THROW() << message;
    } catch (InferenceEngine::Exception& iex) {
        std::string ref_message = message;
        ASSERT_STREQ(iex.what(), ref_message.c_str());
    }

@@ -56,7 +54,7 @@ TEST(ExceptionTests, ExceptionCanBeCaughtAsStandard) {
    ASSERT_THROW(IE_THROW(), std::exception);
}

#ifdef NDEBUG  // disabled for debug as the macro calls assert()
TEST(ExceptionTests, ExceptionWithAssertThrowsNothingIfTrue) {
    ASSERT_NO_THROW(IE_ASSERT(true) << "shouldn't assert if true");
}
======== next file ========
@@ -2,28 +2,29 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "cpp/ie_executable_network.hpp"

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <memory>
#include <vector>

#include "cpp/ie_executable_network_base.hpp"
#include "cpp/ie_plugin.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_ivariable_state_internal.hpp"
#include "unit_test_utils/mocks/mock_iexecutable_network.hpp"
#include "unit_test_utils/mocks/mock_iinfer_request.hpp"

using testing::_;
using testing::MatcherCast;
using testing::Ref;
using testing::Return;
using testing::SetArgReferee;
using testing::Throw;

// TODO: add tests for the next methods:
// 1. void Export(const std::string& modelFileName)

@@ -34,13 +35,12 @@ using testing::SetArgReferee;
// 7. Parameter GetMetric(const std::string& name) const
// 8. RemoteContext::Ptr GetContext()

class ExecutableNetworkTests : public ::testing::Test {
protected:
    std::shared_ptr<MockIExecutableNetworkInternal> mockIExeNet;
    ov::SoPtr<IExecutableNetworkInternal> exeNetwork;
    MockIInferencePlugin* mockIPlugin;
    InferencePlugin plugin;

    void TearDown() override {
        mockIExeNet.reset();

@@ -58,37 +58,36 @@ protected:
};
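
// Each test below sets an expectation on the mocked internal object and checks
// that the public wrapper surfaces the returned value (or the thrown exception)
// unchanged.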
TEST_F(ExecutableNetworkTests, GetOutputsInfoThrowsIfReturnErr) { TEST_F(ExecutableNetworkTests, GetOutputsInfoThrowsIfReturnErr) {
EXPECT_CALL(*mockIExeNet.get(), GetOutputsInfo()) EXPECT_CALL(*mockIExeNet.get(), GetOutputsInfo()).Times(1).WillOnce(Throw(InferenceEngine::GeneralError{""}));
.Times(1)
.WillOnce(Throw(InferenceEngine::GeneralError{""}));
ASSERT_THROW(exeNetwork->GetOutputsInfo(), InferenceEngine::Exception); ASSERT_THROW(exeNetwork->GetOutputsInfo(), InferenceEngine::Exception);
} }
TEST_F(ExecutableNetworkTests, GetOutputsInfo) { TEST_F(ExecutableNetworkTests, GetOutputsInfo) {
InferenceEngine::ConstOutputsDataMap data; InferenceEngine::ConstOutputsDataMap data;
EXPECT_CALL(*mockIExeNet.get(), GetOutputsInfo()).Times(1).WillRepeatedly(Return(InferenceEngine::ConstOutputsDataMap{})); EXPECT_CALL(*mockIExeNet.get(), GetOutputsInfo())
.Times(1)
.WillRepeatedly(Return(InferenceEngine::ConstOutputsDataMap{}));
ASSERT_NO_THROW(data = exeNetwork->GetOutputsInfo()); ASSERT_NO_THROW(data = exeNetwork->GetOutputsInfo());
ASSERT_EQ(data, InferenceEngine::ConstOutputsDataMap{}); ASSERT_EQ(data, InferenceEngine::ConstOutputsDataMap{});
} }
TEST_F(ExecutableNetworkTests, GetInputsInfoThrowsIfReturnErr) {
    EXPECT_CALL(*mockIExeNet.get(), GetInputsInfo()).Times(1).WillOnce(Throw(InferenceEngine::GeneralError{""}));
    ASSERT_THROW(exeNetwork->GetInputsInfo(), InferenceEngine::Exception);
}
TEST_F(ExecutableNetworkTests, GetInputsInfo) {
    EXPECT_CALL(*mockIExeNet.get(), GetInputsInfo())
        .Times(1)
        .WillRepeatedly(Return(InferenceEngine::ConstInputsDataMap{}));
    InferenceEngine::ConstInputsDataMap info;
    ASSERT_NO_THROW(info = exeNetwork->GetInputsInfo());
    ASSERT_EQ(info, InferenceEngine::ConstInputsDataMap{});
}
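// A hedged sketch for TODO item 1 above, not part of this commit: it assumes
// MockIExecutableNetworkInternal also mocks the Export(std::ostream&) overload
// and follows the same EXPECT_CALL style clang-format enforces in this file.
// TEST_F(ExecutableNetworkTests, ExportThrowsIfReturnErr) {
//     std::stringstream model;
//     EXPECT_CALL(*mockIExeNet.get(), Export(MatcherCast<std::ostream&>(_)))
//         .Times(1)
//         .WillOnce(Throw(InferenceEngine::GeneralError{""}));
//     ASSERT_THROW(exeNetwork->Export(model), InferenceEngine::Exception);
// }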
class ExecutableNetworkWithIInferReqTests : public ExecutableNetworkTests {
protected:
    std::shared_ptr<MockIInferRequestInternal> mockIInferReq_p;
@@ -119,9 +118,7 @@ TEST_F(ExecutableNetworkWithIInferReqTests, QueryStateThrowsIfReturnErr) {
    EXPECT_CALL(*mockIExeNet.get(), CreateInferRequest()).WillOnce(Return(mockIInferReq_p));
    IInferRequestInternal::Ptr actualInferReq;
    ASSERT_NO_THROW(actualInferReq = exeNetwork->CreateInferRequest());
    EXPECT_CALL(*mockIInferReq_p.get(), QueryState()).Times(1).WillOnce(Throw(InferenceEngine::GeneralError{""}));
    EXPECT_THROW(actualInferReq->QueryState(), InferenceEngine::Exception);
}
@@ -131,8 +128,8 @@ TEST_F(ExecutableNetworkWithIInferReqTests, QueryState) {
    ASSERT_NO_THROW(actualInferReq = exeNetwork->CreateInferRequest());
    auto mockIMemState_p = std::make_shared<MockIVariableStateInternal>();
    EXPECT_CALL(*mockIInferReq_p.get(), QueryState())
        .Times(1)
        .WillOnce(Return(std::vector<std::shared_ptr<InferenceEngine::IVariableStateInternal>>(1, mockIMemState_p)));
    std::vector<InferenceEngine::IVariableStateInternal::Ptr> MemState_v;
    EXPECT_NO_THROW(MemState_v = actualInferReq->QueryState());
    EXPECT_EQ(MemState_v.size(), 1);
View File
@@ -3,6 +3,7 @@
//

#include <gtest/gtest.h>

#include <threading/ie_executor_manager.hpp>

using namespace ::testing;
View File
@@ -2,17 +2,16 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <file_utils.h>
#include <gtest/gtest.h>
#include <ie_extension.h>

#include <memory>
#include <ngraph/opsets/opset.hpp>
#include <string>

#include "common_test_utils/file_utils.hpp"
#include "common_test_utils/test_common.hpp"

using namespace InferenceEngine;
@@ -20,7 +19,7 @@ using ExtensionTests = ::testing::Test;
std::string getExtensionPath() {
    return FileUtils::makePluginLibraryName<char>(CommonTestUtils::getExecutableDirectory(),
                                                  std::string("template_extension") + IE_BUILD_POSTFIX);
}
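// Note: on Linux this typically resolves to "<executable_dir>/libtemplate_extension<IE_BUILD_POSTFIX>.so"
// (an assumption about makePluginLibraryName's platform-specific prefix and suffix).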
#ifndef OPENVINO_STATIC_LIBRARY
@@ -41,8 +40,7 @@ TEST(ExtensionTests, testGetImplTypes) {
TEST(ExtensionTests, testGetImplTypesThrowsIfNgraphNodeIsNullPtr) {
    IExtensionPtr extension = std::make_shared<Extension>(getExtensionPath());
    ASSERT_THROW(extension->getImplTypes(std::shared_ptr<ngraph::Node>()), InferenceEngine::Exception);
}
TEST(ExtensionTests, testGetImplementation) {
@@ -54,8 +52,7 @@ TEST(ExtensionTests, testGetImplementation) {
TEST(ExtensionTests, testGetImplementationThrowsIfNgraphNodeIsNullPtr) {
    IExtensionPtr extension = std::make_shared<Extension>(getExtensionPath());
    ASSERT_THROW(extension->getImplementation(std::shared_ptr<ngraph::Node>(), ""), InferenceEngine::Exception);
}
#endif  // OPENVINO_STATIC_LIBRARY
View File
@@ -3,6 +3,7 @@
//

#include <ie_locked_memory.hpp>

#include "unit_test_utils/mocks/mock_allocator.hpp"

using namespace InferenceEngine;
@@ -12,11 +13,12 @@ TEST(LockedMemoryTest, canUnlockMemoryAfterUsage) {
    std::unique_ptr<MockAllocator> allocator(new MockAllocator());
    char array[] = {1, 2, 3};
    EXPECT_CALL(*allocator.get(), lock(reinterpret_cast<void*>(1), _))
        .WillRepeatedly(Return(reinterpret_cast<void*>(array)));
    EXPECT_CALL(*allocator.get(), unlock(_)).Times(1);
    {
        auto x = LockedMemory<char>(allocator.get(), reinterpret_cast<void*>(1), 1);
        // force locking of memory
        auto t = x[0];
        (void)t;
    }
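    // The scope exit above is what triggers the expected unlock(): LockedMemory
    // locks lazily on first element access and unlocks in its destructor (an
    // assumption about its RAII contract that this test relies on).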
@@ -26,11 +28,12 @@ TEST(LockedMemoryTest, canReadFromLockedMemory) {
    std::unique_ptr<MockAllocator> allocator(new MockAllocator());
    char array[] = {1, 2, 3, 4, 5};
    EXPECT_CALL(*allocator.get(), lock(reinterpret_cast<void*>(1), _))
        .WillRepeatedly(Return(reinterpret_cast<void*>(array)));
    EXPECT_CALL(*allocator.get(), unlock(_)).Times(1);
    {
        auto x = LockedMemory<char>(allocator.get(), reinterpret_cast<void*>(1), 0);
        // we are getting first element
        ASSERT_EQ(1, x[0]);
    }
}
@@ -39,12 +42,13 @@ TEST(LockedMemoryTest, canWriteToLockedMemory) {
    std::unique_ptr<MockAllocator> allocator(new MockAllocator());
    char array[] = {1, 2, 3, 4, 5};
    EXPECT_CALL(*allocator.get(), lock(reinterpret_cast<void*>(1), _))
        .WillRepeatedly(Return(reinterpret_cast<void*>(array)));
    EXPECT_CALL(*allocator.get(), unlock(_)).Times(1);
    {
        auto x = LockedMemory<char>(allocator.get(), reinterpret_cast<void*>(1), 0);
        // we are getting first element
        ASSERT_EQ(std::distance(array, &x[0]), 0);
        x[0] = 5;
    }
View File
@@ -2,49 +2,54 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "memory_solver.hpp"

#include <gtest/gtest.h>
#include <ie_common.h>

#include <vector>

using Box = MemorySolver::Box;
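// Box is an aggregate of {start, finish, size[, id]} - an assumption read off
// the initializers below: a tensor alive from execution index `start` through
// `finish` (-1 meaning "until the end"), occupying `size` memory units, and
// addressed via `id` in getOffset().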
TEST(MemSolverTest, CanConstruct) {
    {  // Empty vector<Box>
        MemorySolver ms(std::vector<Box>{});
    }

    {  // vector with default Box
        MemorySolver ms(std::vector<Box>{{}});
    }

    {  // vector with Box with non-default Box
        MemorySolver ms(std::vector<Box>{{1, 3, 3}});
    }

    {  // vector with Box with size == 0
        MemorySolver ms(std::vector<Box>{{0, 0, 0}});
    }

    {  // vector with Box with finish == -1
        MemorySolver ms(std::vector<Box>{{3, -1, 6}});
    }

    // TODO: enable after implementing the TODO from memory_solver.hpp#L66
    // { // vector with Box with negative values
    //     MemorySolver ms(std::vector<Box> {{-5, -5, -5, -5}});
    // }
}
//  |
//  |      ____  ____
//  |   __|____||____|
//  |__|____||____|_____
//      0  1  2  3  4
TEST(MemSolverTest, GetOffset) {
    int n = 0;
    std::vector<Box> boxes{
        {n, ++n, 2, 0},
        {n, ++n, 2, 1},
        {n, ++n, 2, 2},
        {n, ++n, 2, 3},
    };

    MemorySolver ms(boxes);
@@ -56,13 +61,18 @@ TEST(MemSolverTest, GetOffset) {
    EXPECT_EQ(ms.getOffset(2) + ms.getOffset(3), 2);
}
//  |
//  |      ____  ____
//  |   __|____||____|
//  |__|____||____|_____
//      0  1  2  3  4
TEST(MemSolverTest, GetOffsetThrowException) {
    int n = 0, id = 0;
    std::vector<Box> boxes{
        {n, ++n, 2, id++},
        {n, ++n, 2, id++},
        {n, ++n, 2, id++},
        {n, ++n, 2, id++},
    };

    MemorySolver ms(boxes);
@@ -71,13 +81,18 @@ TEST(MemSolverTest, GetOffsetThrowException) {
    EXPECT_THROW(ms.getOffset(100), InferenceEngine::Exception);
}
//  |
//  |      ____
//  |   __|____|__
//  |__|____||____|__
//      0  1  2  3
TEST(MemSolverTest, LinearAndEven) {
    int n = 0;
    std::vector<Box> boxes{
        {n, ++n, 2},
        {n, ++n, 2},
        {n, ++n, 2},
    };

    MemorySolver ms(boxes);
    EXPECT_EQ(ms.solve(), 4);
@@ -85,13 +100,18 @@ TEST(MemSolverTest, LinearAndEven) {
    EXPECT_EQ(ms.maxTopDepth(), 2);
}
//  |         ____
//  |        |____|__
//  |   ____ |      |
//  |__|____||____|__
//      0  1  2  3
TEST(MemSolverTest, LinearAndNotEven) {
    int n = 0;
    std::vector<Box> boxes{
        {n, ++n, 2},
        {n, ++n, 2},
        {n, ++n, 3},
    };

    MemorySolver ms(boxes);
    EXPECT_EQ(ms.solve(), 5);
@@ -99,14 +119,18 @@ TEST(MemSolverTest, LinearAndNotEven) {
    EXPECT_EQ(ms.maxTopDepth(), 2);
}
//  |            _______
//  |           |_______|_____
//  |   _______ |            |
//  |__|_______|___|_______|__
//      2  3  4  5  6  7  8
TEST(MemSolverTest, LinearWithEmptyExecIndexes) {
    int n = 2;
    std::vector<Box> boxes{
        {n, n += 2, 2},
        {n, n += 2, 2},
        {n, n += 2, 3},
    };

    MemorySolver ms(boxes);
    EXPECT_EQ(ms.solve(), 5);
@@ -114,12 +138,17 @@ TEST(MemSolverTest, LinearWithEmptyExecIndexes) {
    EXPECT_EQ(ms.maxTopDepth(), 2);
}
//  |           __________
//  |   ____   |_3________|
//  |  |_4__|_____   |    |
//  |__|_2________||_1__|___
//      2  3  4  5  6  7  8
TEST(MemSolverTest, DISABLED_Unefficiency) {
    std::vector<Box> boxes{
        {6, 7, 3},
        {2, 5, 2},
        {5, 8, 2},
        {2, 3, 2},
    };

    MemorySolver ms(boxes);
@@ -128,12 +157,17 @@ TEST(MemSolverTest, DISABLED_Unefficiency) {
    EXPECT_EQ(ms.maxTopDepth(), 2);
}
//  |           __________
//  |   ____   |_3________|
//  |  |_4__|_____   |    |
//  |__|_2________||_1__|___
//      2  3  4  5  6  7  8
TEST(MemSolverTest, OverlappingBoxes) {
    std::vector<Box> boxes{
        {6, 7, 4},
        {2, 5, 3},
        {5, 8, 2},
        {2, 3, 2},
    };

    MemorySolver ms(boxes);
@@ -142,13 +176,19 @@ TEST(MemSolverTest, OverlappingBoxes) {
    EXPECT_EQ(ms.maxTopDepth(), 2);
}
//  |      ____
//  |     |____|  ____
//  |            |____|__
//  |   ____     |_______|
//  |__|____|___|_|_________
//      0  1  2  3  4  5  6
TEST(MemSolverTest, EndOnSeveralBegins) {
    std::vector<Box> boxes{
        {0, 1, 2},
        {1, 2, 2},
        {3, 3, 2},
        {3, 5, 2},
        {3, 4, 2},
    };

    MemorySolver ms(boxes);
@@ -157,13 +197,19 @@ TEST(MemSolverTest, EndOnSeveralBegins) {
    EXPECT_EQ(ms.maxTopDepth(), 3);
}
//  |      _____________
//  |     |_____________>>
//  |            |____|__
//  |   ____     |_______>>
//  |__|____|___|_|_________
//      0  1  2  3  4  5  6
TEST(MemSolverTest, ToEndBoxes) {
    std::vector<Box> boxes{
        {0, 1, 2},
        {1, -1, 2},
        {3, 3, 2},
        {3, -1, 2},
        {3, 4, 2},
    };

    MemorySolver ms(boxes);
@@ -172,13 +218,19 @@ TEST(MemSolverTest, ToEndBoxes) {
    EXPECT_EQ(ms.maxTopDepth(), 4);
}
//  |                    _
//  |      ____         |_>>
//  |            |____|__
//  |   ____     |_______|
//  |__|____|___|_|_________
//      0  1  2  3  4  5  6
TEST(MemSolverTest, LastAndToEndBox) {
    std::vector<Box> boxes{
        {0, 1, 2},
        {6, -1, 2},
        {3, 3, 2},
        {3, 5, 2},
        {3, 4, 2},
    };

    MemorySolver ms(boxes);
@@ -189,33 +241,34 @@
TEST(MemSolverTest, OptimalAlexnet) {
    std::vector<std::vector<int>> shapes{
        {3, 227, 227},  // in
        {96, 55, 55},   // conv1
        {96, 55, 55},   // relu1
        {96, 55, 55},   // norm1
        {96, 27, 27},   // pool1
        {256, 27, 27},  // conv2
        {256, 27, 27},  // relu2
        {256, 27, 27},  // norm2
        {256, 13, 13},  // pool2
        {384, 13, 13},  // conv3
        {384, 13, 13},  // relu3
        {384, 13, 13},  // conv4
        {384, 13, 13},  // relu4
        {256, 13, 13},  // conv5
        {256, 13, 13},  // relu5
        {256, 6, 6},    // pool5
        {1, 1, 4069},   // fc6
        {1, 1, 4069},   // relu6
        {1, 1, 4069},   // fc7
        {1, 1, 4069},   // relu7
        {1, 1, 1000},   // fc8
        {1, 1, 1000},   // loss
    };

    int n = 0;
    std::vector<Box> boxes;
    for (const auto& sh : shapes)
        boxes.push_back({n, ++n, sh[0] * sh[1] * sh[2]});
    // For a linear topology the lower bound is reachable: minRequired == maxDepth
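    // Hypothetical worked instance: a chain with sizes {2, 3, 4} has concurrent
    // pairs (2, 3) and (3, 4), so solve() == max(2 + 3, 3 + 4) == 7 == maxDepth().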
    MemorySolver ms(boxes);
@@ -223,13 +276,19 @@ TEST(MemSolverTest, OptimalAlexnet) {
    EXPECT_EQ(ms.maxTopDepth(), 2);
}
//  |          _____________
//  |    _____|___1_________|
//  |   |_2_____|      ____
//  |   |       |     |    |
//  |__|_3__|______|_3__|___
//      2  3  4  5  6  7  8
TEST(MemSolverTest, NoOverlapping) {
    int n = 0;
    std::vector<Box> boxes{
        {4, 8, 1, n++},
        {6, 7, 3, n++},
        {2, 3, 3, n++},
        {2, 4, 2, n++},
    };

    MemorySolver ms(boxes);
@@ -240,8 +299,8 @@
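    // Two boxes may share memory only when they are disjoint in time (their
    // execution intervals never intersect) or disjoint in address space (their
    // [offset, offset + size) ranges never intersect); the predicate below
    // checks exactly this.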
    auto no_overlap = [&](Box box1, Box box2) -> bool {
        int off1 = ms.getOffset(box1.id);
        int off2 = ms.getOffset(box2.id);
        return box1.finish < box2.start || box1.start > box2.finish || off1 + box1.size <= off2 ||
               off1 >= off2 + box2.size;
    };
    for (int i = 0; i < n; i++)
@@ -249,13 +308,19 @@ TEST(MemSolverTest, NoOverlapping) {
            ASSERT_TRUE(no_overlap(boxes[i], boxes[j])) << "Box overlapping is detected";
}
//  |           _______
//  |          |_2_____|__
//  |    ____  |         |
//  |  __|_1__||         |
//  |__|_1__|______|_3__|___
//      2  3  4  5  6  7  8
TEST(MemSolverTest, BestSolution1) {
    int n = 0;
    std::vector<Box> boxes{
        {2, 3, 1, n++},
        {3, 4, 1, n++},
        {4, 6, 2, n++},
        {6, 7, 3, n++},
    };

    MemorySolver ms(boxes);
@@ -264,12 +329,11 @@
    auto no_overlap = [&](Box box1, Box box2) -> bool {
        int off1 = ms.getOffset(box1.id);
        int off2 = ms.getOffset(box2.id);
        return box1.finish < box2.start || box1.start > box2.finish || off1 + box1.size <= off2 ||
               off1 >= off2 + box2.size;
    };

    for (int i = 0; i < n; i++)
        for (int j = i + 1; j < n; j++)
            ASSERT_TRUE(no_overlap(boxes[i], boxes[j])) << "Box overlapping is detected";
}
View File
@@ -23,7 +23,8 @@ TEST_F(PrecisionUtilsTests, FP32ToFP16PositiveInfinity) {
}

TEST_F(PrecisionUtilsTests, FP32ToFP16NegativeInfinity) {
    const auto fp16ConvertedInf =
        InferenceEngine::PrecisionUtils::f32tof16(-1 * std::numeric_limits<float>::infinity());
    ASSERT_EQ(fp16ConvertedInf, negativeInf);
}
@@ -43,6 +44,7 @@ TEST_F(PrecisionUtilsTests, FP32ToFP16MaximumValue) {
}

TEST_F(PrecisionUtilsTests, FP32ToFP16LowestValue) {
    const auto fp16ConvertedLowestValue =
        InferenceEngine::PrecisionUtils::f32tof16(std::numeric_limits<float>::lowest());
    ASSERT_EQ(fp16ConvertedLowestValue, lowestNumber);
}
View File
@@ -3,14 +3,14 @@
//

#include <gtest/gtest.h>

#include "common_test_utils/test_common.hpp"
#include "ie_precision.hpp"
#include "precision_utils.h"

#ifdef USE_OPENCV
#    include <opencv2/core.hpp>

using namespace InferenceEngine;
View File
@@ -2,16 +2,16 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "system_allocator.hpp"

#include <gtest/gtest.h>
#include <memory>

#include "common_test_utils/test_common.hpp"

using namespace InferenceEngine;

class SystemAllocatorReleaseTests : public CommonTestUtils::TestsCommon {};
class SystemAllocatorTests : public CommonTestUtils::TestsCommon {
protected:
@@ -34,30 +34,31 @@ protected:
    }

    std::unique_ptr<SystemMemoryAllocator> allocator;

public:
};
TEST_F(SystemAllocatorTests, canAllocate) {
    void* handle0 = allocator->alloc(0);
    void* handle1 = allocator->alloc(100);
    EXPECT_NE(handle0, nullptr);
    EXPECT_NE(handle1, nullptr);
    delete[] reinterpret_cast<char*>(handle0);
    delete[] reinterpret_cast<char*>(handle1);
}
TEST_F(SystemAllocatorTests, canFree) {
    EXPECT_TRUE(allocator->free(nullptr));
    void* handle0 = reinterpret_cast<void*>(new char[0]);
    void* handle1 = reinterpret_cast<void*>(new char[100]);
    EXPECT_TRUE(allocator->free(handle0));
    EXPECT_TRUE(allocator->free(handle1));
}
TEST_F(SystemAllocatorTests, canLockAndUnlockAllocatedMemory) {
    // large block such as 10k will result in sigsegv if not allocated
    void* handle = allocator->alloc(10000);
    char* ptr = reinterpret_cast<char*>(allocator->lock(handle));
    ptr[9999] = 11;
    EXPECT_EQ(ptr[9999], 11);
    allocator->unlock(ptr);