Add bert-base-ner in MemLeak tests (#19817)

* Add `bert-base-ner` in MemLeak tests

* Fix segfault caused by `fillTensorRandom()`
Vitaliy Urusovskij 2023-09-19 11:27:23 +04:00 committed by GitHub
parent 475ad32cc4
commit 6b5a22a656
4 changed files with 73 additions and 41 deletions


@@ -10,6 +10,13 @@
 #include <random>
 
+template <typename T>
+using uniformDistribution = typename std::conditional<
+    std::is_floating_point<T>::value,
+    std::uniform_real_distribution<T>,
+    typename std::conditional<std::is_integral<T>::value, std::uniform_int_distribution<T>, void>::type>::type;
+
 /**
  * @brief Determine if InferenceEngine blob means image or not (OV API 1.0)
  */
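The new `uniformDistribution` alias picks the matching `<random>` distribution at compile time: `std::uniform_real_distribution<T>` for floating-point types, `std::uniform_int_distribution<T>` for integral types, and the ill-formed `void` otherwise. A minimal standalone sketch (independent of the OpenVINO headers) showing how the alias resolves:

#include <cstdint>
#include <random>
#include <type_traits>

// Same alias as in the patch: real distribution for floating-point T,
// integer distribution for integral T, ill-formed (void) otherwise.
template <typename T>
using uniformDistribution = typename std::conditional<
    std::is_floating_point<T>::value,
    std::uniform_real_distribution<T>,
    typename std::conditional<std::is_integral<T>::value, std::uniform_int_distribution<T>, void>::type>::type;

// The alias resolves at compile time; no runtime branch is needed.
static_assert(std::is_same<uniformDistribution<float>, std::uniform_real_distribution<float>>::value, "");
static_assert(std::is_same<uniformDistribution<int32_t>, std::uniform_int_distribution<int32_t>>::value, "");

int main() {
    std::mt19937 gen(0);                        // fixed seed, as in the patch
    uniformDistribution<float> dist(0.f, 1.f);  // resolves to uniform_real_distribution<float>
    return dist(gen) >= 0.f ? 0 : 1;
}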
@@ -84,27 +91,20 @@ void fillBlobRandom(InferenceEngine::Blob::Ptr &inputBlob) {
 /**
  * @brief Fill InferenceEngine tensor with random values (OV API 2.0)
  */
-template<typename T, typename U>
-ov::Tensor fillTensorRandom(T &input) {
-    ov::Tensor tensor{input.get_element_type(), input.get_shape()};
-    std::vector<U> values(ov::shape_size(input.get_shape()));
-    std::random_device rd;
-    std::mt19937 gen(rd());
-
-    if (std::is_floating_point<U>::value == true) {
-        std::uniform_real_distribution<> distrib_f = std::uniform_real_distribution<>(0, std::numeric_limits<U>::max());
-        for (size_t i = 0; i < values.size(); ++i)
-            values[i] = distrib_f(gen);
-    } else {
-        std::uniform_int_distribution<> distrib_i = std::uniform_int_distribution<>(0, std::numeric_limits<U>::max());
-        for (size_t i = 0; i < values.size(); ++i)
-            values[i] = distrib_i(gen);
-    }
-
-    std::memcpy(tensor.data(), values.data(), sizeof(U) * values.size());
-
-    return tensor;
-}
+template<typename T, typename T2>
+void fillTensorRandom(ov::Tensor& tensor,
+                      T rand_min = std::numeric_limits<uint8_t>::min(),
+                      T rand_max = std::numeric_limits<uint8_t>::max()) {
+    std::mt19937 gen(0);
+    size_t tensor_size = tensor.get_size();
+    if (0 == tensor_size) {
+        throw std::runtime_error(
+            "Models with dynamic shapes aren't supported. Input tensors must have specific shapes before inference");
+    }
+    T* data = tensor.data<T>();
+    uniformDistribution<T2> distribution(rand_min, rand_max);
+    for (size_t i = 0; i < tensor_size; i++)
+        data[i] = static_cast<T>(distribution(gen));
+}
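Note the second template parameter `T2`: the element type `T` and the distribution's value type can differ, because `std::uniform_int_distribution` over `char`-width types (`int8_t`, `uint8_t`) is not a valid specialization in C++17 and is rejected by VS2017/19. The rewrite also fills the tensor in place through a correctly typed pointer and rejects zero-sized (dynamic-shape) tensors up front. A standalone sketch of the same pattern, assuming a plain `std::vector` in place of `ov::Tensor`:

#include <cstdint>
#include <limits>
#include <random>
#include <stdexcept>
#include <vector>

// Mirrors the fixed fillTensorRandom(): write through a correctly typed
// buffer and draw from a wider distribution type T2 when T is too narrow.
template <typename T, typename T2>
void fillBufferRandom(std::vector<T>& buffer,
                      T rand_min = std::numeric_limits<T>::min(),
                      T rand_max = std::numeric_limits<T>::max()) {
    if (buffer.empty())
        throw std::runtime_error("Empty buffer: nothing to fill");
    std::mt19937 gen(0);  // fixed seed keeps test inputs reproducible
    std::uniform_int_distribution<T2> distribution(rand_min, rand_max);
    for (auto& value : buffer)
        value = static_cast<T>(distribution(gen));
}

int main() {
    std::vector<int8_t> data(16);
    // int8_t values must be drawn via int32_t: uniform_int_distribution<int8_t>
    // is not allowed by the C++17 standard.
    fillBufferRandom<int8_t, int32_t>(data);
    return 0;
}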
@@ -141,26 +141,34 @@ void fillBlobImInfo(InferenceEngine::Blob::Ptr &inputBlob,
 template<typename T>
 void fillTensors(ov::InferRequest &infer_request, std::vector<T> &inputs) {
     for (size_t i = 0; i < inputs.size(); ++i) {
-        ov::Tensor input_tensor;
-        if (inputs[i].get_element_type() == ov::element::f32) {
-            input_tensor = fillTensorRandom<T, float>(inputs[i]);
-        } else if (inputs[i].get_element_type() == ov::element::f64) {
-            input_tensor = fillTensorRandom<T, double>(inputs[i]);
-        } else if (inputs[i].get_element_type() == ov::element::f16) {
-            input_tensor = fillTensorRandom<T, short>(inputs[i]);
-        } else if (inputs[i].get_element_type() == ov::element::i32) {
-            input_tensor = fillTensorRandom<T, int32_t>(inputs[i]);
-        } else if (inputs[i].get_element_type() == ov::element::i64) {
-            input_tensor = fillTensorRandom<T, int64_t>(inputs[i]);
-        } else if (inputs[i].get_element_type() == ov::element::u8) {
-            input_tensor = fillTensorRandom<T, uint8_t>(inputs[i]);
-        } else if (inputs[i].get_element_type() == ov::element::i8) {
-            input_tensor = fillTensorRandom<T, int8_t>(inputs[i]);
-        } else if (inputs[i].get_element_type() == ov::element::u16) {
-            input_tensor = fillTensorRandom<T, uint16_t>(inputs[i]);
-        } else if (inputs[i].get_element_type() == ov::element::i16) {
-            input_tensor = fillTensorRandom<T, int16_t>(inputs[i]);
+        auto input_tensor = infer_request.get_tensor(inputs[i]);
+        auto type = inputs[i].get_element_type();
+        if (type == ov::element::f32) {
+            fillTensorRandom<float, float>(input_tensor);
+        } else if (type == ov::element::f64) {
+            fillTensorRandom<double, double>(input_tensor);
+        } else if (type == ov::element::f16) {
+            fillTensorRandom<ov::float16, float>(input_tensor);
+        } else if (type == ov::element::i32) {
+            fillTensorRandom<int32_t, int32_t>(input_tensor);
+        } else if (type == ov::element::i64) {
+            fillTensorRandom<int64_t, int64_t>(input_tensor);
+        } else if ((type == ov::element::u8) || (type == ov::element::boolean)) {
+            // uniform_int_distribution<uint8_t> is not allowed in the C++17
+            // standard and vs2017/19
+            fillTensorRandom<uint8_t, uint32_t>(input_tensor);
+        } else if (type == ov::element::i8) {
+            // uniform_int_distribution<int8_t> is not allowed in the C++17 standard
+            // and vs2017/19
+            fillTensorRandom<int8_t, int32_t>(input_tensor,
+                                              std::numeric_limits<int8_t>::min(),
+                                              std::numeric_limits<int8_t>::max());
+        } else if (type == ov::element::u16) {
+            fillTensorRandom<uint16_t, uint16_t>(input_tensor);
+        } else if (type == ov::element::i16) {
+            fillTensorRandom<int16_t, int16_t>(input_tensor);
+        } else if (type == ov::element::boolean) {
+            fillTensorRandom<uint8_t, uint32_t>(input_tensor, 0, 1);
         } else {
             throw std::logic_error(
                 "Input precision is not supported for " + inputs[i].get_element_type().get_type_name());


@@ -24,6 +24,14 @@
         <model name="mtcnn-r" precision="FP32" source="omz" />
     </device>
+    <device name="CPU" iterations="1000" processes="1" threads="1">
+        <model name="bert-base-ner" precision="FP32" source="omz" />
+    </device>
+    <device name="GPU" iterations="1000" processes="1" threads="1">
+        <model name="bert-base-ner" precision="FP32" source="omz" />
+    </device>
     <device name="CPU" iterations="1000" processes="1" threads="1">
         <model name="alexnet" precision="FP32" source="omz" />
         <model name="mobilenet-ssd" precision="FP32" source="omz" />


@@ -8,4 +8,12 @@
         <model name="mobilenet-ssd" precision="FP32" source="omz" />
     </device>
+    <device name="CPU" iterations="30" processes="1" threads="1">
+        <model name="bert-base-ner" precision="FP32" source="omz" />
+    </device>
+    <device name="GPU" iterations="30" processes="1" threads="1">
+        <model name="bert-base-ner" precision="FP32" source="omz" />
+    </device>
 </cases>


@@ -24,6 +24,14 @@
         <model name="mtcnn-r" precision="FP32" source="omz" />
     </device>
+    <device name="CPU" iterations="5000" processes="1" threads="1">
+        <model name="bert-base-ner" precision="FP32" source="omz" />
+    </device>
+    <device name="GPU" iterations="5000" processes="1" threads="1">
+        <model name="bert-base-ner" precision="FP32" source="omz" />
+    </device>
     <device name="CPU" iterations="5000" processes="1" threads="1">
         <model name="alexnet" precision="FP32" source="omz" />
         <model name="mobilenet-ssd" precision="FP32" source="omz" />