Refactor engines utils (#18856)
* Refactor engines utils
* Apply comments
* Apply comments 2
This commit is contained in:
parent
83d7b9d372
commit
fb45deb65e
@@ -11,7 +11,7 @@
#include "common_test_utils/all_close.hpp"
#include "common_test_utils/ndarray.hpp"
#include "common_test_utils/test_tools.hpp"
#include "engines_util/random.hpp"
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/file_util.hpp"
#include "ngraph/graph_util.hpp"

@@ -537,6 +537,34 @@ TEST(eval, evaluate_broadcast_v3_explicit_dyn) {
    ASSERT_EQ(result_val, expec);
}

class TestOpMultiOut : public op::Op {
public:
    OPENVINO_OP("TestOpMultiOut");
    TestOpMultiOut() = default;

    TestOpMultiOut(const Output<Node>& output_1, const Output<Node>& output_2) : Op({output_1, output_2}) {
        validate_and_infer_types();
    }

    void validate_and_infer_types() override {
        set_output_size(2);
        set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
        set_output_type(1, get_input_element_type(1), get_input_partial_shape(1));
    }

    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override {
        return std::make_shared<TestOpMultiOut>(new_args.at(0), new_args.at(1));
    }

    OPENVINO_SUPPRESS_DEPRECATED_START
    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override {
        inputs[0]->read(outputs[0]->get_data_ptr(), inputs[0]->get_size_in_bytes());
        inputs[1]->read(outputs[1]->get_data_ptr(), inputs[1]->get_size_in_bytes());
        return true;
    }
    OPENVINO_SUPPRESS_DEPRECATED_END
};

TEST(eval, test_op_multi_out) {
    auto p = make_shared<op::Parameter>(element::f32, PartialShape{2, 3});
    auto p2 = make_shared<op::Parameter>(element::f64, PartialShape{2, 2});

@@ -28,7 +28,6 @@
#include "default_opset.hpp"
#include "openvino/opsets/opset12.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/constant_folding.hpp"

@@ -16,7 +16,6 @@
#include "common_test_utils/file_utils.hpp"
#include "default_opset.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "onnx_import/onnx.hpp"
#include "common_test_utils/test_control.hpp"

@@ -8,7 +8,6 @@
#include "common_test_utils/type_prop.hpp"
#include "default_opset.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/file_util.hpp"
#include "ngraph/type.hpp"

@@ -19,7 +19,6 @@
#include "common_test_utils/test_control.hpp"
#include "common_test_utils/test_tools.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "onnx_import/onnx.hpp"

@@ -19,7 +19,6 @@
#include "common_test_utils/test_control.hpp"
#include "common_test_utils/test_tools.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "onnx_import/onnx.hpp"

@@ -18,7 +18,6 @@
#include "common_test_utils/file_utils.hpp"
#include "default_opset.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/file_util.hpp"
#include "onnx_import/onnx.hpp"

@@ -25,7 +25,6 @@
#include "common_test_utils/file_utils.hpp"
#include "default_opset.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/constant_folding.hpp"

@@ -16,7 +16,6 @@
#include "common_test_utils/file_utils.hpp"
#include "default_opset.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "onnx_import/onnx.hpp"
#include "common_test_utils/test_control.hpp"

@@ -20,7 +20,6 @@
#include "common_test_utils/test_control.hpp"
#include "common_test_utils/test_tools.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "onnx_import/onnx.hpp"

@@ -19,7 +19,6 @@
#include "common_test_utils/test_tools.hpp"
#include "engines_util/execute_tools.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "onnx_import/onnx.hpp"

@@ -20,7 +20,6 @@
#include "common_test_utils/test_control.hpp"
#include "common_test_utils/test_tools.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "onnx_import/onnx.hpp"

@@ -16,7 +16,6 @@
#include "common_test_utils/file_utils.hpp"
#include "default_opset.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "onnx_import/onnx.hpp"
#include "common_test_utils/test_control.hpp"

@@ -14,7 +14,6 @@
#include "common_test_utils/file_utils.hpp"
#include "editor.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "common_test_utils/test_control.hpp"

@@ -12,7 +12,6 @@
#include "default_opset.hpp"
#include "editor.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/file_util.hpp"
#include "ngraph/op/util/op_types.hpp"

@@ -6,7 +6,6 @@

#include <fstream>

#include "engines_util/test_engines.hpp"
#include "ngraph/ngraph.hpp"
#include "paddle_utils.hpp"
#include "common_test_utils/test_control.hpp"

@@ -8,7 +8,6 @@

#include "common_test_utils/test_control.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "utils.hpp"

using namespace ngraph;

@@ -1,45 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "engine_traits.hpp"
#include "ngraph/function.hpp"

namespace ngraph {
namespace test {
enum class TestCaseType { STATIC, DYNAMIC };

namespace {
/// A factory that can create engines supporting devices but not dynamic backends.
/// Currently: IE_CPU_Backend and IE_GPU_Backend
template <typename Engine>
typename std::enable_if<supports_devices<Engine>::value, Engine>::type create_engine_impl(
    const std::shared_ptr<ngraph::Function> function,
    const TestCaseType) {
    return Engine{function};
}

/// A factory that can create engines which support dynamic backends
/// but do not support devices. Currently: INTERPRETER_Engine
template <typename Engine>
typename std::enable_if<supports_dynamic<Engine>::value, Engine>::type create_engine_impl(
    const std::shared_ptr<ngraph::Function> function,
    const TestCaseType tct) {
    if (tct == TestCaseType::DYNAMIC) {
        return Engine::dynamic(function);
    } else {
        return Engine{function};
    }
}
} // namespace

/// A factory that is able to create all types of test Engines
/// in both static and dynamic mode
template <typename Engine>
Engine create_engine(const std::shared_ptr<ngraph::Function> function, const TestCaseType tct) {
    return create_engine_impl<Engine>(function, tct);
};
} // namespace test
} // namespace ngraph

@@ -1,28 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

namespace ngraph {
namespace test {
/// These templates should be specialized for each test engine and they should contain
/// a "static constexpr const bool value" member set to true or false.
/// These traits are used in engine_factory.hpp

/// Indicates that a given Engine can be constructed for different devices (IE engines)
template <typename Engine>
struct supports_devices;

/// Indicates that a given Engine supports dynamic shapes
template <typename Engine>
struct supports_dynamic;

/// Example:
///
// template <>
// struct supports_dynamic<EngineName> {
//     static constexpr const bool value = true;
// };
} // namespace test
} // namespace ngraph

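Note on the two removed headers above: the traits selected one of the SFINAE-guarded create_engine_impl overloads in engine_factory.hpp. A minimal sketch of how a backend was expected to plug in, based only on the deleted code; "EngineName" is a placeholder, not a real engine in this repository:

// Hypothetical engine type, for illustration only.
class EngineName {
public:
    explicit EngineName(const std::shared_ptr<ngraph::Function>& function);
    static EngineName dynamic(const std::shared_ptr<ngraph::Function>& function);
};

namespace ngraph {
namespace test {
// Specialize both traits so exactly one create_engine_impl overload is enabled.
template <>
struct supports_devices<EngineName> {
    static constexpr const bool value = false;
};
template <>
struct supports_dynamic<EngineName> {
    static constexpr const bool value = true;
};
} // namespace test
} // namespace ngraph

// create_engine<EngineName>(function, TestCaseType::DYNAMIC) would then resolve to the
// supports_dynamic overload and call EngineName::dynamic(function).
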
@@ -65,10 +65,15 @@ shared_ptr<Function> make_test_graph() {
    return f0;
}

template <>
void copy_data<bool>(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<bool>& data) {
    std::vector<char> data_char(data.begin(), data.end());
    copy_data(tv, data_char);
template <typename T>
void init_int_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max) {
    size_t size = tv->get_element_count();
    std::uniform_int_distribution<T> dist(min, max);
    std::vector<T> vec(size);
    for (T& element : vec) {
        element = dist(engine);
    }
    tv->write(vec.data(), vec.size() * sizeof(T));
}

template <>
@@ -104,6 +109,17 @@ void init_int_tv<uint8_t>(ngraph::runtime::Tensor* tv, std::default_random_engin
    tv->write(vec.data(), vec.size() * sizeof(uint8_t));
}

template <typename T>
void init_real_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max) {
    size_t size = tv->get_element_count();
    std::uniform_real_distribution<T> dist(min, max);
    std::vector<T> vec(size);
    for (T& element : vec) {
        element = dist(engine);
    }
    tv->write(vec.data(), vec.size() * sizeof(T));
}

void random_init(ngraph::runtime::Tensor* tv, std::default_random_engine& engine) {
    element::Type et = tv->get_element_type();
    if (et == element::boolean) {

@@ -133,21 +149,6 @@ void random_init(ngraph::runtime::Tensor* tv, std::default_random_engine& engine
    }
}

template <>
string get_results_str(const std::vector<char>& ref_data, const std::vector<char>& actual_data, size_t max_results) {
    stringstream ss;
    size_t num_results = std::min(static_cast<size_t>(max_results), ref_data.size());
    ss << "First " << num_results << " results";
    for (size_t i = 0; i < num_results; ++i) {
        ss << std::endl
           << std::setw(4) << i << " ref: " << std::setw(16) << std::left << static_cast<int>(ref_data[i])
           << " actual: " << std::setw(16) << std::left << static_cast<int>(actual_data[i]);
    }
    ss << std::endl;

    return ss.str();
}

::testing::AssertionResult test_ordered_ops(shared_ptr<Function> f, const NodeVector& required_ops) {
    unordered_set<Node*> seen;
    for (auto& node_ptr : f->get_ordered_ops()) {

@@ -177,9 +178,3 @@ string get_results_str(const std::vector<char>& ref_data, const std::vector<char
    }
    return ::testing::AssertionSuccess();
}

bool ngraph::TestOpMultiOut::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
    inputs[0]->read(outputs[0]->get_data_ptr(), inputs[0]->get_size_in_bytes());
    inputs[1]->read(outputs[1]->get_data_ptr(), inputs[1]->get_size_in_bytes());
    return true;
}

@@ -4,144 +4,51 @@

#pragma once

#include <exception>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <list>
#include <memory>
#include <random>
#include <vector>

#include "gtest/gtest.h"
#include "ngraph/file_util.hpp"
#include "ngraph/function.hpp"
#include "ngraph/log.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/type/element_type_traits.hpp"
#include "openvino/core/model.hpp"
#include "openvino/core/node.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/op/op.hpp"

namespace ngraph {
class TestOpMultiOut : public op::Op {
public:
    OPENVINO_OP("TestOpMultiOut");
    TestOpMultiOut() = default;

    TestOpMultiOut(const Output<Node>& output_1, const Output<Node>& output_2) : Op({output_1, output_2}) {
        validate_and_infer_types();
    }
    void validate_and_infer_types() override {
        set_output_size(2);
        set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
        set_output_type(1, get_input_element_type(1), get_input_partial_shape(1));
    }

    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override {
        return std::make_shared<TestOpMultiOut>(new_args.at(0), new_args.at(1));
    }
    OPENVINO_SUPPRESS_DEPRECATED_START
    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
    OPENVINO_SUPPRESS_DEPRECATED_END
};
} // namespace ngraph

bool validate_list(const std::vector<std::shared_ptr<ngraph::Node>>& nodes);
std::shared_ptr<ngraph::Function> make_test_graph();
bool validate_list(const std::vector<std::shared_ptr<ov::Node>>& nodes);
std::shared_ptr<ov::Model> make_test_graph();

OPENVINO_SUPPRESS_DEPRECATED_START
template <typename T>
void copy_data(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<T>& data) {
void copy_data(const std::shared_ptr<ngraph::runtime::Tensor>& tv, const std::vector<T>& data) {
    size_t data_size = data.size() * sizeof(T);
    if (data_size > 0) {
        tv->write(data.data(), data_size);
    }
}

template <ngraph::element::Type_t ET>
ngraph::HostTensorPtr make_host_tensor(const ngraph::Shape& shape,
                                       const std::vector<typename ngraph::element_type_traits<ET>::value_type>& data) {
template <>
inline void copy_data<bool>(const std::shared_ptr<ngraph::runtime::Tensor>& tv, const std::vector<bool>& data) {
    std::vector<char> data_char(data.begin(), data.end());
    copy_data(tv, data_char);
}

template <ov::element::Type_t ET>
ngraph::HostTensorPtr make_host_tensor(const ov::Shape& shape,
                                       const std::vector<typename ov::element_type_traits<ET>::value_type>& data) {
    NGRAPH_CHECK(shape_size(shape) == data.size(), "Incorrect number of initialization elements");
    auto host_tensor = std::make_shared<ngraph::HostTensor>(ET, shape);
    auto host_tensor = std::make_shared<ngraph::runtime::HostTensor>(ET, shape);
    copy_data(host_tensor, data);
    return host_tensor;
}

template <>
void copy_data<bool>(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<bool>& data);

template <typename T>
void write_vector(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<T>& values) {
    tv->write(values.data(), values.size() * sizeof(T));
}

template <typename T>
std::vector<std::shared_ptr<T>> get_ops_of_type(std::shared_ptr<ngraph::Function> f) {
    std::vector<std::shared_ptr<T>> ops;
    for (auto op : f->get_ops()) {
        if (auto cop = ngraph::as_type_ptr<T>(op)) {
            ops.push_back(cop);
        }
    }

    return ops;
}

template <typename T>
void init_int_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max) {
    size_t size = tv->get_element_count();
    std::uniform_int_distribution<T> dist(min, max);
    std::vector<T> vec(size);
    for (T& element : vec) {
        element = dist(engine);
    }
    tv->write(vec.data(), vec.size() * sizeof(T));
}

template <typename T>
void init_real_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max) {
    size_t size = tv->get_element_count();
    std::uniform_real_distribution<T> dist(min, max);
    std::vector<T> vec(size);
    for (T& element : vec) {
        element = dist(engine);
    }
    tv->write(vec.data(), vec.size() * sizeof(T));
}

void random_init(ngraph::runtime::Tensor* tv, std::default_random_engine& engine);

template <typename T>
std::string get_results_str(const std::vector<T>& ref_data,
                            const std::vector<T>& actual_data,
                            size_t max_results = 16) {
    std::stringstream ss;
    size_t num_results = std::min(static_cast<size_t>(max_results), ref_data.size());
    ss << "First " << num_results << " results";
    for (size_t i = 0; i < num_results; ++i) {
        ss << std::endl
           // use unary + operator to force integral values to be displayed as numbers
           << std::setw(4) << i << " ref: " << std::setw(16) << std::left << +ref_data[i]
           << " actual: " << std::setw(16) << std::left << +actual_data[i];
    }
    ss << std::endl;

    return ss.str();
}

template <>
std::string get_results_str(const std::vector<char>& ref_data,
                            const std::vector<char>& actual_data,
                            size_t max_results);

testing::AssertionResult test_ordered_ops(std::shared_ptr<ngraph::Function> f, const ngraph::NodeVector& required_ops);

template <ngraph::element::Type_t ET>
ngraph::HostTensorPtr make_host_tensor(const ngraph::Shape& shape) {
    auto host_tensor = std::make_shared<ngraph::HostTensor>(ET, shape);
template <ov::element::Type_t ET>
ngraph::HostTensorPtr make_host_tensor(const ov::Shape& shape) {
    auto host_tensor = std::make_shared<ngraph::runtime::HostTensor>(ET, shape);
    static std::default_random_engine engine(2112);
    random_init(host_tensor.get(), engine);
    return host_tensor;
}
OPENVINO_SUPPRESS_DEPRECATED_END

testing::AssertionResult test_ordered_ops(std::shared_ptr<ov::Model> f, const ov::NodeVector& required_ops);

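A side note on the unary plus used in get_results_str above: streaming int8/uint8 data directly prints characters, while the promotion makes the numeric value visible. A tiny standalone illustration, not part of this commit:

#include <cstdint>
#include <iostream>

int main() {
    uint8_t value = 65;
    std::cout << value << '\n';   // prints the character 'A'
    std::cout << +value << '\n';  // unary + promotes to int, prints 65
}
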
@@ -1,51 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <functional>
#include <random>

#include "execute_tools.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/type/element_type.hpp"
#include "ngraph/util.hpp"

OPENVINO_SUPPRESS_DEPRECATED_START
namespace ngraph {
namespace test {
/// \brief A predictable pseudo-random number generator
/// The seed is initialized so that we get repeatable pseudo-random numbers for tests
template <typename T>
class Uniform {
public:
    Uniform(T min, T max, T seed = 0)
        : m_engine(seed),
          m_distribution(min, max),
          m_r(std::bind(m_distribution, m_engine)) {}

    /// \brief Randomly initialize a tensor
    /// \param ptv The tensor to initialize
    const std::shared_ptr<runtime::Tensor> initialize(const std::shared_ptr<runtime::Tensor>& ptv) {
        std::vector<T> vec = read_vector<T>(ptv);
        initialize(vec);
        write_vector(ptv, vec);
        return ptv;
    }
    /// \brief Randomly initialize a vector
    /// \param vec The tensor to initialize
    void initialize(std::vector<T>& vec) {
        for (T& elt : vec) {
            elt = m_r();
        }
    }

protected:
    std::default_random_engine m_engine;
    std::uniform_real_distribution<T> m_distribution;
    std::function<T()> m_r;
};
} // namespace test
} // namespace ngraph
OPENVINO_SUPPRESS_DEPRECATED_END

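For context, the deleted Uniform helper was driven roughly as below; a hedged sketch based only on the class definition above, with the tensor pointer as a placeholder:

// Repeatable across runs because the default seed is fixed (0).
ngraph::test::Uniform<float> generator(-1.0f, 1.0f);
std::vector<float> values(16);
generator.initialize(values);          // fill a plain vector
// generator.initialize(tensor_ptr);   // or fill an ngraph::runtime::Tensor in place
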
@@ -1,34 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_utils.hpp"

#include <cmath>
#include <sstream>

testing::AssertionResult ngraph::test::compare_with_tolerance(const std::vector<float>& expected,
                                                              const std::vector<float>& results,
                                                              const float tolerance) {
    auto comparison_result = testing::AssertionSuccess();

    std::stringstream msg;
    msg << std::setprecision(std::numeric_limits<long double>::digits10 + 1);

    bool rc = true;

    for (std::size_t j = 0; j < expected.size(); ++j) {
        float diff = std::fabs(results[j] - expected[j]);
        if (diff > tolerance) {
            msg << expected[j] << " is not close to " << results[j] << " at index " << j << "\n";
            rc = false;
        }
    }

    if (!rc) {
        comparison_result = testing::AssertionFailure();
        comparison_result << msg.str();
    }

    return comparison_result;
}

@@ -1,17 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <gtest/gtest.h>

#include <vector>

namespace ngraph {
namespace test {
testing::AssertionResult compare_with_tolerance(const std::vector<float>& expected_results,
                                                const std::vector<float>& results,
                                                const float tolerance);
}
} // namespace ngraph

@@ -5,9 +5,9 @@
#include "test_case.hpp"

#include "common_test_utils/all_close_f.hpp"
#include "common_test_utils/data_utils.hpp"
#include "common_test_utils/file_utils.hpp"
#include "openvino/util/file_util.hpp"
#include "shared_utils.hpp"

OPENVINO_SUPPRESS_DEPRECATED_START
namespace {

@@ -17,21 +17,46 @@ compare_values(const ov::Tensor& expected, const ov::Tensor& result, const size_
    return ov::test::utils::all_close_f(expected, result, static_cast<int>(tolerance_bits));
}

testing::AssertionResult compare_with_fp_tolerance(const ov::Tensor& expected_tensor,
                                                   const ov::Tensor& result_tensor,
testing::AssertionResult compare_with_tolerance(const std::vector<float>& expected,
                                                const std::vector<float>& results,
                                                const float tolerance) {
    auto comparison_result = testing::AssertionSuccess();

    auto exp_host_t = std::make_shared<ngraph::HostTensor>(expected_tensor.get_element_type(),
                                                           expected_tensor.get_shape(),
                                                           expected_tensor.data());
    auto res_host_t = std::make_shared<ngraph::HostTensor>(result_tensor.get_element_type(),
                                                           result_tensor.get_shape(),
                                                           result_tensor.data());
    const auto expected = read_vector<float>(exp_host_t);
    const auto result = read_vector<float>(res_host_t);
    std::stringstream msg;
    msg << std::setprecision(std::numeric_limits<long double>::digits10 + 1);

    return ngraph::test::compare_with_tolerance(expected, result, tolerance);
    bool rc = true;

    for (std::size_t j = 0; j < expected.size(); ++j) {
        float diff = std::fabs(results[j] - expected[j]);
        if (diff > tolerance) {
            msg << expected[j] << " is not close to " << results[j] << " at index " << j << "\n";
            rc = false;
        }
    }

    if (!rc) {
        comparison_result = testing::AssertionFailure();
        comparison_result << msg.str();
    }

    return comparison_result;
}

testing::AssertionResult compare_with_fp_tolerance(const ov::Tensor& expected_tensor,
                                                   const ov::Tensor& result_tensor,
                                                   const float tolerance) {
    OPENVINO_ASSERT(expected_tensor.get_element_type() == ov::element::f32);

    std::vector<float> expected(expected_tensor.get_size());
    ov::Tensor expected_view(expected_tensor.get_element_type(), expected_tensor.get_shape(), expected.data());
    expected_tensor.copy_to(expected_view);

    std::vector<float> result(result_tensor.get_size());
    ov::Tensor result_view(result_tensor.get_element_type(), result_tensor.get_shape(), result.data());
    result_tensor.copy_to(result_view);

    return compare_with_tolerance(expected, result, tolerance);
}

template <typename T>

@@ -44,33 +69,19 @@ compare_values(const ov::Tensor& expected, const ov::Tensor& result, const size_
template <typename T>
typename std::enable_if<std::is_class<T>::value, testing::AssertionResult>::type
compare_values(const ov::Tensor& expected_tensor, const ov::Tensor& result_tensor, const size_t tolerance_bits) {
    auto exp_host_t = std::make_shared<ngraph::HostTensor>(expected_tensor.get_element_type(),
                                                           expected_tensor.get_shape(),
                                                           expected_tensor.data());
    auto res_host_t = std::make_shared<ngraph::HostTensor>(result_tensor.get_element_type(),
                                                           result_tensor.get_shape(),
                                                           result_tensor.data());
    const auto expected = read_vector<T>(exp_host_t);
    const auto result = read_vector<T>(res_host_t);
    auto expected_tensor_converted =
        ov::test::utils::make_tensor_with_precision_convert(expected_tensor, ov::element::f64);
    auto result_tensor_converted = ov::test::utils::make_tensor_with_precision_convert(result_tensor, ov::element::f64);

    // TODO: add testing infrastructure for float16 and bfloat16 to avoid cast to double
    std::vector<double> expected_double(expected.size());
    std::vector<double> result_double(result.size());

    NGRAPH_CHECK(expected.size() == result.size(), "Number of expected and computed results don't match");

    for (size_t i = 0; i < expected.size(); ++i) {
        expected_double[i] = static_cast<double>(expected[i]);
        result_double[i] = static_cast<double>(result[i]);
    }

    return ov::test::utils::all_close_f(expected_double, result_double, static_cast<int>(tolerance_bits));
    return ov::test::utils::all_close_f(expected_tensor_converted,
                                        result_tensor_converted,
                                        static_cast<int>(tolerance_bits));
}
}; // namespace

namespace ngraph {
namespace test {
std::shared_ptr<Function> function_from_ir(const std::string& xml_path, const std::string& bin_path) {
std::shared_ptr<ov::Model> function_from_ir(const std::string& xml_path, const std::string& bin_path) {
    ov::Core c;
    return c.read_model(xml_path, bin_path);
}

@@ -180,7 +191,7 @@ testing::AssertionResult TestCase::compare_results_with_tolerance_as_fp(float to
    return comparison_result;
}

TestCase::TestCase(const std::shared_ptr<Function>& function, const std::string& dev) : m_function{function} {
TestCase::TestCase(const std::shared_ptr<ov::Model>& function, const std::string& dev) : m_function{function} {
    try {
        // Register template plugin
        m_core.register_plugin(

@@ -9,10 +9,7 @@
#include "common_test_utils/all_close.hpp"
#include "common_test_utils/all_close_f.hpp"
#include "common_test_utils/test_tools.hpp"
#include "engine_factory.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/function.hpp"
#include "ngraph/ngraph.hpp"
#include "openvino/runtime/core.hpp"
#include "openvino/util/file_util.hpp"

@@ -28,19 +25,19 @@ inline std::string backend_name_to_device(const std::string& backend_name) {
    OPENVINO_THROW("Unsupported backend name");
}

std::shared_ptr<Function> function_from_ir(const std::string& xml_path, const std::string& bin_path = {});
std::shared_ptr<ov::Model> function_from_ir(const std::string& xml_path, const std::string& bin_path = {});

class TestCase {
public:
    TestCase(const std::shared_ptr<Function>& function, const std::string& dev = "TEMPLATE");
    TestCase(const std::shared_ptr<ov::Model>& function, const std::string& dev = "TEMPLATE");

    template <typename T>
    void add_input(const Shape& shape, const std::vector<T>& values) {
        const auto params = m_function->get_parameters();
        NGRAPH_CHECK(m_input_index < params.size(), "All function parameters already have inputs.");
        OPENVINO_ASSERT(m_input_index < params.size(), "All function parameters already have inputs.");

        const auto& input_pshape = params.at(m_input_index)->get_partial_shape();
        NGRAPH_CHECK(input_pshape.compatible(shape),
        OPENVINO_ASSERT(input_pshape.compatible(shape),
                        "Provided input shape ",
                        shape,
                        " is not compatible with nGraph function's expected input shape ",

@@ -64,7 +61,7 @@ public:
            m_request.set_input_tensor(m_input_index, tensor);
        } else {
            auto tensor = m_request.get_input_tensor(m_input_index);
            NGRAPH_CHECK(tensor.get_size() >= values.size(),
            OPENVINO_ASSERT(tensor.get_size() >= values.size(),
                            "Tensor and values have different sizes. Tensor (",
                            tensor.get_shape(),
                            ") size: ",

@@ -81,7 +78,7 @@ public:
    void add_input(const std::vector<T>& values) {
        const auto& input_pshape = m_function->get_parameters().at(m_input_index)->get_partial_shape();

        NGRAPH_CHECK(input_pshape.is_static(),
        OPENVINO_ASSERT(input_pshape.is_static(),
                        "Input number ",
                        m_input_index,
                        " in the tested graph has dynamic shape. You need to provide ",

@@ -99,18 +96,14 @@ public:

    template <typename T>
    void add_input_from_file(const Shape& shape, const std::string& basepath, const std::string& filename) {
        NGRAPH_SUPPRESS_DEPRECATED_START
        const auto filepath = ngraph::file_util::path_join(basepath, filename);
        const auto filepath = ov::util::path_join({basepath, filename});
        add_input_from_file<T>(shape, filepath);
        NGRAPH_SUPPRESS_DEPRECATED_END
    }

    template <typename T>
    void add_input_from_file(const std::string& basepath, const std::string& filename) {
        NGRAPH_SUPPRESS_DEPRECATED_START
        const auto filepath = ngraph::file_util::path_join(basepath, filename);
        const auto filepath = ov::util::path_join({basepath, filename});
        add_input_from_file<T>(filepath);
        NGRAPH_SUPPRESS_DEPRECATED_END
    }

    template <typename T>

@@ -129,10 +122,10 @@ public:
    void add_expected_output(const Shape& expected_shape, const std::vector<T>& values) {
        const auto results = m_function->get_results();

        NGRAPH_CHECK(m_output_index < results.size(), "All model results already have expected outputs.");
        OPENVINO_ASSERT(m_output_index < results.size(), "All model results already have expected outputs.");

        const auto& output_pshape = results.at(m_output_index)->get_output_partial_shape(0);
        NGRAPH_CHECK(output_pshape.compatible(expected_shape),
        OPENVINO_ASSERT(output_pshape.compatible(expected_shape),
                        "Provided expected output shape ",
                        expected_shape,
                        " is not compatible with OpenVINO model's output shape ",

@@ -152,24 +145,24 @@ public:
    void add_expected_output(const std::vector<T>& values) {
        const auto results = m_function->get_results();

        NGRAPH_CHECK(m_output_index < results.size(), "All model results already have expected outputs.");
        OPENVINO_ASSERT(m_output_index < results.size(), "All model results already have expected outputs.");

        const auto shape = results.at(m_output_index)->get_shape();
        add_expected_output<T>(shape, values);
    }

    template <typename T>
    void add_expected_output_from_file(const ngraph::Shape& expected_shape,
    void add_expected_output_from_file(const ov::Shape& expected_shape,
                                       const std::string& basepath,
                                       const std::string& filename) {
        NGRAPH_SUPPRESS_DEPRECATED_START
        const auto filepath = ngraph::file_util::path_join(basepath, filename);
        OPENVINO_SUPPRESS_DEPRECATED_START
        const auto filepath = ov::util::path_join({basepath, filename});
        add_expected_output_from_file<T>(expected_shape, filepath);
        NGRAPH_SUPPRESS_DEPRECATED_END
        OPENVINO_SUPPRESS_DEPRECATED_END
    }

    template <typename T>
    void add_expected_output_from_file(const ngraph::Shape& expected_shape, const std::string& filepath) {
    void add_expected_output_from_file(const ov::Shape& expected_shape, const std::string& filepath) {
        const auto values = read_binary_file<T>(filepath);
        add_expected_output<T>(expected_shape, values);
    }

@@ -208,7 +201,7 @@ public:
    }

private:
    std::shared_ptr<Function> m_function;
    std::shared_ptr<ov::Model> m_function;
    ov::Core m_core;
    ov::InferRequest m_request;
    std::vector<ov::Tensor> m_expected_outputs;

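For orientation, the migrated TestCase is driven roughly as in the sketch below; the model, shapes, values and the final run call are placeholders rather than code from this commit:

auto model = ngraph::test::function_from_ir("model.xml");  // hypothetical IR path
ngraph::test::TestCase test_case(model, "TEMPLATE");
test_case.add_input<float>(ov::Shape{2, 2}, {1.f, 2.f, 3.f, 4.f});
test_case.add_expected_output<float>(ov::Shape{2, 2}, {2.f, 4.f, 6.f, 8.f});
// test_case.run();  // assumed entry point that infers and compares; not shown in this diff
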
@@ -1,48 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <gtest/gtest.h>

namespace ngraph {
namespace test {
/// An interface that each test case engine needs to implement. This interface wraps
/// a couple of generic methods which are required by the TestCase class to execute
/// a unit test for a given ngraph::Function.
/// The interface operates on C++ types while internally it can use implementation-specific
/// types, containers and structures.
class TestCaseEngine {
public:
    virtual ~TestCaseEngine() noexcept = default;

    /// Performs the inference using data stored as internal state
    virtual void infer() = 0;

    /// Resets the internal state so that the test can be executed again
    virtual void reset() = 0;

    /// Compares computed and expected results, returns AssertionSuccess or AssertionFailure
    virtual testing::AssertionResult compare_results(const size_t tolerance_bits) = 0;

    /// Compares computed and expected results, returns AssertionSuccess or AssertionFailure
    virtual testing::AssertionResult compare_results_with_tolerance_as_fp(const float tolerance) = 0;

    /// Additionally the interface implementing class needs to define
    /// the following 2 methods. They are called from the TestCase class
    /// but they can't be a part of interface since they need to be declared as templates

    /// Passes data (along with its shape) to the next available input.
    /// The data should be stored as internal state, not necessarily as vectors
    // template <typename T>
    // void add_input(const Shape& shape, const std::vector<T>& values)

    /// Sets the expected data (along with its shape) for the next available output
    /// The data should be stored as internal state, not necessarily as vectors
    // template <typename T>
    // void add_expected_output(const ngraph::Shape& expected_shape,
    //                          const std::vector<T>& values)
};
} // namespace test
} // namespace ngraph

@@ -1,11 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

// Builds a class name for a given backend prefix
// The prefix should come from cmake
// Example: INTERPRETER -> INTERPRETER_Engine
// Example: IE_CPU -> IE_CPU_Engine
#define ENGINE_CLASS_NAME(backend) backend##_Engine

@@ -86,6 +86,9 @@ make_reshape_view(const InferenceEngine::Blob::Ptr &blob, InferenceEngine::SizeV
 */
size_t byte_size(const InferenceEngine::TensorDesc &tdesc);

ov::Tensor make_tensor_with_precision_convert(const ov::Tensor& tensor, ov::element::Type prc);


template<typename T>
inline void fill_roi_raw_ptr(T* data, size_t data_size, const uint32_t range, const int32_t height, const int32_t width, const float omega,
                             const bool is_roi_max_mode, const int32_t seed = 1) {

@@ -285,27 +285,52 @@ void fill_data_with_broadcast(ov::Tensor& tensor, ov::Tensor& values) {
}

template<ov::element::Type_t SRC_E, ov::element::Type_t DST_E>
void copy_with_convert(ov::Tensor& src_tensor, ov::Tensor& dst_tensor) {
void copy_tensor_with_convert(const ov::Tensor& src_tensor, ov::Tensor& dst_tensor) {
    using SRC_TYPE = typename ov::fundamental_type_for<SRC_E>;
    using DST_TYPE = typename ov::fundamental_type_for<DST_E>;

    OPENVINO_ASSERT(src_tensor.get_size() == dst_tensor.get_size());

    auto src_ptr = src_tensor.data<SRC_TYPE>();
    auto src_size = src_tensor.get_size();

    auto dst_ptr = dst_tensor.data<DST_TYPE>();

    std::copy(src_ptr, src_ptr + src_size, dst_ptr);
    auto converter = [] (SRC_TYPE value) {return static_cast<DST_TYPE>(value);};

    std::transform(src_ptr, src_ptr + src_size, dst_ptr, converter);
}

ov::Tensor make_with_precision_convert(ov::Tensor& tensor, ov::element::Type prc) {
ov::Tensor make_tensor_with_precision_convert(const ov::Tensor& tensor, ov::element::Type prc) {
    ov::Tensor new_tensor(prc, tensor.get_shape());
    auto src_prc = tensor.get_element_type();

#define CASE(_PRC) case ov::element::_PRC: \
    copy_with_convert<ov::element::Type_t::f32, ov::element::Type_t::_PRC> (tensor, new_tensor); break
    switch (prc) {
        CASE(f32); CASE(f16); CASE(i64); CASE(u64); CASE(i32); CASE(u32); CASE(i16); CASE(u16); CASE(i8); CASE(u8);
        default: OPENVINO_THROW("Unsupported precision case");
#define CASE0(SRC_PRC, DST_PRC) case ov::element::DST_PRC : \
    copy_tensor_with_convert<ov::element::SRC_PRC, ov::element::DST_PRC> (tensor, new_tensor); break;

#define CASE(SRC_PRC) \
    case ov::element::SRC_PRC: \
        switch (prc) { \
            CASE0(SRC_PRC, bf16) \
            CASE0(SRC_PRC, f16) \
            CASE0(SRC_PRC, f32) \
            CASE0(SRC_PRC, f64) \
            CASE0(SRC_PRC, i8) \
            CASE0(SRC_PRC, i16) \
            CASE0(SRC_PRC, i32) \
            CASE0(SRC_PRC, i64) \
            CASE0(SRC_PRC, u8) \
            CASE0(SRC_PRC, u16) \
            CASE0(SRC_PRC, u32) \
            CASE0(SRC_PRC, u64) \
            default: OPENVINO_THROW("Unsupported precision case: ", prc.c_type_string()); \
        } break;

    switch (src_prc) {
        CASE(f64); CASE(f32); CASE(f16); CASE(bf16); CASE(i64); CASE(u64); CASE(i32); CASE(u32); CASE(i16); CASE(u16); CASE(i8); CASE(u8);
        default: OPENVINO_THROW("Unsupported precision case: ", src_prc.c_type_string());
    }
#undef CASE0
#undef CASE

    return new_tensor;

@@ -320,7 +345,7 @@ void fill_data_with_broadcast(ov::Tensor& tensor, size_t axis, std::vector<float
    values_tensor = ov::Tensor(ov::element::f32, value_dims, values.data());

    if (prc != ov::element::f32) {
        values_tensor = make_with_precision_convert(values_tensor, prc);
        values_tensor = make_tensor_with_precision_convert(values_tensor, prc);
    }

    fill_data_with_broadcast(tensor, values_tensor);
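
The renamed ov::test::utils::make_tensor_with_precision_convert is what the compare_values change in test_case.cpp relies on; a minimal usage sketch, with the tensor contents as placeholders:

ov::Tensor f16_tensor(ov::element::f16, ov::Shape{2, 3});
// ... fill f16_tensor with test data ...
ov::Tensor f64_tensor = ov::test::utils::make_tensor_with_precision_convert(f16_tensor, ov::element::f64);
// f64_tensor holds the same values, converted element-wise via copy_tensor_with_convert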