Refactor engines utils (#18856)

* Refactor engines utils
* Apply comments
* Apply comments 2

This commit is contained in:
parent 83d7b9d372
commit fb45deb65e
@@ -11,7 +11,7 @@
 #include "common_test_utils/all_close.hpp"
 #include "common_test_utils/ndarray.hpp"
 #include "common_test_utils/test_tools.hpp"
-#include "engines_util/random.hpp"
+#include "engines_util/execute_tools.hpp"
 #include "gtest/gtest.h"
 #include "ngraph/file_util.hpp"
 #include "ngraph/graph_util.hpp"
@@ -537,6 +537,34 @@ TEST(eval, evaluate_broadcast_v3_explicit_dyn) {
     ASSERT_EQ(result_val, expec);
 }
 
+class TestOpMultiOut : public op::Op {
+public:
+    OPENVINO_OP("TestOpMultiOut");
+    TestOpMultiOut() = default;
+
+    TestOpMultiOut(const Output<Node>& output_1, const Output<Node>& output_2) : Op({output_1, output_2}) {
+        validate_and_infer_types();
+    }
+
+    void validate_and_infer_types() override {
+        set_output_size(2);
+        set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
+        set_output_type(1, get_input_element_type(1), get_input_partial_shape(1));
+    }
+
+    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override {
+        return std::make_shared<TestOpMultiOut>(new_args.at(0), new_args.at(1));
+    }
+
+    OPENVINO_SUPPRESS_DEPRECATED_START
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override {
+        inputs[0]->read(outputs[0]->get_data_ptr(), inputs[0]->get_size_in_bytes());
+        inputs[1]->read(outputs[1]->get_data_ptr(), inputs[1]->get_size_in_bytes());
+        return true;
+    }
+    OPENVINO_SUPPRESS_DEPRECATED_END
+};
+
 TEST(eval, test_op_multi_out) {
     auto p = make_shared<op::Parameter>(element::f32, PartialShape{2, 3});
     auto p2 = make_shared<op::Parameter>(element::f64, PartialShape{2, 2});
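The class above moves the multi-output test op from the shared header into the test translation unit. Below is a hedged sketch (not part of this change) of how such an op can be driven directly through `Node::evaluate()` with the host-tensor helpers from `engines_util/execute_tools.hpp`; the test name and the input data are illustrative only.

```cpp
OPENVINO_SUPPRESS_DEPRECATED_START
TEST(eval, test_op_multi_out_evaluate_sketch) {
    auto in0 = make_shared<op::Parameter>(element::f32, Shape{2, 2});
    auto in1 = make_shared<op::Parameter>(element::f32, Shape{2, 2});
    auto op = make_shared<TestOpMultiOut>(in0, in1);

    // Pre-allocated outputs; evaluate() copies each input's bytes into the matching output.
    HostTensorVector outputs{make_shared<runtime::HostTensor>(element::f32, Shape{2, 2}),
                             make_shared<runtime::HostTensor>(element::f32, Shape{2, 2})};
    HostTensorVector inputs{make_host_tensor<element::Type_t::f32>(Shape{2, 2}, {1, 2, 3, 4}),
                            make_host_tensor<element::Type_t::f32>(Shape{2, 2}, {5, 6, 7, 8})};

    ASSERT_TRUE(op->evaluate(outputs, inputs));
    EXPECT_EQ(outputs[0]->get_element_type(), element::f32);
    EXPECT_EQ(outputs[0]->get_shape(), (Shape{2, 2}));
}
OPENVINO_SUPPRESS_DEPRECATED_END
```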
@@ -28,7 +28,6 @@
 #include "default_opset.hpp"
 #include "openvino/opsets/opset12.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
 #include "ngraph/pass/constant_folding.hpp"
@@ -16,7 +16,6 @@
 #include "common_test_utils/file_utils.hpp"
 #include "default_opset.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "onnx_import/onnx.hpp"
 #include "common_test_utils/test_control.hpp"
 
@@ -8,7 +8,6 @@
 #include "common_test_utils/type_prop.hpp"
 #include "default_opset.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "gtest/gtest.h"
 #include "ngraph/file_util.hpp"
 #include "ngraph/type.hpp"
@@ -19,7 +19,6 @@
 #include "common_test_utils/test_control.hpp"
 #include "common_test_utils/test_tools.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
 #include "onnx_import/onnx.hpp"
@@ -19,7 +19,6 @@
 #include "common_test_utils/test_control.hpp"
 #include "common_test_utils/test_tools.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
 #include "onnx_import/onnx.hpp"
@@ -18,7 +18,6 @@
 #include "common_test_utils/file_utils.hpp"
 #include "default_opset.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "gtest/gtest.h"
 #include "ngraph/file_util.hpp"
 #include "onnx_import/onnx.hpp"
@@ -25,7 +25,6 @@
 #include "common_test_utils/file_utils.hpp"
 #include "default_opset.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
 #include "ngraph/pass/constant_folding.hpp"
@@ -16,7 +16,6 @@
 #include "common_test_utils/file_utils.hpp"
 #include "default_opset.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "onnx_import/onnx.hpp"
 #include "common_test_utils/test_control.hpp"
 
@@ -20,7 +20,6 @@
 #include "common_test_utils/test_control.hpp"
 #include "common_test_utils/test_tools.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
 #include "onnx_import/onnx.hpp"
@@ -19,7 +19,6 @@
 #include "common_test_utils/test_tools.hpp"
 #include "engines_util/execute_tools.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
 #include "onnx_import/onnx.hpp"
@@ -20,7 +20,6 @@
 #include "common_test_utils/test_control.hpp"
 #include "common_test_utils/test_tools.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
 #include "onnx_import/onnx.hpp"
@@ -16,7 +16,6 @@
 #include "common_test_utils/file_utils.hpp"
 #include "default_opset.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "onnx_import/onnx.hpp"
 #include "common_test_utils/test_control.hpp"
 
@@ -14,7 +14,6 @@
 #include "common_test_utils/file_utils.hpp"
 #include "editor.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
 #include "common_test_utils/test_control.hpp"
@@ -12,7 +12,6 @@
 #include "default_opset.hpp"
 #include "editor.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "gtest/gtest.h"
 #include "ngraph/file_util.hpp"
 #include "ngraph/op/util/op_types.hpp"
@@ -6,7 +6,6 @@
 
 #include <fstream>
 
-#include "engines_util/test_engines.hpp"
 #include "ngraph/ngraph.hpp"
 #include "paddle_utils.hpp"
 #include "common_test_utils/test_control.hpp"
@@ -8,7 +8,6 @@
 
 #include "common_test_utils/test_control.hpp"
 #include "engines_util/test_case.hpp"
-#include "engines_util/test_engines.hpp"
 #include "utils.hpp"
 
 using namespace ngraph;
@@ -1,45 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "engine_traits.hpp"
-#include "ngraph/function.hpp"
-
-namespace ngraph {
-namespace test {
-enum class TestCaseType { STATIC, DYNAMIC };
-
-namespace {
-/// A factory that can create engines supporting devices but not dynamic backends.
-/// Currently: IE_CPU_Backend and IE_GPU_Backend
-template <typename Engine>
-typename std::enable_if<supports_devices<Engine>::value, Engine>::type create_engine_impl(
-    const std::shared_ptr<ngraph::Function> function,
-    const TestCaseType) {
-    return Engine{function};
-}
-
-/// A factory that can create engines which support dynamic backends
-/// but do not support devices. Currently: INTERPRETER_Engine
-template <typename Engine>
-typename std::enable_if<supports_dynamic<Engine>::value, Engine>::type create_engine_impl(
-    const std::shared_ptr<ngraph::Function> function,
-    const TestCaseType tct) {
-    if (tct == TestCaseType::DYNAMIC) {
-        return Engine::dynamic(function);
-    } else {
-        return Engine{function};
-    }
-}
-}  // namespace
-
-/// A factory that is able to create all types of test Engines
-/// in both static and dynamic mode
-template <typename Engine>
-Engine create_engine(const std::shared_ptr<ngraph::Function> function, const TestCaseType tct) {
-    return create_engine_impl<Engine>(function, tct);
-};
-}  // namespace test
-}  // namespace ngraph
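The removed factory picked a construction path at compile time: engines advertising `supports_devices` were built directly, while `supports_dynamic` engines could also be created in dynamic mode. A hedged sketch of the kind of call site it used to serve; the `INTERPRETER_Engine` name follows the old `ENGINE_CLASS_NAME` convention and is illustrative, and this no longer builds after this PR.

```cpp
// Pre-refactor style, sketched from the removed header.
using namespace ngraph::test;
auto engine = create_engine<INTERPRETER_Engine>(function, TestCaseType::DYNAMIC);
engine.infer();  // infer() came from the TestCaseEngine interface, also removed in this PR
```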
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-namespace ngraph {
-namespace test {
-/// These templates should be specialized for each test engine and they should contain
-/// a "static constexpr const bool value" member set to true or false.
-/// These traits are used in engine_factory.hpp
-
-/// Indicates that a given Engine can be constructed for different devices (IE engines)
-template <typename Engine>
-struct supports_devices;
-
-/// Indicates that a given Engine supports dynamic shapes
-template <typename Engine>
-struct supports_dynamic;
-
-/// Example:
-///
-// template <>
-// struct supports_dynamic<EngineName> {
-//     static constexpr const bool value = true;
-// };
-}  // namespace test
-}  // namespace ngraph
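For reference, the trait specializations this removed header expected looked like the following; `MyEngine` is a placeholder type, not something from the repository.

```cpp
namespace ngraph {
namespace test {
// Each test engine provided both traits so engine_factory.hpp could pick a constructor.
template <>
struct supports_dynamic<MyEngine> {
    static constexpr const bool value = true;
};

template <>
struct supports_devices<MyEngine> {
    static constexpr const bool value = false;
};
}  // namespace test
}  // namespace ngraph
```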
@@ -65,10 +65,15 @@ shared_ptr<Function> make_test_graph() {
     return f0;
 }
 
-template <>
-void copy_data<bool>(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<bool>& data) {
-    std::vector<char> data_char(data.begin(), data.end());
-    copy_data(tv, data_char);
+template <typename T>
+void init_int_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max) {
+    size_t size = tv->get_element_count();
+    std::uniform_int_distribution<T> dist(min, max);
+    std::vector<T> vec(size);
+    for (T& element : vec) {
+        element = dist(engine);
+    }
+    tv->write(vec.data(), vec.size() * sizeof(T));
 }
 
 template <>
@@ -104,6 +109,17 @@ void init_int_tv<uint8_t>(ngraph::runtime::Tensor* tv, std::default_random_engin
     tv->write(vec.data(), vec.size() * sizeof(uint8_t));
 }
 
+template <typename T>
+void init_real_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max) {
+    size_t size = tv->get_element_count();
+    std::uniform_real_distribution<T> dist(min, max);
+    std::vector<T> vec(size);
+    for (T& element : vec) {
+        element = dist(engine);
+    }
+    tv->write(vec.data(), vec.size() * sizeof(T));
+}
+
 void random_init(ngraph::runtime::Tensor* tv, std::default_random_engine& engine) {
     element::Type et = tv->get_element_type();
     if (et == element::boolean) {
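The integer and floating-point fillers above share one shape: query the element count, draw from a seeded distribution, and write the buffer back in a single call; `random_init` then switches on the tensor's element type before filling it. A hedged usage sketch (not part of the diff); the shape and seed are illustrative.

```cpp
// Reproducible random contents for a host tensor.
auto tensor = std::make_shared<ngraph::runtime::HostTensor>(ov::element::f32, ov::Shape{2, 4});
std::default_random_engine engine(2112);  // fixed seed keeps the data repeatable across runs
random_init(tensor.get(), engine);
```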
@@ -133,21 +149,6 @@ void random_init(ngraph::runtime::Tensor* tv, std::default_random_engine& engine
     }
 }
 
-template <>
-string get_results_str(const std::vector<char>& ref_data, const std::vector<char>& actual_data, size_t max_results) {
-    stringstream ss;
-    size_t num_results = std::min(static_cast<size_t>(max_results), ref_data.size());
-    ss << "First " << num_results << " results";
-    for (size_t i = 0; i < num_results; ++i) {
-        ss << std::endl
-           << std::setw(4) << i << " ref: " << std::setw(16) << std::left << static_cast<int>(ref_data[i])
-           << " actual: " << std::setw(16) << std::left << static_cast<int>(actual_data[i]);
-    }
-    ss << std::endl;
-
-    return ss.str();
-}
-
 ::testing::AssertionResult test_ordered_ops(shared_ptr<Function> f, const NodeVector& required_ops) {
     unordered_set<Node*> seen;
     for (auto& node_ptr : f->get_ordered_ops()) {
@@ -177,9 +178,3 @@ string get_results_str(const std::vector<char>& ref_data, const std::vector<char
         }
     }
     return ::testing::AssertionSuccess();
 }
-
-bool ngraph::TestOpMultiOut::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
-    inputs[0]->read(outputs[0]->get_data_ptr(), inputs[0]->get_size_in_bytes());
-    inputs[1]->read(outputs[1]->get_data_ptr(), inputs[1]->get_size_in_bytes());
-    return true;
-}
@@ -4,144 +4,51 @@
 
 #pragma once
 
-#include <exception>
-#include <fstream>
-#include <iomanip>
-#include <iostream>
-#include <list>
-#include <memory>
 #include <random>
-#include <vector>
 
 #include "gtest/gtest.h"
-#include "ngraph/file_util.hpp"
-#include "ngraph/function.hpp"
-#include "ngraph/log.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
-#include "ngraph/runtime/tensor.hpp"
-#include "ngraph/type/element_type_traits.hpp"
+#include "openvino/core/model.hpp"
+#include "openvino/core/node.hpp"
+#include "openvino/core/shape.hpp"
+#include "openvino/op/op.hpp"
 
-namespace ngraph {
-class TestOpMultiOut : public op::Op {
-public:
-    OPENVINO_OP("TestOpMultiOut");
-    TestOpMultiOut() = default;
-
-    TestOpMultiOut(const Output<Node>& output_1, const Output<Node>& output_2) : Op({output_1, output_2}) {
-        validate_and_infer_types();
-    }
-    void validate_and_infer_types() override {
-        set_output_size(2);
-        set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
-        set_output_type(1, get_input_element_type(1), get_input_partial_shape(1));
-    }
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override {
-        return std::make_shared<TestOpMultiOut>(new_args.at(0), new_args.at(1));
-    }
-    OPENVINO_SUPPRESS_DEPRECATED_START
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    OPENVINO_SUPPRESS_DEPRECATED_END
-};
-}  // namespace ngraph
-
-bool validate_list(const std::vector<std::shared_ptr<ngraph::Node>>& nodes);
-std::shared_ptr<ngraph::Function> make_test_graph();
+bool validate_list(const std::vector<std::shared_ptr<ov::Node>>& nodes);
+std::shared_ptr<ov::Model> make_test_graph();
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 template <typename T>
-void copy_data(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<T>& data) {
+void copy_data(const std::shared_ptr<ngraph::runtime::Tensor>& tv, const std::vector<T>& data) {
     size_t data_size = data.size() * sizeof(T);
     if (data_size > 0) {
         tv->write(data.data(), data_size);
     }
 }
 
-template <ngraph::element::Type_t ET>
-ngraph::HostTensorPtr make_host_tensor(const ngraph::Shape& shape,
-                                       const std::vector<typename ngraph::element_type_traits<ET>::value_type>& data) {
+template <>
+inline void copy_data<bool>(const std::shared_ptr<ngraph::runtime::Tensor>& tv, const std::vector<bool>& data) {
+    std::vector<char> data_char(data.begin(), data.end());
+    copy_data(tv, data_char);
+}
+
+template <ov::element::Type_t ET>
+ngraph::HostTensorPtr make_host_tensor(const ov::Shape& shape,
+                                       const std::vector<typename ov::element_type_traits<ET>::value_type>& data) {
     NGRAPH_CHECK(shape_size(shape) == data.size(), "Incorrect number of initialization elements");
-    auto host_tensor = std::make_shared<ngraph::HostTensor>(ET, shape);
+    auto host_tensor = std::make_shared<ngraph::runtime::HostTensor>(ET, shape);
     copy_data(host_tensor, data);
     return host_tensor;
 }
 
-template <>
-void copy_data<bool>(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<bool>& data);
-
-template <typename T>
-void write_vector(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<T>& values) {
-    tv->write(values.data(), values.size() * sizeof(T));
-}
-
-template <typename T>
-std::vector<std::shared_ptr<T>> get_ops_of_type(std::shared_ptr<ngraph::Function> f) {
-    std::vector<std::shared_ptr<T>> ops;
-    for (auto op : f->get_ops()) {
-        if (auto cop = ngraph::as_type_ptr<T>(op)) {
-            ops.push_back(cop);
-        }
-    }
-
-    return ops;
-}
-
-template <typename T>
-void init_int_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max) {
-    size_t size = tv->get_element_count();
-    std::uniform_int_distribution<T> dist(min, max);
-    std::vector<T> vec(size);
-    for (T& element : vec) {
-        element = dist(engine);
-    }
-    tv->write(vec.data(), vec.size() * sizeof(T));
-}
-
-template <typename T>
-void init_real_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max) {
-    size_t size = tv->get_element_count();
-    std::uniform_real_distribution<T> dist(min, max);
-    std::vector<T> vec(size);
-    for (T& element : vec) {
-        element = dist(engine);
-    }
-    tv->write(vec.data(), vec.size() * sizeof(T));
-}
-
 void random_init(ngraph::runtime::Tensor* tv, std::default_random_engine& engine);
 
-template <typename T>
-std::string get_results_str(const std::vector<T>& ref_data,
-                            const std::vector<T>& actual_data,
-                            size_t max_results = 16) {
-    std::stringstream ss;
-    size_t num_results = std::min(static_cast<size_t>(max_results), ref_data.size());
-    ss << "First " << num_results << " results";
-    for (size_t i = 0; i < num_results; ++i) {
-        ss << std::endl
-           // use unary + operator to force integral values to be displayed as numbers
-           << std::setw(4) << i << " ref: " << std::setw(16) << std::left << +ref_data[i]
-           << " actual: " << std::setw(16) << std::left << +actual_data[i];
-    }
-    ss << std::endl;
-
-    return ss.str();
-}
-
-template <>
-std::string get_results_str(const std::vector<char>& ref_data,
-                            const std::vector<char>& actual_data,
-                            size_t max_results);
-
-testing::AssertionResult test_ordered_ops(std::shared_ptr<ngraph::Function> f, const ngraph::NodeVector& required_ops);
-
-template <ngraph::element::Type_t ET>
-ngraph::HostTensorPtr make_host_tensor(const ngraph::Shape& shape) {
-    auto host_tensor = std::make_shared<ngraph::HostTensor>(ET, shape);
+template <ov::element::Type_t ET>
+ngraph::HostTensorPtr make_host_tensor(const ov::Shape& shape) {
+    auto host_tensor = std::make_shared<ngraph::runtime::HostTensor>(ET, shape);
     static std::default_random_engine engine(2112);
     random_init(host_tensor.get(), engine);
     return host_tensor;
 }
 OPENVINO_SUPPRESS_DEPRECATED_END
+
+testing::AssertionResult test_ordered_ops(std::shared_ptr<ov::Model> f, const ov::NodeVector& required_ops);
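After the refactor the header keeps only what the remaining tests need: `copy_data`, the two `make_host_tensor` overloads, `random_init`, and `test_ordered_ops`, expressed against `ov::` core types. A hedged usage sketch of the two `make_host_tensor` entry points; the shapes and values are illustrative.

```cpp
OPENVINO_SUPPRESS_DEPRECATED_START
// Host tensor with explicit contents.
auto a = make_host_tensor<ov::element::Type_t::f32>(ov::Shape{2, 2}, {1.0f, 2.0f, 3.0f, 4.0f});
// Host tensor filled by random_init() with a fixed-seed engine.
auto b = make_host_tensor<ov::element::Type_t::i32>(ov::Shape{8});
// Overwrite an existing tensor in place.
copy_data(a, std::vector<float>{5.0f, 6.0f, 7.0f, 8.0f});
OPENVINO_SUPPRESS_DEPRECATED_END
```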
@@ -1,51 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <functional>
-#include <random>
-
-#include "execute_tools.hpp"
-#include "ngraph/runtime/tensor.hpp"
-#include "ngraph/type/element_type.hpp"
-#include "ngraph/util.hpp"
-
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace test {
-/// \brief A predictable pseudo-random number generator
-/// The seed is initialized so that we get repeatable pseudo-random numbers for tests
-template <typename T>
-class Uniform {
-public:
-    Uniform(T min, T max, T seed = 0)
-        : m_engine(seed),
-          m_distribution(min, max),
-          m_r(std::bind(m_distribution, m_engine)) {}
-
-    /// \brief Randomly initialize a tensor
-    /// \param ptv The tensor to initialize
-    const std::shared_ptr<runtime::Tensor> initialize(const std::shared_ptr<runtime::Tensor>& ptv) {
-        std::vector<T> vec = read_vector<T>(ptv);
-        initialize(vec);
-        write_vector(ptv, vec);
-        return ptv;
-    }
-    /// \brief Randomly initialize a vector
-    /// \param vec The tensor to initialize
-    void initialize(std::vector<T>& vec) {
-        for (T& elt : vec) {
-            elt = m_r();
-        }
-    }
-
-protected:
-    std::default_random_engine m_engine;
-    std::uniform_real_distribution<T> m_distribution;
-    std::function<T()> m_r;
-};
-}  // namespace test
-}  // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
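The removed `Uniform` helper wrapped a seeded `std::uniform_real_distribution` so tests got repeatable data. A hedged sketch of its old usage; code that still needs this behaviour can seed a `std::default_random_engine` and use the `init_real_tv` helper instead.

```cpp
// Pre-refactor usage, sketched from the removed header (no longer available after this PR).
ngraph::test::Uniform<float> rng(-1.0f, 1.0f, /*seed=*/0);
std::vector<float> values(16);
rng.initialize(values);  // fills the vector with repeatable pseudo-random floats
```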
@@ -1,34 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_utils.hpp"
-
-#include <cmath>
-#include <sstream>
-
-testing::AssertionResult ngraph::test::compare_with_tolerance(const std::vector<float>& expected,
-                                                              const std::vector<float>& results,
-                                                              const float tolerance) {
-    auto comparison_result = testing::AssertionSuccess();
-
-    std::stringstream msg;
-    msg << std::setprecision(std::numeric_limits<long double>::digits10 + 1);
-
-    bool rc = true;
-
-    for (std::size_t j = 0; j < expected.size(); ++j) {
-        float diff = std::fabs(results[j] - expected[j]);
-        if (diff > tolerance) {
-            msg << expected[j] << " is not close to " << results[j] << " at index " << j << "\n";
-            rc = false;
-        }
-    }
-
-    if (!rc) {
-        comparison_result = testing::AssertionFailure();
-        comparison_result << msg.str();
-    }
-
-    return comparison_result;
-}
@@ -1,17 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <gtest/gtest.h>
-
-#include <vector>
-
-namespace ngraph {
-namespace test {
-testing::AssertionResult compare_with_tolerance(const std::vector<float>& expected_results,
-                                                const std::vector<float>& results,
-                                                const float tolerance);
-}
-}  // namespace ngraph
@@ -5,9 +5,9 @@
 #include "test_case.hpp"
 
 #include "common_test_utils/all_close_f.hpp"
+#include "common_test_utils/data_utils.hpp"
 #include "common_test_utils/file_utils.hpp"
 #include "openvino/util/file_util.hpp"
-#include "shared_utils.hpp"
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace {
@@ -17,21 +17,46 @@ compare_values(const ov::Tensor& expected, const ov::Tensor& result, const size_
     return ov::test::utils::all_close_f(expected, result, static_cast<int>(tolerance_bits));
 }
 
+testing::AssertionResult compare_with_tolerance(const std::vector<float>& expected,
+                                                const std::vector<float>& results,
+                                                const float tolerance) {
+    auto comparison_result = testing::AssertionSuccess();
+
+    std::stringstream msg;
+    msg << std::setprecision(std::numeric_limits<long double>::digits10 + 1);
+
+    bool rc = true;
+
+    for (std::size_t j = 0; j < expected.size(); ++j) {
+        float diff = std::fabs(results[j] - expected[j]);
+        if (diff > tolerance) {
+            msg << expected[j] << " is not close to " << results[j] << " at index " << j << "\n";
+            rc = false;
+        }
+    }
+
+    if (!rc) {
+        comparison_result = testing::AssertionFailure();
+        comparison_result << msg.str();
+    }
+
+    return comparison_result;
+}
+
 testing::AssertionResult compare_with_fp_tolerance(const ov::Tensor& expected_tensor,
                                                    const ov::Tensor& result_tensor,
                                                    const float tolerance) {
-    auto comparison_result = testing::AssertionSuccess();
-
-    auto exp_host_t = std::make_shared<ngraph::HostTensor>(expected_tensor.get_element_type(),
-                                                           expected_tensor.get_shape(),
-                                                           expected_tensor.data());
-    auto res_host_t = std::make_shared<ngraph::HostTensor>(result_tensor.get_element_type(),
-                                                           result_tensor.get_shape(),
-                                                           result_tensor.data());
-    const auto expected = read_vector<float>(exp_host_t);
-    const auto result = read_vector<float>(res_host_t);
-
-    return ngraph::test::compare_with_tolerance(expected, result, tolerance);
+    OPENVINO_ASSERT(expected_tensor.get_element_type() == ov::element::f32);
+
+    std::vector<float> expected(expected_tensor.get_size());
+    ov::Tensor expected_view(expected_tensor.get_element_type(), expected_tensor.get_shape(), expected.data());
+    expected_tensor.copy_to(expected_view);
+
+    std::vector<float> result(result_tensor.get_size());
+    ov::Tensor result_view(result_tensor.get_element_type(), result_tensor.get_shape(), result.data());
+    result_tensor.copy_to(result_view);
+
+    return compare_with_tolerance(expected, result, tolerance);
 }
 
 template <typename T>
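`compare_with_tolerance` is now a file-local helper of `test_case.cpp`, and `compare_with_fp_tolerance` feeds it by copying the `ov::Tensor` data out through a view tensor instead of going through `ngraph::HostTensor`. A small behavioural illustration of the absolute-tolerance check; the numbers are illustrative and, since the helper sits in an anonymous namespace, this is not callable from other translation units.

```cpp
std::vector<float> ref{1.00f, 2.00f, 3.00f};
std::vector<float> out{1.02f, 2.00f, 2.90f};
EXPECT_TRUE(compare_with_tolerance(ref, out, 0.15f));   // max |diff| is 0.10 -> success
EXPECT_FALSE(compare_with_tolerance(ref, out, 0.05f));  // index 2 differs by 0.10 -> failure listing that index
```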
@@ -44,33 +69,19 @@ compare_values(const ov::Tensor& expected, const ov::Tensor& result, const size_
 template <typename T>
 typename std::enable_if<std::is_class<T>::value, testing::AssertionResult>::type
 compare_values(const ov::Tensor& expected_tensor, const ov::Tensor& result_tensor, const size_t tolerance_bits) {
-    auto exp_host_t = std::make_shared<ngraph::HostTensor>(expected_tensor.get_element_type(),
-                                                           expected_tensor.get_shape(),
-                                                           expected_tensor.data());
-    auto res_host_t = std::make_shared<ngraph::HostTensor>(result_tensor.get_element_type(),
-                                                           result_tensor.get_shape(),
-                                                           result_tensor.data());
-    const auto expected = read_vector<T>(exp_host_t);
-    const auto result = read_vector<T>(res_host_t);
-
-    // TODO: add testing infrastructure for float16 and bfloat16 to avoid cast to double
-    std::vector<double> expected_double(expected.size());
-    std::vector<double> result_double(result.size());
-
-    NGRAPH_CHECK(expected.size() == result.size(), "Number of expected and computed results don't match");
-
-    for (size_t i = 0; i < expected.size(); ++i) {
-        expected_double[i] = static_cast<double>(expected[i]);
-        result_double[i] = static_cast<double>(result[i]);
-    }
-
-    return ov::test::utils::all_close_f(expected_double, result_double, static_cast<int>(tolerance_bits));
+    auto expected_tensor_converted =
+        ov::test::utils::make_tensor_with_precision_convert(expected_tensor, ov::element::f64);
+    auto result_tensor_converted = ov::test::utils::make_tensor_with_precision_convert(result_tensor, ov::element::f64);
+
+    return ov::test::utils::all_close_f(expected_tensor_converted,
+                                        result_tensor_converted,
+                                        static_cast<int>(tolerance_bits));
 }
 };  // namespace
 
 namespace ngraph {
 namespace test {
-std::shared_ptr<Function> function_from_ir(const std::string& xml_path, const std::string& bin_path) {
+std::shared_ptr<ov::Model> function_from_ir(const std::string& xml_path, const std::string& bin_path) {
     ov::Core c;
     return c.read_model(xml_path, bin_path);
 }
@@ -180,7 +191,7 @@ testing::AssertionResult TestCase::compare_results_with_tolerance_as_fp(float to
     return comparison_result;
 }
 
-TestCase::TestCase(const std::shared_ptr<Function>& function, const std::string& dev) : m_function{function} {
+TestCase::TestCase(const std::shared_ptr<ov::Model>& function, const std::string& dev) : m_function{function} {
     try {
         // Register template plugin
        m_core.register_plugin(
@@ -9,10 +9,7 @@
 #include "common_test_utils/all_close.hpp"
 #include "common_test_utils/all_close_f.hpp"
 #include "common_test_utils/test_tools.hpp"
-#include "engine_factory.hpp"
 #include "ngraph/file_util.hpp"
-#include "ngraph/function.hpp"
-#include "ngraph/ngraph.hpp"
 #include "openvino/runtime/core.hpp"
 #include "openvino/util/file_util.hpp"
 
@@ -28,25 +25,25 @@ inline std::string backend_name_to_device(const std::string& backend_name) {
     OPENVINO_THROW("Unsupported backend name");
 }
 
-std::shared_ptr<Function> function_from_ir(const std::string& xml_path, const std::string& bin_path = {});
+std::shared_ptr<ov::Model> function_from_ir(const std::string& xml_path, const std::string& bin_path = {});
 
 class TestCase {
 public:
-    TestCase(const std::shared_ptr<Function>& function, const std::string& dev = "TEMPLATE");
+    TestCase(const std::shared_ptr<ov::Model>& function, const std::string& dev = "TEMPLATE");
 
     template <typename T>
     void add_input(const Shape& shape, const std::vector<T>& values) {
         const auto params = m_function->get_parameters();
-        NGRAPH_CHECK(m_input_index < params.size(), "All function parameters already have inputs.");
+        OPENVINO_ASSERT(m_input_index < params.size(), "All function parameters already have inputs.");
 
         const auto& input_pshape = params.at(m_input_index)->get_partial_shape();
-        NGRAPH_CHECK(input_pshape.compatible(shape),
+        OPENVINO_ASSERT(input_pshape.compatible(shape),
                      "Provided input shape ",
                      shape,
                      " is not compatible with nGraph function's expected input shape ",
                      input_pshape,
                      " for input ",
                      m_input_index);
 
         auto t_shape = m_request.get_input_tensor(m_input_index).get_shape();
         bool is_dynamic = false;
@@ -64,13 +61,13 @@ public:
             m_request.set_input_tensor(m_input_index, tensor);
         } else {
             auto tensor = m_request.get_input_tensor(m_input_index);
-            NGRAPH_CHECK(tensor.get_size() >= values.size(),
+            OPENVINO_ASSERT(tensor.get_size() >= values.size(),
                          "Tensor and values have different sizes. Tensor (",
                          tensor.get_shape(),
                          ") size: ",
                          tensor.get_size(),
                          " and values size is ",
                          values.size());
             std::copy(values.begin(), values.end(), tensor.data<T>());
         }
 
@@ -81,11 +78,11 @@ public:
     void add_input(const std::vector<T>& values) {
         const auto& input_pshape = m_function->get_parameters().at(m_input_index)->get_partial_shape();
 
-        NGRAPH_CHECK(input_pshape.is_static(),
+        OPENVINO_ASSERT(input_pshape.is_static(),
                      "Input number ",
                      m_input_index,
                      " in the tested graph has dynamic shape. You need to provide ",
                      "shape information when setting values for this input.");
 
         add_input<T>(input_pshape.to_shape(), values);
     }
@@ -99,18 +96,14 @@ public:
 
     template <typename T>
     void add_input_from_file(const Shape& shape, const std::string& basepath, const std::string& filename) {
-        NGRAPH_SUPPRESS_DEPRECATED_START
-        const auto filepath = ngraph::file_util::path_join(basepath, filename);
+        const auto filepath = ov::util::path_join({basepath, filename});
         add_input_from_file<T>(shape, filepath);
-        NGRAPH_SUPPRESS_DEPRECATED_END
     }
 
     template <typename T>
     void add_input_from_file(const std::string& basepath, const std::string& filename) {
-        NGRAPH_SUPPRESS_DEPRECATED_START
-        const auto filepath = ngraph::file_util::path_join(basepath, filename);
+        const auto filepath = ov::util::path_join({basepath, filename});
         add_input_from_file<T>(filepath);
-        NGRAPH_SUPPRESS_DEPRECATED_END
     }
 
     template <typename T>
@@ -129,16 +122,16 @@ public:
     void add_expected_output(const Shape& expected_shape, const std::vector<T>& values) {
         const auto results = m_function->get_results();
 
-        NGRAPH_CHECK(m_output_index < results.size(), "All model results already have expected outputs.");
+        OPENVINO_ASSERT(m_output_index < results.size(), "All model results already have expected outputs.");
 
         const auto& output_pshape = results.at(m_output_index)->get_output_partial_shape(0);
-        NGRAPH_CHECK(output_pshape.compatible(expected_shape),
+        OPENVINO_ASSERT(output_pshape.compatible(expected_shape),
                      "Provided expected output shape ",
                      expected_shape,
                      " is not compatible with OpenVINO model's output shape ",
                      output_pshape,
                      " for output ",
                      m_output_index);
 
         ov::Tensor tensor(results[m_output_index]->get_output_element_type(0), expected_shape);
         std::copy(values.begin(), values.end(), tensor.data<T>());
@@ -152,24 +145,24 @@ public:
     void add_expected_output(const std::vector<T>& values) {
         const auto results = m_function->get_results();
 
-        NGRAPH_CHECK(m_output_index < results.size(), "All model results already have expected outputs.");
+        OPENVINO_ASSERT(m_output_index < results.size(), "All model results already have expected outputs.");
 
         const auto shape = results.at(m_output_index)->get_shape();
         add_expected_output<T>(shape, values);
     }
 
     template <typename T>
-    void add_expected_output_from_file(const ngraph::Shape& expected_shape,
+    void add_expected_output_from_file(const ov::Shape& expected_shape,
                                        const std::string& basepath,
                                        const std::string& filename) {
-        NGRAPH_SUPPRESS_DEPRECATED_START
-        const auto filepath = ngraph::file_util::path_join(basepath, filename);
+        OPENVINO_SUPPRESS_DEPRECATED_START
+        const auto filepath = ov::util::path_join({basepath, filename});
         add_expected_output_from_file<T>(expected_shape, filepath);
-        NGRAPH_SUPPRESS_DEPRECATED_END
+        OPENVINO_SUPPRESS_DEPRECATED_END
     }
 
     template <typename T>
-    void add_expected_output_from_file(const ngraph::Shape& expected_shape, const std::string& filepath) {
+    void add_expected_output_from_file(const ov::Shape& expected_shape, const std::string& filepath) {
         const auto values = read_binary_file<T>(filepath);
         add_expected_output<T>(expected_shape, values);
     }
@@ -208,7 +201,7 @@ public:
     }
 
 private:
-    std::shared_ptr<Function> m_function;
+    std::shared_ptr<ov::Model> m_function;
     ov::Core m_core;
     ov::InferRequest m_request;
     std::vector<ov::Tensor> m_expected_outputs;
|
|||||||
// Copyright (C) 2018-2023 Intel Corporation
|
|
||||||
// SPDX-License-Identifier: Apache-2.0
|
|
||||||
//
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include <gtest/gtest.h>
|
|
||||||
|
|
||||||
namespace ngraph {
|
|
||||||
namespace test {
|
|
||||||
/// An interface that each test case engine needs to implement. This interface wraps
|
|
||||||
/// a couple of generic methods which are required by the TestCase class to execute
|
|
||||||
/// a unit test for a given ngraph::Function.
|
|
||||||
/// The interface operates on C++ types while internally it can use implementation-specific
|
|
||||||
/// types, containers and structures.
|
|
||||||
class TestCaseEngine {
|
|
||||||
public:
|
|
||||||
virtual ~TestCaseEngine() noexcept = default;
|
|
||||||
|
|
||||||
/// Performs the inference using data stored as internal state
|
|
||||||
virtual void infer() = 0;
|
|
||||||
|
|
||||||
/// Resets the internal state so that the test can be executed again
|
|
||||||
virtual void reset() = 0;
|
|
||||||
|
|
||||||
/// Compares computed and expected results, returns AssertionSuccess or AssertionFailure
|
|
||||||
virtual testing::AssertionResult compare_results(const size_t tolerance_bits) = 0;
|
|
||||||
|
|
||||||
/// Compares computed and expected results, returns AssertionSuccess or AssertionFailure
|
|
||||||
virtual testing::AssertionResult compare_results_with_tolerance_as_fp(const float tolerance) = 0;
|
|
||||||
|
|
||||||
/// Additionally the interface implementing class needs to define
|
|
||||||
/// the following 2 methods. They are called from the TestCase class
|
|
||||||
/// but they can't be a part of interface since they need to be declared as templates
|
|
||||||
|
|
||||||
/// Passes data (along with its shape) to the next available input.
|
|
||||||
/// The data should be stored as internal state, not necessarily as vectors
|
|
||||||
// template <typename T>
|
|
||||||
// void add_input(const Shape& shape, const std::vector<T>& values)
|
|
||||||
|
|
||||||
/// Sets the expected data (along with its shape) for the next available output
|
|
||||||
/// The data should be stored as internal state, not necessarily as vectors
|
|
||||||
// template <typename T>
|
|
||||||
// void add_expected_output(const ngraph::Shape& expected_shape,
|
|
||||||
// const std::vector<T>& values)
|
|
||||||
};
|
|
||||||
} // namespace test
|
|
||||||
} // namespace ngraph
|
|
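For context, an implementation of the removed interface looked roughly like the following; `DummyEngine` and its method bodies are placeholders, not code from the repository.

```cpp
class DummyEngine : public ngraph::test::TestCaseEngine {
public:
    void infer() override { /* run the stored inputs through a backend */ }
    void reset() override { /* clear stored inputs and expected outputs */ }
    testing::AssertionResult compare_results(const size_t) override {
        return testing::AssertionSuccess();
    }
    testing::AssertionResult compare_results_with_tolerance_as_fp(const float) override {
        return testing::AssertionSuccess();
    }
    // Plus the non-virtual template hooks described in the comments above:
    // template <typename T> void add_input(const Shape&, const std::vector<T>&);
    // template <typename T> void add_expected_output(const Shape&, const std::vector<T>&);
};
```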
@@ -1,11 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-// Builds a class name for a given backend prefix
-// The prefix should come from cmake
-// Example: INTERPRETER -> INTERPRETER_Engine
-// Example: IE_CPU -> IE_CPU_Engine
-#define ENGINE_CLASS_NAME(backend) backend##_Engine
@@ -86,6 +86,9 @@ make_reshape_view(const InferenceEngine::Blob::Ptr &blob, InferenceEngine::SizeV
 */
 size_t byte_size(const InferenceEngine::TensorDesc &tdesc);
 
+ov::Tensor make_tensor_with_precision_convert(const ov::Tensor& tensor, ov::element::Type prc);
+
+
 template<typename T>
 inline void fill_roi_raw_ptr(T* data, size_t data_size, const uint32_t range, const int32_t height, const int32_t width, const float omega,
                              const bool is_roi_max_mode, const int32_t seed = 1) {
@@ -498,9 +498,9 @@ template<typename T>
 
 
 ::testing::AssertionResult all_close_f(const ov::Tensor& a,
                                        const ov::Tensor& b,
                                        int tolerance_bits,
                                        float min_signal) {
     if (a.get_element_type() != b.get_element_type()) {
         return ::testing::AssertionFailure() << "Cannot compare tensors with different element types";
     }
@@ -285,27 +285,52 @@ void fill_data_with_broadcast(ov::Tensor& tensor, ov::Tensor& values) {
 }
 
 template<ov::element::Type_t SRC_E, ov::element::Type_t DST_E>
-void copy_with_convert(ov::Tensor& src_tensor, ov::Tensor& dst_tensor) {
+void copy_tensor_with_convert(const ov::Tensor& src_tensor, ov::Tensor& dst_tensor) {
     using SRC_TYPE = typename ov::fundamental_type_for<SRC_E>;
     using DST_TYPE = typename ov::fundamental_type_for<DST_E>;
 
+    OPENVINO_ASSERT(src_tensor.get_size() == dst_tensor.get_size());
+
     auto src_ptr = src_tensor.data<SRC_TYPE>();
     auto src_size = src_tensor.get_size();
 
     auto dst_ptr = dst_tensor.data<DST_TYPE>();
 
-    std::copy(src_ptr, src_ptr + src_size, dst_ptr);
+    auto converter = [] (SRC_TYPE value) {return static_cast<DST_TYPE>(value);};
+
+    std::transform(src_ptr, src_ptr + src_size, dst_ptr, converter);
 }
 
-ov::Tensor make_with_precision_convert(ov::Tensor& tensor, ov::element::Type prc) {
+ov::Tensor make_tensor_with_precision_convert(const ov::Tensor& tensor, ov::element::Type prc) {
     ov::Tensor new_tensor(prc, tensor.get_shape());
+    auto src_prc = tensor.get_element_type();
 
-#define CASE(_PRC) case ov::element::_PRC: \
-        copy_with_convert<ov::element::Type_t::f32, ov::element::Type_t::_PRC> (tensor, new_tensor); break
-    switch (prc) {
-        CASE(f32); CASE(f16); CASE(i64); CASE(u64); CASE(i32); CASE(u32); CASE(i16); CASE(u16); CASE(i8); CASE(u8);
-        default: OPENVINO_THROW("Unsupported precision case");
+#define CASE0(SRC_PRC, DST_PRC) case ov::element::DST_PRC : \
+        copy_tensor_with_convert<ov::element::SRC_PRC, ov::element::DST_PRC> (tensor, new_tensor); break;
+
+#define CASE(SRC_PRC) \
+    case ov::element::SRC_PRC: \
+        switch (prc) { \
+            CASE0(SRC_PRC, bf16) \
+            CASE0(SRC_PRC, f16) \
+            CASE0(SRC_PRC, f32) \
+            CASE0(SRC_PRC, f64) \
+            CASE0(SRC_PRC, i8) \
+            CASE0(SRC_PRC, i16) \
+            CASE0(SRC_PRC, i32) \
+            CASE0(SRC_PRC, i64) \
+            CASE0(SRC_PRC, u8) \
+            CASE0(SRC_PRC, u16) \
+            CASE0(SRC_PRC, u32) \
+            CASE0(SRC_PRC, u64) \
+            default: OPENVINO_THROW("Unsupported precision case: ", prc.c_type_string()); \
+        } break;
+
+    switch (src_prc) {
+        CASE(f64); CASE(f32); CASE(f16); CASE(bf16); CASE(i64); CASE(u64); CASE(i32); CASE(u32); CASE(i16); CASE(u16); CASE(i8); CASE(u8);
+        default: OPENVINO_THROW("Unsupported precision case: ", src_prc.c_type_string());
     }
+#undef CASE0
 #undef CASE
 
     return new_tensor;
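The nested `CASE`/`CASE0` switch lets the renamed helper convert between any pair of the listed precisions instead of assuming an `f32` source, and the element-wise `std::transform` with a `static_cast` converter replaces the plain `std::copy`. A hedged usage sketch; the tensor contents are illustrative only.

```cpp
ov::Tensor int_tensor(ov::element::i32, ov::Shape{2, 2});
auto* data = int_tensor.data<int32_t>();
for (size_t i = 0; i < int_tensor.get_size(); ++i)
    data[i] = static_cast<int32_t>(i);  // 0, 1, 2, 3

// Any supported source precision can now be converted, e.g. i32 -> f16.
ov::Tensor f16_tensor = ov::test::utils::make_tensor_with_precision_convert(int_tensor, ov::element::f16);
```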
@ -320,7 +345,7 @@ void fill_data_with_broadcast(ov::Tensor& tensor, size_t axis, std::vector<float
|
|||||||
values_tensor = ov::Tensor(ov::element::f32, value_dims, values.data());
|
values_tensor = ov::Tensor(ov::element::f32, value_dims, values.data());
|
||||||
|
|
||||||
if (prc != ov::element::f32) {
|
if (prc != ov::element::f32) {
|
||||||
values_tensor = make_with_precision_convert(values_tensor, prc);
|
values_tensor = make_tensor_with_precision_convert(values_tensor, prc);
|
||||||
}
|
}
|
||||||
|
|
||||||
fill_data_with_broadcast(tensor, values_tensor);
|
fill_data_with_broadcast(tensor, values_tensor);
|
||||||
|