This commit is contained in:
Efode, Irina 2021-10-11 18:23:26 +03:00
parent b80b87b2d9
commit c66d8626e4
7 changed files with 751 additions and 2 deletions

View File

@ -0,0 +1,117 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <typeindex>
#include <string>
#include <vector>
#include <memory>
#include <tuple>
#include <gtest/gtest.h>
#include <openvino/core/node.hpp>
#include <openvino/core/function.hpp>
#include <openvino/pass/manager.hpp>
#include <openvino/core/type/bfloat16.hpp>
#include <ie_plugin_config.hpp>
#include "common_test_utils/ngraph_test_utils.hpp"
#include "common_test_utils/common_utils.hpp"
#include "common_test_utils/test_common.hpp"
#include "common_test_utils/test_constants.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "functional_test_utils/ov_plugin_cache.hpp"
//#include "functional_test_utils/ov_tensor_utils.hpp"
#include "functional_test_utils/precision_utils.hpp"
#include "functional_test_utils/layer_test_utils/summary.hpp"
#include "functional_test_utils/layer_test_utils/environment.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
namespace ov {
namespace test {
class SubgraphBaseTest : public CommonTestUtils::TestsCommon {
public:
virtual void run();
virtual void serialize();
virtual void query_model();
void TearDown() override {
if (!configuration.empty()) {
ov::test::PluginCache::get().core().reset();
}
}
protected:
void compare(const std::vector<ov::runtime::Tensor> &expected,
const std::vector<ov::runtime::Tensor> &actual);
// virtual void compare_desc(const ov::runtime::Tensor &expected, const ov::runtime::Tensor &actual);
// std::shared_ptr<ov::Function> get_function();
// std::map<std::string, std::string> &get_configuration();
// std::string get_runtime_precision(const std::string &layerName);
// std::string get_runtime_precision_by_type(const std::string &layerType);
//#ifndef NDEBUG
//
// void show_runtime_precision();
//
//#endif
// virtual void configure_model();
virtual void compile_model();
//
virtual void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes);
//
virtual void infer();
//
virtual void validate();
//
//
// virtual std::vector<ov::runtime::Tensor> get_outputs();
std::shared_ptr<ov::runtime::Core> core = ov::test::PluginCache::get().core();
std::string targetDevice;
std::map<std::string, std::string> configuration;
std::shared_ptr<ngraph::Function> function;
std::shared_ptr<ngraph::Function> functionRefs;
ov::element::Type inPrc;
ov::element::Type outPrc;
std::map<std::string, ov::runtime::Tensor> inputs;
std::vector<ngraph::PartialShape> inputDynamicShapes;
std::vector<std::vector<ngraph::Shape>> targetStaticShapes;
ov::runtime::ExecutableNetwork executableNetwork;
ov::runtime::InferRequest inferRequest;
constexpr static const auto disable_treshold = std::numeric_limits<double>::max();
double abs_threshold = disable_treshold;
double rel_threshold = disable_treshold;
LayerTestsUtils::Summary& summary = LayerTestsUtils::Summary::getInstance();;
private:
// ov::runtime::Tensor generate_input(const ov::HostTensor &type, const ov::Shape &shape) const;
// void compare(const std::pair<ov::element::Type, std::vector<std::uint8_t>> &expected,
// const ov::runtime::Tensor &actual);
void resize_ngraph_function(const std::vector<ngraph::Shape>& targetInputStaticShapes);
std::vector<ov::runtime::Tensor> calculate_refs();
std::vector<ov::runtime::Tensor> get_outputs();
};
} // namespace test
} // namespace ov

View File

@ -0,0 +1,307 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <fstream>
#include <signal.h>
#ifdef _WIN32
#include <process.h>
#endif
#include <transformations/serialize.hpp>
#include <ngraph/opsets/opset.hpp>
#include <pugixml.hpp>
#include <common_test_utils/file_utils.hpp>
#include <thread>
#include <functional_test_utils/ov_tensor_utils.hpp>
#include "openvino/core/variant.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
namespace ov {
namespace test {
void SubgraphBaseTest::run() {
auto crashHandler = [](int errCode) {
auto &s = LayerTestsUtils::Summary::getInstance();
s.saveReport();
std::cerr << "Unexpected application crash with code: " << errCode << std::endl;
std::abort();
};
signal(SIGSEGV, crashHandler);
LayerTestsUtils::PassRate::Statuses status =
FuncTestUtils::SkipTestsConfig::currentTestIsDisabled() ?
LayerTestsUtils::PassRate::Statuses::SKIPPED : LayerTestsUtils::PassRate::Statuses::CRASHED;
summary.setDeviceName(targetDevice);
summary.updateOPsStats(function, status);
SKIP_IF_CURRENT_TEST_IS_DISABLED();
OPENVINO_ASSERT(targetStaticShapes.empty(), "Target Static Shape is empty!!!");
std::string errorMessage;
try {
compile_model();
for (const auto& targetStaticShapeVec : targetStaticShapes) {
try {
if (!inputDynamicShapes.empty()) {
// resize ngraph function according new target shape
resize_ngraph_function(targetStaticShapeVec);
}
generate_inputs(targetStaticShapeVec);
infer();
validate();
status = LayerTestsUtils::PassRate::Statuses::PASSED;
} catch (const std::exception &ex) {
// OPENVINO_ASSERT("Incorrect target static shape: ", CommonTestUtils::vec2str(targetStaticShape), std::endl, ex.what());
}
}
} catch (const std::exception &ex) {
status = LayerTestsUtils::PassRate::Statuses::FAILED;
errorMessage = ex.what();
} catch (...) {
status = LayerTestsUtils::PassRate::Statuses::FAILED;
errorMessage = "Unknown failure occurred.";
}
summary.updateOPsStats(function, status);
GTEST_FATAL_FAILURE_(errorMessage.c_str());
}
void SubgraphBaseTest::serialize() {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
std::string output_name = GetTestName().substr(0, CommonTestUtils::maxFileNameLength) + "_" + GetTimestamp();
std::string out_xml_path = output_name + ".xml";
std::string out_bin_path = output_name + ".bin";
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::Serialize>(out_xml_path, out_bin_path);
manager.run_passes(function);
function->validate_nodes_and_infer_types();
auto result = core->read_model(out_xml_path, out_bin_path);
bool success;
std::string message;
std::tie(success, message) =
compare_functions(result, function, false, false, false,
true, // precision
true); // attributes
EXPECT_TRUE(success) << message;
CommonTestUtils::removeIRFiles(out_xml_path, out_bin_path);
}
// Ensures the device claims support for every operation of `function`:
// the set of friendly names reported by query_model must equal the set of
// friendly names in the graph.
void SubgraphBaseTest::query_model() {
    SKIP_IF_CURRENT_TEST_IS_DISABLED();

    const auto queryResult = core->query_model(function, targetDevice);

    std::set<std::string> expectedNames;
    for (const auto& op : function->get_ops()) {
        expectedNames.insert(op->get_friendly_name());
    }

    std::set<std::string> reportedNames;
    for (const auto& entry : queryResult) {
        reportedNames.insert(entry.first);
    }

    ASSERT_EQ(expectedNames, reportedNames);
}
//ov::runtime::Tensor SubgraphBaseTest::generate_input(const element::Type& type, const ov::Shape& shape) const {
// return create_and_fill_tensor(type, shape);
//}
//
//void SubgraphBaseTest::compare(const std::vector<std::pair<element::Type, std::vector<std::uint8_t>>>& expectedOutputs,
// const std::vector<ov::runtime::Tensor>& actualOutputs) {
// for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) {
// const auto& expected = expectedOutputs[outputIndex];
// const auto& actual = actualOutputs[outputIndex];
// compare(expected, actual);
// }
//}
//
//void SubgraphBaseTest::compare(const std::pair<element::Type, std::vector<std::uint8_t>>& expected,
// const ov::runtime::Tensor& actual) {
// Compares each expected/actual tensor pair element-wise using the
// configured absolute and relative thresholds.
void SubgraphBaseTest::compare(const std::vector<ov::runtime::Tensor> &expected,
                               const std::vector<ov::runtime::Tensor> &actual) {
    ASSERT_EQ(expected.size(), actual.size());
    const size_t tensorCount = expected.size();
    for (size_t idx = 0; idx < tensorCount; ++idx) {
        ov::test::utils::compare(expected[idx], actual[idx], abs_threshold, rel_threshold);
    }
}
//void SubgraphBaseTest::compare_desc(const ov::runtime::Tensor& expected, const ov::runtime::Tensor& actual) {
// ASSERT_EQ(expected.get_element_type(), actual.get_element_type());
// ASSERT_EQ(expected.get_shape(), actual.get_shape());
//}
//void SubgraphBaseTest::configure_model() {
// // configure input precision
// {
// auto params = function->get_parameters();
// for (auto& param : params) {
// param->get_output_tensor(0).set_element_type(inPrc);
// }
// }
//
// // configure output precision
// {
// auto results = function->get_results();
// for (auto& result : results) {
// result->get_output_tensor(0).set_element_type(outPrc);
// }
// }
//}
// Compiles `function` for `targetDevice` with the test's plugin
// configuration; the executable network is kept for subsequent infer() calls.
void SubgraphBaseTest::compile_model() {
// configure_model();
executableNetwork = core->compile_model(function, targetDevice, configuration);
}
// Creates one randomly filled input tensor per function parameter, keyed
// by the parameter's friendly name.
// @param targetInputStaticShapes static shapes in parameter order; must
//        supply at least one shape per parameter.
void SubgraphBaseTest::generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) {
    inputs.clear();
    const auto& params = function->get_parameters();
    // Fixed: guard against fewer shapes than parameters before indexing,
    // and use size_t instead of a signed/unsigned-mismatched int counter.
    ASSERT_LE(params.size(), targetInputStaticShapes.size());
    for (size_t i = 0; i < params.size(); ++i) {
        const auto& param = params[i];
        ov::runtime::Tensor tensor =
            ov::test::utils::create_and_fill_tensor(param->get_element_type(), targetInputStaticShapes[i]);
        inputs.insert({param->get_friendly_name(), tensor});
    }
}
void SubgraphBaseTest::infer() {
inferRequest = executableNetwork.create_infer_request();
for (const auto& input : inputs) {
inferRequest.set_tensor(input.first, input.second);
}
inferRequest.infer();
}
//
// Computes reference outputs by running `function` on the nGraph
// interpreter. f16/bf16 are converted to f32 first because the
// interpreter does not support half precisions.
std::vector<ov::runtime::Tensor> SubgraphBaseTest::calculate_refs() {
    ngraph::pass::ConvertPrecision<element::Type_t::f16, element::Type_t::f32>().run_on_function(function);
    ngraph::pass::ConvertPrecision<element::Type_t::bf16, element::Type_t::f32>().run_on_function(function);
    function->validate_nodes_and_infer_types();
    // Feed the same named tensors that were sent to the plugin.
    return ngraph::helpers::interpreterFunction(function, inputs);
}
//
std::vector<ov::runtime::Tensor> SubgraphBaseTest::get_outputs() {
auto outputs = std::vector<ov::runtime::Tensor>{};
for (const auto& output : executableNetwork.get_results()) {
const auto& name = output->input_value(0).get_node()->get_friendly_name();
outputs.push_back(inferRequest.get_tensor(name));
}
return outputs;
}
//
// Compares interpreter reference outputs against plugin outputs.
// A graph with no reference outputs is silently accepted.
void SubgraphBaseTest::validate() {
    auto referenceOutputs = calculate_refs();
    const auto& deviceOutputs = get_outputs();
    if (referenceOutputs.empty()) {
        return;
    }
    OPENVINO_ASSERT(deviceOutputs.size() == referenceOutputs.size(),
                    "nGraph interpreter has ", referenceOutputs.size(),
                    " outputs, while IE ", deviceOutputs.size());
    compare(referenceOutputs, deviceOutputs);
}
//std::string SubgraphBaseTest::get_runtime_precision(const std::string& layerName) {
// const auto function = executableNetwork.get_runtime_function();
// for (const auto& op : function->get_ops()) {
// const auto name = op->get_friendly_name();
// if (name == layerName) {
// const auto& rtInfo = op->get_rt_info();
// const auto& it = rtInfo.find("runtimePrecision");
//
// OPENVINO_ASSERT(it != rtInfo.end(), "Runtime precision is not found for node: ", name);
//
// const auto rtPrecisionPtr = ngraph::as_type_ptr<ngraph::VariantWrapper<std::string>>(it->second);
// return rtPrecisionPtr->get();
// }
// }
// return {};
//}
//std::string SubgraphBaseTest::get_runtime_precision_by_type(const std::string& layerType) {
// const auto function = executableNetwork.get_runtime_function();
//
// for (const auto& op : function->get_ops()) {
// const auto& rtInfo = op->get_rt_info();
// const auto& typeIt = rtInfo.find("layerType");
//
// OPENVINO_ASSERT(typeIt != rtInfo.end(), "Layer is not found for type: ", layerType);
//
// const auto type = ngraph::as_type_ptr<ngraph::VariantWrapper<std::string>>(typeIt->second)->get();
// if (type == layerType) {
// const auto& it = rtInfo.find("runtimePrecision");
//
// OPENVINO_ASSERT(it != rtInfo.end(), "Runtime precision is not found for node: ", type);
//
// const auto rtPrecisionPtr = ngraph::as_type_ptr<ngraph::VariantWrapper<std::string>>(it->second);
// return rtPrecisionPtr->get();
// }
// }
//
// return {};
//}
//#ifndef NDEBUG
//void SubgraphBaseTest::show_runtime_precision() {
// const auto function = executableNetwork.get_runtime_function();
//
// for (const auto& op : function->get_ops()) {
// const auto& rtInfo = op->get_rt_info();
// const auto& typeIt = rtInfo.find("layerType");
//
// const auto type = ngraph::as_type_ptr<ngraph::VariantWrapper<std::string>>(typeIt->second)->get();
// const auto& it = rtInfo.find("runtimePrecision");
//
// const auto rtPrecisionPtr = ngraph::as_type_ptr<ngraph::VariantWrapper<std::string>>(it->second);
// std::cout << type << ": " << rtPrecisionPtr->get() << std::endl;
// }
//}
//#endif
//std::shared_ptr<ngraph::Function> SubgraphBaseTest::get_function() {
// return function;
//}
//
//std::map<std::string, std::string>& SubgraphBaseTest::get_configuration() {
// return configuration;
//}
// Reshapes `function` and `functionRefs` to the given per-parameter static
// shapes. Each shape is mapped to the first tensor name of the matching
// parameter output.
void SubgraphBaseTest::resize_ngraph_function(const std::vector<ngraph::Shape>& targetInputStaticShapes) {
    auto params = function->get_parameters();
    ASSERT_LE(params.size(), targetInputStaticShapes.size());
    std::map<std::string, ngraph::PartialShape> shapes;
    for (size_t i = 0; i < params.size(); i++) {
        const auto& tensorNames = params[i]->get_output_tensor(0).get_names();
        // Fixed: the original dereferenced begin() of a possibly empty name
        // set (undefined behavior); fail with a clear message instead.
        OPENVINO_ASSERT(!tensorNames.empty(), "Parameter ", i, " has no tensor names to reshape by");
        shapes.insert({*tensorNames.begin(), targetInputStaticShapes[i]});
    }
    function->reshape(shapes);
    functionRefs->reshape(shapes);
}
} // namespace test
} // namespace ov

View File

@ -20,8 +20,6 @@ std::string SoftMaxLayerTest::getTestCaseName(const testing::TestParamInfo<softM
result << "netPRC=" << netPrecision.name() << "_";
result << "inPRC=" << inPrc.name() << "_";
result << "outPRC=" << outPrc.name() << "_";
result << "inL=" << inLayout << "_";
result << "outL=" << outLayout << "_";
result << "IS=" << CommonTestUtils::partialShape2str({shapes.first}) << "_";
result << "TS=";
for (const auto& item : shapes.second) {

View File

@ -0,0 +1,27 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <openvino/runtime/tensor.hpp>
namespace ov {
namespace test {
namespace utils {
// Creates a tensor of the given element type and shape and fills it with
// uniformly distributed random values.
// @param element_type element type of the tensor to create
// @param range        width of the random value interval
// @param start_from   lower bound of generated values
// @param resolution   value-step denominator (1 => integer steps)
// @param seed         RNG seed, fixed for reproducibility
ov::runtime::Tensor create_and_fill_tensor(
const ov::element::Type element_type,
const ov::Shape &shape,
const uint32_t range = 10,
const int32_t start_from = 0,
const int32_t resolution = 1,
const int seed = 1);
// Element-wise comparison of two tensors; dispatches on their runtime
// element types. A threshold of numeric_limits<double>::max() disables
// that check and lets the implementation derive a default.
void compare(
const ov::runtime::Tensor &expected,
const ov::runtime::Tensor &actual,
const double abs_threshold,
const double rel_threshold);
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -0,0 +1,191 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <queue>
#include "openvino/core/type/element_type_traits.hpp"
#include "ngraph/coordinate_transform.hpp"
#include "common_test_utils/data_utils.hpp"
#include "functional_test_utils/ov_tensor_utils.hpp"
namespace ov {
namespace test {
namespace utils {
// Creates an ov::runtime::Tensor of the given element type/shape and fills
// it with uniformly distributed random data via fill_data_random.
// @param element_type runtime element type of the tensor
// @param shape        tensor shape (element count derives from shape_size)
// @param range        width of the random value interval
// @param start_from   lower bound of generated values
// @param resolution   value-step denominator (1 => integer steps)
// @param seed         RNG seed, fixed for reproducibility
ov::runtime::Tensor create_and_fill_tensor(
const ov::element::Type element_type,
const ov::Shape& shape,
const uint32_t range,
const int32_t start_from,
const int32_t resolution,
const int seed) {
auto tensor = ov::runtime::Tensor{element_type, shape};
// Each case fills the tensor's typed data buffer for one element type.
// NOTE(review): for sub-byte types (u1/i4/u4) tensor.data<T>() maps to a
// byte-typed buffer -- confirm fill semantics against the Tensor API.
#define CASE(X) case X: ::CommonTestUtils::fill_data_random( \
tensor.data<element_type_traits<X>::value_type>(), \
shape_size(shape), \
range, start_from, resolution, seed); break;
switch (element_type) {
CASE(ov::element::Type_t::boolean)
CASE(ov::element::Type_t::bf16)
CASE(ov::element::Type_t::f16)
CASE(ov::element::Type_t::f32)
CASE(ov::element::Type_t::f64)
CASE(ov::element::Type_t::i4)
CASE(ov::element::Type_t::i8)
CASE(ov::element::Type_t::i16)
CASE(ov::element::Type_t::i32)
CASE(ov::element::Type_t::i64)
CASE(ov::element::Type_t::u1)
CASE(ov::element::Type_t::u4)
CASE(ov::element::Type_t::u8)
CASE(ov::element::Type_t::u16)
CASE(ov::element::Type_t::u32)
CASE(ov::element::Type_t::u64)
default: OPENVINO_UNREACHABLE("Unsupported element type: ", element_type);
}
#undef CASE
return tensor;
}
// Element-wise tensor comparison.
// When both thresholds are "disabled" (numeric_limits<double>::max()), a
// default absolute threshold is derived from the data: 1 for 8-bit types,
// otherwise 5% of the median magnitude of the expected values. The final
// assertion requires both max absolute and max relative error to stay
// strictly below their thresholds.
template<typename ExpectedT, typename ActualT>
void compare(
        const ov::runtime::Tensor& expected,
        const ov::runtime::Tensor& actual,
        const double abs_threshold_ = std::numeric_limits<double>::max(),
        const double rel_threshold_ = std::numeric_limits<double>::max()) {
    auto expected_shape = expected.get_shape();
    auto actual_shape = actual.get_shape();
    ASSERT_EQ(expected_shape, actual_shape);
    ASSERT_NE(shape_size(actual_shape), 0);
    auto expected_data = expected.data<ExpectedT>();
    auto actual_data = actual.data<ActualT>();
    double abs_threshold = abs_threshold_;
    double rel_threshold = rel_threshold_;
    if (abs_threshold == std::numeric_limits<double>::max() && rel_threshold == std::numeric_limits<double>::max()) {
        if (sizeof(ExpectedT) == 1 || sizeof(ActualT) == 1) {
            // 8-bit data: allow a deviation of one quantization step.
            abs_threshold = 1.;
        } else {
            // Derive the threshold from the median magnitude of the data.
            std::vector<double> abs_values;
            abs_values.reserve(shape_size(expected_shape));
            for (auto&& c : ngraph::CoordinateTransformBasic{expected_shape}) {
                // NOTE(review): assumes get_strides() is expressed in element
                // units, not bytes -- confirm against ov::runtime::Tensor API.
                abs_values.push_back(std::fabs(
                    static_cast<double>(
                        expected_data[std::inner_product(c.begin(), c.end(), expected.get_strides().begin(), 0)])));
            }
            std::sort(abs_values.begin(), abs_values.end());
            double abs_median;
            const auto mid = abs_values.size() / 2;
            if (abs_values.size() % 2 == 0) {
                // Fixed: the original averaged elements [n/2] and [n/2 + 1],
                // which is off by one and throws (via .at) for two-element
                // tensors; the median of an even-sized sorted range is the
                // mean of elements [n/2 - 1] and [n/2].
                abs_median = (abs_values.at(mid - 1) + abs_values.at(mid)) / 2.;
            } else {
                abs_median = abs_values.at(mid);
            }
            abs_threshold = 0.05 * abs_median;
            if (std::is_integral<ExpectedT>::value) {
                abs_threshold = std::ceil(abs_threshold);
            }
        }
    }
    OPENVINO_ASSERT((!std::isnan(abs_threshold) && !std::isnan(rel_threshold)),
                    "abs_threshold: ", abs_threshold, " rel_threshold: ", rel_threshold);
    // Accumulated statistics for one error kind (absolute or relative).
    struct Error {
        double max = 0.;
        double mean = 0.;
        Coordinate max_coordinate;
        size_t count = 0;  // elements whose error exceeds the threshold
    } abs_error, rel_error;
    // Strict "a < b" with an epsilon guard against floating-point noise.
    auto less = [] (double a, double b) {
        auto eps = std::numeric_limits<double>::epsilon();
        return (b - a) > (std::fmax(std::fabs(a), std::fabs(b)) * eps);
    };
    for (auto&& c : ngraph::CoordinateTransformBasic{expected_shape}) {
        double expected_value = expected_data[std::inner_product(c.begin(), c.end(), expected.get_strides().begin(), 0)];
        double actual_value = actual_data[std::inner_product(c.begin(), c.end(), actual.get_strides().begin(), 0)];
        auto error = [&] (Error& err, double val, double threshold) {
            if (less(err.max, val)) {
                err.max = val;
                err.max_coordinate = c;
            }
            err.mean += val;
            err.count += less(threshold, val);
        };
        OPENVINO_ASSERT(!std::isnan(expected_value), "Expected value is NAN on coordinate: ", c);
        OPENVINO_ASSERT(!std::isnan(actual_value), "Actual value is NAN on coordinate: ", c);
        auto abs = std::fabs(expected_value - actual_value);
        // Relative error falls back to absolute when the expected value is
        // exactly zero (the ratio would be undefined).
        auto rel = expected_value ? (abs/std::fabs(expected_value)) : abs;
        error(abs_error, abs, abs_threshold);
        error(rel_error, rel, rel_threshold);
    }
    abs_error.mean /= shape_size(expected_shape);
    rel_error.mean /= shape_size(expected_shape);
    OPENVINO_ASSERT((less(abs_error.max, abs_threshold) && less(rel_error.max, rel_threshold)),
                    "abs_max < abs_threshold && rel_max < rel_threshold",
                    "\n\t abs_max: " , abs_error.max,
                    "\n\t\t coordinate " , abs_error.max_coordinate,
                    "; abs errors count " , abs_error.count , "; abs mean ",
                    abs_error.mean , "; abs threshold " , abs_threshold,
                    "\n\t rel_max: " , rel_error.max,
                    "\n\t\t coordinate " , rel_error.max_coordinate,
                    "; rel errors count " , rel_error.count , "; rel mean ",
                    rel_error.mean , "; rel threshold " , rel_threshold);
}
// Type-dispatching wrapper over the templated compare(): selects the
// concrete <ExpectedT, ActualT> instantiation from the runtime element
// types of the two tensors.
// @param expected       reference tensor
// @param actual         tensor produced by the plugin
// @param abs_threshold  max allowed absolute error (max() lets the
//                       implementation derive a default)
// @param rel_threshold  max allowed relative error (max() disables)
void compare(
const ov::runtime::Tensor& expected,
const ov::runtime::Tensor& actual,
const double abs_threshold,
const double rel_threshold) {
// CASE0 handles one (expected, actual) type pair; CASE expands the inner
// switch over all supported actual types for a fixed expected type.
#define CASE0(X, Y) case Y : compare< \
element_type_traits<X>::value_type, \
element_type_traits<Y>::value_type>( \
expected, actual, abs_threshold, rel_threshold); break;
#define CASE(X) \
case X: \
switch (actual.get_element_type()) { \
CASE0(X, ov::element::Type_t::boolean) \
CASE0(X, ov::element::Type_t::bf16) \
CASE0(X, ov::element::Type_t::f16) \
CASE0(X, ov::element::Type_t::f32) \
CASE0(X, ov::element::Type_t::f64) \
CASE0(X, ov::element::Type_t::i4) \
CASE0(X, ov::element::Type_t::i8) \
CASE0(X, ov::element::Type_t::i16) \
CASE0(X, ov::element::Type_t::i32) \
CASE0(X, ov::element::Type_t::i64) \
CASE0(X, ov::element::Type_t::u1) \
CASE0(X, ov::element::Type_t::u4) \
CASE0(X, ov::element::Type_t::u8) \
CASE0(X, ov::element::Type_t::u16) \
CASE0(X, ov::element::Type_t::u32) \
CASE0(X, ov::element::Type_t::u64) \
default: OPENVINO_UNREACHABLE("Unsupported element type: ", \
"expected ", expected.get_element_type(), \
", actual ", actual.get_element_type()); \
} break;
// Outer switch dispatches on the expected tensor's element type.
switch (expected.get_element_type()) {
CASE(ov::element::Type_t::boolean)
CASE(ov::element::Type_t::bf16)
CASE(ov::element::Type_t::f16)
CASE(ov::element::Type_t::f32)
CASE(ov::element::Type_t::f64)
CASE(ov::element::Type_t::i4)
CASE(ov::element::Type_t::i8)
CASE(ov::element::Type_t::i16)
CASE(ov::element::Type_t::i32)
CASE(ov::element::Type_t::i64)
CASE(ov::element::Type_t::u1)
CASE(ov::element::Type_t::u4)
CASE(ov::element::Type_t::u8)
CASE(ov::element::Type_t::u16)
CASE(ov::element::Type_t::u32)
CASE(ov::element::Type_t::u64)
default: OPENVINO_UNREACHABLE("Unsupported element type: ", expected.get_element_type());
}
#undef CASE0
#undef CASE
}
} // namespace utils
} // namespace test
} // namespace ov

View File

@ -269,6 +269,10 @@ std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>>
const std::vector<std::vector<std::uint8_t>> &inputs,
const std::vector<ngraph::element::Type> &inputTypes = {});
// Runs `function` on the nGraph INTERPRETER backend with the given input
// tensors (keyed by parameter friendly name) and returns one output tensor
// per Result node.
std::vector<ov::runtime::Tensor>
interpreterFunction(const std::shared_ptr<Function> &function,
const std::map<std::string, ov::runtime::Tensor>& inputs);
//
// This function compares two nGraph functions and requires them to have exactly one output
// Check nodes types

View File

@ -144,6 +144,111 @@ std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>>
return outputs;
}
//std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>>
std::vector<ov::runtime::Tensor>
interpreterFunction(const std::shared_ptr<Function> &function,
const std::map<std::string, ov::runtime::Tensor>& inputs) {
// const std::vector<std::vector<std::uint8_t>> &inputs,
// const std::vector<ngraph::element::Type> &inputTypes) {
runtime::Backend::set_backend_shared_library_search_directory("");
auto backend = runtime::Backend::create("INTERPRETER");
const auto &parameters = function->get_parameters();
const auto &parametersNumber = parameters.size();
const auto &inputsNumber = inputs.size();
NGRAPH_CHECK(parametersNumber == inputsNumber,
"Got function (", function->get_friendly_name(), ") with ", parametersNumber, " parameters, but ",
inputsNumber, " input blobs");
// if (!inputTypes.empty()) {
// NGRAPH_CHECK(inputTypes.size() == inputsNumber,
// "Got function (", function->get_friendly_name(), ") with ", inputsNumber, " inputs, but ",
// inputTypes.size(), " types");
// }
// auto inputTensors = std::vector<std::shared_ptr<runtime::Tensor>>{};
// for (size_t i = 0; i < parametersNumber; ++i) {
// const auto &parameter = parameters[i];
// const auto &parameterIndex = function->get_parameter_index(parameter);
// const auto &parameterShape = parameter->get_shape();
// const auto &parameterType = parameter->get_element_type();
// const auto &parameterSize = shape_size(parameterShape) * parameterType.size();
//
// auto input = inputs[parameterIndex];
// const auto inType = inputTypes.empty() ? element::undefined : inputTypes[i];
//
// if (inType != element::undefined && inType != parameterType) {
// input = convertOutputPrecision(input, inType, parameterType, shape_size(parameterShape));
// }
//
// const auto &inputSize = input.size();
// NGRAPH_CHECK(parameterSize == inputSize,
// "Got parameter (", parameter->get_friendly_name(), ") of size ", parameterSize,
// " bytes, but corresponding input with index ", parameterIndex,
// " has ", inputSize, " bytes");
//
// auto tensor = backend->create_tensor(parameterType, parameterShape);
// tensor->write(input.data(), parameterSize);
// inputTensors.push_back(tensor);
// }
auto inputTensors = std::vector<std::shared_ptr<runtime::Tensor>>{};
for (size_t i = 0; i < parametersNumber; ++i) {
const auto &parameter = parameters[i];
// const auto &parameterIndex = function->get_parameter_index(parameter);
const auto &parameterShape = parameter->get_shape();
const auto &parameterType = parameter->get_element_type();
const auto &parameterSize = shape_size(parameterShape) * parameterType.size();
auto inputIt = inputs.find(parameter->get_friendly_name());
if (inputIt == inputs.end()) {
// runtime error
}
auto input = inputIt->second;
// const auto inType = inputTypes.empty() ? element::undefined : inputTypes[i];
const auto inType = input.get_element_type();
// TODO: iefode
// if (inType != element::undefined && inType != parameterType) {
// input = convertOutputPrecision(input, inType, parameterType, shape_size(parameterShape));
// }
// const auto &inputSize = input.size();
// NGRAPH_CHECK(parameterSize == inputSize,
// "Got parameter (", parameter->get_friendly_name(), ") of size ", parameterSize,
// " bytes, but corresponding input with index ", parameterIndex,
// " has ", inputSize, " bytes");
auto tensor = backend->create_tensor(parameterType, parameterShape);
tensor->write(input.data(), parameterSize);
inputTensors.push_back(tensor);
}
auto outputTensors = std::vector<std::shared_ptr<runtime::Tensor>>{};
const auto &results = function->get_results();
for (size_t i = 0; i < results.size(); ++i) {
outputTensors.push_back(std::make_shared<HostTensor>());
}
auto handle = backend->compile(function);
handle->call_with_validate(outputTensors, inputTensors);
// std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> outputs(results.size());
std::vector<ov::runtime::Tensor> outputs(results.size());
// for (size_t resultIndex = 0; resultIndex < results.size(); resultIndex++) {
// auto& output = outputs[resultIndex];
// output.first = results[resultIndex]->get_element_type();
// const auto& outputTensor = outputTensors[resultIndex];
// output.second.resize(ceil(shape_size(outputTensor->get_shape()) * outputTensor->get_element_type().bitwidth() / 8.f));
// outputTensors[resultIndex]->read(output.second.data(), output.second.size());
// }
for (const auto& outTensor : outputTensors) {
ov::runtime::Tensor a;
outTensor->read(&a, outTensor->get_size_in_bytes());
outputs.push_back(a);
}
return outputs;
}
std::shared_ptr<Function> foldFunction(const std::shared_ptr<Function> &function,
const std::vector<std::vector<std::uint8_t>> &inputs,
const std::vector<ngraph::element::Type> &inputTypes) {