Set output blobs precision for IE tests (#3905)
* Calling setPrecision on CNN network outputs
* added tests
* get_output_name refactor
* add missing test file
* run tests on all backends (or disable them if a backend is not available)
* fixed tests
* fixed TestEngine
parent a5c6ed8e03
commit 4e33dac7fa
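The gist of the change: when the IE test engine builds an InferenceEngine CNNNetwork from an nGraph Function, IE defaults every output blob to FP32; the engine now forces each output blob's precision to match the element type of the corresponding nGraph result. A minimal sketch of that idea, separate from the diff below — `cnn_network` and the fixed I32 precision are illustrative assumptions, while CNNNetwork::getOutputsInfo() and DataPtr::setPrecision() are the actual InferenceEngine calls the diff relies on:

    // Minimal sketch: force output blob precision instead of IE's FP32 default.
    // `cnn_network` is assumed to be already built from an nGraph Function.
    #include <inference_engine.hpp>

    void force_output_precision(InferenceEngine::CNNNetwork& cnn_network)
    {
        for (auto& output : cnn_network.getOutputsInfo())
        {
            // output.second is an InferenceEngine::DataPtr describing one output blob;
            // the real change derives the precision from the nGraph result's element type.
            output.second->setPrecision(InferenceEngine::Precision::I32);
        }
    }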
@@ -357,7 +357,8 @@ if (NGRAPH_ONNX_IMPORT_ENABLE AND NOT NGRAPH_USE_PROTOBUF_LITE)
         onnx/onnx_import_provenance.in.cpp
         onnx/onnx_import_reshape.in.cpp
         onnx/onnx_import_rnn.in.cpp
-        onnx/onnx_import_quant.in.cpp)
+        onnx/onnx_import_quant.in.cpp
+        onnx/onnx_test_utils.in.cpp)
     list(APPEND SRC
         onnx/onnx_import_exceptions.cpp
         onnx/onnx_import_library.cpp
@@ -56,16 +56,16 @@ NGRAPH_TEST(${BACKEND_NAME}, bucketize_left_edge)

     const auto data = make_shared<op::Parameter>(element::i32, data_shape);
     const auto buckets = make_shared<op::Parameter>(element::f32, bucket_shape);
-    const auto bucketize = make_shared<op::v3::Bucketize>(data, buckets, element::i64, false);
+    const auto bucketize = make_shared<op::v3::Bucketize>(data, buckets, element::i32, false);
     const auto f = make_shared<Function>(bucketize, ParameterVector{data, buckets});

     vector<int32_t> data_vect = {8, 1, 2, 1, 8, 5, 1, 5, 0, 20};
     vector<float> buckets_vect = {1.f, 4.f, 10.f, 20.f};
-    vector<int> expected_vect = {2, 1, 1, 1, 2, 2, 1, 2, 0, 4};
+    vector<int32_t> expected_vect = {2, 1, 1, 1, 2, 2, 1, 2, 0, 4};

     auto test_case = test::TestCase<TestEngine>(f);
     test_case.add_input<int32_t>(data_shape, data_vect);
     test_case.add_input<float>(bucket_shape, buckets_vect);
-    test_case.add_expected_output<int>(data_shape, expected_vect);
+    test_case.add_expected_output<int32_t>(data_shape, expected_vect);
     test_case.run();
 }
ngraph/test/models/onnx/add_abc_3d.prototxt (new file, 74 lines)
@@ -0,0 +1,74 @@
ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
  node {
    input: "A"
    input: "B"
    output: "X"
    name: "add_node1"
    op_type: "Add"
  }
  node {
    input: "X"
    input: "C"
    output: "Y"
    name: "add_node2"
    op_type: "Add"
  }
  name: "test_graph"
  input {
    name: "A"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 3
          }
        }
      }
    }
  }
  input {
    name: "B"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 3
          }
        }
      }
    }
  }
  input {
    name: "C"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 3
          }
        }
      }
    }
  }
  output {
    name: "Y"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 3
          }
        }
      }
    }
  }
}
opset_import {
  version: 4
}
ngraph/test/onnx/onnx_test_utils.in.cpp (new file, 88 lines)
@@ -0,0 +1,88 @@
//*****************************************************************************
// Copyright 2017-2021 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <algorithm>
#include "gtest/gtest.h"

#include "default_opset.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "onnx_import/editor/editor.hpp"
#include "onnx_import/onnx.hpp"
#include "util/test_control.hpp"

#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/engine/test_engines.hpp"
#include "util/test_case.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"

using namespace ngraph;

static std::string s_manifest = "${MANIFEST}";

template <typename T>
class ElemTypesTests : public ::testing::Test
{
};
TYPED_TEST_CASE_P(ElemTypesTests);

TYPED_TEST_P(ElemTypesTests, onnx_test_add_abc_set_precission)
{
    using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
    using DataType = TypeParam;
    const element::Type ng_type = element::from<DataType>();

    onnx_import::ONNXModelEditor editor{
        file_util::path_join(SERIALIZED_ZOO, "onnx/add_abc_3d.prototxt")};

    editor.set_input_types({{"A", ng_type}, {"B", ng_type}, {"C", ng_type}});

    const auto function = onnx_import::import_onnx_model(editor);
    auto test_case = test::TestCase<TestEngine>(function);
    test_case.add_input<DataType>(std::vector<DataType>{1, 2, 3});
    test_case.add_input<DataType>(std::vector<DataType>{4, 5, 6});
    test_case.add_input<DataType>(std::vector<DataType>{7, 8, 9});
    test_case.add_expected_output<DataType>(Shape{3}, std::vector<DataType>{12, 15, 18});
    test_case.run();
}

TYPED_TEST_P(ElemTypesTests, onnx_test_split_multioutput_set_precission)
{
    using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
    using DataType = TypeParam;
    const element::Type ng_type = element::from<DataType>();

    onnx_import::ONNXModelEditor editor{
        file_util::path_join(SERIALIZED_ZOO, "onnx/split_equal_parts_default.prototxt")};

    editor.set_input_types({{"input", ng_type}});

    const auto function = onnx_import::import_onnx_model(editor);
    auto test_case = test::TestCase<TestEngine>(function);
    test_case.add_input<DataType>(std::vector<DataType>{1, 2, 3, 4, 5, 6});
    test_case.add_expected_output<DataType>(Shape{2}, std::vector<DataType>{1, 2});
    test_case.add_expected_output<DataType>(Shape{2}, std::vector<DataType>{3, 4});
    test_case.add_expected_output<DataType>(Shape{2}, std::vector<DataType>{5, 6});
    test_case.run();
}

REGISTER_TYPED_TEST_CASE_P(ElemTypesTests,
                           onnx_test_add_abc_set_precission,
                           onnx_test_split_multioutput_set_precission);
typedef ::testing::Types<int8_t, int16_t, int32_t, uint8_t, float> ElemTypes;
INSTANTIATE_TYPED_TEST_CASE_P(${BACKEND_NAME}, ElemTypesTests, ElemTypes);
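The typed tests above hinge on `element::from<DataType>()`, which maps the gtest type parameter onto an nGraph element type before it is handed to the model editor. A short sketch of that mapping — `element::from` is the real nGraph API used above, while the `example` wrapper and the spelled-out correspondences are illustrative:

    // Sketch: translating C++ types into nGraph element types.
    #include "ngraph/type/element_type.hpp"

    void example()
    {
        const ngraph::element::Type t8 = ngraph::element::from<int8_t>(); // i8
        const ngraph::element::Type tf = ngraph::element::from<float>();  // f32
        // editor.set_input_types({{"A", t8}}) would then re-type input "A" to i8,
        // so each typed-test instance exercises a different input precision.
    }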
@@ -1586,5 +1586,7 @@ IE_GPU.onnx_model_gather_elements_int32_axis_0
 IE_GPU.onnx_model_gather_elements_int8_axis_1
 IE_GPU.onnx_model_gather_elements_float_3D_axis_2

-# incorrect result for Minimum if u16 type is unsupported
+# incorrect result for Minimum if u16 type is used
 minimum_u16
+# incorrect result on Windows if i16 type is used
+IE_CPU/ElemTypesTests/1.onnx_test_add_abc_set_precission
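The shape of the disabled entry follows from how gtest names typed-test instances: `INSTANTIATE_TYPED_TEST_CASE_P(prefix, suite, types)` produces names of the form `prefix/suite/«type index».«test name»`, so `IE_CPU/ElemTypesTests/1.onnx_test_add_abc_set_precission` is the index-1 type in ElemTypes, i.e. the int16_t instantiation — matching the "i16 on Windows" comment. A self-contained sketch (the suite, test, and type-list names here are hypothetical):

    // Sketch of gtest typed-test naming; MyTests/MyTypes are made-up names.
    #include "gtest/gtest.h"

    template <typename T>
    class MyTests : public ::testing::Test
    {
    };
    TYPED_TEST_CASE_P(MyTests);

    TYPED_TEST_P(MyTests, always_passes) { SUCCEED(); }
    REGISTER_TYPED_TEST_CASE_P(MyTests, always_passes);

    // index 0 -> int8_t, index 1 -> int16_t
    typedef ::testing::Types<int8_t, int16_t> MyTypes;
    // yields tests named IE_CPU/MyTests/0.always_passes and IE_CPU/MyTests/1.always_passes
    INSTANTIATE_TYPED_TEST_CASE_P(IE_CPU, MyTests, MyTypes);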
@@ -123,6 +123,41 @@ namespace
     }
 };

+namespace
+{
+    InferenceEngine::Precision ng_type_to_precission(const element::Type& target_type)
+    {
+#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic error "-Wswitch"
+#pragma GCC diagnostic error "-Wswitch-enum"
+#endif
+        switch (target_type)
+        {
+        case element::Type_t::boolean: return InferenceEngine::Precision::BOOL; break;
+        case element::Type_t::bf16: return InferenceEngine::Precision::BF16; break;
+        case element::Type_t::f16: return InferenceEngine::Precision::FP16; break;
+        case element::Type_t::f32: return InferenceEngine::Precision::FP32; break;
+        case element::Type_t::f64: return InferenceEngine::Precision::FP64; break;
+        case element::Type_t::i8: return InferenceEngine::Precision::I8; break;
+        case element::Type_t::i16: return InferenceEngine::Precision::I16; break;
+        case element::Type_t::i32: return InferenceEngine::Precision::I32; break;
+        case element::Type_t::i64: return InferenceEngine::Precision::I64; break;
+        case element::Type_t::u8: return InferenceEngine::Precision::U8; break;
+        case element::Type_t::u16: return InferenceEngine::Precision::U16; break;
+        case element::Type_t::u32: return InferenceEngine::Precision::U32; break;
+        case element::Type_t::u64: return InferenceEngine::Precision::U64; break;
+        case element::Type_t::u1: throw std::runtime_error("unsupported type");
+        case element::Type_t::undefined: throw std::runtime_error("unsupported type");
+        case element::Type_t::dynamic: throw std::runtime_error("unsupported type");
+        }
+#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8)
+#pragma GCC diagnostic pop
+#endif
+        throw std::runtime_error("unsupported type");
+    }
+}
+
 test::IE_Engine::IE_Engine(const std::shared_ptr<Function> function, const char* device)
     : m_function{function}
 {
@@ -131,6 +166,13 @@ test::IE_Engine::IE_Engine(const std::shared_ptr<Function> function, const char*
     m_network_inputs = cnn_network.getInputsInfo();
     m_network_outputs = cnn_network.getOutputsInfo();

+    for (const auto& result : m_function->get_results())
+    {
+        const auto& out_name = get_output_name(result);
+        m_network_outputs[out_name]->setPrecision(
+            ng_type_to_precission(result->get_element_type()));
+    }
+
     InferenceEngine::Core ie;
     auto exe_network = ie.LoadNetwork(cnn_network, device);
     m_inference_req = exe_network.CreateInferRequest();
@@ -172,6 +214,32 @@ testing::AssertionResult test::IE_Engine::compare_results(const size_t tolerance
     return comparison_result;
 }

+std::string test::IE_Engine::get_output_name(const std::shared_ptr<op::v0::Result>& ng_result)
+{
+    if (m_function->get_results().size() == 1)
+    {
+        // ng_result argument is ignored
+        return m_network_outputs.begin()->first;
+    }
+    else
+    {
+        const auto& prev_layer = ng_result->input_value(0);
+        auto network_out_name = prev_layer.get_node_shared_ptr()->get_friendly_name();
+        if (prev_layer.get_node_shared_ptr()->get_output_size() != 1)
+        {
+            network_out_name += "." + std::to_string(prev_layer.get_index());
+        }
+
+        NGRAPH_CHECK(m_network_outputs.count(network_out_name) == 1,
+                     "nGraph function's output number ",
+                     m_allocated_expected_outputs,
+                     " was not found in the CNNNetwork built from it. Function's output name: ",
+                     network_out_name);
+
+        return network_out_name;
+    }
+}
+
 testing::AssertionResult
     test::IE_Engine::compare_results_with_tolerance_as_fp(const float tolerance)
 {
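The pragma dance around the switch in ng_type_to_precission is a compile-time exhaustiveness check: promoting -Wswitch/-Wswitch-enum to errors makes GCC reject the function if a new element::Type_t enumerator ever appears without a matching case. A distilled sketch of the same trick, using a hypothetical enum:

    // Sketch of the exhaustive-switch trick; Fruit and price() are hypothetical.
    #include <stdexcept>

    enum class Fruit { apple, pear };

    int price(Fruit f)
    {
    #if defined(__GNUC__)
    #pragma GCC diagnostic push
    #pragma GCC diagnostic error "-Wswitch-enum" // unhandled enumerator -> compile error
    #endif
        switch (f)
        {
        case Fruit::apple: return 1;
        case Fruit::pear: return 2;
        }
    #if defined(__GNUC__)
    #pragma GCC diagnostic pop
    #endif
        throw std::runtime_error("unreachable"); // silences -Wreturn-type
    }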
@@ -89,37 +89,11 @@ namespace ngraph
             template <typename T>
             void add_expected_output(const ngraph::Shape& expected_shape,
                                      const std::vector<T>& values)
             {
-                std::string network_out_name;
-                InferenceEngine::DataPtr network_output;
-                if (m_function->get_results().size() == 1)
-                {
-                    network_out_name = m_network_outputs.begin()->first;
-                    network_output = m_network_outputs.begin()->second;
-                }
-                else
-                {
-                    const auto& function_output =
-                        m_function->get_results()[m_allocated_expected_outputs];
-
-                    // determine output name in IE convention
-                    // (based on name of node which produces the result)
-                    const auto& prev_layer = function_output->input_value(0);
-                    network_out_name = prev_layer.get_node_shared_ptr()->get_friendly_name();
-                    if (prev_layer.get_node_shared_ptr()->get_output_size() != 1)
-                    {
-                        network_out_name += "." + std::to_string(prev_layer.get_index());
-                    }
-
-                    NGRAPH_CHECK(
-                        m_network_outputs.count(network_out_name) == 1,
-                        "nGraph function's output number ",
-                        m_allocated_expected_outputs,
-                        " was not found in the CNNNetwork built from it. Function's output name: ",
-                        network_out_name);
-
-                    network_output = m_network_outputs[network_out_name];
-                }
+                const auto& function_output =
+                    m_function->get_results()[m_allocated_expected_outputs];
+                std::string network_out_name = get_output_name(function_output);
+                InferenceEngine::DataPtr network_output = m_network_outputs[network_out_name];

                 auto blob =
                     std::make_shared<InferenceEngine::TBlob<T>>(network_output->getTensorDesc());
@@ -158,6 +132,9 @@ namespace ngraph
             /// Retrieves a set of all ops IE can execute
             std::set<NodeTypeInfo> get_ie_ops() const;
+
+            // Get the name of the IE output blob which corresponds to a result of the nG Function
+            std::string get_output_name(const std::shared_ptr<op::v0::Result>& ng_result);
         };

         class IE_CPU_Engine final : public IE_Engine
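get_output_name encodes the CNNNetwork naming convention for outputs: a blob is named after the node feeding the Result, with ".«output index»" appended when that node has several outputs. The same logic, distilled into a hypothetical free function for illustration:

    // Hypothetical helper mirroring the naming logic inside get_output_name.
    #include <string>

    std::string ie_output_name(const std::string& producer_name,
                               size_t output_index,
                               size_t producer_output_count)
    {
        // single-output producer: the blob shares the producer's friendly name;
        // multi-output producer: ".«index»" disambiguates, e.g. "split1.2"
        return producer_output_count == 1
                   ? producer_name
                   : producer_name + "." + std::to_string(output_index);
    }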