Enable CPU and Interpreter Loop tests (#3042)

Mateusz Bencer 2020-11-13 13:32:19 +01:00 committed by GitHub
parent bda370c5da
commit 2b23eb8ade
10 changed files with 634 additions and 80 deletions

View File

@ -105,38 +105,28 @@ namespace ngraph
termination_cond =
ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true});
}
-        else if (ngraph::op::is_constant(ng_inputs.at(1).get_node_shared_ptr()))
+        else if (ngraph::op::is_constant(ng_inputs.at(1).get_node_shared_ptr()) &&
+                 as_type_ptr<default_opset::Constant>(
+                     ng_inputs.at(1).get_node_shared_ptr())
+                         ->cast_vector<bool>()[0] == false)
         {
-            const auto term_cond_const = as_type_ptr<default_opset::Constant>(
-                ng_inputs.at(1).get_node_shared_ptr());
-            if (term_cond_const->cast_vector<bool>()[0])
+            // no iteration is performed so initial values are returned
+            OutputVector node_outputs;
+            // final values
+            for (const auto& dep : loop_carried_dependencies)
             {
-                termination_cond =
-                    ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true});
+                node_outputs.push_back(dep);
             }
-            else
+            // scan outputs
+            for (const auto& dep : loop_carried_dependencies)
             {
-                // no iteration is performed so initial values are returned
-                OutputVector node_outputs;
-                // final values
-                for (const auto& dep : loop_carried_dependencies)
-                {
-                    node_outputs.push_back(dep);
-                }
-                // scan outputs
-                for (const auto& dep : loop_carried_dependencies)
-                {
-                    node_outputs.push_back(dep);
-                }
-                return node_outputs;
+                node_outputs.push_back(dep);
             }
+            return node_outputs;
         }
         else
         {
-            // It is temporary solution caused by not supported termination_cond==false
-            // (for not consant case) by nG Loop
-            termination_cond =
-                ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true});
+            termination_cond = ng_inputs.at(1);
}
const int64_t concat_axis = 0;
@ -162,13 +152,6 @@ namespace ngraph
body_outputs[0] =
ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true});
}
else
{
NGRAPH_WARN
    << "ONNX Loop: a body without an Identity or Constant termination "
    << "condition output is not supported in the current version\n";
// TODO: remove this once the fix is introduced to nG Loop
}
CHECK_VALID_NODE(node,
body_inputs.size() >= loop_carried_dependencies.size() + 2,
@ -189,7 +172,7 @@ namespace ngraph
ParameterVector body_params(body_inputs.begin() + 2, body_inputs.end());
body_params.emplace(body_params.begin(),
body_inputs[0]); // termination condition body input
body_inputs[0]); // current iteration body input
const auto body = std::make_shared<ngraph::Function>(body_outputs, body_params);
auto loop = std::make_shared<default_opset::Loop>(trip_count, termination_cond);
ngraph::opset5::Loop::SpecialBodyPorts spec_ports{0, 0};
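For readers unfamiliar with the nGraph side of this importer, here is a minimal, hedged sketch (illustrative names only, not the importer's exact code) of how a body Function is attached to ngraph::opset5::Loop and what the SpecialBodyPorts{0, 0} above designate: body input 0 is the current-iteration counter and body output 0 is the per-iteration condition.

// Sketch only: wiring a loop body into opset5::Loop. Variable names are
// illustrative; it assumes body_params has at least 3 parameters and
// body_outputs at least 3 outputs, and omits the ONNX-specific bookkeeping.
#include <memory>
#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset5.hpp"

using namespace ngraph;

std::shared_ptr<opset5::Loop> build_loop(const Output<Node>& trip_count,
                                         const Output<Node>& termination_cond,
                                         const ParameterVector& body_params,
                                         const OutputVector& body_outputs,
                                         const Output<Node>& carried_init)
{
    // body_params[0]: current iteration, body_outputs[0]: condition computed by the body
    auto body = std::make_shared<Function>(body_outputs, body_params);
    auto loop = std::make_shared<opset5::Loop>(trip_count, termination_cond);
    loop->set_function(body);
    loop->set_special_body_ports(opset5::Loop::SpecialBodyPorts{0, 0});

    // one loop-carried dependency: fed into body_params[2], updated from body_outputs[1]
    loop->set_merged_input(body_params.at(2), carried_init, body_outputs.at(1));

    // value of the carried dependency after the final iteration (-1 == last)
    Output<Node> final_value = loop->get_iter_value(body_outputs.at(1), -1);
    // scan output: per-iteration values of body_outputs[2] concatenated along axis 0
    Output<Node> scan_output =
        loop->get_concatenated_slices(body_outputs.at(2), 0, 1, 1, -1, 0);

    loop->validate_and_infer_types();
    (void)final_value;
    (void)scan_output;
    return loop;
}

set_merged_input feeds a loop-carried value back into the body, while get_concatenated_slices exposes a scan output, which is how the ONNX Loop contract maps onto the nG Loop op.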

View File

@ -0,0 +1,185 @@
ir_version: 6
producer_name: "nGraph ONNX Importer"
graph {
name: "basic loop"
node {
input: "trip_count"
input: "cond_in"
input: "a_init"
output: "a_final"
op_type: "Loop"
attribute {
name: "body"
g {
node {
input: "a_in"
input: "b"
output: "current_a"
name: "loop_body_add"
op_type: "Add"
}
node {
input: "i"
input: "threshold"
output: "cond_out"
name: "condition_calc"
op_type: "Less"
}
node {
input: "current_a"
output: "a_out"
name: "output_accumulator"
op_type: "Identity"
}
name: "simple add"
initializer {
dims: 1
dims: 2
data_type: 1
float_data: 1
float_data: 1
name: "b"
}
input {
name: "i"
type {
tensor_type {
elem_type: 7
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "cond"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "a_in"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
}
}
}
}
output {
name: "cond_out"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 1
}
}
}
}
}
output {
name: "current_a"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
}
}
}
}
}
type: GRAPH
}
}
initializer {
dims: 1
data_type: 7
int64_data: 5
name: "threshold"
}
input {
name: "trip_count"
type {
tensor_type {
elem_type: 7
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "cond_in"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "a_init"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
}
}
}
}
output {
name: "a_final"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
}
}
}
}
}
opset_import {
version: 11
}
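As a usage note, a model like the one above can also be exercised outside the TestCase helper used later in this commit. The sketch below is illustrative only: the file path, input values, and include locations are assumptions, and the INTERPRETER backend is created in dynamic mode so that Loop outputs with non-static shapes can be handled.

// Hedged sketch: import a "basic loop" model like the one above and run it on
// the INTERPRETER backend. Paths and values are illustrative; header locations
// may differ depending on the source layout.
#include <iostream>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/backend.hpp"
#include "onnx_import/onnx.hpp"

int main()
{
    using namespace ngraph;

    auto function = onnx_import::import_onnx_model("onnx/loop/basic_loop.prototxt");

    // "true" requests the dynamic wrapper, so partially dynamic shapes are allowed
    auto backend = runtime::Backend::create("INTERPRETER", true);
    auto exec = backend->compile(function);

    std::vector<int64_t> trip_count{5};
    std::vector<char> cond_in{1}; // boolean stored as a single byte
    std::vector<float> a_init{0.f, 0.f};

    auto t_trip = backend->create_tensor(element::i64, Shape{1});
    auto t_cond = backend->create_tensor(element::boolean, Shape{1});
    auto t_a = backend->create_tensor(element::f32, Shape{1, 2});
    t_trip->write(trip_count.data(), trip_count.size() * sizeof(int64_t));
    t_cond->write(cond_in.data(), cond_in.size() * sizeof(char));
    t_a->write(a_init.data(), a_init.size() * sizeof(float));

    // the output shape may only be known after execution, hence a dynamic tensor
    auto t_out = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
    exec->call_with_validate({t_out}, {t_trip, t_cond, t_a});

    std::vector<float> a_final(shape_size(t_out->get_shape()));
    t_out->read(a_final.data(), a_final.size() * sizeof(float));
    for (float v : a_final)
        std::cout << v << " ";
    std::cout << "\n";
    return 0;
}

The TestCase helper used in the unit tests later in this commit wraps essentially this flow.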

View File

@ -0,0 +1,171 @@
ir_version: 6
producer_name: "nGraph ONNX Importer"
graph {
name: "basic loop"
node {
input: "trip_count"
input: "cond_in"
input: "a_init"
output: "a_final"
op_type: "Loop"
attribute {
name: "body"
g {
node {
input: "a_in"
input: "b"
output: "current_a"
name: "loop_body_add"
op_type: "Add"
}
node {
input: "i"
input: "threshold"
output: "cond_out"
name: "condition_calc"
op_type: "Less"
}
node {
input: "current_a"
output: "a_out"
name: "output_accumulator"
op_type: "Identity"
}
name: "simple add"
initializer {
dims: 1
dims: 2
data_type: 1
float_data: 1
float_data: 1
name: "b"
}
input {
name: "i"
type {
tensor_type {
elem_type: 7
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "cond"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "a_in"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
}
}
}
}
output {
name: "cond_out"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 1
}
}
}
}
}
output {
name: "current_a"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
}
}
}
}
}
type: GRAPH
}
}
initializer {
dims: 1
data_type: 7
int64_data: 10
name: "trip_count"
}
initializer {
dims: 1
data_type: 7
int64_data: 3
name: "threshold"
}
initializer {
dims: 1
data_type: 9
int32_data: 00000001
name: "cond_in"
}
input {
name: "a_init"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
}
}
}
}
output {
name: "a_final"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
}
}
}
}
}
opset_import {
version: 11
}

View File

@ -0,0 +1,172 @@
ir_version: 6
producer_name: "nGraph ONNX Importer"
graph {
name: "basic loop"
node {
input: ""
input: "cond_in"
input: "a_init"
output: "a_final"
op_type: "Loop"
attribute {
name: "body"
g {
node {
input: "a_in"
input: "b"
output: "current_a"
name: "loop_body_add"
op_type: "Add"
}
node {
input: "i"
input: "threshold"
output: "cond_out"
name: "condition_calc"
op_type: "Less"
}
node {
input: "current_a"
output: "a_out"
name: "output_accumulator"
op_type: "Identity"
}
name: "simple add"
initializer {
dims: 1
dims: 2
data_type: 1
float_data: 1
float_data: 1
name: "b"
}
input {
name: "i"
type {
tensor_type {
elem_type: 7
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "cond"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "a_in"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
}
}
}
}
output {
name: "cond_out"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 1
}
}
}
}
}
output {
name: "current_a"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
}
}
}
}
}
type: GRAPH
}
}
initializer {
dims: 1
data_type: 7
int64_data: 5
name: "threshold"
}
input {
name: "cond_in"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "a_init"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
}
}
}
}
output {
name: "a_final"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
}
}
}
}
}
opset_import {
version: 11
}

View File

@ -81,6 +81,24 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_controlflow_loop_2d_no_identity_termination_co
const auto function = onnx_import::import_onnx_model(file_util::path_join(
SERIALIZED_ZOO, "onnx/loop/loop_2d_add_no_identity_termination_cond.prototxt"));
auto test_case = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(function);
// termination condition
test_case.add_input<bool>({true});
// a_init
test_case.add_input<float>({0.f, 0.f});
test_case.add_expected_output<float>(Shape{1, 2}, {6.f, 6.f});
test_case.add_expected_output<float>(
Shape{6, 2}, {1.f, 1.f, 2.f, 2.f, 3.f, 3.f, 4.f, 4.f, 5.f, 5.f, 6.f, 6.f});
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, onnx_controlflow_loop_2d_no_identity_termination_cond_static_shapes)
{
const auto function = onnx_import::import_onnx_model(file_util::path_join(
SERIALIZED_ZOO,
"onnx/loop/loop_2d_add_no_identity_termination_cond_static_shapes.prototxt"));
auto test_case = test::TestCase<TestEngine>(function);
// termination condition
test_case.add_input<bool>({true});
@ -88,8 +106,6 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_controlflow_loop_2d_no_identity_termination_co
test_case.add_input<float>({0.f, 0.f});
test_case.add_expected_output<float>(Shape{1, 2}, {6.f, 6.f});
test_case.add_expected_output<float>(Shape{5, 2},
{1.f, 1.f, 2.f, 2.f, 3.f, 3.f, 4.f, 4.f, 5.f, 5.f});
test_case.run();
}
@ -119,12 +135,27 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_controlflow_loop_2d_const_no_identity_terminat
const auto function = onnx_import::import_onnx_model(file_util::path_join(
SERIALIZED_ZOO, "onnx/loop/loop_2d_add_const_no_identity_termination_cond.prototxt"));
auto test_case = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(function);
// a_init
test_case.add_input<float>({0.f, 0.f});
test_case.add_expected_output<float>(Shape{1, 2}, {4.f, 4.f});
test_case.add_expected_output<float>(Shape{4, 2}, {1, 1, 2, 2, 3, 3, 4, 4});
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME},
onnx_controlflow_loop_2d_const_no_identity_termination_cond_static_shapes)
{
const auto function = onnx_import::import_onnx_model(file_util::path_join(
SERIALIZED_ZOO,
"onnx/loop/loop_2d_add_const_no_identity_termination_cond_static_shapes.prototxt"));
auto test_case = test::TestCase<TestEngine>(function);
// a_init
test_case.add_input<float>({0.f, 0.f});
test_case.add_expected_output<float>(Shape{1, 2}, {3.f, 3.f});
test_case.add_expected_output<float>(Shape{3, 2}, {1.f, 1.f, 2.f, 2.f, 3.f, 3.f});
test_case.add_expected_output<float>(Shape{1, 2}, {4.f, 4.f});
test_case.run();
}
@ -139,6 +170,29 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_controlflow_loop_2d_both_cond_and_trip_count_a
const auto function = onnx_import::import_onnx_model(file_util::path_join(
SERIALIZED_ZOO, "onnx/loop/loop_2d_add_cond_and_trip_count_as_inputs.prototxt"));
auto test_case = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(function);
// trip count
test_case.add_input<int64_t>({10});
// termination condition
test_case.add_input<bool>({true});
// a_init
test_case.add_input<float>({0.f, 0.f});
test_case.add_expected_output<float>(Shape{1, 2}, {6.f, 6.f});
test_case.add_expected_output<float>(
Shape{6, 2}, {1.f, 1.f, 2.f, 2.f, 3.f, 3.f, 4.f, 4.f, 5.f, 5.f, 6.f, 6.f});
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME},
onnx_controlflow_loop_2d_both_cond_and_trip_count_as_inputs_static_shapes)
{
const auto function = onnx_import::import_onnx_model(file_util::path_join(
SERIALIZED_ZOO,
"onnx/loop/loop_2d_add_cond_and_trip_count_as_inputs_static_shapes.prototxt"));
auto test_case = test::TestCase<TestEngine>(function);
// trip count
test_case.add_input<int64_t>({10});
@ -149,9 +203,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_controlflow_loop_2d_both_cond_and_trip_count_a
// a_init
test_case.add_input<float>({0.f, 0.f});
test_case.add_expected_output<float>(Shape{1, 2}, {5.f, 5.f});
test_case.add_expected_output<float>(Shape{5, 2},
{1.f, 1.f, 2.f, 2.f, 3.f, 3.f, 4.f, 4.f, 5.f, 5.f});
test_case.add_expected_output<float>(Shape{1, 2}, {6.f, 6.f});
test_case.run();
}
@ -278,7 +330,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_controlflow_loop_2d_trip_count_dynamic)
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/loop/loop_2d_add_trip_count_dynamic.prototxt"));
auto test_case = test::TestCase<TestEngine>(function);
auto test_case = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(function);
// trip count
test_case.add_input<int64_t>({3});
// a_init
@ -359,7 +411,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_controlflow_loop_no_variadic_inputs_and_output
const auto function = onnx_import::import_onnx_model(file_util::path_join(
SERIALIZED_ZOO, "onnx/loop/loop_no_variadic_inputs_and_outputs.prototxt"));
auto test_case = test::TestCase<TestEngine>(function);
auto test_case = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(function);
// trip_count
test_case.add_input<int64_t>({1});
// init condition
@ -375,7 +427,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_controlflow_loop_power)
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/loop/loop_pow.prototxt"));
auto test_case = test::TestCase<TestEngine>(function);
auto test_case = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(function);
// trip_count
test_case.add_input<int64_t>({5});
// pow init

View File

@ -286,12 +286,6 @@ bool runtime::dynamic::DynamicExecutable::call(
std::vector<std::shared_ptr<runtime::Tensor>> wrapped_outputs;
const ResultVector& results = clone->get_results();
for (auto& result : results)
{
NGRAPH_CHECK(result->get_output_partial_shape(0).is_static(),
"Shape staticization failed for result node ",
*result);
}
NGRAPH_CHECK(results.size() == outputs.size());
for (size_t i = 0; i < outputs.size(); i++)
@ -300,7 +294,7 @@ bool runtime::dynamic::DynamicExecutable::call(
std::dynamic_pointer_cast<runtime::dynamic::DynamicTensor>(outputs[i]))
{
dynamic_tensor->make_storage(results[i]->get_output_element_type(0),
results[i]->get_output_shape(0));
results[i]->get_output_partial_shape(0));
wrapped_outputs.push_back(dynamic_tensor->get_wrapped_tensor());
}
else
@ -387,7 +381,7 @@ void runtime::dynamic::DynamicTensor::release_storage()
}
void runtime::dynamic::DynamicTensor::make_storage(const element::Type& element_type,
const Shape& shape)
const PartialShape& shape)
{
NGRAPH_CHECK(element_type.is_static(), "make_storage requires a static element type");
NGRAPH_CHECK(get_element_type().is_dynamic() || get_element_type() == element_type,
@ -400,7 +394,14 @@ void runtime::dynamic::DynamicTensor::make_storage(const element::Type& element_
shape,
" which is incompatible with dynamic tensor shape ",
get_partial_shape());
m_wrapped_tensor = m_wrapped_backend->create_tensor(element_type, shape);
if (shape.is_static())
{
m_wrapped_tensor = m_wrapped_backend->create_tensor(element_type, shape.get_shape());
}
else
{
m_wrapped_tensor = m_wrapped_backend->create_dynamic_tensor(element_type, shape);
}
}
const std::shared_ptr<ngraph::runtime::Tensor>&
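The new branching in make_storage boils down to the pattern sketched below (a hedged illustration with an invented helper name, not the exact wrapper code): a static PartialShape still yields a regular backend tensor, while a partially dynamic one defers allocation to a dynamic tensor.

// Hedged sketch mirroring the new make_storage branching above. Header
// locations may differ depending on the source layout.
#include <memory>
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/backend.hpp"

using namespace ngraph;

std::shared_ptr<runtime::Tensor> allocate_storage(runtime::Backend& backend,
                                                  const element::Type& element_type,
                                                  const PartialShape& shape)
{
    if (shape.is_static())
    {
        // every dimension is known: a regular tensor can be allocated up front
        return backend.create_tensor(element_type, shape.get_shape());
    }
    // rank or some dimensions are unknown: allocation is deferred until execution
    return backend.create_dynamic_tensor(element_type, shape);
}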

View File

@ -140,7 +140,7 @@ public:
virtual void read(void* p, size_t n) const override;
bool has_storage() const;
void release_storage();
void make_storage(const element::Type& element_type, const Shape& shape);
void make_storage(const element::Type& element_type, const PartialShape& shape);
const std::shared_ptr<ngraph::runtime::Tensor>& get_wrapped_tensor() const;
private:

View File

@ -1506,30 +1506,20 @@ IE_GPU.builder_opset1_collapse_dyn_shape
IE_GPU.onnx_model_fake_quantize_const_inputs_infer
IE_GPU.onnx_model_fake_quantize_nonconst_inputs_infer
# A body with no Constant/Identity termination condition output is not supported by the current nG Loop
# is_termination_condition_always_true returns false
# Dynamic shape cases for Loop are not supported
onnx_controlflow_loop_2d_no_identity_termination_cond
onnx_controlflow_loop_2d_no_identity_termination_cond_false
onnx_controlflow_loop_2d_const_no_identity_termination_cond
onnx_controlflow_loop_2d_both_cond_and_trip_count_as_inputs
#dynamic trip count
onnx_controlflow_loop_2d_trip_count_dynamic
onnx_controlflow_loop_no_variadic_inputs_and_outputs
onnx_controlflow_loop_power
# Input body shape is changed during Loop iterations
# An exception is thrown during Loop shape inference
# Is this expected?
onnx_controlflow_loop_concat_values
# dynamic trip count
onnx_controlflow_loop_2d_trip_count_dynamic
# Infinite Loop is not supported
onnx_controlflow_loop_infinite
# Loop is not supported yet by IE backend
onnx_controlflow_loop_2d_add
onnx_controlflow_loop_2d_no_identity_termination_cond_false
onnx_controlflow_loop_add_initializer_from_parent_scope
onnx_controlflow_loop_add_node_from_parent_scope
onnx_controlflow_loop_add_value_the_same_node_from_parent_and_subgraph
onnx_controlflow_loop_scalars
onnx_controlflow_loop_2d_add_const_cond
onnx_controlflow_loop_no_variadic_inputs_and_outputs
onnx_controlflow_loop_power

View File

@ -127,21 +127,14 @@ INTERPRETER.ctc_greedy_decoder_f16
# LogSoftmax's reference implementation doesn't handle scalar input properly
onnx_model_logsoftmax_0D
# A body with no Constant/Identity termination condition output is not supported by the current nG Loop
# is_termination_condition_always_true returns false
onnx_controlflow_loop_2d_no_identity_termination_cond
onnx_controlflow_loop_2d_const_no_identity_termination_cond
onnx_controlflow_loop_2d_both_cond_and_trip_count_as_inputs
# Input body shape is changed during Loop iterations
# An exception is thrown during Loop shape inference
# Is this expected?
onnx_controlflow_loop_concat_values
# Infinite Loop is not supported
onnx_controlflow_loop_infinite
# Dynamic shape support?
# Dynamic shape support
onnx_controlflow_loop_2d_trip_count_dynamic
onnx_controlflow_loop_no_variadic_inputs_and_outputs
onnx_controlflow_loop_power

View File

@ -102,16 +102,23 @@ namespace ngraph
const auto& function_output =
m_function->get_results()[m_allocated_expected_outputs];
network_out_name = function_output->get_friendly_name();
// determine the output name in IE convention
// (based on the name of the node that produces the result)
const auto& prev_layer = function_output->input_value(0);
network_out_name = prev_layer.get_node_shared_ptr()->get_friendly_name();
if (prev_layer.get_node_shared_ptr()->get_output_size() != 1)
{
network_out_name += "." + std::to_string(prev_layer.get_index());
}
NGRAPH_CHECK(
m_network_outputs.count(network_out_name) == 1,
"nGraph function's output number ",
m_allocated_expected_outputs,
" was not found in the CNNNetwork built from it. Function's output name: ",
function_output->get_friendly_name());
network_out_name);
network_output = m_network_outputs[function_output->get_friendly_name()];
network_output = m_network_outputs[network_out_name];
}
auto blob =
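The naming rule applied in this change can be summed up in a small, hedged helper (illustrative only, not part of the test engine): an nGraph Result maps to the CNNNetwork output named after the node that produces its value, with ".<output index>" appended only when that node has more than one output.

// Hedged, illustrative helper reproducing the IE output-name convention used above.
#include <memory>
#include <string>
#include "ngraph/node.hpp"

std::string ie_output_name(const std::shared_ptr<ngraph::Node>& result_node)
{
    // the Result's value comes from exactly one upstream output
    const auto& producer = result_node->input_value(0);
    std::string name = producer.get_node_shared_ptr()->get_friendly_name();
    // IE appends ".<output index>" only for multi-output producers
    if (producer.get_node_shared_ptr()->get_output_size() != 1)
    {
        name += "." + std::to_string(producer.get_index());
    }
    return name;
}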