Move eval tests to template tests (#7132)

* Add If implementation with reference

* fix test

* fix comments

* Fix validate_and_infer_types

* rewrite tests for dynamic cases

* Fix CI failure

* add comments for validate_and_infer_types

* fix api

* Added ngraph checks and deleted copied op from opset8

* code style fix

* fix code style

* add checkers to reference

* add has_evaluate

* fix eval

* Fix code style

* fix code style

* Add template plugin tests

* fix code style

* delete boolean

* fix IfParams

* Fix comments

* intermediate commit

* delete eval test

* add common header

* Fix codestyle

* fix set_invariant_input/set_body_output

* fix code_style

* fix codestyle

* delete validate_and_infer_types from type prop tests

* delete comments
This commit is contained in:
Eugeny Volosenkov
2021-10-07 18:40:57 +03:00
committed by GitHub
parent 465304a108
commit 05cd830de5
5 changed files with 434 additions and 377 deletions

View File

@@ -0,0 +1,368 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <algorithm>
#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <limits>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>
#include "base_reference_test.hpp"
using namespace reference_tests;
using namespace ngraph;
using namespace InferenceEngine;
// Base class for the fixture factories below: each derived struct builds an
// ngraph::Function containing an op::v8::If wired up for one test scenario.
struct IfFunctionalBase {
    // Builds the function under test. `if_inputs` describe the outer-graph
    // inputs; `results` are the expected outputs (used only for sanity checks
    // on the test-case shape, not consumed by the graph itself).
    virtual std::shared_ptr<Function> create_function(const std::vector<Tensor>& if_inputs,
                                                      const std::vector<Tensor>& results) = 0;
    IfFunctionalBase() {}
    // Fixtures are held as shared_ptr<IfFunctionalBase>; a virtual destructor
    // makes deletion through the base pointer safe for any owner, not only
    // shared_ptr's type-erased deleter.
    virtual ~IfFunctionalBase() = default;
};
// Fixture: If whose condition is a boolean Constant fixed at graph build time.
// then-branch computes X * Y; else-branch forwards X unchanged.
struct IfCondConst : public IfFunctionalBase {
    std::shared_ptr<Function> create_function(const std::vector<Tensor>& if_inputs,
                                              const std::vector<Tensor>& results) override {
        NGRAPH_CHECK(if_inputs.size() == 2, "Incorrect test case! Number of inputs is not 2.");
        NGRAPH_CHECK(results.size() == 1, "Incorrect test case! Number of outputs is not 1.");
        // Outer-graph inputs take their type/shape from the test tensors.
        auto X = std::make_shared<op::Parameter>(if_inputs[0].type, if_inputs[0].shape);
        auto Y = std::make_shared<op::Parameter>(if_inputs[1].type, if_inputs[1].shape);
        // Condition is baked in, so the branch taken never changes at runtime.
        auto cond = std::make_shared<op::Constant>(ngraph::element::boolean, Shape{1}, cond_value);
        // Branch-body parameters are left dynamic; shapes flow in from the inputs.
        auto Xt = std::make_shared<op::Parameter>(if_inputs[0].type, PartialShape::dynamic());
        auto Yt = std::make_shared<op::Parameter>(if_inputs[1].type, PartialShape::dynamic());
        auto Xe = std::make_shared<op::Parameter>(if_inputs[0].type, PartialShape::dynamic());
        auto then_op = std::make_shared<op::v1::Multiply>(Xt, Yt);
        auto res0 = std::make_shared<op::Result>(then_op);
        auto res1 = std::make_shared<op::Result>(Xe);
        auto then_body = std::make_shared<ngraph::Function>(OutputVector{res0}, ParameterVector{Xt, Yt});
        auto else_body = std::make_shared<ngraph::Function>(OutputVector{res1}, ParameterVector{Xe});
        auto if_op = std::make_shared<op::v8::If>(cond);
        if_op->set_then_body(then_body);
        if_op->set_else_body(else_body);
        if_op->set_input(X, Xt, Xe);
        // Y feeds only the then-branch; the else-branch has no matching parameter.
        if_op->set_input(Y, Yt, nullptr);
        auto result = if_op->set_output(res0, res1);
        auto res = std::make_shared<op::Result>(result);
        auto fun = std::make_shared<Function>(OutputVector{res}, ParameterVector{X, Y});
        return fun;
    }
    explicit IfCondConst(bool value) : cond_value(value) {}
    bool cond_value;  // which branch the constant condition selects
};
// Fixture: If whose condition arrives as a runtime boolean Parameter.
// then-branch multiplies the two inputs, else-branch adds them.
struct IfCondIsNonConst : public IfFunctionalBase {
    std::shared_ptr<Function> create_function(const std::vector<Tensor>& if_inputs,
                                              const std::vector<Tensor>& results) override {
        NGRAPH_CHECK(if_inputs.size() == 3, "Incorrect test case! Number of inputs is not 3.");
        NGRAPH_CHECK(results.size() == 1, "Incorrect test case! Number of outputs is not 1.");
        // Outer-graph inputs: runtime condition plus two static {1,2,2} tensors.
        auto cond = std::make_shared<op::Parameter>(element::boolean, Shape{1});
        auto X = std::make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
        auto Y = std::make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
        // Branch-body parameters, deliberately left dynamic.
        auto then_x = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto then_y = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto else_x = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto else_y = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        // then: X * Y; else: X + Y.
        auto mul = std::make_shared<op::v1::Multiply>(then_x, then_y);
        auto add = std::make_shared<op::v1::Add>(else_x, else_y);
        auto mul_res = std::make_shared<op::Result>(mul);
        auto add_res = std::make_shared<op::Result>(add);
        auto then_body = std::make_shared<ngraph::Function>(OutputVector{mul_res}, ParameterVector{then_x, then_y});
        auto else_body = std::make_shared<ngraph::Function>(OutputVector{add_res}, ParameterVector{else_x, else_y});
        auto if_op = std::make_shared<op::v8::If>(cond);
        if_op->set_then_body(then_body);
        if_op->set_else_body(else_body);
        if_op->set_input(X, then_x, else_x);
        if_op->set_input(Y, then_y, else_y);
        auto if_out = if_op->set_output(mul_res, add_res);
        auto graph_res = std::make_shared<op::Result>(if_out);
        return std::make_shared<Function>(OutputVector{graph_res}, ParameterVector{cond, X, Y});
    }
};
// Fixture: If with no data inputs at all — each branch is a parameterless
// body yielding its own fixed scalar constant (8.0 vs 2.0).
struct IfWithoutAdditionalInputs : IfFunctionalBase {
    std::shared_ptr<Function> create_function(const std::vector<Tensor>& if_inputs,
                                              const std::vector<Tensor>& results) override {
        NGRAPH_CHECK(if_inputs.size() == 1, "Incorrect test case! Number of inputs is not 1.");
        NGRAPH_CHECK(results.size() == 1, "Incorrect test case! Number of outputs is not 1.");
        auto cond = std::make_shared<op::Parameter>(element::boolean, Shape{1});
        auto then_const = std::make_shared<op::Constant>(element::f32, Shape{1}, 8.0);
        auto else_const = std::make_shared<op::Constant>(element::f32, Shape{1}, 2.0);
        auto then_res = std::make_shared<op::Result>(then_const);
        auto else_res = std::make_shared<op::Result>(else_const);
        auto if_op = std::make_shared<op::v8::If>(cond);
        // Neither body declares parameters; the constants are self-contained.
        if_op->set_then_body(std::make_shared<ngraph::Function>(OutputVector{then_res}, ParameterVector{}));
        if_op->set_else_body(std::make_shared<ngraph::Function>(OutputVector{else_res}, ParameterVector{}));
        auto if_out = if_op->set_output(then_res, else_res);
        return std::make_shared<Function>(OutputVector{if_out}, ParameterVector{cond});
    }
};
// Fixture: If with two outputs where each branch forwards a different extra
// input (Y only in the then-branch, Z only in the else-branch), so the second
// output's shape depends on which branch runs ({4,2,2} vs {8,8,8}).
struct IfDynamismCaseWithStaticInputs : public IfFunctionalBase {
    std::shared_ptr<Function> create_function(const std::vector<Tensor>& if_inputs,
                                              const std::vector<Tensor>& results) override {
        NGRAPH_CHECK(if_inputs.size() == 4, "Incorrect test case! Number of inputs is not 4.");
        NGRAPH_CHECK(results.size() == 2, "Incorrect test case! Number of outputs is not 2.");
        auto X = std::make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
        auto Y = std::make_shared<op::Parameter>(element::f32, Shape{4, 2, 2});
        auto Z = std::make_shared<op::Parameter>(element::f32, Shape{8, 8, 8});
        auto cond = std::make_shared<op::Parameter>(element::boolean, Shape{1});
        // Branch-body parameters, deliberately left dynamic.
        auto Xt = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto Yt = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto Xe = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto Ze = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        // then-body: X*X and a pass-through of Y; else-body: X+X and a pass-through of Z.
        auto then_op = std::make_shared<op::v1::Multiply>(Xt, Xt);
        auto else_op = std::make_shared<op::v1::Add>(Xe, Xe);
        auto then_op_result1 = std::make_shared<op::Result>(then_op);
        auto then_op_result2 = std::make_shared<op::Result>(Yt);
        auto else_op_result1 = std::make_shared<op::Result>(else_op);
        auto else_op_result2 = std::make_shared<op::Result>(Ze);
        auto then_body =
            std::make_shared<ngraph::Function>(OutputVector{then_op_result1, then_op_result2}, ParameterVector{Xt, Yt});
        auto else_body =
            std::make_shared<ngraph::Function>(OutputVector{else_op_result1, else_op_result2}, ParameterVector{Xe, Ze});
        auto if_op = std::make_shared<op::v8::If>(cond);
        if_op->set_then_body(then_body);
        if_op->set_else_body(else_body);
        if_op->set_input(X, Xt, Xe);
        // Y is consumed only by the then-body, Z only by the else-body.
        if_op->set_input(Y, Yt, nullptr);
        if_op->set_input(Z, nullptr, Ze);
        auto res1 = if_op->set_output(then_op_result1, else_op_result1);
        auto res2 = if_op->set_output(then_op_result2, else_op_result2);
        auto result_if1 = std::make_shared<op::Result>(res1);
        auto result_if2 = std::make_shared<op::Result>(res2);
        auto fun = std::make_shared<Function>(OutputVector{result_if1, result_if2}, ParameterVector{cond, X, Y, Z});
        return fun;
    }
};
// Fixture: If whose condition is a runtime *scalar* (rank-0, Shape{}) boolean
// Parameter; then-branch multiplies the inputs, else-branch adds them.
struct IfConditionIsScalar : public IfFunctionalBase {
    std::shared_ptr<Function> create_function(const std::vector<Tensor>& if_inputs,
                                              const std::vector<Tensor>& results) override {
        NGRAPH_CHECK(if_inputs.size() == 3, "Incorrect test case! Number of inputs is not 3.");
        NGRAPH_CHECK(results.size() == 1, "Incorrect test case! Number of outputs is not 1.");
        auto X = std::make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
        auto Y = std::make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
        // The point of this fixture: Shape{} (scalar), not Shape{1}.
        auto cond = std::make_shared<op::Parameter>(element::boolean, Shape{});
        // Branch-body parameters, deliberately left dynamic.
        auto Xt = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto Yt = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto Xe = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto Ye = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        // then: X * Y; else: X + Y.
        auto then_op = std::make_shared<op::v1::Multiply>(Xt, Yt);
        auto else_op = std::make_shared<op::v1::Add>(Xe, Ye);
        auto then_op_result = std::make_shared<op::Result>(then_op);
        auto else_op_result = std::make_shared<op::Result>(else_op);
        auto then_body = std::make_shared<ngraph::Function>(OutputVector{then_op_result}, ParameterVector{Xt, Yt});
        auto else_body = std::make_shared<ngraph::Function>(OutputVector{else_op_result}, ParameterVector{Xe, Ye});
        auto if_op = std::make_shared<op::v8::If>(cond);
        if_op->set_then_body(then_body);
        if_op->set_else_body(else_body);
        if_op->set_input(X, Xt, Xe);
        if_op->set_input(Y, Yt, Ye);
        auto res = if_op->set_output(then_op_result, else_op_result);
        // Dead code removed here: an explicit if_op->validate_and_infer_types()
        // (Function construction below validates the whole graph anyway) and two
        // unused local vectors X_v/Y_v left over from a copied op_eval test.
        auto fun = std::make_shared<Function>(OutputVector{res}, ParameterVector{cond, X, Y});
        return fun;
    }
};
// Fixture: If whose condition Parameter has a *dynamic* dimension, so the
// branch cannot be resolved statically; then: X * Y, else: X + Y.
struct IfConditionIsDynamic : public IfFunctionalBase {
    std::shared_ptr<Function> create_function(const std::vector<Tensor>& if_inputs,
                                              const std::vector<Tensor>& results) override {
        NGRAPH_CHECK(if_inputs.size() == 3, "Incorrect test case! Number of inputs is not 3.");
        NGRAPH_CHECK(results.size() == 1, "Incorrect test case! Number of outputs is not 1.");
        auto X = std::make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
        auto Y = std::make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
        // Condition rank is known but its single dimension is dynamic.
        auto cond = std::make_shared<op::Parameter>(element::boolean, PartialShape{Dimension::dynamic()});
        // Branch-body parameters, deliberately left dynamic.
        auto Xt = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto Yt = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto Xe = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto Ye = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        // then: X * Y; else: X + Y.
        auto then_op = std::make_shared<op::v1::Multiply>(Xt, Yt);
        auto else_op = std::make_shared<op::v1::Add>(Xe, Ye);
        auto then_op_result = std::make_shared<op::Result>(then_op);
        auto else_op_result = std::make_shared<op::Result>(else_op);
        auto then_body = std::make_shared<ngraph::Function>(OutputVector{then_op_result}, ParameterVector{Xt, Yt});
        auto else_body = std::make_shared<ngraph::Function>(OutputVector{else_op_result}, ParameterVector{Xe, Ye});
        auto if_op = std::make_shared<op::v8::If>(cond);
        if_op->set_then_body(then_body);
        if_op->set_else_body(else_body);
        if_op->set_input(X, Xt, Xe);
        if_op->set_input(Y, Yt, Ye);
        auto rs = if_op->set_output(then_op_result, else_op_result);
        auto result = std::make_shared<op::Result>(rs);
        auto fun = std::make_shared<Function>(OutputVector{result}, ParameterVector{cond, X, Y});
        return fun;
    }
};
// Aggregates one parameterized test case: the fixture that builds the graph,
// its input tensors, the expected output tensors, and a readable case name.
struct IfParams {
    IfParams(const std::shared_ptr<IfFunctionalBase>& functional,
             const std::vector<Tensor>& if_inputs,
             const std::vector<Tensor>& expected_results,
             const std::string& test_case_name)
        : function(functional),
          inputs(if_inputs),
          expected_results(expected_results),
          test_case_name(test_case_name) {}
    std::shared_ptr<IfFunctionalBase> function;  // builds the graph under test
    std::vector<Tensor> inputs;                  // values fed to the function
    std::vector<Tensor> expected_results;        // reference outputs to compare against
    std::string test_case_name;                  // suffix used in the gtest name
};
// Parameterized harness: builds the function from the IfParams fixture and
// registers input/expected tensors for CommonReferenceTest to execute and
// compare (see base_reference_test.hpp).
class ReferenceIfLayerTest : public testing::TestWithParam<IfParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = params.function->create_function(params.inputs, params.expected_results);
        inputData.reserve(params.inputs.size());
        refOutData.reserve(params.expected_results.size());
        for (auto& input_tensor : params.inputs) {
            inputData.push_back(input_tensor.data);
        }
        for (auto& expected_tensor : params.expected_results) {
            refOutData.push_back(expected_tensor.data);
        }
    }
    // gtest calls this to derive the per-case suffix of the test name.
    static std::string getTestCaseName(const testing::TestParamInfo<IfParams>& obj) {
        auto param = obj.param;
        return param.test_case_name;
    }
};
// Runs the comparison for every IfParams case registered below; Exec() is
// provided by CommonReferenceTest (base_reference_test.hpp).
TEST_P(ReferenceIfLayerTest, IfWithHardcodedRefs) {
    Exec();
}
// Generates the 16 reference values for the {4, 2, 2} tensor Y:
// flattened element k holds (k / 4) * (k % 4).
std::vector<float> Y_gen() {
    std::vector<float> values;
    values.reserve(16);
    for (int k = 0; k < 16; ++k) {
        values.push_back(static_cast<float>((k / 4) * (k % 4)));
    }
    return values;
}
// Generates the 512 reference values for the {8, 8, 8} tensor Z:
// flattened element k holds (k / 64) * (k % 64).
std::vector<float> Z_gen() {
    std::vector<float> values;
    values.reserve(8 * 64);
    for (int k = 0; k < 8 * 64; ++k) {
        values.push_back(static_cast<float>((k / 64) * (k % 64)));
    }
    return values;
}
// One IfParams entry per scenario; fix applied: the third case's name had a
// transposed typo ("si_non_const") and now matches its false counterpart.
INSTANTIATE_TEST_SUITE_P(
    smoke_If_With_Hardcoded_Refs,
    ReferenceIfLayerTest,
    ::testing::Values(
        IfParams(
            std::make_shared<IfCondConst>(true),
            std::vector<Tensor>{Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{1.0, 1.0, 1.0, 1.0}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{2.0, 2.0, 2.0, 2.0})},
            std::vector<Tensor>{Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{2.0, 2.0, 2.0, 2.0})},
            "if_condition_const_is_true"),
        IfParams(
            std::make_shared<IfCondConst>(false),
            std::vector<Tensor>{Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{1.0, 1.0, 1.0, 1.0}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{2.0, 2.0, 2.0, 2.0})},
            std::vector<Tensor>{Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{1.0, 1.0, 1.0, 1.0})},
            "if_condition_const_is_false"),
        IfParams(
            std::make_shared<IfCondIsNonConst>(),
            std::vector<Tensor>{Tensor(Shape{1}, ngraph::element::boolean, std::vector<unsigned char>{1}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{1.0, 2.0, 3.0, 4.0}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{2.0, 1.0, 2.0, 3.0})},
            std::vector<Tensor>{Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{2.0, 2.0, 6.0, 12.0})},
            "if_condition_is_non_const_true"),
        IfParams(
            std::make_shared<IfCondIsNonConst>(),
            std::vector<Tensor>{Tensor(Shape{1}, ngraph::element::boolean, std::vector<unsigned char>{0}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{1.0, 2.0, 3.0, 4.0}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{2.0, 1.0, 2.0, 3.0})},
            std::vector<Tensor>{Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{3.0, 3.0, 5.0, 7.0})},
            "if_condition_is_non_const_false"),
        IfParams(std::make_shared<IfWithoutAdditionalInputs>(),
                 std::vector<Tensor>{Tensor(Shape{1}, ngraph::element::boolean, std::vector<unsigned char>{1})},
                 std::vector<Tensor>{Tensor(Shape{1}, ngraph::element::f32, std::vector<float>{8.0})},
                 "if_without_addition_inputs_condition_is_true"),
        IfParams(std::make_shared<IfWithoutAdditionalInputs>(),
                 std::vector<Tensor>{Tensor(Shape{1}, ngraph::element::boolean, std::vector<unsigned char>{0})},
                 std::vector<Tensor>{Tensor(Shape{1}, ngraph::element::f32, std::vector<float>{2.0})},
                 "if_without_addition_inputs_condition_is_false"),
        IfParams(
            std::make_shared<IfConditionIsScalar>(),
            std::vector<Tensor>{Tensor(Shape{}, ngraph::element::boolean, std::vector<unsigned char>{1}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{1.0, 2.0, 3.0, 4.0}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{2.0, 1.0, 2.0, 3.0})},
            std::vector<Tensor>{Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{2.0, 2.0, 6.0, 12.0})},
            "if_condition_is_scalar_cond_true"),
        IfParams(
            std::make_shared<IfConditionIsScalar>(),
            std::vector<Tensor>{Tensor(Shape{}, ngraph::element::boolean, std::vector<unsigned char>{0}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{1.0, 2.0, 3.0, 4.0}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{2.0, 1.0, 2.0, 3.0})},
            std::vector<Tensor>{Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{3.0, 3.0, 5.0, 7.0})},
            "if_condition_is_scalar_cond_false"),
        IfParams(
            std::make_shared<IfDynamismCaseWithStaticInputs>(),
            std::vector<Tensor>{Tensor(Shape{}, ngraph::element::boolean, std::vector<unsigned char>{1}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{1.0, 2.0, 3.0, 4.0}),
                                Tensor(Shape{4, 2, 2}, ngraph::element::f32, Y_gen()),
                                Tensor(Shape{8, 8, 8}, ngraph::element::f32, Z_gen())},
            std::vector<Tensor>{Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{1.0, 4.0, 9.0, 16.0}),
                                Tensor(Shape{4, 2, 2}, ngraph::element::f32, Y_gen())},
            "If_dynamism_case_with_static_inputs_condition_true"),
        IfParams(
            std::make_shared<IfDynamismCaseWithStaticInputs>(),
            std::vector<Tensor>{Tensor(Shape{}, ngraph::element::boolean, std::vector<unsigned char>{0}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{1.0, 2.0, 3.0, 4.0}),
                                Tensor(Shape{4, 2, 2}, ngraph::element::f32, Y_gen()),
                                Tensor(Shape{8, 8, 8}, ngraph::element::f32, Z_gen())},
            std::vector<Tensor>{Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{2.0, 4.0, 6.0, 8.0}),
                                Tensor(Shape{8, 8, 8}, ngraph::element::f32, Z_gen())},
            "If_dynamism_case_with_static_inputs_condition_false"),
        IfParams(
            std::make_shared<IfConditionIsDynamic>(),
            std::vector<Tensor>{Tensor(Shape{}, ngraph::element::boolean, std::vector<unsigned char>{1}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{1.0, 2.0, 3.0, 4.0}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{2.0, 1.0, 2.0, 3.0})},
            std::vector<Tensor>{Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{2.0, 2.0, 6.0, 12.0})},
            "if_condition_is_dynamic_cond_true"),
        IfParams(
            std::make_shared<IfConditionIsDynamic>(),
            std::vector<Tensor>{Tensor(Shape{}, ngraph::element::boolean, std::vector<unsigned char>{0}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{1.0, 2.0, 3.0, 4.0}),
                                Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{2.0, 1.0, 2.0, 3.0})},
            std::vector<Tensor>{Tensor(Shape{1, 2, 2}, ngraph::element::f32, std::vector<float>{3.0, 3.0, 5.0, 7.0})},
            "if_condition_is_dynamic_cond_false")));

View File

@@ -0,0 +1,64 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "common_test_utils/test_common.hpp"
#include <ngraph/function.hpp>
#include "common_test_utils/ngraph_test_utils.hpp"
#include <transformations/init_node_info.hpp>
#include "ngraph/opsets/opset1.hpp"
#include "ngraph/opsets/opset5.hpp"
#include "ngraph/opsets/opset8.hpp"
#include <ngraph/pass/constant_folding.hpp>
using namespace testing;
using namespace std;
using namespace ngraph;
// Checks that ConstantFolding collapses an If whose condition and all data
// inputs are constants: cond=false selects the else-branch 10^3 = 1000, so
// the folded graph must equal a reference with a single 1000.0f constant.
TEST(TransformationTests, if_constant_folding) {
    std::shared_ptr<ngraph::Function> fun(nullptr);
    {
        auto cond = std::make_shared<ngraph::opset5::Constant>(element::boolean, Shape{ 1 }, false);
        auto A1 = std::make_shared<ngraph::opset5::Constant>(element::f32, Shape{ 1 }, 37.0);
        auto A2 = std::make_shared<ngraph::opset5::Constant>(element::f32, Shape{ 1 }, 45.0);
        auto B1 = std::make_shared<ngraph::opset5::Constant>(element::f32, Shape{ 1 }, 10.0);
        auto B2 = std::make_shared<ngraph::opset5::Constant>(element::f32, Shape{ 1 }, 3.0);
        // Branch-body parameters, deliberately dynamic.
        auto Xt = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto Yt = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto Xe = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        auto Ye = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
        // then: A1 + A2; else: B1 ^ B2 (the branch that is actually taken).
        auto a_add = std::make_shared<op::v1::Add>(Xt, Yt);
        auto b_pow = std::make_shared<op::v1::Power>(Xe, Ye);
        auto then_res = std::make_shared<op::Result>(a_add);
        auto then_body = make_shared<ngraph::Function>(OutputVector{ then_res }, ParameterVector{ Xt, Yt });
        auto else_res = std::make_shared<op::Result>(b_pow);
        auto else_body = make_shared<ngraph::Function>(OutputVector{ else_res }, ParameterVector{ Xe, Ye });
        auto if_op = make_shared<op::v8::If>(cond);
        if_op->set_then_body(then_body);
        if_op->set_else_body(else_body);
        // A1/A2 feed only the then-body; B1/B2 feed only the else-body.
        if_op->set_input(A1, Xt, nullptr);
        if_op->set_input(A2, Yt, nullptr);
        if_op->set_input(B1, nullptr, Xe);
        if_op->set_input(B2, nullptr, Ye);
        auto if_res = if_op->set_output(then_res, else_res);
        // A non-constant add keeps the outer function from folding to a single constant.
        auto param_add = make_shared<op::Parameter>(element::f32, Shape{ 1 });
        auto add = make_shared<op::v1::Add>(if_res, param_add);
        auto add_res = make_shared<op::Result>(add);
        fun = make_shared<Function>(OutputVector{ add_res }, ParameterVector{ param_add });
        ngraph::pass::ConstantFolding().run_on_function(fun);
    }
    // Reference graph: the If already folded to the constant 1000.0f.
    std::shared_ptr<ngraph::Function> f_ref(nullptr);
    {
        auto constant_folding_if = make_shared<ngraph::opset5::Constant>(element::f32, Shape{ 1 }, 1000.0f);
        auto param_add = make_shared<op::Parameter>(element::f32, Shape{ 1 });
        auto add = make_shared<op::v1::Add>(constant_folding_if, param_add);
        auto add_res = make_shared<op::Result>(add);
        f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add_res }, ngraph::ParameterVector{ param_add });
    }
    auto res = compare_functions(fun, f_ref);
    ASSERT_TRUE(res.first) << res.second;
}

View File

@@ -135,6 +135,7 @@ void ov::op::util::MultiSubGraphOp::set_invariant_inputs(const Output<Node>& val
}
}
}
validate_and_infer_types();
}
ov::Output<ov::Node> ov::op::util::MultiSubGraphOp::set_body_outputs(const ResultVector& bodies_results) {
@@ -149,6 +150,7 @@ ov::Output<ov::Node> ov::op::util::MultiSubGraphOp::set_body_outputs(const Resul
}
}
set_output_size(output_index + 1);
validate_and_infer_types();
return Output<Node>(shared_from_this(), output_index);
}

View File

@@ -1,368 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/pass/constant_folding.hpp>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "ngraph/opsets/opset1.hpp"
#include "ngraph/opsets/opset5.hpp"
#include "ngraph/opsets/opset8.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/validation_util.hpp"
#include "runtime/backend.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
// Evaluates an If with a constant condition twice: the original (cond=true,
// then-branch X*Y) and a clone re-wired to cond=false (else-branch forwards X).
// Fixes: removed the unused body parameter Ye (the else-body only uses Xe);
// loop indices are size_t to avoid signed/unsigned comparison.
TEST(op_eval, if_condition_const) {
    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
    auto Y = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
    auto cond = std::make_shared<ngraph::opset5::Constant>(element::boolean, Shape{1}, true);
    auto cond2 = std::make_shared<ngraph::opset5::Constant>(element::boolean, Shape{1}, false);
    auto Xt = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Yt = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Xe = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto then_op = std::make_shared<op::v1::Multiply>(Xt, Yt);
    auto res0 = make_shared<op::Result>(then_op);
    auto res1 = make_shared<op::Result>(Xe);
    auto then_body = make_shared<ngraph::Function>(OutputVector{res0}, ParameterVector{Xt, Yt});
    auto else_body = make_shared<ngraph::Function>(OutputVector{res1}, ParameterVector{Xe});
    auto if_op = make_shared<op::v8::If>(cond);
    if_op->set_then_body(then_body);
    if_op->set_else_body(else_body);
    if_op->set_input(X, Xt, Xe);
    // Y feeds only the then-branch.
    if_op->set_input(Y, Yt, nullptr);
    if_op->set_output(res0, res1);
    if_op->validate_and_infer_types();
    // Same If, re-wired to the false condition.
    auto if_op2 = if_op->clone_with_new_inputs(OutputVector{cond2, X, Y});
    std::vector<float> X_v{1.0, 1.0, 1.0, 1.0};
    std::vector<float> Y_v{2.0, 2.0, 2.0, 2.0};
    auto fun = make_shared<Function>(OutputVector{if_op}, ParameterVector{X, Y});
    auto fun2 = make_shared<Function>(OutputVector{if_op2}, ParameterVector{X, Y});
    auto result = make_shared<HostTensor>();
    ASSERT_TRUE(fun->evaluate({result},
                              {make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, X_v),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, Y_v)}));
    EXPECT_EQ(result->get_element_type(), element::f32);
    EXPECT_EQ(result->get_shape(), Shape{std::vector<size_t>({1, 2, 2})});
    auto result_data = read_vector<float>(result);
    std::vector<float> expected_results{2.0, 2.0, 2.0, 2.0};
    for (size_t i = 0; i < expected_results.size(); ++i)
        EXPECT_NEAR(result_data[i], expected_results[i], 0.000001);
    auto result1 = make_shared<HostTensor>();
    ASSERT_TRUE(fun2->evaluate({result1},
                               {make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, X_v),
                                make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, Y_v)}));
    EXPECT_EQ(result1->get_element_type(), element::f32);
    EXPECT_EQ(result1->get_shape(), Shape{std::vector<size_t>({1, 2, 2})});
    auto result_data1 = read_vector<float>(result1);
    // else-branch forwards X unchanged.
    for (size_t i = 0; i < expected_results.size(); ++i)
        EXPECT_NEAR(result_data1[i], X_v[i], 0.000001);
}
// Evaluates an If with a runtime boolean condition on the same function
// twice: cond=true must yield X*Y, cond=false must yield X+Y.
TEST(op_eval, if_condition_non_const) {
    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
    auto Y = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
    auto cond = make_shared<op::Parameter>(element::boolean, Shape{1});
    // Branch-body parameters, deliberately left dynamic.
    auto Xt = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Yt = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Xe = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Ye = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    // then: X * Y; else: X + Y.
    auto then_op = std::make_shared<op::v1::Multiply>(Xt, Yt);
    auto else_op = std::make_shared<op::v1::Add>(Xe, Ye);
    auto then_op_result = make_shared<op::Result>(then_op);
    auto else_op_result = make_shared<op::Result>(else_op);
    auto then_body = make_shared<ngraph::Function>(OutputVector{then_op_result}, ParameterVector{Xt, Yt});
    auto else_body = make_shared<ngraph::Function>(OutputVector{else_op_result}, ParameterVector{Xe, Ye});
    auto if_op = make_shared<op::v8::If>(cond);
    if_op->set_then_body(then_body);
    if_op->set_else_body(else_body);
    if_op->set_input(X, Xt, Xe);
    if_op->set_input(Y, Yt, Ye);
    if_op->set_output(then_op_result, else_op_result);
    if_op->validate_and_infer_types();
    std::vector<float> X_v{1.0, 2.0, 3.0, 4.0};
    std::vector<float> Y_v{2.0, 1.0, 2.0, 3.0};
    auto fun = make_shared<Function>(OutputVector{if_op}, ParameterVector{cond, X, Y});
    auto result = make_shared<HostTensor>();
    // cond = true -> then-branch: element-wise product.
    ASSERT_TRUE(fun->evaluate({result},
                              {make_host_tensor<element::Type_t::boolean>(Shape{1}, {true}),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, X_v),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, Y_v)}));
    EXPECT_EQ(result->get_element_type(), element::f32);
    EXPECT_EQ(result->get_shape(), Shape{std::vector<size_t>({1, 2, 2})});
    auto result_data = read_vector<float>(result);
    std::vector<float> expected_results{2.0, 2.0, 6.0, 12.0};
    for (auto i = 0; i < expected_results.size(); i++)
        EXPECT_NEAR(result_data[i], expected_results[i], 0.000001);
    // cond = false -> else-branch: element-wise sum (reuses the same tensor).
    ASSERT_TRUE(fun->evaluate({result},
                              {make_host_tensor<element::Type_t::boolean>(Shape{1}, {false}),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, X_v),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, Y_v)}));
    EXPECT_EQ(result->get_element_type(), element::f32);
    EXPECT_EQ(result->get_shape(), Shape{std::vector<size_t>({1, 2, 2})});
    result_data = read_vector<float>(result);
    expected_results = {3.0, 3.0, 5.0, 7.0};
    for (auto i = 0; i < expected_results.size(); i++)
        EXPECT_NEAR(result_data[i], expected_results[i], 0.000001);
}
// Evaluates an If whose branch bodies take no parameters: each branch
// returns its own constant (8.0 when true, 2.0 when false).
TEST(op_eval, if_free_sample) {
    auto condition = make_shared<op::Parameter>(element::boolean, Shape{1});
    auto then_const = std::make_shared<ngraph::opset5::Constant>(element::f32, Shape{1}, 8.0);
    auto else_const = std::make_shared<ngraph::opset5::Constant>(element::f32, Shape{1}, 2.0);
    auto then_res = std::make_shared<op::Result>(then_const);
    auto else_res = std::make_shared<op::Result>(else_const);
    auto if_op = make_shared<op::v8::If>(condition);
    if_op->set_then_body(make_shared<ngraph::Function>(OutputVector{then_res}, ParameterVector{}));
    if_op->set_else_body(make_shared<ngraph::Function>(OutputVector{else_res}, ParameterVector{}));
    auto if_out = if_op->set_output(then_res, else_res);
    auto fun = make_shared<Function>(OutputVector{if_out}, ParameterVector{condition});
    fun->validate_nodes_and_infer_types();
    auto result_true = make_shared<HostTensor>();
    auto result_false = make_shared<HostTensor>();
    ASSERT_TRUE(fun->evaluate({result_true}, {make_host_tensor<element::Type_t::boolean>(Shape{1}, {true})}));
    ASSERT_TRUE(fun->evaluate({result_false}, {make_host_tensor<element::Type_t::boolean>(Shape{1}, {false})}));
    auto data_true = read_vector<float>(result_true);
    auto data_false = read_vector<float>(result_false);
    EXPECT_EQ(result_true->get_element_type(), element::f32);
    EXPECT_EQ(result_true->get_shape(), Shape{std::vector<size_t>({1})});
    EXPECT_EQ(result_false->get_element_type(), element::f32);
    EXPECT_EQ(result_false->get_shape(), Shape{std::vector<size_t>({1})});
    EXPECT_NEAR(data_true[0], 8.0, 0.000001);
    EXPECT_NEAR(data_false[0], 2.0, 0.000001);
}
// Checks ConstantFolding on an all-constant If: cond=false selects the
// else-branch B1 ^ B2 = 10^3, so the folded result must be 1000.
TEST(op_eval, if_constant_folding) {
    auto cond = std::make_shared<ngraph::opset5::Constant>(element::boolean, Shape{1}, false);
    auto A1 = std::make_shared<ngraph::opset5::Constant>(element::f32, Shape{1}, 37.0);
    auto A2 = std::make_shared<ngraph::opset5::Constant>(element::f32, Shape{1}, 45.0);
    auto B1 = std::make_shared<ngraph::opset5::Constant>(element::f32, Shape{1}, 10.0);
    auto B2 = std::make_shared<ngraph::opset5::Constant>(element::f32, Shape{1}, 3.0);
    // Branch-body parameters, deliberately dynamic.
    auto Xt = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Yt = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Xe = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Ye = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    // then: A1 + A2; else: B1 ^ B2 (the branch that is actually taken).
    auto a_add = std::make_shared<op::v1::Add>(Xt, Yt);
    auto b_pow = std::make_shared<op::v1::Power>(Xe, Ye);
    auto then_res = std::make_shared<op::Result>(a_add);
    auto then_body = make_shared<ngraph::Function>(OutputVector{then_res}, ParameterVector{Xt, Yt});
    auto else_res = std::make_shared<op::Result>(b_pow);
    auto else_body = make_shared<ngraph::Function>(OutputVector{else_res}, ParameterVector{Xe, Ye});
    auto if_op = make_shared<op::v8::If>(cond);
    if_op->set_then_body(then_body);
    if_op->set_else_body(else_body);
    // A1/A2 feed only the then-body; B1/B2 feed only the else-body.
    if_op->set_input(A1, Xt, nullptr);
    if_op->set_input(A2, Yt, nullptr);
    if_op->set_input(B1, nullptr, Xe);
    if_op->set_input(B2, nullptr, Ye);
    if_op->set_output(then_res, else_res);
    auto fun = make_shared<Function>(OutputVector{if_op}, ParameterVector{});
    fun->validate_nodes_and_infer_types();
    ngraph::pass::ConstantFolding().run_on_function(fun);
    auto results = fun->get_results();
    EXPECT_EQ(results.size(), 1);
    auto result = results[0];
    EXPECT_EQ(result->get_element_type(), element::f32);
    EXPECT_EQ(result->get_shape(), Shape{1});
    // After folding, the result must be derivable as a constant.
    const auto& cond_value = get_constant_from_source(result);
    auto val = cond_value->cast_vector<float>();
    EXPECT_NEAR(val[0], 1000.0, 0.000001);
}
// Evaluates If with fully dynamic body parameters and branch-specific inputs:
// output 0 is X*X (then) or X+X (else); output 1 forwards Y in the then-branch
// and Z in the else-branch, so its runtime shape depends on the condition.
TEST(op_eval, if_dynamism) {
    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
    auto Y = make_shared<op::Parameter>(element::f32, Shape{4, 2, 2});
    auto Z = make_shared<op::Parameter>(element::f32, Shape{8, 8, 8});
    auto cond = make_shared<op::Parameter>(element::boolean, Shape{1});
    // Body parameters are all dynamic so shapes must be inferred from inputs.
    auto Xt = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Yt = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Xe = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Ze = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    // Then-branch squares X; else-branch doubles X. The second result of each
    // branch simply passes its extra input through unchanged.
    auto then_op = std::make_shared<op::v1::Multiply>(Xt, Xt);
    auto else_op = std::make_shared<op::v1::Add>(Xe, Xe);
    auto then_op_result1 = make_shared<op::Result>(then_op);
    auto then_op_result2 = make_shared<op::Result>(Yt);
    auto else_op_result1 = make_shared<op::Result>(else_op);
    auto else_op_result2 = make_shared<op::Result>(Ze);
    auto then_body =
        make_shared<ngraph::Function>(OutputVector{then_op_result1, then_op_result2}, ParameterVector{Xt, Yt});
    auto else_body =
        make_shared<ngraph::Function>(OutputVector{else_op_result1, else_op_result2}, ParameterVector{Xe, Ze});
    auto if_op = make_shared<op::v8::If>(cond);
    if_op->set_then_body(then_body);
    if_op->set_else_body(else_body);
    if_op->set_input(X, Xt, Xe);
    // Y feeds only the then-branch, Z feeds only the else-branch.
    if_op->set_input(Y, Yt, nullptr);
    if_op->set_input(Z, nullptr, Ze);
    auto res1 = if_op->set_output(then_op_result1, else_op_result1);
    auto res2 = if_op->set_output(then_op_result2, else_op_result2);
    auto result_if1 = make_shared<op::Result>(res1);
    auto result_if2 = make_shared<op::Result>(res2);
    if_op->validate_and_infer_types();
    std::vector<float> X_v{1.0, 2.0, 3.0, 4.0};
    std::vector<float> Y_v, Z_v;
    for (auto c_ind = 0; c_ind < 4; ++c_ind) {
        for (auto d_ind = 0; d_ind < 4; ++d_ind) {
            Y_v.push_back(static_cast<float>(c_ind * d_ind));
        }
    }
    for (auto c_ind = 0; c_ind < 8; ++c_ind) {
        for (auto d_ind = 0; d_ind < 64; ++d_ind) {
            Z_v.push_back(static_cast<float>(c_ind * d_ind));
        }
    }
    auto fun = make_shared<Function>(OutputVector{result_if1, result_if2}, ParameterVector{cond, X, Y, Z});
    // cond == true: expect X*X on output 0 and Y forwarded on output 1.
    auto result1 = make_shared<HostTensor>();
    auto result2 = make_shared<HostTensor>();
    ASSERT_TRUE(fun->evaluate({result1, result2},
                              {make_host_tensor<element::Type_t::boolean>(Shape{1}, {true}),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, X_v),
                               make_host_tensor<element::Type_t::f32>(Shape{4, 2, 2}, Y_v),
                               make_host_tensor<element::Type_t::f32>(Shape{8, 8, 8}, Z_v)}));
    EXPECT_EQ(result1->get_element_type(), element::f32);
    EXPECT_EQ(result1->get_shape(), Shape{std::vector<size_t>({1, 2, 2})});
    auto result1_data = read_vector<float>(result1);
    std::vector<float> expected_results1{1.0, 4.0, 9.0, 16.0};
    // size_t index avoids a signed/unsigned comparison against size().
    for (size_t i = 0; i < expected_results1.size(); i++)
        EXPECT_NEAR(result1_data[i], expected_results1[i], 0.000001);
    EXPECT_EQ(result2->get_element_type(), element::f32);
    EXPECT_EQ(result2->get_shape(), Shape{std::vector<size_t>({4, 2, 2})});
    auto result2_data = read_vector<float>(result2);
    for (size_t i = 0; i < Y_v.size(); i++)
        EXPECT_NEAR(result2_data[i], Y_v[i], 0.000001);
    // cond == false: expect X+X on output 0 and Z forwarded on output 1.
    auto result3 = make_shared<HostTensor>();
    auto result4 = make_shared<HostTensor>();
    ASSERT_TRUE(fun->evaluate({result3, result4},
                              {make_host_tensor<element::Type_t::boolean>(Shape{1}, {false}),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, X_v),
                               make_host_tensor<element::Type_t::f32>(Shape{4, 2, 2}, Y_v),
                               make_host_tensor<element::Type_t::f32>(Shape{8, 8, 8}, Z_v)}));
    EXPECT_EQ(result3->get_element_type(), element::f32);
    EXPECT_EQ(result3->get_shape(), Shape{std::vector<size_t>({1, 2, 2})});
    auto result3_data = read_vector<float>(result3);
    std::vector<float> expected_results2{2.0, 4.0, 6.0, 8.0};
    for (size_t i = 0; i < expected_results2.size(); i++)
        EXPECT_NEAR(result3_data[i], expected_results2[i], 0.000001);
    EXPECT_EQ(result4->get_element_type(), element::f32);
    EXPECT_EQ(result4->get_shape(), Shape{std::vector<size_t>({8, 8, 8})});
    auto result4_data = read_vector<float>(result4);
    for (size_t i = 0; i < Z_v.size(); i++)
        EXPECT_NEAR(result4_data[i], Z_v[i], 0.000001);
}
// Evaluates If whose condition is a non-constant SCALAR parameter (Shape{}):
// then-branch computes X*Y, else-branch computes X+Y.
TEST(op_eval, if_condition_non_const_scalar) {
    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
    auto Y = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
    auto cond = make_shared<op::Parameter>(element::boolean, Shape{});
    // Body parameters are dynamic; shapes come from the If inputs.
    auto Xt = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Yt = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Xe = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Ye = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    // Branch bodies: multiply (then) vs add (else).
    auto then_op = std::make_shared<op::v1::Multiply>(Xt, Yt);
    auto else_op = std::make_shared<op::v1::Add>(Xe, Ye);
    auto then_op_result = make_shared<op::Result>(then_op);
    auto else_op_result = make_shared<op::Result>(else_op);
    auto then_body = make_shared<ngraph::Function>(OutputVector{then_op_result}, ParameterVector{Xt, Yt});
    auto else_body = make_shared<ngraph::Function>(OutputVector{else_op_result}, ParameterVector{Xe, Ye});
    auto if_op = make_shared<op::v8::If>(cond);
    if_op->set_then_body(then_body);
    if_op->set_else_body(else_body);
    if_op->set_input(X, Xt, Xe);
    if_op->set_input(Y, Yt, Ye);
    if_op->set_output(then_op_result, else_op_result);
    if_op->validate_and_infer_types();
    std::vector<float> X_v{1.0, 2.0, 3.0, 4.0};
    std::vector<float> Y_v{2.0, 1.0, 2.0, 3.0};
    auto fun = make_shared<Function>(OutputVector{if_op}, ParameterVector{cond, X, Y});
    auto result = make_shared<HostTensor>();
    // cond == true -> then-branch (X * Y). The condition tensor is scalar
    // (Shape{}) to match the declared parameter shape.
    ASSERT_TRUE(fun->evaluate({result},
                              {make_host_tensor<element::Type_t::boolean>(Shape{}, {true}),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, X_v),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, Y_v)}));
    EXPECT_EQ(result->get_element_type(), element::f32);
    EXPECT_EQ(result->get_shape(), Shape{std::vector<size_t>({1, 2, 2})});
    auto result_data = read_vector<float>(result);
    std::vector<float> expected_results{2.0, 2.0, 6.0, 12.0};
    for (size_t i = 0; i < expected_results.size(); i++)
        EXPECT_NEAR(result_data[i], expected_results[i], 0.000001);
    // cond == false -> else-branch (X + Y).
    ASSERT_TRUE(fun->evaluate({result},
                              {make_host_tensor<element::Type_t::boolean>(Shape{}, {false}),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, X_v),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, Y_v)}));
    EXPECT_EQ(result->get_element_type(), element::f32);
    EXPECT_EQ(result->get_shape(), Shape{std::vector<size_t>({1, 2, 2})});
    result_data = read_vector<float>(result);
    expected_results = {3.0, 3.0, 5.0, 7.0};
    for (size_t i = 0; i < expected_results.size(); i++)
        EXPECT_NEAR(result_data[i], expected_results[i], 0.000001);
}
// Evaluates If whose condition parameter has a dynamic rank-1 shape
// (PartialShape{Dimension::dynamic()}): then-branch computes X*Y,
// else-branch computes X+Y.
TEST(op_eval, if_condition_is_dynamic) {
    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
    auto Y = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
    auto cond = make_shared<op::Parameter>(element::boolean, PartialShape{Dimension::dynamic()});
    // Body parameters are dynamic; shapes come from the If inputs.
    auto Xt = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Yt = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Xe = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto Ye = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    // Branch bodies: multiply (then) vs add (else).
    auto then_op = std::make_shared<op::v1::Multiply>(Xt, Yt);
    auto else_op = std::make_shared<op::v1::Add>(Xe, Ye);
    auto then_op_result = make_shared<op::Result>(then_op);
    auto else_op_result = make_shared<op::Result>(else_op);
    auto then_body = make_shared<ngraph::Function>(OutputVector{then_op_result}, ParameterVector{Xt, Yt});
    auto else_body = make_shared<ngraph::Function>(OutputVector{else_op_result}, ParameterVector{Xe, Ye});
    auto if_op = make_shared<op::v8::If>(cond);
    if_op->set_then_body(then_body);
    if_op->set_else_body(else_body);
    if_op->set_input(X, Xt, Xe);
    if_op->set_input(Y, Yt, Ye);
    if_op->set_output(then_op_result, else_op_result);
    if_op->validate_and_infer_types();
    std::vector<float> X_v{1.0, 2.0, 3.0, 4.0};
    std::vector<float> Y_v{2.0, 1.0, 2.0, 3.0};
    auto fun = make_shared<Function>(OutputVector{if_op}, ParameterVector{cond, X, Y});
    auto result = make_shared<HostTensor>();
    // cond == true -> then-branch (X * Y). A concrete Shape{1} tensor is a
    // valid binding for the dynamic rank-1 condition parameter.
    ASSERT_TRUE(fun->evaluate({result},
                              {make_host_tensor<element::Type_t::boolean>(Shape{1}, {true}),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, X_v),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, Y_v)}));
    EXPECT_EQ(result->get_element_type(), element::f32);
    EXPECT_EQ(result->get_shape(), Shape{std::vector<size_t>({1, 2, 2})});
    auto result_data = read_vector<float>(result);
    std::vector<float> expected_results{2.0, 2.0, 6.0, 12.0};
    // size_t index avoids a signed/unsigned comparison against size().
    for (size_t i = 0; i < expected_results.size(); i++)
        EXPECT_NEAR(result_data[i], expected_results[i], 0.000001);
    // cond == false -> else-branch (X + Y).
    ASSERT_TRUE(fun->evaluate({result},
                              {make_host_tensor<element::Type_t::boolean>(Shape{1}, {false}),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, X_v),
                               make_host_tensor<element::Type_t::f32>(Shape{1, 2, 2}, Y_v)}));
    EXPECT_EQ(result->get_element_type(), element::f32);
    EXPECT_EQ(result->get_shape(), Shape{std::vector<size_t>({1, 2, 2})});
    result_data = read_vector<float>(result);
    expected_results = {3.0, 3.0, 5.0, 7.0};
    for (size_t i = 0; i < expected_results.size(); i++)
        EXPECT_NEAR(result_data[i], expected_results[i], 0.000001);
}

View File

@@ -38,8 +38,6 @@ TEST(type_prop, if_simple_test) {
if_op->set_input(X, Xt, Xe);
if_op->set_input(Y, Yt, Ye);
auto res = if_op->set_output(then_op_res, else_op_res);
if_op->validate_and_infer_types();
auto result0 = make_shared<op::Result>(res);
Shape out0_shape{32, 40, 10};
auto sh = result0->get_output_shape(0);
@@ -73,7 +71,6 @@ TEST(type_prop, if_non_const_condition_test) {
if_op->set_input(X, Xt, Xe);
if_op->set_input(Y, Yt, Ye);
auto res = if_op->set_output(then_body_res, else_body_res);
if_op->validate_and_infer_types();
auto result0 = make_shared<op::Result>(res);
Shape out0_shape{32, 40, 10};
auto sh = result0->get_output_shape(0);
@@ -100,14 +97,12 @@ TEST(type_prop, if_clone_test) {
auto else_op = std::make_shared<op::v1::Maximum>(Xe, Ye);
auto else_body_res = make_shared<op::Result>(else_op);
auto else_body = make_shared<ngraph::Function>(OutputVector{else_body_res}, ParameterVector{Xe, Ye});
auto if_op = make_shared<op::v8::If>(cond);
if_op->set_then_body(then_body);
if_op->set_else_body(else_body);
if_op->set_input(X, Xt, Xe);
if_op->set_input(Y, Yt, Ye);
auto res = if_op->set_output(then_body_res, else_body_res);
auto new_if = std::dynamic_pointer_cast<op::v8::If>(if_op->clone_with_new_inputs(OutputVector{cond, Xnew, Ynew}));
EXPECT_EQ(true, true);
}
@@ -147,7 +142,6 @@ TEST(type_prop, if_multiple_outputs) {
if_op->set_input(Y, Yt, Ye);
auto res1 = if_op->set_output(then_body_res_1, else_body_res_1);
auto res2 = if_op->set_output(then_body_res_2, else_body_res_2);
if_op->validate_and_infer_types();
auto result1 = make_shared<op::Result>(res1);
auto result2 = make_shared<op::Result>(res2);
Shape out0_shape{32, 40, 10};
@@ -184,7 +178,6 @@ TEST(type_prop, if_scalar_condition) {
if_op->set_input(X, Xt, Xe);
if_op->set_input(Y, Yt, Ye);
auto res = if_op->set_output(then_body_res, else_body_res);
if_op->validate_and_infer_types();
auto result0 = make_shared<op::Result>(res);
Shape out0_shape{32, 40, 10};
auto sh = result0->get_output_shape(0);
@@ -218,7 +211,6 @@ TEST(type_prop, if_dynamic_output) {
if_op->set_input(X, Xt, nullptr);
if_op->set_input(Y, nullptr, Ye);
auto res = if_op->set_output(then_body_res, else_body_res);
if_op->validate_and_infer_types();
auto result0 = make_shared<op::Result>(res);
auto dynamic_shape = result0->get_output_partial_shape(0);
@@ -265,7 +257,6 @@ TEST(type_prop, if_dynamic_inputs) {
if_op->set_input(X, Xt, Xe);
if_op->set_input(Y, Yt, Ye);
auto res = if_op->set_output(then_body_res, else_body_res);
if_op->validate_and_infer_types();
auto result0 = make_shared<op::Result>(res);
auto dynamic_shape = result0->get_output_partial_shape(0);
auto expected_result = PartialShape{Dimension::dynamic(), 20, 30};