Reference implementations for Loop and TensorIterator ops (#2978)

* Loop op nGraph implementation, update IE IR Reader and the nGraph to CNNNetwork converter

* refactoring SubGraphOp class

* type prop unit tests

* ngraph code style

* update comment

* single layer tests for Loop operation

* fix file name

* Add SpecialBodyPorts attribute in Loop op, update single layer tests

* first debug version

* more tests

* missing test file

* removed unneeded shapes from test data

* move test data to new folder

* shape infer tests

* Added execution tests

* add several new test cases, strict checks in Loop impl, temporarily disable single layer tests

* ngraph codestyle, refactoring, clone_new_args test

* resolve review remarks

* fix build

* fix tests

* more execution tests

* add a new constructor of Loop op, resolve review remarks

* execution tests

* sync with current version

* handle scalars and more tests

* scalar test enabled

* loop reference impl

* bug fixes in tests, in the ONNX importer, and in the reference implementation of the Loop op

* applied remarks

* handle unsupported cases

* rewrite unit tests

* update INTERPRETER manifest

* is_termination_condition_always_true simplification

* [TEST] update python models tests

* review remarks

* added xfail to tiny_yolov3

* missing model test

* revert test data

* fixed numbers of failing tests

* fixed failed test description

* fix test message

* fix xfail test

* reference implementation for ngraph::function

* update loop reference implementation

* Refactor loop reference implementation

* ngraph codestyle

* Refactoring

* Submodule update

* Skip check for Reduce ops in mkl for scalar cases, support for yolov3

* fix ngraph reader tests

* revert ceiling op, renaming

* Add alias (Ceiling) for Ceil op in mkl

* delete xfails

* fix build

* single layer tests for tensor iterator

* Refactor TensorIterator and Loop ref impls

* revert dynamic tensor creation, disable some dynamic test cases

* fix warning

* Resolve review remarks

* revert Predefined values in Loop tests

Co-authored-by: Mateusz Bencer <mateusz.bencer@intel.com>
Author: Ivan Tikhonov
Date: 2020-11-10 15:49:59 +03:00
Committed by: GitHub
Parent: b6e2cd692b
Commit: c309bb77d2

49 files changed, 1199 insertions(+), 65 deletions(-)
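
For orientation, the control flow a Loop reference implementation has to provide reduces to roughly the following (a minimal illustrative sketch, not the code added by this commit; LoopArgs and evaluate_body are stand-ins for the real ngraph::runtime helpers):

    // Sketch of Loop semantics: run the body while the trip count and the
    // body's execution-condition output both allow another iteration.
    #include <cstdint>
    #include <functional>

    struct LoopArgs {
        int64_t trip_count;        // -1 means "no static limit"
        bool execution_condition;  // initial condition; false skips all iterations
    };

    // evaluate_body(iter) feeds the body its sliced, merged (back-edge) and
    // invariant inputs, evaluates the body function, and returns the body's
    // condition output (one of the "special body ports").
    inline void reference_loop(const LoopArgs &args,
                               const std::function<bool(int64_t)> &evaluate_body) {
        bool condition = args.execution_condition;
        for (int64_t iter = 0;
             condition && (args.trip_count < 0 || iter < args.trip_count);
             ++iter) {
            condition = evaluate_body(iter);
        }
        // After the last iteration, per-iteration outputs are concatenated along
        // the concat axis and last-iteration values are exposed as "iter" outputs.
    }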


@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //


@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //


@@ -61,7 +61,7 @@ class StaticShapeLoopTest : public testing::WithParamInterface<StaticShapeLoopParams>,
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<StaticShapeLoopParams> &obj);
     InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-    std::vector<std::vector<std::uint8_t>> CalculateRefs() override;
+    std::vector<std::vector<std::uint8_t>> PredefinedRefs();

 private:
     bool static_iter_num;  // trip count provided by constant node
@@ -100,7 +100,7 @@ protected:
         return LayerTestsCommon::GenerateInput(info);
     }

-    std::vector<std::vector<std::uint8_t>> CalculateRefs() override {
+    std::vector<std::vector<std::uint8_t>> PredefinedRefs() {
         if (outputGens.empty())
             return LayerTestsCommon::CalculateRefs();


@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //


@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //


@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //


@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //


@@ -0,0 +1,39 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+
+#include <ngraph/op/util/attr_types.hpp>
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+
+namespace LayerTestsDefinitions {
+
+using TensorIteratorParams = typename std::tuple<
+        bool,                                    // using unroll tensor iterator transformation
+        size_t,                                  // seq_lengths
+        size_t,                                  // batch
+        size_t,                                  // hidden size
+        size_t,                                  // input size
+        float,                                   // clip
+        ngraph::helpers::TensorIteratorBody,     // body type
+        ngraph::op::RecurrentSequenceDirection,  // direction
+        InferenceEngine::Precision,              // Network precision
+        std::string>;                            // Device name
+
+class TensorIteratorTest : public testing::WithParamInterface<TensorIteratorParams>,
+                           virtual public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<TensorIteratorParams> &obj);
+
+protected:
+    void SetUp() override;
+};
+
+}  // namespace LayerTestsDefinitions
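
A concrete suite built on this fixture would be instantiated along these lines (a hypothetical instantiation: the suite name and all parameter values are illustrative, only the tuple order is fixed by TensorIteratorParams above):

    INSTANTIATE_TEST_CASE_P(smoke_TensorIterator, TensorIteratorTest,
        ::testing::Combine(
            ::testing::Values(true, false),                                       // unroll TI
            ::testing::Values(2),                                                 // seq_lengths
            ::testing::Values(1),                                                 // batch
            ::testing::Values(16),                                                // hidden size
            ::testing::Values(10),                                                // input size
            ::testing::Values(0.f),                                               // clip
            ::testing::Values(ngraph::helpers::TensorIteratorBody::LSTM),         // body type
            ::testing::Values(ngraph::op::RecurrentSequenceDirection::FORWARD),   // direction
            ::testing::Values(InferenceEngine::Precision::FP32),                  // net precision
            ::testing::Values(CommonTestUtils::DEVICE_CPU)),                      // device
        TensorIteratorTest::getTestCaseName);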


@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -36,6 +36,10 @@ std::string GRUCellTest::getTestCaseName(const testing::TestParamInfo<GRUCellParams> &obj) {
     std::string targetDevice;
     std::tie(should_decompose, batch, hidden_size, input_size, activations, clip,
              linear_before_reset, netPrecision, targetDevice) = obj.param;
+    inputShapes = {
+            {{batch, input_size}, {batch, hidden_size}, {3 * hidden_size, input_size},
+             {3 * hidden_size, hidden_size}, {(linear_before_reset ? 4 : 3) * hidden_size}},
+    };
     std::ostringstream result;
     result << "decomposition" << should_decompose << "_";
     result << "batch=" << batch << "_";


@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -42,7 +42,7 @@ namespace LayerTestsDefinitions {
              {3 * hidden_size, hidden_size}, {(linear_before_reset ? 4 : 3) * hidden_size}},
     };
     std::ostringstream result;
-    result << "seq_lenghts" << seq_lenghts << "_";
+    result << "seq_lenghts=" << seq_lenghts << "_";
     result << "batch=" << batch << "_";
     result << "hidden_size=" << hidden_size << "_";
     result << "input_size=" << input_size << "_";


@@ -53,7 +53,6 @@ namespace LayerTestsDefinitions {
     void LoopTest::SetUp() {
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
-        SetRefMode(LayerTestsUtils::IE);
         bool execute_first_iteration;
         bool is_body_condition_const;
         bool body_condition;  // works only if is_body_condition_const == true
@@ -161,8 +160,6 @@ namespace LayerTestsDefinitions {
     void StaticShapeLoopTest::SetUp() {
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
-        SetRefMode(LayerTestsUtils::IE);
-
         auto args_papck = std::tie(static_iter_num, max_iter_num, dynamic_exit, axis);
         std::tie(
             static_continue_cond,
@@ -261,7 +258,7 @@ namespace LayerTestsDefinitions {
     }

     // Predefined ref output
-    std::vector<std::vector<std::uint8_t>> StaticShapeLoopTest::CalculateRefs() {
+    std::vector<std::vector<std::uint8_t>> StaticShapeLoopTest::PredefinedRefs() {
         bool auto_concat_out = (axis != -1);
         const auto n_iter = actual_n_iter();
@@ -293,6 +290,23 @@ namespace LayerTestsDefinitions {
         Run();
     }

+    TEST_P(StaticShapeLoopTest, CompareWithPredefinedRefs) {
+        SKIP_IF_CURRENT_TEST_IS_DISABLED()
+        LoadNetwork();
+        Infer();
+        auto expectedOutputs = PredefinedRefs();  // use predefined refs instead of the CalculateRefs function
+        const auto &actualOutputs = GetOutputs();
+
+        if (expectedOutputs.empty()) {
+            return;
+        }
+
+        IE_ASSERT(actualOutputs.size() == expectedOutputs.size())
+                << "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size();
+
+        Compare(expectedOutputs, actualOutputs);
+    }
+
     TEST_P(TrivialLoopTest, PassThroughBody) {
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
         InferenceEngine::Precision iePrc;


@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //


@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -41,7 +41,7 @@ namespace LayerTestsDefinitions {
              {4 * hidden_size, hidden_size}, {4 * hidden_size}},
     };
     std::ostringstream result;
-    result << "seq_lenghts" << seq_lenghts << "_";
+    result << "seq_lenghts=" << seq_lenghts << "_";
     result << "batch=" << batch << "_";
     result << "hidden_size=" << hidden_size << "_";
     result << "input_size=" << input_size << "_";


@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //


@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -41,7 +41,7 @@ namespace LayerTestsDefinitions {
              {hidden_size, hidden_size}, {hidden_size}},
     };
     std::ostringstream result;
-    result << "seq_lenghts" << seq_lenghts << "_";
+    result << "seq_lenghts=" << seq_lenghts << "_";
     result << "batch=" << batch << "_";
     result << "hidden_size=" << hidden_size << "_";
     result << "input_size=" << input_size << "_";


@@ -0,0 +1,226 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+#include <functional>
+
+#include "ie_core.hpp"
+
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "functional_test_utils/precision_utils.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+
+#include "single_layer_tests/tensor_iterator.hpp"
+#include <transformations/control_flow/unroll_tensor_iterator.hpp>
+
+namespace LayerTestsDefinitions {
+
+    std::string TensorIteratorTest::getTestCaseName(const testing::TestParamInfo<TensorIteratorParams> &obj) {
+        bool should_decompose;
+        size_t seq_lenghts;
+        size_t batch;
+        size_t hidden_size;
+        size_t input_size;
+        ngraph::helpers::TensorIteratorBody ti_body;
+        float clip;
+        ngraph::op::RecurrentSequenceDirection direction;
+        InferenceEngine::Precision netPrecision;
+        std::string targetDevice;
+        std::tie(should_decompose, seq_lenghts, batch, hidden_size, input_size, clip, ti_body, direction, netPrecision,
+                 targetDevice) = obj.param;
+        std::vector<std::vector<size_t>> inputShapes = {};
+
+        switch (ti_body) {
+            case ngraph::helpers::TensorIteratorBody::LSTM:
+                inputShapes = {
+                        {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size},
+                         {4 * hidden_size, hidden_size}, {4 * hidden_size}},
+                };
+                break;
+            case ngraph::helpers::TensorIteratorBody::GRU:
+                inputShapes = {
+                        {{batch, input_size}, {batch, hidden_size}, {3 * hidden_size, input_size},
+                         {3 * hidden_size, hidden_size}, {3 * hidden_size}},
+                };
+                break;
+            case ngraph::helpers::TensorIteratorBody::RNN:
+                inputShapes = {{batch, input_size}, {batch, hidden_size},
+                               {hidden_size, input_size}, {hidden_size, hidden_size}, {hidden_size}};
+                break;
+        }
+
+        std::ostringstream result;
+        result << "unrolling=" << should_decompose << "_";
+        result << "seq_lenghts=" << seq_lenghts << "_";
+        result << "batch=" << batch << "_";
+        result << "hidden_size=" << hidden_size << "_";
+        result << "input_size=" << input_size << "_";
+        result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
+        result << "TensorIteratorBody=" << ti_body << "_";
+        result << "direction=" << direction << "_";
+        result << "clip=" << clip << "_";
+        result << "netPRC=" << netPrecision.name() << "_";
+        result << "targetDevice=" << targetDevice << "_";
+        return result.str();
+    }
+
+    void TensorIteratorTest::SetUp() {
+        size_t seq_lenghts;
+        bool should_decompose;
+        size_t batch;
+        size_t hidden_size;
+        size_t input_size;
+        ngraph::helpers::TensorIteratorBody ti_body;
+        float clip;
+        ngraph::op::RecurrentSequenceDirection direction;
+        InferenceEngine::Precision netPrecision;
+        std::tie(should_decompose, seq_lenghts, batch, hidden_size, input_size, clip, ti_body, direction, netPrecision,
+                 targetDevice) = this->GetParam();
+        std::vector<std::vector<size_t>> inputShapes;
+        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+        auto tensor_iterator = std::make_shared<ngraph::opset5::TensorIterator>();
+
+        // Each case consists of 3 steps:
+        // 1. Create TensorIterator body.
+        // 2. Set PortMap.
+        // 3. Create outer function.
+        auto axis = std::make_shared<ngraph::opset5::Constant>(ngraph::element::i64, ngraph::Shape{1},
+                                                               std::vector<int64_t>{1});
+        switch (ti_body) {
+            case ngraph::helpers::TensorIteratorBody::LSTM: {
+                inputShapes = {
+                        {{batch, seq_lenghts, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size},
+                         {4 * hidden_size, hidden_size}, {4 * hidden_size}},
+                };
+                auto outer_params = ngraph::builder::makeParams(ngPrc, {inputShapes[0], inputShapes[1], inputShapes[2]});
+
+                // 1. Create TensorIterator body.
+                inputShapes[0][1] = 1;  // sliced dimension
+                auto body_params = ngraph::builder::makeParams(ngPrc, {inputShapes[0], inputShapes[1], inputShapes[2]});
+                auto squeeze = std::make_shared<ngraph::opset5::Squeeze>(body_params[0], axis);
+                std::vector<ngraph::Shape> WRB = {inputShapes[3], inputShapes[4], inputShapes[5]};
+                ngraph::OutputVector out_vector = {squeeze, body_params[1], body_params[2]};
+                auto lstm_cell = ngraph::builder::makeLSTM(out_vector, WRB, hidden_size, {"sigmoid", "tanh", "tanh"}, {}, {}, clip);
+                auto unsqueeze = std::make_shared<ngraph::opset5::Unsqueeze>(lstm_cell->output(0), axis);
+                ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(unsqueeze),
+                                             std::make_shared<ngraph::opset1::Result>(lstm_cell->output(0)),
+                                             std::make_shared<ngraph::opset1::Result>(lstm_cell->output(1))};
+                auto body = std::make_shared<ngraph::Function>(results, body_params, "lstm_cell");
+                tensor_iterator->set_function(body);
+
+                // 2. Set PortMap.
+                if (direction == ngraph::op::RecurrentSequenceDirection::FORWARD) {
+                    tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, 1);
+                    tensor_iterator->get_concatenated_slices(results[0], 0, 1, 1, -1, 1);
+                } else if (direction == ngraph::op::RecurrentSequenceDirection::REVERSE) {
+                    tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, 1);
+                    tensor_iterator->get_concatenated_slices(results[0], -1, -1, 1, 0, 1);
+                } else {
+                    NGRAPH_CHECK(false, "Bidirectional case is not supported.");
+                }
+
+                tensor_iterator->set_invariant_input(body_params[1], outer_params[1]);
+                tensor_iterator->set_invariant_input(body_params[2], outer_params[2]);
+                tensor_iterator->get_iter_value(results[1]);
+                tensor_iterator->get_iter_value(results[2]);
+
+                // 3. Outer function
+                function = std::make_shared<ngraph::Function>(ngraph::OutputVector{tensor_iterator->output(0), tensor_iterator->output(1),
+                                                                                   tensor_iterator->output(2)}, outer_params);
+                break;
+            }
+            case ngraph::helpers::TensorIteratorBody::GRU: {
+                inputShapes = {
+                        {{batch, seq_lenghts, input_size}, {batch, hidden_size}, {3 * hidden_size, input_size},
+                         {3 * hidden_size, hidden_size}, {3 * hidden_size}},
+                };
+                auto outer_params = ngraph::builder::makeParams(ngPrc, {inputShapes[0], inputShapes[1]});
+
+                // 1. Create TensorIterator body.
+                inputShapes[0][1] = 1;  // sliced dimension
+                auto body_params = ngraph::builder::makeParams(ngPrc, {inputShapes[0], inputShapes[1]});
+                std::vector<ngraph::Shape> WRB = {inputShapes[2], inputShapes[3], inputShapes[4]};
+                auto squeeze = std::make_shared<ngraph::opset5::Squeeze>(body_params[0], axis);
+                ngraph::OutputVector out_vector = {squeeze, body_params[1]};
+                auto gru_cell = ngraph::builder::makeGRU(out_vector, WRB, hidden_size, {"sigmoid", "tanh"},
+                                                         {}, {}, clip, false);
+                auto unsqueeze = std::make_shared<ngraph::opset5::Unsqueeze>(gru_cell->output(0), axis);
+                ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(gru_cell->output(0)),
+                                             std::make_shared<ngraph::opset1::Result>(unsqueeze)};
+                auto body = std::make_shared<ngraph::Function>(results, body_params, "gru_cell");
+                tensor_iterator->set_function(body);
+
+                // 2. Set PortMap.
+                if (direction == ngraph::op::RecurrentSequenceDirection::FORWARD) {
+                    tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, 1);
+                    tensor_iterator->get_concatenated_slices(results[1], 0, 1, 1, -1, 1);
+                } else if (direction == ngraph::op::RecurrentSequenceDirection::REVERSE) {
+                    tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, 1);
+                    tensor_iterator->get_concatenated_slices(results[1], -1, -1, 1, 0, 1);
+                } else {
+                    NGRAPH_CHECK(false, "Bidirectional case is not supported.");
+                }
+
+                tensor_iterator->set_invariant_input(body_params[1], outer_params[1]);
+                tensor_iterator->get_iter_value(results[0]);
+
+                // 3. Outer function
+                function = std::make_shared<ngraph::Function>(ngraph::OutputVector{tensor_iterator->output(0), tensor_iterator->output(1)}, outer_params);
+                break;
+            }
+            case ngraph::helpers::TensorIteratorBody::RNN: {
+                inputShapes = {{batch, seq_lenghts, input_size},
+                               {batch, hidden_size},
+                               {hidden_size, input_size},
+                               {hidden_size, hidden_size},
+                               {hidden_size}};
+                auto outer_params = ngraph::builder::makeParams(ngPrc, {inputShapes[0], inputShapes[1]});
+
+                // 1. Create TensorIterator body.
+                inputShapes[0][1] = 1;  // sliced dimension
+                auto body_params = ngraph::builder::makeParams(ngPrc, {inputShapes[0], inputShapes[1]});
+                std::vector<ngraph::Shape> WRB = {inputShapes[2], inputShapes[3], inputShapes[4]};
+                auto squeeze = std::make_shared<ngraph::opset5::Squeeze>(body_params[0], axis);
+                ngraph::OutputVector out_vector = {squeeze, body_params[1]};
+                auto rnn_cell = ngraph::builder::makeRNN(out_vector, WRB, hidden_size, {"tanh"}, {}, {}, clip);
+                auto unsqueeze = std::make_shared<ngraph::opset5::Unsqueeze>(rnn_cell->output(0), axis);
+                ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(rnn_cell),
+                                             std::make_shared<ngraph::opset1::Result>(unsqueeze)};
+                auto body = std::make_shared<ngraph::Function>(results, body_params, "rnn_cell");
+                tensor_iterator->set_function(body);
+
+                // 2. Set PortMap.
+                if (direction == ngraph::op::RecurrentSequenceDirection::FORWARD) {
+                    tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, 1);
+                    tensor_iterator->get_concatenated_slices(results[1], 0, 1, 1, -1, 1);
+                } else if (direction == ngraph::op::RecurrentSequenceDirection::REVERSE) {
+                    tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, 1);
+                    tensor_iterator->get_concatenated_slices(results[1], -1, -1, 1, 0, 1);
+                } else {
+                    NGRAPH_CHECK(false, "Bidirectional case is not supported.");
+                }
+
+                tensor_iterator->set_invariant_input(body_params[1], outer_params[1]);
+                tensor_iterator->get_iter_value(results[0]);
+
+                // 3. Outer function
+                function = std::make_shared<ngraph::Function>(ngraph::OutputVector{tensor_iterator->output(0), tensor_iterator->output(1)}, outer_params);
+                break;
+            }
+        }
+        if (should_decompose) {
+            ngraph::pass::Manager m;
+            m.register_pass<ngraph::pass::UnrollTensorIterator>();
+            m.run_passes(function);
+        }
+    }
+
+    TEST_P(TensorIteratorTest, CompareWithRefs) {
+        Run();
+    };
+}  // namespace LayerTestsDefinitions
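
For reference, the integer arguments in the PortMap calls above follow SubGraphOp's (start, stride, part_size, end, axis) order, with -1 meaning "the last element along the axis". What set_sliced_input(param, value, 0, 1, 1, -1, 1) does for a [batch, seq_len, input_size] tensor amounts to the following (a standalone illustrative sketch, not library code):

    #include <cstddef>
    #include <vector>

    // Cut a row-major [batch, seq_len, input_size] tensor into seq_len slices of
    // shape [batch, 1, input_size], front to back (start=0, stride=1, part_size=1).
    std::vector<std::vector<float>> slice_forward(const std::vector<float> &value,
                                                  size_t batch, size_t seq_len, size_t input_size) {
        std::vector<std::vector<float>> slices(seq_len);
        for (size_t iter = 0; iter < seq_len; ++iter) {
            for (size_t b = 0; b < batch; ++b) {
                for (size_t i = 0; i < input_size; ++i) {
                    slices[iter].push_back(value[(b * seq_len + iter) * input_size + i]);
                }
            }
        }
        return slices;  // end = -1: iterate through the final time step
    }

The REVERSE branch uses (start=-1, stride=-1, ..., end=0), i.e. the same slices taken back to front, and get_concatenated_slices reassembles the per-iteration body outputs along the same axis.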