Removed legacy tests (#6558)

Ilya Churaev 2021-07-08 08:01:40 +03:00 committed by GitHub
parent e63b7dc39c
commit 23f4bf4e70
37 changed files with 32 additions and 6240 deletions

View File

@@ -12,7 +12,7 @@
namespace ngraph
{
//
// In various places, like ConstantFolding and DynElimination, it is
// In various places, like ConstantFolding, it is
// useful to transform DynSlice by converting it to a sequence of ops:
//
// Slice (to do the basic slicing)
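For context, the rewrite sketched in this comment is applied through the normal pass pipeline rather than called directly. A minimal, illustrative sketch of how a caller drives it, using only the pass::Manager, ConstantFolding and DynElimination names that appear elsewhere in this diff (the helper function itself is hypothetical):

#include "ngraph/pass/constant_folding.hpp"
#include "ngraph/pass/dyn_elimination.hpp"
#include "ngraph/pass/manager.hpp"

// Hypothetical helper: fold the slice bounds to constants first, then let the
// legacy DynElimination pass rewrite dynamic ops into static equivalents.
static void run_legacy_dyn_elimination(const std::shared_ptr<ngraph::Function>& f)
{
    ngraph::pass::Manager manager;
    manager.register_pass<ngraph::pass::ConstantFolding>();
    manager.register_pass<ngraph::pass::DynElimination>();
    manager.run_passes(f);
}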

View File

@@ -75,13 +75,10 @@ set(SRC
op_eval/strided_slice.cpp
op_eval/transpose.cpp
op_eval/variadic_split.cpp
op_is.cpp
opset1.cpp
partial_shape.cpp
pass_config.cpp
pass_liveness.cpp
pass_manager.cpp
pass_shape_relevance.cpp
pattern.cpp
provenance.cpp
replace_node.cpp

View File

@@ -1,142 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/constant_folding.hpp"
#include "ngraph/pass/dyn_elimination.hpp"
#include "ngraph/pass/manager.hpp"
#include "pass/opset0_downgrade.hpp"
#include "util/all_close_f.hpp"
#include "util/test_tools.hpp"
using namespace ngraph;
using namespace std;
TEST(dyn_elimination, transpose)
{
Shape shape_in{2, 4, 6, 8};
auto param = make_shared<op::Parameter>(element::boolean, shape_in);
auto constant_perm =
make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{2, 3, 1, 0});
auto transpose = make_shared<op::Transpose>(param, constant_perm);
auto f = make_shared<Function>(transpose, ParameterVector{param});
pass::Manager pass_manager;
pass_manager.register_pass<pass::DynElimination>();
pass_manager.run_passes(f);
ASSERT_EQ(count_ops_of_type<op::Transpose>(f), 0);
ASSERT_EQ(count_ops_of_type<op::v1::Reshape>(f), 1);
auto new_reshape =
as_type_ptr<op::v1::Reshape>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
ASSERT_TRUE(new_reshape);
ASSERT_EQ(new_reshape->get_input_order(), (AxisVector{2, 3, 1, 0}));
ASSERT_EQ(new_reshape->get_output_shape(0), (Shape{6, 8, 4, 2}));
ASSERT_EQ(new_reshape->get_output_element_type(0), element::boolean);
}
// For now, we can't handle the case where the input has dynamic shapes,
// because the classic Reshape op demands a Shape. Probably won't be able to
// deal with this until/unless we make a "StaticTranspose". Just make sure
// we don't crash or mangle the graph.
TEST(dyn_elimination, transpose_dyn_shape)
{
PartialShape shape_in{2, 4, Dimension::dynamic(), 8};
auto param = make_shared<op::Parameter>(element::boolean, shape_in);
auto constant_perm =
make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{2, 3, 1, 0});
auto transpose = make_shared<op::Transpose>(param, constant_perm);
auto f = make_shared<Function>(transpose, ParameterVector{param});
pass::Manager pass_manager;
pass_manager.register_pass<pass::DynElimination>();
pass_manager.run_passes(f);
ASSERT_EQ(count_ops_of_type<op::Transpose>(f), 1);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
auto new_transpose =
as_type_ptr<op::Transpose>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
ASSERT_TRUE(new_transpose);
ASSERT_EQ(new_transpose->get_output_element_type(0), element::boolean);
ASSERT_TRUE(new_transpose->get_output_partial_shape(0).relaxes(
PartialShape{Dimension::dynamic(), 8, 4, 2}));
}
TEST(dyn_elimination, range)
{
auto constant_start = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{0});
auto constant_stop = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{5});
auto constant_step = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{2});
auto range = make_shared<op::Range>(constant_start, constant_stop, constant_step);
ASSERT_EQ(range->get_element_type(), element::i64);
ASSERT_EQ(range->get_shape(), (Shape{3}));
auto f = make_shared<Function>(range, ParameterVector{});
pass::Manager pass_manager;
pass_manager.register_pass<pass::DynElimination>();
pass_manager.run_passes(f);
ASSERT_EQ(count_ops_of_type<op::Range>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
auto replacement =
as_type_ptr<op::Constant>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
ASSERT_NE(replacement, nullptr);
ASSERT_EQ(replacement->get_element_type(), element::i64);
ASSERT_EQ(replacement->get_shape(), (Shape{3}));
auto vals = replacement->get_vector<int64_t>();
ASSERT_EQ(vals, (vector<int64_t>{0, 2, 4}));
}
TEST(dyn_elimination, range_f64)
{
auto constant_start = make_shared<op::Constant>(element::f64, Shape{}, vector<double>{-0.5});
auto constant_stop = make_shared<op::Constant>(element::f64, Shape{}, vector<double>{2});
auto constant_step = make_shared<op::Constant>(element::f64, Shape{}, vector<double>{0.25});
auto range = make_shared<op::Range>(constant_start, constant_stop, constant_step);
ASSERT_EQ(range->get_element_type(), element::f64);
ASSERT_EQ(range->get_shape(), (Shape{10}));
auto f = make_shared<Function>(range, ParameterVector{});
pass::Manager pass_manager;
pass_manager.register_pass<pass::DynElimination>();
pass_manager.run_passes(f);
ASSERT_EQ(count_ops_of_type<op::Range>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
auto replacement =
as_type_ptr<op::Constant>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
ASSERT_NE(replacement, nullptr);
ASSERT_EQ(replacement->get_element_type(), element::f64);
ASSERT_EQ(replacement->get_shape(), (Shape{10}));
auto vals = replacement->get_vector<double>();
ASSERT_TRUE(test::all_close_f(
vals, vector<double>{-0.5, -0.25, 0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75}));
}
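For reference, the expected shapes asserted in the two Range tests above follow from the usual Range length arithmetic: the number of generated elements is ceil((stop - start) / step). With start = 0, stop = 5, step = 2 this gives ceil(5 / 2) = 3 elements {0, 2, 4}; with start = -0.5, stop = 2, step = 0.25 it gives ceil(2.5 / 0.25) = 10 elements, matching the Shape{3} and Shape{10} checks.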

View File

@@ -1,577 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/validation_util.hpp"
#include "op/convolution.hpp"
#include "op/group_conv.hpp"
#include "util/test_tools.hpp"
using namespace ngraph;
NGRAPH_SUPPRESS_DEPRECATED_START
namespace
{
void op_is_Abs()
{
op::Abs node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Acos()
{
op::Acos node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Asin()
{
op::Asin node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Atan()
{
op::Atan node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_AvgPool()
{
op::AvgPool node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_BatchNormInference()
{
op::v0::BatchNormInference node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Broadcast()
{
op::v1::Broadcast node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Ceiling()
{
op::Ceiling node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Clamp()
{
op::Clamp node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Concat()
{
op::Concat node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Constant()
{
op::Constant node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Convert()
{
op::Convert node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Convolution()
{
op::v0::Convolution node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_ConvolutionBackpropData()
{
op::v0::ConvolutionBackpropData node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Cos()
{
op::Cos node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Cosh()
{
op::Cosh node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_CumSum()
{
op::CumSum node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_DepthToSpace()
{
op::DepthToSpace node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Elu()
{
op::Elu node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Erf()
{
op::Erf node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Exp()
{
op::Exp node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_FakeQuantize()
{
op::FakeQuantize node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Floor()
{
op::Floor node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_GRN()
{
op::GRN node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Gather()
{
op::v1::Gather node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Gelu()
{
op::Gelu node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_GroupConvolution()
{
op::v0::GroupConvolution node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_GroupConvolutionBackpropData()
{
op::v0::GroupConvolutionBackpropData node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_HardSigmoid()
{
op::HardSigmoid node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Interpolate()
{
op::v0::Interpolate node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Log()
{
op::Log node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_LRN()
{
op::LRN node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_LSTMSequence()
{
op::v0::LSTMSequence node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_MatMul()
{
op::MatMul node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_NormalizeL2()
{
op::v0::NormalizeL2 node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_MVN()
{
op::MVN node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Negative()
{
op::Negative node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Parameter()
{
op::Parameter node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_PRelu()
{
op::PRelu node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_PriorBox()
{
op::PriorBox node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Range()
{
op::Range node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Relu()
{
op::Relu node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Result()
{
op::Result node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_ReverseSequence()
{
op::ReverseSequence node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Selu()
{
op::Selu node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_ShapeOf()
{
op::ShapeOf node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_ShuffleChannels()
{
op::ShuffleChannels node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Sigmoid()
{
op::Sigmoid node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Sign()
{
op::Sign node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Sin()
{
op::Sin node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Sinh()
{
op::Sinh node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_SpaceToDepth()
{
op::SpaceToDepth node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Sqrt()
{
op::Sqrt node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_SquaredDifference()
{
op::SquaredDifference node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_TRUE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Squeeze()
{
op::Squeeze node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Tan()
{
op::Tan node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Tanh()
{
op::Tanh node;
EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_TensorIterator()
{
op::TensorIterator node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Tile()
{
op::v0::Tile node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Unsqueeze()
{
op::v0::Unsqueeze node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Xor()
{
op::Xor node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_TRUE(op::is_binary_elementwise_logical(&node));
}
} // namespace
TEST(op_is, check)
{
NGRAPH_SUPPRESS_DEPRECATED_START
#define NGRAPH_OP(a, b) op_is_##a();
#include "opset0_tbl.hpp"
#undef NGRAPH_OP
NGRAPH_SUPPRESS_DEPRECATED_END
}
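As a note on the mechanism used above: this is the classic X-macro pattern. opset0_tbl.hpp is expected to hold one NGRAPH_OP(Name, Namespace) entry per operation, so redefining NGRAPH_OP before including it expands the table into one op_is_Name() call per row. A tiny sketch of the same idea, with a hypothetical table header:

// my_ops.tbl (hypothetical): one entry per op
//   NGRAPH_OP(Abs, ngraph::op)
//   NGRAPH_OP(Relu, ngraph::op)

#define NGRAPH_OP(NAME, NAMESPACE) op_is_##NAME();  // turn each table row into a call
#include "my_ops.tbl"                               // expands to: op_is_Abs(); op_is_Relu();
#undef NGRAPH_OP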

View File

@@ -1,49 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "pass/liveness.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
namespace ng = ngraph;
TEST(liveness, constant)
{
Shape shape{1};
auto c = op::Constant::create(element::i32, shape, {5});
auto f = make_shared<Function>(make_shared<op::Negative>(c), ParameterVector{});
pass::Manager pass_manager;
pass_manager.register_pass<pass::Liveness>();
pass_manager.run_passes(f);
auto tmp = f->get_ordered_ops();
vector<shared_ptr<Node>> sorted{tmp.begin(), tmp.end()};
ASSERT_EQ(3, sorted.size());
EXPECT_EQ(0, sorted[0]->liveness_new_list.size());
EXPECT_EQ(0, sorted[0]->liveness_free_list.size());
// op::Negative is live on output to op::Result
// op::Negative is new
EXPECT_EQ(1, sorted[1]->liveness_new_list.size());
EXPECT_EQ(0, sorted[1]->liveness_free_list.size());
// op::Negative is live on input to op::Result
EXPECT_EQ(0, sorted[2]->liveness_new_list.size());
// op::Negative is freed
EXPECT_EQ(1, sorted[2]->liveness_free_list.size());
}

View File

@@ -1,171 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "pass/shape_relevance.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
using namespace ngraph;
using namespace std;
TEST(shape_relevance, simple)
{
auto param0 = make_shared<op::Parameter>(element::f32, Shape{4, 6});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{4, 6});
auto x = make_shared<op::v1::Add>(param0, param1);
auto f = make_shared<Function>(x, ParameterVector{param0, param1});
pass::Manager manager;
manager.register_pass<pass::ShapeRelevance>();
manager.run_passes(f);
ASSERT_FALSE(param0->is_relevant_to_shapes());
ASSERT_FALSE(param1->is_relevant_to_shapes());
}
TEST(shape_relevance, param_direct)
{
auto param0 = make_shared<op::Parameter>(element::f32, Shape{4, 6});
auto param1 = make_shared<op::Parameter>(element::i64, Shape{4});
auto x = make_shared<op::v1::Reshape>(param0, param1, true);
auto f = make_shared<Function>(x, ParameterVector{param0, param1});
pass::Manager manager;
manager.register_pass<pass::ShapeRelevance>();
manager.run_passes(f);
ASSERT_FALSE(param0->is_relevant_to_shapes());
ASSERT_TRUE(param1->is_relevant_to_shapes());
}
TEST(shape_relevance, param_indirect)
{
auto param0 = make_shared<op::Parameter>(element::f32, Shape{4, 6});
auto param1 = make_shared<op::Parameter>(element::i64, Shape{4});
auto param2 = make_shared<op::Parameter>(element::i64, Shape{2});
auto c = make_shared<op::Concat>(NodeVector{param1, param2}, 0);
auto x = make_shared<op::v1::Reshape>(param0, c, true);
auto f = make_shared<Function>(x, ParameterVector{param0, param1, param2});
pass::Manager manager;
manager.register_pass<pass::ShapeRelevance>();
manager.run_passes(f);
ASSERT_FALSE(param0->is_relevant_to_shapes());
ASSERT_TRUE(param1->is_relevant_to_shapes());
ASSERT_TRUE(param2->is_relevant_to_shapes());
}
TEST(shape_relevance, param_shape_of_direct_v0)
{
auto param0 = make_shared<op::Parameter>(element::f32, Shape{4, 6});
auto x = make_shared<op::v1::Reshape>(param0, make_shared<op::v0::ShapeOf>(param0), true);
auto f = make_shared<Function>(x, ParameterVector{param0});
pass::Manager manager;
manager.register_pass<pass::ShapeRelevance>();
manager.run_passes(f);
ASSERT_FALSE(param0->is_relevant_to_shapes());
}
TEST(shape_relevance, param_shape_of_direct_v3)
{
auto param0 = make_shared<op::Parameter>(element::f32, Shape{4, 6});
auto x = make_shared<op::v1::Reshape>(param0, make_shared<op::v3::ShapeOf>(param0), true);
auto f = make_shared<Function>(x, ParameterVector{param0});
pass::Manager manager;
manager.register_pass<pass::ShapeRelevance>();
manager.run_passes(f);
ASSERT_FALSE(param0->is_relevant_to_shapes());
}
TEST(shape_relevance, param_shape_of_direct_i32_v3)
{
auto param0 = make_shared<op::Parameter>(element::f32, Shape{4, 6});
auto x = make_shared<op::v1::Reshape>(
param0, make_shared<op::v3::ShapeOf>(param0, element::i32), true);
auto f = make_shared<Function>(x, ParameterVector{param0});
pass::Manager manager;
manager.register_pass<pass::ShapeRelevance>();
manager.run_passes(f);
ASSERT_FALSE(param0->is_relevant_to_shapes());
}
TEST(shape_relevance, param_shape_of_indirect_v0)
{
auto param0 = make_shared<op::Parameter>(element::f32, Shape{4, 6});
auto s = make_shared<op::v0::ShapeOf>(param0);
auto r = make_shared<op::v1::Reverse>(
s, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX);
auto x = make_shared<op::v1::Reshape>(param0, r, true);
auto f = make_shared<Function>(x, ParameterVector{param0});
pass::Manager manager;
manager.register_pass<pass::ShapeRelevance>();
manager.run_passes(f);
ASSERT_FALSE(param0->is_relevant_to_shapes());
}
TEST(shape_relevance, param_shape_of_indirect_v3)
{
auto param0 = make_shared<op::Parameter>(element::f32, Shape{4, 6});
auto s = make_shared<op::v3::ShapeOf>(param0);
auto r = make_shared<op::v1::Reverse>(
s, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX);
auto x = make_shared<op::v1::Reshape>(param0, r, true);
auto f = make_shared<Function>(x, ParameterVector{param0});
pass::Manager manager;
manager.register_pass<pass::ShapeRelevance>();
manager.run_passes(f);
ASSERT_FALSE(param0->is_relevant_to_shapes());
}
TEST(shape_relevance, param_shape_of_indirect_i32_v3)
{
auto param0 = make_shared<op::Parameter>(element::f32, Shape{4, 6});
auto s = make_shared<op::v3::ShapeOf>(param0, element::i32);
auto r = make_shared<op::v1::Reverse>(
s, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX);
auto x = make_shared<op::v1::Reshape>(param0, r, true);
auto f = make_shared<Function>(x, ParameterVector{param0});
pass::Manager manager;
manager.register_pass<pass::ShapeRelevance>();
manager.run_passes(f);
ASSERT_FALSE(param0->is_relevant_to_shapes());
}

View File

@@ -15,7 +15,6 @@
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/provenance.hpp"
#include "pass/fused_op_decomposition.hpp"
#include "util/provenance_enabler.hpp"
using namespace std;
@@ -380,61 +379,6 @@ TEST(provenance, builder)
}
}
TEST(provenance, fused_copy_origin_tags)
{
test::ProvenanceEnabler provenance_enabler;
auto p1 = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 4});
p1->add_provenance_tag("P1");
auto g = make_shared<op::MVN>(p1);
g->add_provenance_tag("G");
auto r = make_shared<op::Result>(g);
auto f = make_shared<Function>(ResultVector{r}, ParameterVector{p1});
pass::Manager manager;
manager.register_pass<pass::FusedOpDecomposition>();
manager.run_passes(f);
traverse_nodes(f, [&](const std::shared_ptr<Node>& node) {
auto tags = node->get_provenance_tags();
if (node == p1)
{
EXPECT_EQ(tags.size(), 1);
EXPECT_TRUE(tags.find("P1") != tags.end());
}
else if (node == r)
{
}
else
{
EXPECT_TRUE(tags.find("G") != tags.end());
EXPECT_TRUE(tags.find("<Decomposed from MVN>") != tags.end());
}
});
}
TEST(provenance, fused_decomposition_tag)
{
test::ProvenanceEnabler provenance_enabler;
auto p1 = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 4});
auto fused_op = make_shared<op::MVN>(p1);
auto result = make_shared<op::Result>(fused_op);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{p1});
pass::Manager manager;
manager.register_pass<pass::FusedOpDecomposition>();
manager.run_passes(f);
const auto tag = "<Decomposed from MVN>";
auto tag_check = [&tag](std::shared_ptr<ngraph::Node> node) {
auto tags = node->get_provenance_tags();
EXPECT_TRUE(tags.find(tag) != tags.end());
};
const auto decomposed_op = f->get_result()->get_input_node_shared_ptr(0);
traverse_nodes(as_node_vector(decomposed_op->outputs()), tag_check, {p1});
}
TEST(provenance, empty_group)
{
auto p1 = make_shared<op::Parameter>(element::i32, PartialShape{2, 3, 4});

View File

@@ -14,26 +14,8 @@ set (SRC
performance_counter.hpp
dynamic/dynamic_backend.cpp
dynamic/dynamic_backend.hpp
op/avg_pool.cpp
op/avg_pool.hpp
op/convolution.cpp
op/convolution.hpp
op/group_conv.cpp
op/group_conv.hpp
pass/dyn_elimination.cpp
pass/dyn_elimination.hpp
pass/fused_op_decomposition.cpp
pass/fused_op_decomposition.hpp
pass/implicit_broadcast_elimination.cpp
pass/implicit_broadcast_elimination.hpp
pass/liveness.cpp
pass/liveness.hpp
pass/opset0_downgrade.cpp
pass/opset0_downgrade.hpp
pass/opset1_downgrade.cpp
pass/opset1_downgrade.hpp
pass/opset1_upgrade.cpp
pass/opset1_upgrade.hpp
pass/shape_relevance.cpp
pass/shape_relevance.hpp
)

View File

@@ -15,8 +15,6 @@
#include "ngraph/specialize_function.hpp"
#include "ngraph/util.hpp"
#include "pass/dyn_elimination.hpp"
#include "pass/opset0_downgrade.hpp"
#include "pass/opset1_downgrade.hpp"
#include "pass/shape_relevance.hpp"
using namespace std;
@@ -239,10 +237,8 @@ bool runtime::dynamic::DynamicExecutable::call(
pass::Manager passes;
// Opset1Downgrade should be moved below DynElimination
// when ConstantFolding for v3 ops will be ready
passes.register_pass<pass::Opset1Downgrade>();
passes.register_pass<pass::ConstantFolding>();
passes.register_pass<pass::DynElimination>();
passes.register_pass<pass::Opset0Downgrade>(); // Converts dynamic v1 variants to v0 ops
passes.set_per_pass_validation(false);
// FIXME(amprocte): Vile, temporary hack: we need to do repeated rounds of

View File

@@ -8,7 +8,6 @@
#include "ngraph/pass/manager.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"
#include "pass/opset1_upgrade.hpp"
using namespace std;
using namespace ngraph;
@@ -63,7 +62,7 @@ namespace
memcpy(blob_ptr, data, data_size * elem_type.size());
return blob;
}
}
} // namespace
namespace
{
@@ -78,21 +77,18 @@ namespace
ie_ops.insert(opset4.begin(), opset4.end());
auto& opset5 = get_opset5().get_type_info_set();
ie_ops.insert(opset5.begin(), opset5.end());
auto& opset6= get_opset6().get_type_info_set();
auto& opset6 = get_opset6().get_type_info_set();
ie_ops.insert(opset6.begin(), opset6.end());
auto& opset7= get_opset7().get_type_info_set();
auto& opset7 = get_opset7().get_type_info_set();
ie_ops.insert(opset7.begin(), opset7.end());
return ie_ops;
}
}
} // namespace
runtime::ie::IE_Executable::IE_Executable(shared_ptr<Function> func, string device)
: m_device{device}
{
static std::set<NodeTypeInfo> ie_ops = get_ie_ops();
pass::Manager passes;
passes.register_pass<pass::Opset1Upgrade>();
passes.run_passes(func);
for (const auto& node : func->get_ops())
{

View File

@@ -21,7 +21,6 @@
#include "ngraph/runtime/reference/reorg_yolo.hpp"
#include "ngraph/runtime/reference/tensor_iterator.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "op/avg_pool.hpp"
namespace ngraph
{

View File

@@ -1,34 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "atan2.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/negative.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/subtract.hpp"
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::v0::Atan2::type_info;
op::v0::Atan2::Atan2(const Output<Node>& y, const Output<Node>& x, const AutoBroadcastSpec& autob)
: BinaryElementwiseArithmetic(y, x, autob)
{
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v0::Atan2::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<Atan2>(new_args.at(0), new_args.at(1), this->get_autob());
}
bool op::v0::Atan2::visit_attributes(AttributeVisitor& visitor)
{
BinaryElementwiseArithmetic::visit_attributes(visitor);
return true;
}

View File

@@ -1,43 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include "backend_visibility.hpp"
#include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise full arctan operation
class BACKEND_API Atan2 : public util::BinaryElementwiseArithmetic
{
public:
static constexpr NodeTypeInfo type_info{"Atan2", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Atan2()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
{
}
/// \brief atan2(y,x) is the angle from the origin to the point (x,y) (note reversed
/// order).
///
/// \param y
/// \param x
Atan2(const Output<Node>& y,
const Output<Node>& x,
const AutoBroadcastSpec& autob = AutoBroadcastSpec());
std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
};
}
}
}
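For a concrete sense of the operation documented above: atan2(1, 1) = pi/4 and atan2(1, -1) = 3*pi/4, so the result covers the full (-pi, pi] range and preserves the quadrant, unlike plain atan(y/x).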

View File

@@ -1,235 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "avg_pool.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
// *** AvgPool OP SET 0 ***
constexpr NodeTypeInfo op::v0::AvgPool::type_info;
op::v0::AvgPool::AvgPool(const Output<Node>& arg,
const Shape& window_shape,
const Strides& window_movement_strides,
const Shape& padding_below,
const Shape& padding_above,
bool include_padding_in_avg_computation,
const PadType& pad_type,
bool ceil_mode)
: Op({arg})
, m_window_shape(window_shape)
, m_window_movement_strides(window_movement_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_include_padding_in_avg_computation(include_padding_in_avg_computation)
, m_pad_type(pad_type)
, m_ceil_mode(ceil_mode)
{
constructor_validate_and_infer_types();
}
op::v0::AvgPool::AvgPool(const Output<Node>& arg,
const Shape& window_shape,
const Strides& window_movement_strides,
const Shape& padding_below,
const Shape& padding_above,
bool include_padding_in_avg_computation,
const PadType& pad_type)
: AvgPool(arg,
window_shape,
window_movement_strides,
padding_below,
padding_above,
include_padding_in_avg_computation,
pad_type,
false)
{
}
op::v0::AvgPool::AvgPool(const Output<Node>& arg,
const Shape& window_shape,
const Strides& window_movement_strides,
const Shape& padding_below,
const Shape& padding_above,
bool include_padding_in_avg_computation)
: AvgPool(arg,
window_shape,
window_movement_strides,
padding_below,
padding_above,
include_padding_in_avg_computation,
PadType::EXPLICIT)
{
}
bool op::v0::AvgPool::visit_attributes(AttributeVisitor& visitor)
{
visitor.on_attribute("window_shape", m_window_shape);
visitor.on_attribute("window_movement_strides", m_window_movement_strides);
visitor.on_attribute("padding_below", m_padding_below);
visitor.on_attribute("padding_above", m_padding_above);
visitor.on_attribute("include_padding_in_avg_computation",
m_include_padding_in_avg_computation);
visitor.on_attribute("pad_type", m_pad_type);
visitor.on_attribute("ceil_mode", m_ceil_mode);
return true;
}
void op::v0::AvgPool::validate_and_infer_types()
{
if (0 == m_window_movement_strides.size())
{
m_window_movement_strides = Strides(m_window_shape.size(), 1);
}
if (0 == m_padding_below.size())
{
m_padding_below = Shape(m_window_shape.size(), 0);
}
if (0 == m_padding_above.size())
{
m_padding_above = Shape(m_window_shape.size(), 0);
}
const PartialShape& arg_shape = get_input_partial_shape(0);
if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER)
{
if (arg_shape.is_static())
{
CoordinateDiff padding_above, padding_below;
infer_auto_padding(arg_shape.to_shape(),
m_window_shape,
m_window_movement_strides,
Strides(m_window_shape.size(), 1), // No dilation
m_pad_type,
padding_above,
padding_below);
m_padding_above = Shape(padding_above.begin(), padding_above.end());
m_padding_below = Shape(padding_below.begin(), padding_below.end());
}
}
// infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
// now still take Shape (no negative padding).
CoordinateDiff padding_below(m_padding_below.begin(), m_padding_below.end());
CoordinateDiff padding_above(m_padding_above.begin(), m_padding_above.end());
set_output_type(0,
get_input_element_type(0),
infer_batched_pooling_forward(this,
arg_shape,
padding_below,
padding_above,
m_window_shape,
m_window_movement_strides,
m_include_padding_in_avg_computation,
m_ceil_mode));
}
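For reference, infer_batched_pooling_forward is expected to produce the usual pooling output arithmetic (summarized here, not quoted from that helper): for each spatial axis i, out_i = floor((in_i + pad_below_i + pad_above_i - window_i) / stride_i) + 1, with ceil replacing floor when ceil_mode is true. For example, in_i = 6, window_i = 3, stride_i = 2 and zero padding gives floor(3 / 2) + 1 = 2.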
op::v0::AvgPool::AvgPool(const Output<Node>& arg,
const Shape& window_shape,
const Strides& window_movement_strides)
: AvgPool(arg, window_shape, window_movement_strides, Shape(), Shape(), false)
{
}
op::v0::AvgPool::AvgPool(const Output<Node>& arg, const Shape& window_shape)
: AvgPool(arg, window_shape, Strides(), Shape(), Shape(), false)
{
}
const Shape& op::v0::AvgPool::get_window_shape() const
{
return m_window_shape;
}
void op::v0::AvgPool::set_window_shape(const Shape& window_shape)
{
m_window_shape = window_shape;
}
const Strides& op::v0::AvgPool::get_window_movement_strides() const
{
return m_window_movement_strides;
}
void op::v0::AvgPool::set_window_movement_strides(const Strides& window_movement_strides)
{
m_window_movement_strides = window_movement_strides;
}
const Shape& op::v0::AvgPool::get_padding_below() const
{
return m_padding_below;
}
void op::v0::AvgPool::set_padding_below(const Shape& padding_below)
{
m_padding_below = padding_below;
}
const Shape& op::v0::AvgPool::get_padding_above() const
{
return m_padding_above;
}
void op::v0::AvgPool::set_padding_above(const Shape& padding_above)
{
m_padding_above = padding_above;
}
bool op::v0::AvgPool::get_include_padding_in_avg_computation() const
{
return m_include_padding_in_avg_computation;
}
void op::v0::AvgPool::set_include_padding_in_avg_computation(
bool include_padding_in_avg_computation)
{
m_include_padding_in_avg_computation = include_padding_in_avg_computation;
}
const op::PadType& op::v0::AvgPool::get_pad_type() const
{
return m_pad_type;
}
void op::v0::AvgPool::set_pad_type(const op::PadType& pad_type)
{
m_pad_type = pad_type;
}
bool op::v0::AvgPool::get_ceil_mode() const
{
return m_ceil_mode;
}
void op::v0::AvgPool::set_ceil_mode(bool ceil_mode)
{
m_ceil_mode = ceil_mode;
}
shared_ptr<Node> op::v0::AvgPool::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<v0::AvgPool>(new_args.at(0),
m_window_shape,
m_window_movement_strides,
m_padding_below,
m_padding_above,
m_include_padding_in_avg_computation,
m_pad_type,
m_ceil_mode);
}
shared_ptr<Node> op::v0::AvgPool::get_default_value() const
{
return Constant::create(get_element_type(), get_shape(), {0});
}

View File

@@ -1,164 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "backend_visibility.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/attr_types.hpp"
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Batched average pooling operation, with optional padding and window stride.
///
class BACKEND_API AvgPool : public Op
{
public:
static constexpr NodeTypeInfo type_info{"AvgPool", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a batched average pooling operation.
AvgPool() = default;
/// \brief Constructs a batched average pooling operation.
///
/// \param arg The output producing the input data batch tensor.<br>
/// `[d1, dn]`
/// \param window_shape The window shape.<br>
/// `[n]`
/// \param window_movement_strides The window movement strides.<br>
/// `[n]`
/// \param padding_below The below-padding shape.<br>
/// `[n]`
/// \param padding_above The above-padding shape.<br>
/// `[n]`
/// \param include_padding_in_avg_computation If true then averages include padding
/// elements, each treated as the number zero. If false, padding elements are
/// entirely ignored when computing averages.
/// \param pad_type Padding type to use for additional padded dimensions
/// \param ceil_mode Whether to use ceiling while computing output shape.
AvgPool(const Output<Node>& arg,
const Shape& window_shape,
const Strides& window_movement_strides,
const Shape& padding_below,
const Shape& padding_above,
bool include_padding_in_avg_computation,
const PadType& pad_type,
bool ceil_mode);
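A concrete illustration of include_padding_in_avg_computation (numbers are made up for this note): if a window covers the input values {1, 2} plus one padding element, the result is (1 + 2 + 0) / 3 = 1.0 when padding is included and (1 + 2) / 2 = 1.5 when padding is ignored.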
/// \brief Constructs a batched average pooling operation.
///
/// \param arg The output producing the input data batch tensor.<br>
/// `[d1, dn]`
/// \param window_shape The window shape.<br>
/// `[n]`
/// \param window_movement_strides The window movement strides.<br>
/// `[n]`
/// \param padding_below The below-padding shape.<br>
/// `[n]`
/// \param padding_above The above-padding shape.<br>
/// `[n]`
/// \param include_padding_in_avg_computation If true then averages include padding
/// elements, each treated as the number zero. If false, padding elements are
/// entirely ignored when computing averages.
/// \param pad_type Padding type to use for additional padded dimensions
AvgPool(const Output<Node>& arg,
const Shape& window_shape,
const Strides& window_movement_strides,
const Shape& padding_below,
const Shape& padding_above,
bool include_padding_in_avg_computation,
const PadType& pad_type);
/// \brief Constructs a batched average pooling operation.
///
/// \param arg The output producing the input data batch tensor.<br>
/// `[d1, dn]`
/// \param window_shape The window shape.<br>
/// `[n]`
/// \param window_movement_strides The window movement strides.<br>
/// `[n]`
/// \param padding_below The below-padding shape.<br>
/// `[n]`
/// \param padding_above The above-padding shape.<br>
/// `[n]`
/// \param include_padding_in_avg_computation If true then averages include padding
/// elements, each treated as the number zero. If false, padding elements are
/// entirely ignored when computing averages.
AvgPool(const Output<Node>& arg,
const Shape& window_shape,
const Strides& window_movement_strides,
const Shape& padding_below,
const Shape& padding_above,
bool include_padding_in_avg_computation = false);
/// \brief Constructs a batched, unpadded average pooling operation (i.e., all
/// padding shapes are set to 0).
///
/// \param arg The output producing the input data batch tensor.<br>
/// `[d1, ..., dn]`
/// \param window_shape The window shape.<br>
/// `[n]`
/// \param window_movement_strides The window movement strides.<br>
/// `[n]`
AvgPool(const Output<Node>& arg,
const Shape& window_shape,
const Strides& window_movement_strides);
/// \brief Constructs an unstrided batched average pooling operation (i.e., all window
/// movement strides are 1 and all padding shapes are set to 0).
///
/// \param arg The output producing the input data batch tensor.<br>
/// `[d1, ..., dn]`
/// \param window_shape The window shape.<br>
/// `[n]`
AvgPool(const Output<Node>& arg, const Shape& window_shape);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return The window shape.
const Shape& get_window_shape() const;
void set_window_shape(const Shape& window_shape);
/// \return The window movement strides.
const Strides& get_window_movement_strides() const;
void set_window_movement_strides(const Strides& window_movement_strides);
/// \return The below-padding shape.
const Shape& get_padding_below() const;
void set_padding_below(const Shape& padding_below);
/// \return The above-padding shape.
const Shape& get_padding_above() const;
void set_padding_above(const Shape& padding_above);
bool get_include_padding_in_avg_computation() const;
void
set_include_padding_in_avg_computation(bool include_padding_in_avg_computation);
/// \return The pad type for pooling.
const PadType& get_pad_type() const;
void set_pad_type(const PadType& pad_type);
bool get_ceil_mode() const;
void set_ceil_mode(bool ceil_mode);
/// \return The default value for AvgPool.
NGRAPH_SUPPRESS_DEPRECATED_START
virtual std::shared_ptr<Node> get_default_value() const override;
NGRAPH_SUPPRESS_DEPRECATED_END
protected:
Shape m_window_shape;
Strides m_window_movement_strides;
Shape m_padding_below;
Shape m_padding_above;
bool m_include_padding_in_avg_computation{false};
PadType m_pad_type{PadType::EXPLICIT};
bool m_ceil_mode{false};
};
} // namespace v0
} // namespace op
} // namespace ngraph

View File

@@ -1,343 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "convolution.hpp"
#include "ngraph/axis_vector.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
// *** Convolution OP SET 0 ***
constexpr NodeTypeInfo op::v0::Convolution::type_info;
op::v0::Convolution::Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const PadType& pad_type)
: Op({data_batch, filters})
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_data_dilation_strides(data_dilation_strides)
, m_pad_type(pad_type)
{
constructor_validate_and_infer_types();
}
bool op::v0::Convolution::visit_attributes(AttributeVisitor& visitor)
{
visitor.on_attribute("window_movement_strides", m_window_movement_strides);
visitor.on_attribute("window_dilation_strides", m_window_dilation_strides);
visitor.on_attribute("data_dilation_strides", m_data_dilation_strides);
visitor.on_attribute("padding_below", m_padding_below);
visitor.on_attribute("padding_above", m_padding_above);
visitor.on_attribute("pad_type", m_pad_type);
return true;
}
void op::v0::Convolution::validate_and_infer_types()
{
const PartialShape& data_batch_shape = get_input_partial_shape(0);
element::Type data_batch_et = get_input_element_type(0);
const PartialShape& filters_shape = get_input_partial_shape(1);
element::Type filters_et = get_input_element_type(1);
if (m_data_dilation_strides.size() == 0)
{
m_data_dilation_strides = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_window_movement_strides.size() == 0)
{
m_window_movement_strides = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_window_dilation_strides.size() == 0)
{
m_window_dilation_strides = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_padding_below.size() == 0)
{
m_padding_below = conv_default_padding(this, data_batch_shape, filters_shape);
}
if (m_padding_above.size() == 0)
{
m_padding_above = conv_default_padding(this, data_batch_shape, filters_shape);
}
if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER)
{
if (data_batch_shape.is_static() && filters_shape.is_static())
{
// TODO: data dilation
m_padding_below.clear();
m_padding_above.clear();
auto filter_shape = filters_shape.to_shape();
filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
infer_auto_padding(data_batch_shape.to_shape(),
filter_shape,
m_window_movement_strides,
m_window_dilation_strides,
m_pad_type,
m_padding_above,
m_padding_below);
}
}
element::Type result_et;
PartialShape result_shape;
NODE_VALIDATION_CHECK(
this,
element::Type::merge(result_et, data_batch_et, filters_et),
"Element types for data batch and filters do not match (data batch element type: ",
data_batch_et,
", filters element type: ",
filters_et,
").");
result_shape = infer_convolution_forward(this,
data_batch_shape,
m_data_dilation_strides,
m_padding_below,
m_padding_above,
filters_shape,
m_window_movement_strides,
m_window_dilation_strides);
set_output_type(0, result_et, result_shape);
}
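For reference, infer_convolution_forward follows the standard convolution shape arithmetic (summarized here as a sketch): with the effective input extent in_eff_i = (in_i - 1) * data_dilation_i + 1 and the effective filter extent f_eff_i = (f_i - 1) * window_dilation_i + 1, each output spatial dimension is out_i = floor((in_eff_i + pad_below_i + pad_above_i - f_eff_i) / stride_i) + 1. For example, in_i = 5, f_i = 3, unit strides and dilations, and zero padding give 5 - 3 + 1 = 3.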
op::v0::Convolution::Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above)
: Convolution(data_batch,
filters,
window_movement_strides,
window_dilation_strides,
padding_below,
padding_above,
Strides())
{
}
op::v0::Convolution::Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides)
: Convolution(data_batch,
filters,
window_movement_strides,
window_dilation_strides,
CoordinateDiff(),
CoordinateDiff())
{
}
op::v0::Convolution::Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides)
: Convolution(data_batch,
filters,
window_movement_strides,
Strides(),
CoordinateDiff(),
CoordinateDiff())
{
}
op::v0::Convolution::Convolution(const Output<Node>& data_batch, const Output<Node>& filters)
: Convolution(data_batch, filters, Strides(), Strides(), CoordinateDiff(), CoordinateDiff())
{
}
shared_ptr<Node> op::v0::Convolution::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<v0::Convolution>(new_args.at(0),
new_args.at(1),
m_window_movement_strides,
m_window_dilation_strides,
m_padding_below,
m_padding_above,
m_data_dilation_strides,
m_pad_type);
}
constexpr NodeTypeInfo op::v0::ConvolutionBackpropData::type_info;
shared_ptr<Node> op::v0::Convolution::get_default_value() const
{
return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
}
op::v0::ConvolutionBackpropData::ConvolutionBackpropData(
const Shape& data_batch_shape,
const Output<Node>& filters,
const Output<Node>& output_delta,
const Strides& window_movement_strides_forward,
const Strides& window_dilation_strides_forward,
const CoordinateDiff& padding_below_forward,
const CoordinateDiff& padding_above_forward,
const Strides& data_dilation_strides_forward)
: Op({filters, output_delta})
, m_data_batch_shape(data_batch_shape)
, m_window_movement_strides_forward(window_movement_strides_forward)
, m_window_dilation_strides_forward(window_dilation_strides_forward)
, m_padding_below_forward(padding_below_forward)
, m_padding_above_forward(padding_above_forward)
, m_data_dilation_strides_forward(data_dilation_strides_forward)
{
constructor_validate_and_infer_types();
}
bool op::v0::ConvolutionBackpropData::visit_attributes(AttributeVisitor& visitor)
{
visitor.on_attribute("data_batch_shape", m_data_batch_shape);
visitor.on_attribute("window_movement_strides_forward", m_window_movement_strides_forward);
visitor.on_attribute("window_dilation_strides_forward", m_window_dilation_strides_forward);
visitor.on_attribute("padding_below_forward", m_padding_below_forward);
visitor.on_attribute("padding_above_forward", m_padding_above_forward);
visitor.on_attribute("data_dilation_strides_forward", m_data_dilation_strides_forward);
return true;
}
void op::v0::ConvolutionBackpropData::validate_and_infer_types()
{
// Backprop to data is itself convolution, with inputs/outputs/attributes transmogrified as
// follows.
//
//                              Forward   Backward
//   "N" axis for data batch    0         0
//   "C" axis for data batch    1         1
//   "Co" axis for filters      0         0
//   "Ci" axis for filters      1         1
//   "N" axis for output        0         0
//   "C" axis for output        1         1
//   Data batch                 x         delta
//   Data batch shape           S_x       S_o
//   Filters                    f         reverse(f) [on spatial axes]
//   Filters shape              S_f       S_f
//   Window movement strides    q_x       p_x
//   Window dilation strides    p_f       p_f
//   Padding below              a_x       (S_f - 1)p_f - a_x
//   Padding above              b_x       (S_f - 1)p_f
//                                          + ((a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) % q_x)
//                                          - b_x
//   Data dilation strides      p_x       q_x
//   Output shape               S_o       S_x
//
// To _validate_, we simply need to check/infer the output shape of the forward convolution,
// then check to make sure that the incoming delta has the same shape as the forward output.
const PartialShape& filters_shape = get_input_partial_shape(0);
element::Type filters_et = get_input_element_type(0);
const PartialShape& delta_shape = get_input_partial_shape(1);
element::Type delta_et = get_input_element_type(1);
element::Type forward_result_et;
PartialShape forward_result_shape;
NODE_VALIDATION_CHECK(
this,
element::Type::merge(forward_result_et, delta_et, filters_et),
"Element types for data batch and filters do not match (data batch element type: ",
delta_et,
", filters element type: ",
filters_et,
").");
forward_result_shape = infer_convolution_forward(this,
m_data_batch_shape,
m_data_dilation_strides_forward,
m_padding_below_forward,
m_padding_above_forward,
filters_shape,
m_window_movement_strides_forward,
m_window_dilation_strides_forward);
NODE_VALIDATION_CHECK(this,
forward_result_shape.compatible(delta_shape),
"Inferred forward output shape (",
forward_result_shape,
") does not match shape of ",
"delta (",
delta_shape,
").");
set_output_type(0, forward_result_et, m_data_batch_shape);
}
shared_ptr<Node>
op::v0::ConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<v0::ConvolutionBackpropData>(m_data_batch_shape,
new_args.at(0),
new_args.at(1),
m_window_movement_strides_forward,
m_window_dilation_strides_forward,
m_padding_below_forward,
m_padding_above_forward,
m_data_dilation_strides_forward);
}
CoordinateDiff op::v0::ConvolutionBackpropData::compute_backward_delta_out_pad_below() const
{
auto& in_shape = get_data_batch_shape();
auto& filter_dilation = get_window_dilation_strides_forward();
auto& filter_shape = get_input_shape(0);
auto& in_pad_below = get_padding_below_forward();
size_t spatial_dim_count = static_cast<size_t>(in_shape.size()) - 2;
CoordinateDiff backward_delta_out_pad_below;
backward_delta_out_pad_below.resize(spatial_dim_count);
for (size_t i = 0; i < spatial_dim_count; i++)
{
backward_delta_out_pad_below[i] =
(static_cast<ptrdiff_t>(filter_shape[i + 2]) - 1) * filter_dilation[i] -
in_pad_below[i];
}
return backward_delta_out_pad_below;
}
CoordinateDiff op::v0::ConvolutionBackpropData::compute_backward_delta_out_pad_above() const
{
auto& in_shape = get_data_batch_shape();
auto& filter_dilation = get_window_dilation_strides_forward();
auto& filter_shape = get_input_shape(0);
auto& in_pad_below = get_padding_below_forward();
auto& in_pad_above = get_padding_above_forward();
auto& in_dilation = get_data_dilation_strides_forward();
auto& stride = get_window_movement_strides_forward();
size_t spatial_dim_count = static_cast<size_t>(in_shape.size()) - 2;
CoordinateDiff backward_delta_out_pad_above;
backward_delta_out_pad_above.resize(spatial_dim_count);
for (size_t i = 0; i < spatial_dim_count; i++)
{
backward_delta_out_pad_above[i] =
(static_cast<ptrdiff_t>(filter_shape[i + 2]) - 1) * filter_dilation[i] +
((in_pad_below[i] + ((in_shape[i + 2]) - 1) * in_dilation[i] + in_pad_above[i] -
(static_cast<ptrdiff_t>(filter_shape[i + 2]) - 1) * filter_dilation[i]) %
stride[i]) -
in_pad_above[i];
}
return backward_delta_out_pad_above;
}

View File

@ -1,304 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "backend_visibility.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/attr_types.hpp"
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Batched convolution operation, with optional window dilation and stride.
///
class BACKEND_API Convolution : public Op
{
public:
static constexpr NodeTypeInfo type_info{"Convolution", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a batched convolution operation.
Convolution() = default;
/// \brief Constructs a batched convolution operation.
///
/// \param data_batch The node producing the input data batch tensor.<br>
/// `[N, C_IN, D1, ... Df]`
/// \param filters The node producing the filters tensor.<br>
/// `[C_OUT, C_IN, F1, ... Ff]`
/// \param window_movement_strides The window movement strides.<br>
/// `[f]`
/// \param window_dilation_strides The window dilation strides.<br>
/// `[f]`
/// \param padding_below The padding-below sizes.<br>
/// `[f]`
/// \param padding_above The padding-above sizes.<br>
/// `[f]`
/// \param data_dilation_strides The data dilation strides.<br>
/// `[f]`
/// \param pad_type The pad type for automatically computing padding sizes.
///
/// Output `[N, C_OUT, R1, ... Rf]`
///
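/// A minimal construction sketch (the shapes below are illustrative assumptions,
/// not taken from this header):
/// \code
/// auto data    = std::make_shared<op::Parameter>(element::f32, Shape{1, 3, 224, 224});
/// auto filters = std::make_shared<op::Parameter>(element::f32, Shape{16, 3, 3, 3});
/// auto conv    = std::make_shared<op::v0::Convolution>(data,
///                                                      filters,
///                                                      Strides{1, 1},        // movement
///                                                      Strides{1, 1},        // window dilation
///                                                      CoordinateDiff{0, 0}, // padding below
///                                                      CoordinateDiff{0, 0}, // padding above
///                                                      Strides{1, 1});       // data dilation
/// // conv->get_shape() is {1, 16, 222, 222}
/// \endcode
///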
Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const PadType& pad_type = PadType::EXPLICIT);
/// \brief Constructs a batched convolution operation with no data dilation (i.e.,
/// all
/// data dilation strides are 1).
/// \param data_batch The node producing the input data batch tensor.<br>
/// `[N, C_IN, D1, ... Df]`
/// \param filters The node producing the filters tensor.<br>
/// `[C_OUT, C_IN, F1, ... Ff]`
/// \param window_movement_strides The window movement strides.<br>
/// `[f]`
/// \param window_dilation_strides The window dilation strides.<br>
/// `[f]`
/// \param padding_below The padding-below sizes.<br>
/// `[f]`
/// \param padding_above The padding-above sizes.<br>
/// `[f]`
///
/// Output `[N, C_OUT, R1, ... Rf]`
///
Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above);
/// \brief Constructs a batched convolution operation with no padding or data
/// dilation
/// (i.e., padding above and below are 0 everywhere, and all data dilation
/// strides are 1).
///
/// \param data_batch The node producing the input data batch tensor.<br>
/// `[N, C_IN, D1, ... Df]`
/// \param filters The node producing the filters tensor.<br>
/// `[C_OUT, C_IN, F1, ... Ff]`
/// \param window_movement_strides The window movement strides.<br>
/// `[f]`
/// \param window_dilation_strides The window dilation strides.<br>
/// `[f]`
///
/// Output `[N, C_OUT, R1, ... Rf]`
///
Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides);
/// \brief Constructs a batched convolution operation with no window dilation,
/// padding,
/// or data dilation (i.e., padding above and below are 0 everywhere, and all
/// window/data dilation strides are 1).
///
/// \param data_batch The node producing the input data batch tensor.<br>
/// `[N, C_IN, D1, ... Df]`
/// \param filters The node producing the filters tensor.<br>
/// `[C_OUT, C_IN, F1, ... Ff]`
/// \param window_movement_strides The window movement strides.<br>
/// `[f]`
///
/// Output `[N, C_OUT, R1, ... Rf]`
///
Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides);
/// \brief Constructs a batched convolution operation with no window dilation or
/// movement stride (i.e., padding above and below are 0 everywhere, and all
/// window/data dilation strides and window movement strides are 1).
///
/// \param data_batch The node producing the input data batch tensor.<br>
/// `[N, C_IN, D1, ... Df]`
/// \param filters The node producing the filters tensor.<br>
/// `[C_OUT, C_IN, F1, ... Ff]`
///
/// Output `[N, C_OUT, R1, ... Rf]`
///
Convolution(const Output<Node>& data_batch, const Output<Node>& filters);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return The window movement strides.
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
void set_window_movement_strides(const Strides& window_movement_strides)
{
m_window_movement_strides = window_movement_strides;
}
/// \return The window dilation strides.
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
void set_window_dilation_strides(const Strides& window_dilation_strides)
{
m_window_dilation_strides = window_dilation_strides;
}
/// \return The padding-below sizes (possibly negative).
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
void set_padding_below(const CoordinateDiff& padding_below)
{
m_padding_below = padding_below;
}
/// \return The padding-above sizes (possibly negative).
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
void set_padding_above(const CoordinateDiff& padding_above)
{
m_padding_above = padding_above;
}
/// \return The input data dilation strides.
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
void set_data_dilation_strides(const Strides& data_dilation_strides)
{
m_data_dilation_strides = data_dilation_strides;
}
/// \return The pad type for convolution.
const PadType& get_pad_type() const { return m_pad_type; }
void set_pad_type(const PadType& pad_type) { m_pad_type = pad_type; }
/// \return The default value for Convolution.
NGRAPH_SUPPRESS_DEPRECATED_START
virtual std::shared_ptr<Node> get_default_value() const override;
NGRAPH_SUPPRESS_DEPRECATED_END
protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
Strides m_data_dilation_strides;
PadType m_pad_type;
};
/// \brief Data batch backprop for batched convolution operation.
class BACKEND_API ConvolutionBackpropData : public Op
{
public:
static constexpr NodeTypeInfo type_info{"ConvolutionBackpropData", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a batched-convolution data batch-backprop operation.
ConvolutionBackpropData() = default;
///
/// \brief Constructs a batched-convolution data batch-backprop operation.
///
/// \param data_batch_shape The shape of the data batch from
/// forward-prop.
/// \param filters The node producing the filters from
/// forward-prop.
/// \param data The node producing output delta.
/// \param window_movement_strides_forward The window movement strides from
/// forward-prop.
/// \param window_dilation_strides_forward The window dilation strides from
/// forward-prop.
/// \param padding_below_forward The padding-below sizes from
/// forward-prop.
/// \param padding_above_forward The padding-above sizes from
/// forward-prop.
/// \param data_dilation_strides_forward The data dilation strides from
/// forward-prop.
///
ConvolutionBackpropData(const Shape& data_batch_shape,
const Output<Node>& filters,
const Output<Node>& data,
const Strides& window_movement_strides_forward,
const Strides& window_dilation_strides_forward,
const CoordinateDiff& padding_below_forward,
const CoordinateDiff& padding_above_forward,
const Strides& data_dilation_strides_forward);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return The data batch shape.
const Shape& get_data_batch_shape() const { return m_data_batch_shape; }
void set_data_batch_shape(const Shape& data_batch_shape)
{
m_data_batch_shape = data_batch_shape;
}
/// \return The window movement strides from the forward prop.
const Strides& get_window_movement_strides_forward() const
{
return m_window_movement_strides_forward;
}
void set_window_movement_strides_forward(
const Strides& window_movement_strides_forward)
{
m_window_movement_strides_forward = window_movement_strides_forward;
}
/// \return The window dilation strides from the forward prop.
const Strides& get_window_dilation_strides_forward() const
{
return m_window_dilation_strides_forward;
}
void set_window_dilation_strides_forward(
const Strides& window_dilation_strides_forward)
{
m_window_dilation_strides_forward = window_dilation_strides_forward;
}
/// \return The padding-below sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_padding_below_forward() const
{
return m_padding_below_forward;
}
void set_padding_below_forward(const CoordinateDiff& padding_below_forward)
{
m_padding_below_forward = padding_below_forward;
}
/// \return The padding-above sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_padding_above_forward() const
{
return m_padding_above_forward;
}
void set_padding_above_forward(const CoordinateDiff& padding_above_forward)
{
m_padding_above_forward = padding_above_forward;
}
/// \return The input data dilation strides from the forward prop.
const Strides& get_data_dilation_strides_forward() const
{
return m_data_dilation_strides_forward;
}
void set_data_dilation_strides_forward(const Strides& data_dilation_strides_forward)
{
m_data_dilation_strides_forward = data_dilation_strides_forward;
}
// Compute the padding (below/above) applied to the output delta when this
// backprop is expressed as a forward convolution over the delta
CoordinateDiff compute_backward_delta_out_pad_above() const;
CoordinateDiff compute_backward_delta_out_pad_below() const;
protected:
Shape m_data_batch_shape;
Strides m_window_movement_strides_forward;
Strides m_window_dilation_strides_forward;
CoordinateDiff m_padding_below_forward;
CoordinateDiff m_padding_above_forward;
Strides m_data_dilation_strides_forward;
};
} // namespace v0
} // namespace op
} // namespace ngraph

View File

@ -1,319 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <numeric>
#include "convolution.hpp"
#include "group_conv.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/builder/split.hpp"
#include "ngraph/op/concat.hpp"
#include "ngraph/op/convolution.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
NGRAPH_SUPPRESS_DEPRECATED_START
//------------------------------------------------------------------------------
// v0::GroupConvolution
//------------------------------------------------------------------------------
constexpr NodeTypeInfo op::v0::GroupConvolution::type_info;
op::v0::GroupConvolution::GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const size_t groups,
const PadType& pad_type)
: FusedOp({data_batch, filters})
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_data_dilation_strides(data_dilation_strides)
, m_groups(groups)
, m_pad_type(pad_type)
, m_groups_in_filters(false)
{
constructor_validate_and_infer_types();
}
op::v0::GroupConvolution::GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const PadType& pad_type)
: FusedOp({data_batch, filters})
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_data_dilation_strides(data_dilation_strides)
, m_groups(0)
, m_pad_type(pad_type)
, m_groups_in_filters(true)
{
constructor_validate_and_infer_types();
}
void op::v0::GroupConvolution::pre_validate_and_infer_types()
{
auto data_shape = get_input_partial_shape(0);
auto filters_shape = get_input_partial_shape(1);
if (data_shape.is_static() && filters_shape.is_static())
{
// Update groups
if (m_groups_in_filters)
{
m_groups = get_input_partial_shape(1)[0].get_length();
}
// Data channels
NODE_VALIDATION_CHECK(this,
data_shape.to_shape()[1] % get_groups() == 0,
"Data channels not a multiple of group size");
// Output channels
NODE_VALIDATION_CHECK(this,
filters_shape.to_shape()[0] % get_groups() == 0,
"# Filters not a multiple of group size");
// Input Filters
NODE_VALIDATION_CHECK(this,
(filters_shape.to_shape()[m_groups_in_filters ? 2 : 1] *
get_groups()) == data_shape.to_shape()[1],
"Incorrect number of channels per filter");
}
else
{
set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
}
}
void op::v0::GroupConvolution::post_validate_and_infer_types()
{
auto data_shape = get_input_partial_shape(0);
auto filters_shape = get_input_partial_shape(1);
if (data_shape.is_static() && filters_shape.is_static())
{
if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER)
{
m_padding_below.clear();
m_padding_above.clear();
auto filter_shape = filters_shape.to_shape();
filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
infer_auto_padding(data_shape.to_shape(),
filter_shape,
m_window_movement_strides,
m_window_dilation_strides,
m_pad_type,
m_padding_above,
m_padding_below);
}
}
}
Shape op::v0::GroupConvolution::get_weights_dimensions() const
{
auto data_shape = get_input_shape(0);
auto weights_shape = get_input_shape(1);
// check if weights already includes groups
if (m_groups_in_filters)
{
return weights_shape;
}
// reshape weights into 5d tensors that includes groups
const size_t OC = 0;
const size_t OC_IN_OUTPUT = 1;
const size_t IC = 1;
Shape weights_shape_groups{weights_shape};
// adjust output and channel given a number of groups
weights_shape_groups.at(OC) = get_shape().at(OC_IN_OUTPUT) / get_groups();
weights_shape_groups.at(IC) = data_shape.at(IC) / get_groups();
// push_front the number of groups
weights_shape_groups.insert(weights_shape_groups.begin(), get_groups());
return weights_shape_groups;
}
shared_ptr<Node> op::v0::GroupConvolution::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
if (m_groups_in_filters)
{
return make_shared<op::v0::GroupConvolution>(new_args.at(0),
new_args.at(1),
get_window_movement_strides(),
get_window_dilation_strides(),
get_padding_below(),
get_padding_above(),
get_data_dilation_strides(),
get_pad_type());
}
else
{
return make_shared<op::v0::GroupConvolution>(new_args.at(0),
new_args.at(1),
get_window_movement_strides(),
get_window_dilation_strides(),
get_padding_below(),
get_padding_above(),
get_data_dilation_strides(),
get_groups(),
get_pad_type());
}
}
OutputVector op::v0::GroupConvolution::decompose_op() const
{
auto data = input_value(0);
auto filters = input_value(1);
auto filters_shape = get_input_shape(1);
// Split one convolution op to N ops where N is the number of groups
// and concat results after computation.
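    // Illustrative example (assumed shapes): data {1, 8, 10, 10}, filters {16, 4, 3, 3}
    // and groups = 2 give two Convolutions over {1, 4, 10, 10} x {8, 4, 3, 3}, whose
    // outputs {1, 8, 8, 8} are concatenated along the channel axis into {1, 16, 8, 8}.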
NodeVector convolution_nodes;
// slice data
auto sliced_data = builder::opset1::split(data, get_groups(), 1);
// slice filters
auto sliced_filters = builder::opset1::split(filters, get_groups(), 0);
auto shape = Shape(std::next(std::begin(filters_shape), 1), std::end(filters_shape));
for (std::size_t group{0}; group < get_groups(); ++group)
{
auto sliced_filter = sliced_filters[group];
if (m_groups_in_filters)
{
// Remove group dimension after slicing
sliced_filter = builder::opset1::reshape(sliced_filters[group], shape);
}
convolution_nodes.push_back(
std::make_shared<ngraph::op::v0::Convolution>(sliced_data[group],
sliced_filter,
m_window_movement_strides,
m_window_dilation_strides,
m_padding_below,
m_padding_above,
m_data_dilation_strides,
m_pad_type));
}
std::size_t concatenation_axis = 1;
return {std::make_shared<ngraph::op::Concat>(convolution_nodes, concatenation_axis)};
}
//------------------------------------------------------------------------------
// v0::GroupConvolutionBackpropData
//------------------------------------------------------------------------------
constexpr NodeTypeInfo op::v0::GroupConvolutionBackpropData::type_info;
op::v0::GroupConvolutionBackpropData::GroupConvolutionBackpropData(
const Output<Node>& data_batch,
const Output<Node>& filters,
const Output<Node>& output_delta,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const size_t groups)
: FusedOp({data_batch, filters, output_delta})
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_groups(groups)
{
constructor_validate_and_infer_types();
}
void op::v0::GroupConvolutionBackpropData::pre_validate_and_infer_types()
{
element::Type data_element_type = get_input_element_type(2);
element::Type filters_elem_type = get_input_element_type(1);
NODE_VALIDATION_CHECK(this,
data_element_type.is_dynamic() || data_element_type.is_real(),
"Output delta element type must be f16, bf16, f32, f64 or dynamic (got ",
data_element_type,
").");
NODE_VALIDATION_CHECK(this,
filters_elem_type.is_dynamic() || filters_elem_type.is_real(),
"Filters element type must be f16, bf16, f32, f64 or dynamic (got ",
filters_elem_type,
").");
PartialShape data_pshape = get_input_partial_shape(0);
PartialShape filters_pshape = get_input_partial_shape(1);
PartialShape delta_pshape = get_input_partial_shape(2);
if (data_pshape.is_dynamic() || filters_pshape.is_dynamic() || delta_pshape.is_dynamic())
{
set_output_type(0, data_element_type, PartialShape::dynamic());
}
}
shared_ptr<Node>
op::v0::GroupConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const
{
if (new_args.size() != 3)
{
throw ngraph_error("Incorrect number of new arguments");
}
return make_shared<op::v0::GroupConvolutionBackpropData>(new_args.at(0),
new_args.at(1),
new_args.at(2),
get_window_movement_strides(),
get_window_dilation_strides(),
get_padding_below(),
get_padding_above(),
get_groups());
}
OutputVector op::v0::GroupConvolutionBackpropData::decompose_op() const
{
auto filters = input_value(1);
auto output_delta = input_value(2);
auto data_shape = get_input_shape(0);
NodeVector sliced_inputs;
auto groups = get_groups();
// slice data shape
data_shape[1] /= groups;
// slice delta
auto sliced_delta = builder::opset1::split(output_delta, groups, 1);
// slice filters
auto sliced_filters = builder::opset1::split(filters, groups, 0);
auto num_spatials = get_window_movement_strides().size();
for (size_t i = 0; i < groups; ++i)
{
auto sliced_conv = std::make_shared<op::v0::ConvolutionBackpropData>(
data_shape,
sliced_filters[i],
sliced_delta[i],
get_window_movement_strides(),
get_window_dilation_strides(),
get_padding_below(),
get_padding_above(),
Strides(num_spatials, 1)); // default data dilation strides
sliced_inputs.push_back(sliced_conv);
}
size_t concatenation_axis = 1;
return {std::make_shared<ngraph::op::Concat>(sliced_inputs, concatenation_axis)};
}

View File

@ -1,131 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "backend_visibility.hpp"
#include "ngraph/op/convolution.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/op/util/fused_op.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Group Convolution
class BACKEND_API GroupConvolution : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"GroupConvolution", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GroupConvolution() = default;
GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const size_t groups,
const PadType& pad_type = PadType::EXPLICIT);
// Constructor which accepts groups included in the filters shape.
GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const PadType& pad_type = PadType::EXPLICIT);
Shape get_weights_dimensions() const;
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
Output<Node> get_filters() { return input_value(1); }
Output<Node> get_data_batch() { return input_value(0); }
size_t get_groups() const { return m_groups; };
const PadType& get_pad_type() const { return m_pad_type; }
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
virtual OutputVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
virtual void post_validate_and_infer_types() override;
bool has_groups_in_filters() const { return m_groups_in_filters; }
protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
Strides m_data_dilation_strides;
size_t m_groups;
PadType m_pad_type{PadType::NOTSET};
private:
bool m_groups_in_filters;
};
/// \brief Group Convolution data batch backprop
class BACKEND_API GroupConvolutionBackpropData : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"GroupConvolutionBackpropData", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GroupConvolutionBackpropData() = default;
GroupConvolutionBackpropData(const Output<Node>& data_batch,
const Output<Node>& filters,
const Output<Node>& output_delta,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const size_t groups);
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
size_t get_groups() const { return m_groups; };
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
virtual OutputVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
size_t m_groups;
};
}
} // namespace op
} // namespace ngraph
NGRAPH_SUPPRESS_DEPRECATED_END

View File

@ -1,101 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// This collection contains one entry for each op. If an op is added it must be
// added to this list.
//
// In order to use this list you want to define a macro named exactly NGRAPH_OP.
// When you are done you should undef the macro.
// As an example, if you wanted to make a list of all op names as strings you could do this:
//
// #define NGRAPH_OP(a,b) #a,
// std::vector<std::string> op_names{
// #include "this include file name"
// };
// #undef NGRAPH_OP
//
// This sample expands to a list like this:
// "Abs",
// "Acos",
// ...
//
// #define NGRAPH_OP(a,b) b::a,
// std::vector<std::string> op_names{
// #include "this include file name"
// };
// #undef NGRAPH_OP
//
// This sample expands to a list like this:
// ngraph::op::Abs,
// ngraph::op::Acos,
// ...
//
// It's that easy. You can use this for fun and profit.
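//
// A further sketch in the same pattern (the variable name below is an assumption,
// not something defined elsewhere): collecting the type info of every listed op
// into a set for quick membership checks:
//
// #define NGRAPH_OP(a,b) b::a::type_info,
// std::set<ngraph::NodeTypeInfo> legacy_op_types{
// #include "this include file name"
// };
// #undef NGRAPH_OP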
#ifndef NGRAPH_OP
#warning "NGRAPH_OP not defined"
#define NGRAPH_OP(x, y)
#endif
NGRAPH_OP(Abs, ngraph::op)
NGRAPH_OP(Acos, ngraph::op)
NGRAPH_OP(Asin, ngraph::op)
NGRAPH_OP(Atan, ngraph::op)
NGRAPH_OP(AvgPool, ngraph::op::v0)
NGRAPH_OP(BatchNormInference, ngraph::op::v0)
NGRAPH_OP(Broadcast, ngraph::op::v1)
NGRAPH_OP(Ceiling, ngraph::op)
NGRAPH_OP(Clamp, ngraph::op)
NGRAPH_OP(Concat, ngraph::op)
NGRAPH_OP(Constant, ngraph::op)
NGRAPH_OP(Convert, ngraph::op)
NGRAPH_OP(Convolution, ngraph::op::v0)
NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v0)
NGRAPH_OP(Cos, ngraph::op)
NGRAPH_OP(Cosh, ngraph::op)
NGRAPH_OP(CumSum, ngraph::op::v0)
NGRAPH_OP(DepthToSpace, ngraph::op)
NGRAPH_OP(Elu, ngraph::op)
NGRAPH_OP(Erf, ngraph::op)
NGRAPH_OP(Exp, ngraph::op)
NGRAPH_OP(FakeQuantize, ngraph::op)
NGRAPH_OP(Floor, ngraph::op)
NGRAPH_OP(GRN, ngraph::op)
NGRAPH_OP(Gather, ngraph::op::v1)
NGRAPH_OP(Gelu, ngraph::op)
NGRAPH_OP(GroupConvolution, ngraph::op::v0)
NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v0)
NGRAPH_OP(HardSigmoid, ngraph::op)
NGRAPH_OP(Interpolate, ngraph::op::v0)
NGRAPH_OP(Log, ngraph::op)
NGRAPH_OP(LRN, ngraph::op)
NGRAPH_OP(LSTMSequence, ngraph::op::v0)
NGRAPH_OP(MatMul, ngraph::op)
NGRAPH_OP(MVN, ngraph::op)
NGRAPH_OP(Negative, ngraph::op)
NGRAPH_OP(NormalizeL2, ngraph::op::v0)
NGRAPH_OP(Parameter, ngraph::op)
NGRAPH_OP(PRelu, ngraph::op)
NGRAPH_OP(PriorBox, ngraph::op)
NGRAPH_OP(Range, ngraph::op)
NGRAPH_OP(Relu, ngraph::op)
NGRAPH_OP(Result, ngraph::op)
NGRAPH_OP(ReverseSequence, ngraph::op)
NGRAPH_OP(Selu, ngraph::op)
NGRAPH_OP(ShapeOf, ngraph::op)
NGRAPH_OP(ShuffleChannels, ngraph::op)
NGRAPH_OP(Sigmoid, ngraph::op)
NGRAPH_OP(Sign, ngraph::op)
NGRAPH_OP(Sin, ngraph::op)
NGRAPH_OP(Sinh, ngraph::op)
NGRAPH_OP(SpaceToDepth, ngraph::op)
NGRAPH_OP(Sqrt, ngraph::op)
NGRAPH_OP(SquaredDifference, ngraph::op)
NGRAPH_OP(Squeeze, ngraph::op)
NGRAPH_OP(Tan, ngraph::op)
NGRAPH_OP(Tanh, ngraph::op)
NGRAPH_OP(TensorIterator, ngraph::op)
NGRAPH_OP(Tile, ngraph::op::v0)
NGRAPH_OP(Unsqueeze, ngraph::op::v0)
NGRAPH_OP(Xor, ngraph::op)

View File

@ -1,80 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "fused_op_decomposition.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/provenance.hpp"
using namespace std;
using namespace ngraph;
NGRAPH_SUPPRESS_DEPRECATED_START
pass::FusedOpDecomposition::FusedOpDecomposition(op_query_t callback)
: m_has_direct_support{callback}
{
}
bool pass::FusedOpDecomposition::run_on_node(shared_ptr<Node> node)
{
bool modified = false;
if (op::supports_decompose(node))
{
if (m_has_direct_support && m_has_direct_support(*node))
{
// Op supported by backend. Do not decompose
return modified;
}
OutputVector output_vector = node->decompose_op();
NodeVector subgraph_outputs = as_node_vector(output_vector);
if (ngraph::get_provenance_enabled())
{
// Capture the input values as an edge for provenance
auto base_input_values = node->input_values();
auto provenance_tags = node->get_provenance_tags();
const std::string tag = "<Decomposed from " + std::string(node->get_type_name()) + ">";
provenance_tags.insert(tag);
// Transfer the new provenance tags to the newly created ops
for (auto output_node : subgraph_outputs)
{
output_node->add_provenance_tags_above(base_input_values, provenance_tags);
}
}
// Run recursively until no more fused ops
auto subgraph = extract_subgraph(subgraph_outputs, as_node_vector(node->input_values()));
for (auto subgraph_node : subgraph)
{
run_on_node(subgraph_node);
}
size_t i = 0;
for (auto output_node : subgraph_outputs)
{
for (size_t j = 0; j < output_node->outputs().size(); j++, i++)
{
std::set<Input<Node>> fop_users = node->outputs().at(i).get_target_inputs();
for (auto fop_user : fop_users)
{
fop_user.replace_source_output(output_node->output(j));
}
}
}
if (i != node->get_output_size())
{
throw ngraph_error("While replacing " + node->get_name() +
", mismatch between op output count and outputs of the decomposed "
"subgraph. Expected: " +
to_string(node->get_output_size()) + " Got: " + to_string(i));
}
modified = true;
}
return modified;
}

View File

@ -1,67 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include "backend_visibility.hpp"
#include "ngraph/pass/pass.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
namespace ngraph
{
namespace pass
{
/// \brief The FusedOpDecomposition pass is used to decompose a fused op
/// into a sub-graph of supported ops if the fused op is not supported by
/// the backend.
///
/// \details By default, the pass decomposes a fused op if it is not
/// supported by the backend and runs recursively until no more fused ops
/// can be found or the new ops are supported by the backend.
/// If the backend supports a fused op, then it can provide a callback
/// function while registering the pass. The callback function can then
/// provide logic to prevent decomposing the supported op.
/// It also adds provenance tags along the way to each op for easy reference
/// and debugging.
///
/// In the example shown below, the original graph has a fused GeLU op.
/// After applying this pass, the GeLU op is decomposed into group of ops which
/// together perform the same operation as GeLU.
/// <table>
/// <tr><th>Before the pass</th>
/// <th> After the pass</th>
/// </tr>
/// <tr>
/// <td> \image html decompose_gelu_pre.svg </td>
/// <td> \image html decompose_gelu_post.svg </td>
/// </tr>
/// </table>
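///
/// A minimal registration sketch (the callback body and the op it checks are
/// illustrative assumptions, not requirements of this pass):
/// \code
/// pass::Manager manager;
/// manager.register_pass<pass::FusedOpDecomposition>(
///     [](const Node& node) { return is_type<op::v0::Gelu>(&node); });
/// manager.run_passes(function);
/// \endcode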
class BACKEND_API FusedOpDecomposition : public NodePass
{
public:
/// \brief Function signature type for callback used to check whether provided node
/// is supported by backend.
using op_query_t = std::function<bool(const Node& node)>;
///
/// \brief Constructor for the Fused operation decomposition pass.
///
/// \param[in] callback The function object used to determine whether the current backend
/// provides direct support for the passed node. Should have signature:
/// bool fn(const Node&)
///
FusedOpDecomposition(op_query_t callback = nullptr);
bool run_on_node(std::shared_ptr<ngraph::Node> node) override;
private:
/// \brief A function returning whether the provided Node is supported by the current backend.
/// The returned bool value is used to decide whether to decompose the operator or not.
op_query_t m_has_direct_support = nullptr;
};
}
}
NGRAPH_SUPPRESS_DEPRECATED_END

View File

@ -1,61 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "implicit_broadcast_elimination.hpp"
#include "ngraph/builder/autobroadcast.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
#include "ngraph/op/util/binary_elementwise_comparison.hpp"
#include "ngraph/op/util/binary_elementwise_logical.hpp"
#include "ngraph/op/util/op_types.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
using namespace std;
using namespace ngraph;
bool ngraph::pass::ImplicitBroadcastElimination::run_on_node(std::shared_ptr<Node> node)
{
if (ngraph::op::supports_auto_broadcast(node))
{
if (node->get_autob().m_type != op::AutoBroadcastType::NONE)
{
auto new_args = pass::explicit_broadcast(node);
for (size_t i = 0; i < new_args.size(); i++)
{
node->input(i).replace_source_output(new_args[i]->output(0));
}
return true;
}
}
return false;
}
NodeVector ngraph::pass::explicit_broadcast(std::shared_ptr<Node>& node)
{
NodeVector rc;
if (ngraph::op::supports_auto_broadcast(node))
{
auto autob = node->get_autob();
if (autob.m_type == op::AutoBroadcastType::NONE)
{
for (auto& val : node->input_values())
rc.emplace_back(val.get_node_shared_ptr());
}
else if (autob.m_type == op::AutoBroadcastType::NUMPY)
{
rc = as_node_vector(builder::numpy_broadcast_outputs(node->input_values()));
}
else if (autob.m_type == op::AutoBroadcastType::PDPD)
{
rc = as_node_vector(builder::pdpd_broadcast(node->input_values(), autob.m_axis));
}
else
{
throw ngraph_error("Unsupported implicit broadcast type");
}
}
return rc;
}

View File

@ -1,28 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "backend_visibility.hpp"
#include "ngraph/node.hpp"
#include "ngraph/pass/pass.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
namespace ngraph
{
namespace pass
{
NodeVector explicit_broadcast(std::shared_ptr<Node>& node);
class ImplicitBroadcastElimination;
}
}
class BACKEND_API ngraph::pass::ImplicitBroadcastElimination : public ngraph::pass::NodePass
{
public:
bool run_on_node(std::shared_ptr<ngraph::Node> node) override;
};
NGRAPH_SUPPRESS_DEPRECATED_END

View File

@ -1,119 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <exception>
#include <sstream>
#include <unordered_set>
#include "liveness.hpp"
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/function.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/op/result.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
bool pass::Liveness::run_on_function(shared_ptr<Function> function)
{
auto ops = function->get_ordered_ops();
unordered_set<descriptor::Tensor*> persistent_tensors;
unordered_set<descriptor::Tensor*> output_tensors;
for (const shared_ptr<op::Parameter>& node : function->get_parameters())
{
for (auto& output : node->outputs())
{
descriptor::Tensor& tensor = output.get_tensor();
persistent_tensors.insert(&tensor);
}
}
for (const shared_ptr<op::Result>& node : function->get_results())
{
for (auto& output : node->outputs())
{
descriptor::Tensor& tensor = output.get_tensor();
persistent_tensors.insert(&tensor);
output_tensors.insert(&tensor);
}
}
for (const shared_ptr<Node>& node : ops)
{
if (auto constant_node = as_type_ptr<op::Constant>(node))
{
for (auto& output : constant_node->outputs())
{
descriptor::Tensor& tensor = output.get_tensor();
persistent_tensors.insert(&tensor);
}
}
}
unordered_set<descriptor::Tensor*> currently_live;
for (auto it = ops.rbegin(); it != ops.rend(); it++)
{
const shared_ptr<Node>& node = *it;
node->liveness_new_list.clear();
node->liveness_free_list.clear();
unordered_set<descriptor::Tensor*> input_tensor_decls;
for (auto& input : node->inputs())
{
descriptor::Tensor& tensor = input.get_tensor();
if (persistent_tensors.find(&tensor) == persistent_tensors.end())
{
input_tensor_decls.insert(&tensor);
}
}
unordered_set<descriptor::Tensor*> output_tensor_decls;
for (auto& output : node->outputs())
{
descriptor::Tensor& tensor = output.get_tensor();
if (persistent_tensors.find(&tensor) == persistent_tensors.end())
{
output_tensor_decls.insert(&tensor);
}
}
unordered_set<descriptor::Tensor*> free_tensor_decls;
unordered_set<descriptor::Tensor*> new_tensor_decls;
unordered_set<descriptor::Tensor*> all_tensor_decls = input_tensor_decls;
all_tensor_decls.insert(output_tensor_decls.begin(), output_tensor_decls.end());
for (descriptor::Tensor* tensor_decl : all_tensor_decls)
{
if (currently_live.find(tensor_decl) == currently_live.end())
{
// this is the last node in which this value is seen
// delete it at the end of the op
currently_live.insert(tensor_decl);
if (output_tensors.find(tensor_decl) == output_tensors.end())
{
// Don't free output tensors
free_tensor_decls.insert(tensor_decl);
}
}
}
for (descriptor::Tensor* output_decl : output_tensor_decls)
{
auto currently_live_it = currently_live.find(output_decl);
if (currently_live_it != currently_live.end())
{
new_tensor_decls.insert(output_decl);
currently_live.erase(currently_live_it);
}
}
node->liveness_free_list = free_tensor_decls;
node->liveness_new_list = new_tensor_decls;
}
return false;
}

View File

@ -1,23 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "backend_visibility.hpp"
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/pass/pass.hpp"
namespace ngraph
{
namespace pass
{
class Liveness;
}
}
class BACKEND_API ngraph::pass::Liveness : public FunctionPass
{
public:
bool run_on_function(std::shared_ptr<ngraph::Function>) override;
};

View File

@ -1,132 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <cstdint>
#include <functional>
#include <numeric>
#include "ngraph/builder/autobroadcast.hpp"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/ops.hpp"
#include "ngraph/provenance.hpp"
#include "ngraph/slice_plan.hpp"
#include "ngraph/type.hpp"
#include "ngraph/validation_util.hpp"
#include "op/avg_pool.hpp"
#include "pass/implicit_broadcast_elimination.hpp"
#include "pass/opset0_downgrade.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
using namespace std;
using namespace ngraph;
namespace opset0_downgrade
{
template <typename OpV0, typename OpV1>
shared_ptr<Node> op_cast_binary_elementwise_node(const shared_ptr<OpV1>& node)
{
const auto input_arg0 = node->input_value(0);
const auto input_arg1 = node->input_value(1);
const auto autob = node->get_autob();
auto replacement_node = make_shared<OpV0>(input_arg0, input_arg1, autob);
replace_node(node, replacement_node);
return replacement_node;
}
template <typename OpV0, typename OpV1>
shared_ptr<Node> op_cast_reduction_node(const shared_ptr<OpV1>& node)
{
auto replacement_node = make_shared<OpV0>(node->input_value(0), node->input_value(1));
if (node->get_keep_dims())
{
string v1_op_name = string{node->get_type_name()} + ":v1";
string v0_op_name = string{OpV0{}.get_type_name()} + ":v0";
NGRAPH_CHECK(node->reduction_axes_constant(),
"Unable to convert ",
v1_op_name,
" to ",
v0_op_name,
" if reduction axes are not constant (for keep_dims=true). Node: ",
*node);
auto output_pshape = replacement_node->get_output_partial_shape(0);
NGRAPH_CHECK(output_pshape.is_static(),
"Unable to convert ",
v1_op_name,
" to ",
v0_op_name,
" if output shape is dynamic (for keep_dims=true). Node: ",
*node);
const auto output_shape = output_pshape.to_shape();
auto reshaped_output_shape = output_shape;
for (const auto& axis : node->get_reduction_axes())
{
reshaped_output_shape.insert(reshaped_output_shape.begin() + axis, 1);
}
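        // Illustrative example (assumed shapes): reducing axes {1, 3} of a {2, 3, 4, 5}
        // input produces a {2, 4} result; re-inserting size-1 dims yields {2, 1, 4, 1},
        // matching what keep_dims=true would have produced.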
auto shape_pattern = op::Constant::create(
element::u64, {reshaped_output_shape.size()}, reshaped_output_shape);
auto reshaped_product =
make_shared<op::v1::Reshape>(replacement_node->output(0), shape_pattern, false);
return reshaped_product;
}
else
{
return replacement_node;
}
}
// Default is that we did nothing
shared_ptr<Node> op_cast(shared_ptr<Node> node) { return nullptr; }
shared_ptr<Node> op_cast(shared_ptr<op::v1::LogicalXor> node)
{
return op_cast_binary_elementwise_node<op::v0::Xor, op::v1::LogicalXor>(node);
}
using DispatchMap = map<NodeTypeInfo, std::function<bool(shared_ptr<Node> node)>>;
template <typename T>
bool op_cast_thunk(shared_ptr<Node> node)
{
auto downgraded_node = op_cast(as_type_ptr<T>(node));
if (downgraded_node)
{
if (ngraph::get_provenance_enabled())
{
const std::string provenance_tag =
"<Opset0_Downgrade (v1 " + std::string(node->get_type_name()) + ")>";
downgraded_node->add_provenance_tags_above(node->input_values(), {provenance_tag});
}
return true;
}
return false;
}
DispatchMap& get_dispatch_map()
{
static DispatchMap dispatch_map{
#define NGRAPH_OP(NAME, NAMESPACE) {NAMESPACE::NAME::type_info, op_cast_thunk<NAMESPACE::NAME>},
#include "ngraph/opsets/opset1_tbl.hpp"
#undef NGRAPH_OP
};
return dispatch_map;
}
} // namespace opset0_downgrade
bool pass::Opset0Downgrade::run_on_node(shared_ptr<Node> node)
{
bool modified = false;
auto& dispatch_map = opset0_downgrade::get_dispatch_map();
auto it = dispatch_map.find(node->get_type_info());
if (it != dispatch_map.end())
{
modified = it->second(node);
}
return modified;
}

View File

@ -1,31 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "backend_visibility.hpp"
#include "ngraph/pass/pass.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
namespace ngraph
{
namespace pass
{
class BACKEND_API Opset0Downgrade : public NodePass
{
public:
///
/// \brief Constructor for the Opset0Downgrade transformation pass.
///
/// \details This transformation pass iterates over all nodes in a graph
/// and updates version 1 ops to their version 0 equivalents.
/// All ops in the final graph have op version 0.
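///
/// A typical invocation sketch (the function variable `f` is an assumption):
/// \code
/// pass::Manager manager;
/// manager.register_pass<pass::Opset0Downgrade>();
/// manager.run_passes(f);
/// \endcode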
Opset0Downgrade() = default;
bool run_on_node(std::shared_ptr<ngraph::Node> node) override;
};
}
}
NGRAPH_SUPPRESS_DEPRECATED_END

View File

@ -1,128 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include "ngraph/node.hpp"
#include "ngraph/ops.hpp"
#include "ngraph/provenance.hpp"
#include "ngraph/validation_util.hpp"
#include "opset1_downgrade.hpp"
using namespace std;
using namespace ngraph;
namespace opset1_downgrade
{
shared_ptr<Node> op_cast(shared_ptr<op::v3::Broadcast> node)
{
const auto data = node->input_value(0).get_node_shared_ptr();
const auto target_shape = node->input_value(1).get_node_shared_ptr();
shared_ptr<Node> replacement_node;
switch (node->get_broadcast_spec().m_type)
{
case op::BroadcastType::BIDIRECTIONAL:
{
const auto const_filled_with_ones = make_shared<op::v1::Broadcast>(
op::Constant::create(data->get_element_type(), {}, {1}), target_shape);
if (const_filled_with_ones->get_element_type() == element::boolean)
{
replacement_node = make_shared<op::v1::LogicalOr>(data, const_filled_with_ones);
}
else
{
replacement_node = make_shared<op::v1::Multiply>(data, const_filled_with_ones);
}
break;
}
case op::BroadcastType::EXPLICIT:
{
const auto axes_mapping = node->input_value(2).get_node_shared_ptr();
replacement_node = make_shared<op::v1::Broadcast>(
data, target_shape, axes_mapping, op::AutoBroadcastType::EXPLICIT);
break;
}
case op::BroadcastType::NUMPY:
{
replacement_node =
make_shared<op::v1::Broadcast>(data, target_shape, op::AutoBroadcastType::NUMPY);
break;
}
case op::BroadcastType::PDPD:
{
op::AutoBroadcastSpec broadcast_spec;
broadcast_spec.m_type = op::AutoBroadcastType::PDPD;
broadcast_spec.m_axis = node->get_broadcast_spec().m_axis;
replacement_node = make_shared<op::v1::Broadcast>(data, target_shape, broadcast_spec);
break;
}
default:
{
NGRAPH_CHECK(
false,
"Unsupported broadcast type during Broadcast:v3 to Broadcast:v1 conversion. ",
"Node: ",
*node);
}
}
replace_node(node, replacement_node);
return replacement_node;
}
shared_ptr<Node> op_cast(shared_ptr<op::v3::TopK> node)
{
const auto data = node->input_value(0);
const auto k = node->input_value(1);
const auto replacement_node = make_shared<op::v1::TopK>(data,
k,
node->get_axis(),
node->get_mode(),
node->get_sort_type(),
node->get_index_element_type());
replace_node(node, replacement_node);
return replacement_node;
}
using DispatchMap = map<NodeTypeInfo, std::function<bool(shared_ptr<Node> node)>>;
template <typename T>
bool op_cast_thunk(shared_ptr<Node> node)
{
auto downgraded_node = op_cast(as_type_ptr<T>(node));
if (downgraded_node)
{
if (ngraph::get_provenance_enabled())
{
const std::string provenance_tag =
"<Opset1_Downgrade (v3 " + std::string(node->get_type_name()) + ")>";
downgraded_node->add_provenance_tags_above(node->input_values(), {provenance_tag});
}
return true;
}
return false;
}
DispatchMap& get_dispatch_map()
{
static DispatchMap dispatch_map{
#define NGRAPH_OP(NAME, NAMESPACE) {NAMESPACE::NAME::type_info, op_cast_thunk<NAMESPACE::NAME>},
NGRAPH_OP(Broadcast, op::v3) NGRAPH_OP(TopK, op::v3)
#undef NGRAPH_OP
};
return dispatch_map;
}
} // namespace opset1_downgrade
bool pass::Opset1Downgrade::run_on_node(shared_ptr<Node> node)
{
bool modified = false;
auto& dispatch_map = opset1_downgrade::get_dispatch_map();
auto it = dispatch_map.find(node->get_type_info());
if (it != dispatch_map.end())
{
modified = it->second(node);
}
return modified;
}

View File

@ -1,31 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "backend_visibility.hpp"
#include "ngraph/pass/pass.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
namespace ngraph
{
namespace pass
{
class BACKEND_API Opset1Downgrade : public NodePass
{
public:
///
/// \brief Constructor for the Opset1Downgrade transformation pass.
///
/// \details This transformation pass iterates over all nodes in a graph
/// and updates version 3 ops to their version 1 equivalents.
/// All ops in the final graph have op version 1.
Opset1Downgrade() = default;
bool run_on_node(std::shared_ptr<ngraph::Node> node) override;
};
}
}
NGRAPH_SUPPRESS_DEPRECATED_END

View File

@ -1,222 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "opset1_upgrade.hpp"
#include <functional>
#include <iterator>
#include <limits>
#include <numeric>
#include "ngraph/builder/autobroadcast.hpp"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/ops.hpp"
#include "ngraph/provenance.hpp"
#include "op/avg_pool.hpp"
#include "op/convolution.hpp"
#include "op/group_conv.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
using namespace std;
using namespace ngraph;
namespace opset1_upgrade
{
template <typename OpV0, typename OpV1>
shared_ptr<Node> op_cast_binary_elementwise_node(const shared_ptr<OpV0>& node)
{
const auto autob = node->get_autob();
auto replacement_node =
make_shared<OpV1>(node->input_value(0), node->input_value(1), autob);
replace_node(node, replacement_node);
return replacement_node;
}
// Default is that we did nothing
shared_ptr<Node> op_cast(shared_ptr<Node> node) { return nullptr; }
shared_ptr<Node> op_cast(shared_ptr<op::v0::ConvolutionBackpropData> node)
{
auto data_batch_shape = node->get_data_batch_shape();
auto strides = node->get_window_movement_strides_forward();
auto dilations = node->get_window_dilation_strides_forward();
auto pads_begin = node->get_padding_below_forward();
auto pads_end = node->get_padding_above_forward();
auto data_dilation_strides = node->get_data_dilation_strides_forward();
bool is_dds_valid = all_of(data_dilation_strides.begin(),
data_dilation_strides.end(),
[](size_t value) { return value == 1; });
NGRAPH_CHECK(is_dds_valid,
"Unable to convert ConvolutionBackpropData:0 to ConvolutionBackpropData:1 "
"with data dilation strides "
"other than `1`. Node: ",
*node);
auto replacement_node = make_shared<op::v1::ConvolutionBackpropData>(
node->input_value(1), // data
node->input_value(0), // filters
op::Constant::create(
element::i64,
Shape{data_batch_shape.size() - 2},
vector<size_t>(data_batch_shape.begin() + 2, data_batch_shape.end())),
strides,
pads_begin,
pads_end,
dilations);
replace_node(node, replacement_node);
return replacement_node;
}
shared_ptr<Node> op_cast(shared_ptr<op::v0::GroupConvolution> node)
{
auto strides = node->get_window_movement_strides();
auto dilations = node->get_window_dilation_strides();
auto pads_begin = node->get_padding_below();
auto pads_end = node->get_padding_above();
auto data_dilation_strides = node->get_data_dilation_strides();
auto auto_pad = node->get_pad_type();
bool is_dds_valid = all_of(data_dilation_strides.begin(),
data_dilation_strides.end(),
[](size_t value) { return value == 1; });
NGRAPH_CHECK(is_dds_valid,
"Unable to convert GroupConvolution:0 to GroupConvolution:1 "
"with data dilation strides other than `1`. Node: ",
*node);
shared_ptr<Node> replacement_node;
if (node->has_groups_in_filters())
{
replacement_node = make_shared<op::v1::GroupConvolution>(node->input_value(0),
node->input_value(1),
strides,
pads_begin,
pads_end,
dilations,
auto_pad);
}
else
{
NGRAPH_CHECK(node->get_input_partial_shape(1).is_static(),
"Unable to convert GroupConvolution:0 to GroupConvolution:1 "
"with dynamic filters shape. Node: ",
*node);
auto filters_shape = node->get_input_shape(1);
auto groups = node->get_groups();
filters_shape[0] /= groups;
filters_shape.insert(filters_shape.begin(), groups);
auto reshaped_filters = builder::opset1::reshape(node->input_value(1), filters_shape);
replacement_node = make_shared<op::v1::GroupConvolution>(node->input_value(0),
reshaped_filters,
strides,
pads_begin,
pads_end,
dilations,
auto_pad);
}
replace_node(node, replacement_node);
return replacement_node;
}
shared_ptr<Node> op_cast(shared_ptr<op::v0::GroupConvolutionBackpropData> node)
{
const auto strides = node->get_window_movement_strides();
const auto dilations = node->get_window_dilation_strides();
const auto pads_begin = node->get_padding_below();
const auto pads_end = node->get_padding_above();
const auto data_batch_pshape = node->get_input_partial_shape(0);
const auto filters_pshape = node->get_input_partial_shape(1);
NGRAPH_CHECK(data_batch_pshape.is_static(),
"Unable to convert GroupConvolutionBackpropData:0 to "
"GroupConvolutionBackpropData:1 with dynamic data_batch shape. Node: ",
*node);
NGRAPH_CHECK(filters_pshape.is_static(),
"Unable to convert GroupConvolutionBackpropData:0 to "
"GroupConvolutionBackpropData:1 with dynamic filters shape. Node: ",
*node);
auto data_batch_shape = data_batch_pshape.to_shape();
// Remove N, C from output shape to preserve only spatial dimensions.
data_batch_shape.erase(std::begin(data_batch_shape),
std::next(std::begin(data_batch_shape), 2));
auto filters_shape = filters_pshape.to_shape();
auto groups = node->get_groups();
filters_shape[0] /= groups;
filters_shape.insert(filters_shape.begin(), groups);
auto reshaped_filters = builder::opset1::reshape(node->input_value(1), filters_shape);
auto replacement_node = make_shared<op::v1::GroupConvolutionBackpropData>(
node->input_value(2),
reshaped_filters,
op::Constant::create(element::i64, Shape{data_batch_shape.size()}, data_batch_shape),
strides,
pads_begin,
pads_end,
dilations);
replace_node(node, replacement_node);
return replacement_node;
}
shared_ptr<Node> op_cast(shared_ptr<op::Xor> node)
{
auto replacement_node = make_shared<op::v1::LogicalXor>(
node->input_value(0), node->input_value(1), node->get_autob());
replace_node(node, replacement_node);
return replacement_node;
}
using DispatchMap = map<NodeTypeInfo, std::function<bool(shared_ptr<Node> node)>>;
template <typename T>
bool op_cast_thunk(shared_ptr<Node> node)
{
auto upgraded_node = op_cast(as_type_ptr<T>(node));
if (upgraded_node)
{
if (ngraph::get_provenance_enabled())
{
const std::string provenance_tag =
"<Opset1_Upgrade (v0 " + std::string(node->get_type_name()) + ")>";
upgraded_node->add_provenance_tags_above(node->input_values(), {provenance_tag});
}
return true;
}
return false;
}
DispatchMap& get_dispatch_map()
{
NGRAPH_SUPPRESS_DEPRECATED_START
static DispatchMap dispatch_map{
#define NGRAPH_OP(NAME, NAMESPACE) {NAMESPACE::NAME::type_info, op_cast_thunk<NAMESPACE::NAME>},
#include "opset0_tbl.hpp"
#undef NGRAPH_OP
};
return dispatch_map;
NGRAPH_SUPPRESS_DEPRECATED_END
}
} // namespace opset1_upgrade
bool pass::Opset1Upgrade::run_on_node(shared_ptr<Node> node)
{
bool modified = false;
auto& dispatch_map = opset1_upgrade::get_dispatch_map();
auto it = dispatch_map.find(node->get_type_info());
if (it != dispatch_map.end())
{
modified = it->second(node);
}
return modified;
}

View File

@ -1,31 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "backend_visibility.hpp"
#include "ngraph/pass/pass.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
namespace ngraph
{
namespace pass
{
class BACKEND_API Opset1Upgrade : public NodePass
{
public:
///
/// \brief Constructor for the Opset1Upgrade transformation pass.
///
/// \details This transformation pass iterates over all nodes in a graph
/// and updates version 0 ops to their version 1 equivalents.
/// All ops in the final graph have op version 1.
Opset1Upgrade() = default;
bool run_on_node(std::shared_ptr<ngraph::Node> node) override;
};
}
}
NGRAPH_SUPPRESS_DEPRECATED_END

View File

@ -13,7 +13,6 @@
#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset6.hpp"
#include "ngraph/pass/manager.hpp"
#include "pass/liveness.hpp"
#include "util/test_tools.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
@ -21,66 +20,6 @@ NGRAPH_SUPPRESS_DEPRECATED_START
using namespace std;
using namespace ngraph;
TEST(tensor, size)
{
pass::Manager pass_manager;
pass_manager.register_pass<pass::Liveness>();
{
auto arg0 = make_shared<op::Parameter>(element::f32, Shape{2, 3});
auto add = make_shared<op::v1::Add>(arg0, arg0);
auto f0 = make_shared<Function>(add, ParameterVector{arg0});
pass_manager.run_passes(f0);
ASSERT_EQ(1, arg0->get_output_size());
descriptor::Tensor& output = arg0->get_output_tensor(0);
EXPECT_EQ(2 * 3 * 4, output.size());
}
{
auto arg0 = make_shared<op::Parameter>(element::f32, Shape{});
auto add = make_shared<op::v1::Add>(arg0, arg0);
auto f0 = make_shared<Function>(add, ParameterVector{arg0});
pass_manager.run_passes(f0);
ASSERT_EQ(1, arg0->get_output_size());
descriptor::Tensor& output = arg0->get_output_tensor(0);
EXPECT_EQ(1 * 4, output.size());
}
{
auto arg0 = make_shared<op::Parameter>(element::f32, Shape{1});
auto add = make_shared<op::v1::Add>(arg0, arg0);
auto f0 = make_shared<Function>(add, ParameterVector{arg0});
pass_manager.run_passes(f0);
ASSERT_EQ(1, arg0->get_output_size());
descriptor::Tensor& output = arg0->get_output_tensor(0);
EXPECT_EQ(1 * 4, output.size());
}
}
TEST(tensor, output_flag)
{
pass::Manager pass_manager;
pass_manager.register_pass<pass::Liveness>();
auto arg0 = make_shared<op::Parameter>(element::f32, Shape{1});
auto add = make_shared<op::v1::Add>(arg0, arg0);
auto f0 = make_shared<Function>(add, ParameterVector{arg0});
pass_manager.run_passes(f0);
for (size_t i = 0; i < f0->get_output_size(); ++i)
{
EXPECT_TRUE(op::is_output(f0->get_output_op(i)));
}
}
TEST(tensor, tensor_names)
{
auto arg0 = make_shared<opset6::Parameter>(element::f32, Shape{1});

File diff suppressed because it is too large

View File

@ -16,260 +16,11 @@
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "op/convolution.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
// ---------------------------- v0 ----------------------------
TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce)
{
// Deduce type
Shape data_batch_shape{64, 3, 100};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10}); // filters
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 91}); // output delta
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
Strides{1},
Strides{1},
CoordinateDiff{0},
CoordinateDiff{0},
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), data_batch_shape);
EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
}
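The 91 in the output-delta shape is the spatial length the corresponding forward convolution would produce; a quick worked check, assuming the standard convolution output-length formula:

// Worked check of the output-delta length used above:
//   dilated_filter = (filter - 1) * window_dilation + 1 = (10 - 1) * 1 + 1 = 10
//   out = floor((data + pad_below + pad_above - dilated_filter) / stride) + 1
//       = floor((100 + 0 + 0 - 10) / 1) + 1 = 91
// which matches the Shape{64, 128, 91} output delta passed to the op.

The padded, strided, and dilated variants below follow the same arithmetic with the corresponding parameters substituted.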
TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_padded)
{
// Deduce type
Shape data_batch_shape{64, 3, 100};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10}); // filters
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 96}); // output delta
auto move_strides = Strides{1};
auto dilation_strides = Strides{1};
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
dilation_strides,
padding_below,
padding_above,
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), data_batch_shape);
EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{2});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
}
TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_strided)
{
// Deduce type
Shape data_batch_shape{64, 3, 100};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10}); // filters
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 46}); // output delta
auto move_strides = Strides{2};
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
Strides{1},
CoordinateDiff{0},
CoordinateDiff{0},
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), data_batch_shape);
EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
}
TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_strided_padded)
{
// Deduce type
Shape data_batch_shape{64, 3, 100};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10}); // filters
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 48}); // output delta
auto move_strides = Strides{2};
auto dilation_strides = Strides{1};
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
dilation_strides,
padding_below,
padding_above,
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), data_batch_shape);
EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{2});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
}
TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_strided_small_uneven)
{
// Deduce type
Shape data_batch_shape{64, 3, 5};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2}); // filters
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 2}); // output delta
auto move_strides = Strides{2};
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
Strides{1},
CoordinateDiff{0},
CoordinateDiff{0},
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), data_batch_shape);
EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
}
TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_strided_small_even)
{
// Deduce type
Shape data_batch_shape{64, 3, 6};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2}); // filters
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 3}); // output delta
auto move_strides = Strides{2};
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
Strides{1},
CoordinateDiff{0},
CoordinateDiff{0},
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), data_batch_shape);
EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
}
TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_window_dilated)
{
// Deduce type
Shape data_batch_shape{64, 3, 100};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10}); // filters
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 82}); // output delta
auto move_strides = Strides{1};
auto dilate_strides = Strides{2};
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
dilate_strides,
CoordinateDiff{0},
CoordinateDiff{0},
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), data_batch_shape);
EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{2});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
}
TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_window_dilated_padded)
{
// Deduce type
Shape data_batch_shape{64, 3, 100};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10}); // filters
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 87}); // output delta
auto move_strides = Strides{1};
auto dilate_strides = Strides{2};
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
dilate_strides,
padding_below,
padding_above,
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), data_batch_shape);
EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{2});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{2});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
}
TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_window_dilated_data_dilated_padded)
{
// Deduce type
Shape data_batch_shape{64, 3, 100};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10}); // filters
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 285}); // output delta
auto move_strides = Strides{1};
auto dilate_strides = Strides{2};
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto data_dilate_strides = Strides{3};
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
dilate_strides,
padding_below,
padding_above,
data_dilate_strides);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), data_batch_shape);
EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{2});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{3});
EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{2});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
}
// ---------------------------- v1 ----------------------------
TEST(type_prop, convolution_backprop_data_partial_auto_padding_upper)
{
@@ -338,7 +89,7 @@ TEST(type_prop, convolution_backprop_data_auto_pad_explicit_with_output_padding)
auto filters = make_shared<op::Parameter>(inputs_et, filters_pshape);
auto conv_backprop = make_shared<op::v1::ConvolutionBackpropData>(
data, filters, strides, padding_begin, padding_end, dilations, auto_pad, output_padding);
ASSERT_TRUE(conv_backprop->get_output_partial_shape(0).same_scheme(PartialShape{1, 6, 4, 4}));
ASSERT_EQ(conv_backprop->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(conv_backprop->get_pads_end(), (CoordinateDiff{1, 1}));
@@ -361,9 +112,16 @@ TEST(type_prop, convolution_backprop_data_auto_pad_same_with_output_padding_and_
auto data = make_shared<op::Parameter>(inputs_et, data_pshape);
auto filters = make_shared<op::Parameter>(inputs_et, filters_pshape);
auto output_shape = op::Constant::create(element::i64, Shape{2}, {3, 3});
auto conv_backprop = make_shared<op::v1::ConvolutionBackpropData>(
data, filters, output_shape, strides, padding_begin, padding_end, dilations, auto_pad, output_padding);
auto conv_backprop = make_shared<op::v1::ConvolutionBackpropData>(data,
filters,
output_shape,
strides,
padding_begin,
padding_end,
dilations,
auto_pad,
output_padding);
ASSERT_TRUE(conv_backprop->get_output_partial_shape(0).same_scheme(PartialShape{1, 6, 3, 3}));
ASSERT_EQ(conv_backprop->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(conv_backprop->get_pads_end(), (CoordinateDiff{2, 2}));
@@ -807,13 +565,15 @@ TEST(type_prop, convolution_backprop_data_invalid_et_inputs)
// output shape input element type must be of integer type
FAIL() << "Invalid element type of output_shape input not detected";
}
catch(const NodeValidationFailure& error)
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Element type for output shape should be of integer type");
EXPECT_HAS_SUBSTRING(error.what(),
"Element type for output shape should be of integer type");
}
catch (...)
{
FAIL() << "Element type of output_shape input validation check failed for unexpected reason";
FAIL()
<< "Element type of output_shape input validation check failed for unexpected reason";
}
}
@@ -899,9 +659,8 @@ TEST(type_prop, convolution_backprop_data_invalid_input_ranks)
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Spatial shape of output input must be of rank 1"));
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Spatial shape of output input must be of rank 1"));
}
catch (...)
{
@@ -930,7 +689,9 @@ TEST(type_prop, convolution_backprop_data_invalid_input_channel_dims)
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Input channels dimension of data and filters inputs must be equal"));
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Input channels dimension of data and filters inputs must be equal"));
}
catch (...)
{
@@ -1159,4 +920,4 @@ TEST(type_prop, convolution_backprop_data_invalid_conv_param_spatial_dims)
{
FAIL() << "Output padding spatial dimensions validation check failed for unexpected reason";
}
}
}

View File

@@ -6,7 +6,6 @@
#include "ngraph/opsets/opset.hpp"
#include "ngraph/pass/manager.hpp"
#include "pass/opset1_upgrade.hpp"
#include "shared_utils.hpp"
using namespace ngraph;
@@ -178,7 +177,6 @@ namespace
test::IE_Engine::IE_Engine(const std::shared_ptr<Function> function, const char* device)
: m_function{function}
{
upgrade_and_validate_function(m_function);
const auto cnn_network = InferenceEngine::CNNNetwork(m_function);
m_network_inputs = cnn_network.getInputsInfo();
m_network_outputs = cnn_network.getOutputsInfo();
@@ -200,7 +198,7 @@ void test::IE_Engine::infer()
if (m_network_inputs.size() != m_allocated_inputs)
{
IE_THROW() << "The tested graph has " << m_network_inputs.size() << " inputs, but "
<< m_allocated_inputs << " were passed.";
<< m_allocated_inputs << " were passed.";
}
else
{
@@ -294,26 +292,6 @@ testing::AssertionResult
return comparison_result;
}
std::shared_ptr<Function>
test::IE_Engine::upgrade_and_validate_function(const std::shared_ptr<Function> function) const
{
pass::Manager passes;
passes.register_pass<pass::Opset1Upgrade>();
passes.run_passes(function);
static std::set<NodeTypeInfo> ie_ops = get_ie_ops();
for (const auto& node : function->get_ops())
{
if (ie_ops.find(node->get_type_info()) == ie_ops.end())
{
IE_THROW() << "Unsupported operator detected in the graph: "
<< node->get_type_info().name;
}
}
return function;
}
std::set<NodeTypeInfo> test::IE_Engine::get_ie_ops() const
{
std::set<NodeTypeInfo> ie_ops = get_opset1().get_type_info_set();
@@ -341,8 +319,8 @@ void test::IE_Engine::reset()
namespace InferenceEngine
{
// Without this section the linker is not able to find destructors for missing TBlob specializations
// which are instantiated in the unit tests that use TestCase and this engine
// Without this section the linker is not able to find destructors for missing TBlob
// specializations which are instantiated in the unit tests that use TestCase and this engine
template <typename T, typename U>
TBlob<T, U>::~TBlob()
{

View File

@@ -113,11 +113,6 @@ namespace ngraph
unsigned int m_allocated_inputs = 0;
unsigned int m_allocated_expected_outputs = 0;
/// Upgrades functions containing legacy opset0 to opset1
/// and checks if the graph can be executed
std::shared_ptr<Function>
upgrade_and_validate_function(const std::shared_ptr<Function> function) const;
/// Retrieves a set of all ops IE can execute
std::set<NodeTypeInfo> get_ie_ops() const;
@@ -160,5 +155,5 @@ namespace ngraph
{
static constexpr bool value = true;
};
}
}
} // namespace test
} // namespace ngraph