Add ngraph shell for AdaptivePool operations (#6126)

Apply review feedback

Fix code style

Add visitor tests

Check visitor for type attribute
Maxim Vafin 2021-06-22 15:00:27 +03:00 committed by GitHub
parent b0e932567d
commit 1bc18d8f4e
12 changed files with 553 additions and 0 deletions


@@ -0,0 +1,43 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/attr_types.hpp"
namespace ngraph
{
namespace op
{
namespace v8
{
/// \brief Adaptive average pooling operation.
///
class NGRAPH_API AdaptiveAvgPool : public Op
{
public:
NGRAPH_RTTI_DECLARATION;
AdaptiveAvgPool() = default;
///
/// \brief Constructs adaptive average pooling operation.
///
/// \param data Input data
///
/// \param output_shape 1D tensor describing output shape for spatial
/// dimensions.
///
AdaptiveAvgPool(const Output<Node>& data, const Output<Node>& output_shape);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
};
} // namespace v8
} // namespace op
} // namespace ngraph
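A minimal construction sketch for the declared op (illustrative only, not part of the diff; the input shape and target spatial size are arbitrary):

// Pool a 1x6x8x9 input down to a 5x7 spatial output.
auto data = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32,
                                                    ngraph::Shape{1, 6, 8, 9});
auto out_shape = ngraph::op::Constant::create<int64_t>(
    ngraph::element::i64, ngraph::Shape{2}, {5, 7});
auto pool = std::make_shared<ngraph::op::v8::AdaptiveAvgPool>(data, out_shape);
// Inferred shape of output 0: {1, 6, 5, 7} (batch and channels kept,
// spatial dims taken from the constant output_shape input).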


@@ -0,0 +1,54 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/attr_types.hpp"
namespace ngraph
{
namespace op
{
namespace v8
{
/// \brief Adaptive max pooling operation.
///
class NGRAPH_API AdaptiveMaxPool : public Op
{
public:
NGRAPH_RTTI_DECLARATION;
AdaptiveMaxPool() = default;
///
/// \brief Constructs adaptive max pooling operation.
///
/// \param data Input data
///
/// \param output_shape 1D tensor describing output shape for spatial
/// dimensions.
///
/// \param index_element_type Specifies the output tensor type for indices
/// output
///
AdaptiveMaxPool(
const Output<Node>& data,
const Output<Node>& output_shape,
const ngraph::element::Type& index_element_type = ngraph::element::i64);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
element::Type get_index_element_type() const { return m_index_element_type; }
protected:
ngraph::element::Type m_index_element_type = ngraph::element::i64;
};
} // namespace v8
} // namespace op
} // namespace ngraph
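And a matching sketch for AdaptiveMaxPool (illustrative only), showing the optional index element type and the second output:

// Same pooling, but with i32 indices on output 1.
auto data = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32,
                                                    ngraph::Shape{1, 6, 8, 9});
auto out_shape = ngraph::op::Constant::create<int64_t>(
    ngraph::element::i64, ngraph::Shape{2}, {5, 7});
auto pool = std::make_shared<ngraph::op::v8::AdaptiveMaxPool>(data, out_shape,
                                                              ngraph::element::i32);
// Output 0 holds the pooled values, output 1 the selected indices (i32 here);
// both are inferred as {1, 6, 5, 7}.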


@@ -9,6 +9,8 @@
#include "ngraph/op/abs.hpp"
#include "ngraph/op/acos.hpp"
#include "ngraph/op/acosh.hpp"
#include "ngraph/op/adaptive_avg_pool.hpp"
#include "ngraph/op/adaptive_max_pool.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/and.hpp"
#include "ngraph/op/asin.hpp"


@@ -124,4 +124,5 @@ namespace ngraph
const NGRAPH_API OpSet& get_opset5();
const NGRAPH_API OpSet& get_opset6();
const NGRAPH_API OpSet& get_opset7();
const NGRAPH_API OpSet& get_opset8();
} // namespace ngraph


@@ -177,3 +177,5 @@ NGRAPH_OP(Roll, ngraph::op::v7)
// New operations added in opset8
NGRAPH_OP(Gather, ngraph::op::v8)
NGRAPH_OP(AdaptiveAvgPool, ngraph::op::v8)
NGRAPH_OP(AdaptiveMaxPool, ngraph::op::v8)


@@ -0,0 +1,73 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/adaptive_avg_pool.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v8::AdaptiveAvgPool, "AdaptiveAvgPool", 8);
op::v8::AdaptiveAvgPool::AdaptiveAvgPool(const Output<Node>& data, const Output<Node>& output_shape)
: Op({data, output_shape})
{
constructor_validate_and_infer_types();
}
bool op::v8::AdaptiveAvgPool::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v8_AdaptiveAvgPool_visit_attributes);
return true;
}
void op::v8::AdaptiveAvgPool::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v8_AdaptiveAvgPool_validate_and_infer_types);
const PartialShape& data_shape = get_input_partial_shape(0);
NODE_VALIDATION_CHECK(this,
data_shape.rank().compatible(3) || data_shape.rank().compatible(4) ||
data_shape.rank().compatible(5),
"Expected a 3D, 4D or 5D tensor for the input. Got: ",
data_shape);
auto output_shape = PartialShape::dynamic(data_shape.rank());
if (data_shape.rank().is_static())
{
if (data_shape[0].is_static())
{
output_shape[0] = data_shape[0]; // batch size
}
if (data_shape[1].is_static())
{
output_shape[1] = data_shape[1]; // channel size
}
if (const auto& const_output_shape = get_constant_from_source(input_value(1)))
{
auto output_spatial_shape = const_output_shape->cast_vector<int64_t>();
NODE_VALIDATION_CHECK(this,
(size_t)data_shape.rank().get_length() ==
2 + output_spatial_shape.size(),
"Output shape is not compatible with input data rank");
int i = 2;
for (auto& dim : output_spatial_shape)
{
output_shape[i++] = dim;
}
}
}
set_output_type(0, get_input_element_type(0), output_shape);
}
shared_ptr<Node> op::v8::AdaptiveAvgPool::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v8_AdaptiveAvgPool_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<v8::AdaptiveAvgPool>(new_args.at(0), new_args.at(1));
}
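The shape inference above keeps the batch and channel dimensions from the data input and fills the spatial dimensions from the second input only when it can be folded to a constant; otherwise the spatial dimensions stay dynamic. An illustrative sketch of the non-constant case (not part of the diff):

// When output_shape is a dynamic Parameter rather than a Constant,
// the spatial dimensions of the result remain dynamic.
auto data = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32,
                                                    ngraph::PartialShape{1, 6, 8, 9});
auto out_shape = std::make_shared<ngraph::op::Parameter>(ngraph::element::i64,
                                                         ngraph::Shape{2});
auto pool = std::make_shared<ngraph::op::v8::AdaptiveAvgPool>(data, out_shape);
// pool->get_output_partial_shape(0) -> {1, 6, ?, ?}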


@@ -0,0 +1,83 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/adaptive_max_pool.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::v8::AdaptiveMaxPool, "AdaptiveMaxPool", 8);
op::v8::AdaptiveMaxPool::AdaptiveMaxPool(const Output<Node>& data,
const Output<Node>& output_shape,
const ngraph::element::Type& index_element_type)
: Op({data, output_shape})
, m_index_element_type{index_element_type}
{
constructor_validate_and_infer_types();
}
bool op::v8::AdaptiveMaxPool::visit_attributes(AttributeVisitor& visitor)
{
NGRAPH_OP_SCOPE(v8_AdaptiveMaxPool_visit_attributes);
visitor.on_attribute("index_element_type", m_index_element_type);
return true;
}
void op::v8::AdaptiveMaxPool::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v8_AdaptiveMaxPool_validate_and_infer_types);
NODE_VALIDATION_CHECK(this,
m_index_element_type == element::i64 ||
m_index_element_type == element::i32,
"Index element type must be i32 or i64");
const PartialShape& data_shape = get_input_partial_shape(0);
NODE_VALIDATION_CHECK(this,
data_shape.rank().compatible(3) || data_shape.rank().compatible(4) ||
data_shape.rank().compatible(5),
"Expected a 3D, 4D or 5D tensor for the input. Got: ",
data_shape);
auto output_shape = PartialShape::dynamic(data_shape.rank());
if (data_shape.rank().is_static())
{
if (data_shape[0].is_static())
{
output_shape[0] = data_shape[0]; // batch size
}
if (data_shape[1].is_static())
{
output_shape[1] = data_shape[1]; // channel size
}
if (const auto& const_output_shape = get_constant_from_source(input_value(1)))
{
auto output_spatial_shape = const_output_shape->cast_vector<int64_t>();
NODE_VALIDATION_CHECK(this,
(size_t)data_shape.rank().get_length() ==
2 + output_spatial_shape.size(),
"Output shape is not compatible with input data rank");
int i = 2;
for (auto& dim : output_spatial_shape)
{
output_shape[i++] = dim;
}
}
}
set_output_type(0, get_input_element_type(0), output_shape);
set_output_type(1, m_index_element_type, output_shape);
}
shared_ptr<Node> op::v8::AdaptiveMaxPool::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v8_AdaptiveMaxPool_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<v8::AdaptiveMaxPool>(new_args.at(0), new_args.at(1), m_index_element_type);
}
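The extra check in AdaptiveMaxPool restricts the index output type to i32 or i64; anything else fails validation at construction time. A sketch of the rejected case (illustrative, not part of the diff; mirrors the gtest style used in the tests below):

// Requesting an unsupported index element type is rejected during
// constructor_validate_and_infer_types().
auto data = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32,
                                                    ngraph::Shape{1, 6, 8, 9});
auto out_shape = ngraph::op::Constant::create<int64_t>(
    ngraph::element::i64, ngraph::Shape{2}, {5, 7});
EXPECT_THROW(std::make_shared<ngraph::op::v8::AdaptiveMaxPool>(data, out_shape,
                                                               ngraph::element::f16),
             ngraph::NodeValidationFailure);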


@@ -101,6 +101,8 @@ set(SRC
tensor.cpp
type_prop/abs.cpp
type_prop/acos.cpp
type_prop/adaptive_avg_pool.cpp
type_prop/adaptive_max_pool.cpp
type_prop/asin.cpp
type_prop/assign.cpp
type_prop/avg_pool.cpp
@@ -229,6 +231,8 @@ set(SRC
visitors/partial_shape.cpp
visitors/user_op.cpp
visitors/value_map.cpp
visitors/op/adaptive_avg_pool.cpp
visitors/op/adaptive_max_pool.cpp
visitors/op/batch_norm.cpp
visitors/op/broadcast.cpp
visitors/op/bucketize.cpp


@@ -0,0 +1,107 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(type_prop, adaptive_avg_pool)
{
const PartialShape arg_shape{1, 6, 8, 9};
const vector<int64_t> output_shape{5, 7};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape);
auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, out_shape);
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme({1, 6, 5, 7}));
}
TEST(type_prop, adaptive_avg_pool_dyn_batch)
{
const PartialShape arg_shape{Dimension::dynamic(), 6, 8, 9};
const vector<int64_t> output_shape{5, 7};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape);
auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, out_shape);
ASSERT_TRUE(
adaptive_pool->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), 6, 5, 7}));
}
TEST(type_prop, adaptive_avg_pool_dyn_channels)
{
const PartialShape arg_shape{1, Dimension::dynamic(), 8, 9};
const vector<int64_t> output_shape{5, 7};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape);
auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, out_shape);
ASSERT_TRUE(
adaptive_pool->get_output_partial_shape(0).same_scheme({1, Dimension::dynamic(), 5, 7}));
}
TEST(type_prop, adaptive_avg_pool_dyn_spatial)
{
const PartialShape arg_shape{1, 6, Dimension::dynamic(), Dimension::dynamic()};
const vector<int64_t> output_shape{5, 7};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape);
auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, out_shape);
ASSERT_TRUE(
adaptive_pool->get_output_partial_shape(0).same_scheme({1, 6, 5, 7}));
}
TEST(type_prop, adaptive_avg_pool_dyn_output_shape)
{
const PartialShape arg_shape{1, 6, 8, 9};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = make_shared<op::Parameter>(element::i64, Shape{2});
auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, out_shape);
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme(
{1, 6, Dimension::dynamic(), Dimension::dynamic()}));
}
TEST(type_prop, adaptive_avg_pool_dyn_rank)
{
const PartialShape arg_shape = PartialShape::dynamic();
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = make_shared<op::Parameter>(element::i64, Shape{2});
auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, out_shape);
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
}
TEST(type_prop, adaptive_avg_pool_unsupported_input_shape)
{
const PartialShape arg_shape{1, 6};
const vector<int64_t> output_shape{1};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{}, output_shape);
EXPECT_THROW(make_shared<op::v8::AdaptiveAvgPool>(data, out_shape), NodeValidationFailure);
}
TEST(type_prop, adaptive_avg_pool_wrong_out_shape)
{
const PartialShape arg_shape{1, 6, 8, 9};
const vector<int64_t> output_shape{5, 7, 8};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{3}, output_shape);
EXPECT_THROW(make_shared<op::v8::AdaptiveAvgPool>(data, out_shape),
NodeValidationFailure);
}


@@ -0,0 +1,128 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(type_prop, adaptive_max_pool)
{
const PartialShape arg_shape{1, 6, 8, 9};
const vector<int64_t> output_shape{5, 7};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape);
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape);
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme({1, 6, 5, 7}));
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(1).same_scheme({1, 6, 5, 7}));
}
TEST(type_prop, adaptive_max_pool_i32_indices)
{
const PartialShape arg_shape{1, 6, 8, 9};
const vector<int64_t> output_shape{5, 7};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape);
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape, element::i32);
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme({1, 6, 5, 7}));
ASSERT_EQ(adaptive_pool->output(1).get_element_type(), element::i32);
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(1).same_scheme({1, 6, 5, 7}));
}
TEST(type_prop, adaptive_max_pool_dyn_batch)
{
const PartialShape arg_shape{Dimension::dynamic(), 6, 8, 9};
const vector<int64_t> output_shape{5, 7};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape);
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape);
ASSERT_TRUE(
adaptive_pool->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), 6, 5, 7}));
ASSERT_TRUE(
adaptive_pool->get_output_partial_shape(1).same_scheme({Dimension::dynamic(), 6, 5, 7}));
}
TEST(type_prop, adaptive_max_pool_dyn_channels)
{
const PartialShape arg_shape{1, Dimension::dynamic(), 8, 9};
const vector<int64_t> output_shape{5, 7};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape);
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape);
ASSERT_TRUE(
adaptive_pool->get_output_partial_shape(0).same_scheme({1, Dimension::dynamic(), 5, 7}));
ASSERT_TRUE(
adaptive_pool->get_output_partial_shape(1).same_scheme({1, Dimension::dynamic(), 5, 7}));
}
TEST(type_prop, adaptive_max_pool_dyn_spatial)
{
const PartialShape arg_shape{1, 6, Dimension::dynamic(), Dimension::dynamic()};
const vector<int64_t> output_shape{5, 7};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape);
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape);
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme({1, 6, 5, 7}));
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(1).same_scheme({1, 6, 5, 7}));
}
TEST(type_prop, adaptive_max_pool_dyn_output_shape)
{
const PartialShape arg_shape{1, 6, 8, 9};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = make_shared<op::Parameter>(element::i64, Shape{2});
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape);
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme(
{1, 6, Dimension::dynamic(), Dimension::dynamic()}));
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(1).same_scheme(
{1, 6, Dimension::dynamic(), Dimension::dynamic()}));
}
TEST(type_prop, adaptive_max_pool_dyn_rank)
{
const PartialShape arg_shape = PartialShape::dynamic();
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = make_shared<op::Parameter>(element::i64, Shape{2});
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape);
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(1).same_scheme(PartialShape::dynamic()));
}
TEST(type_prop, adaptive_max_pool_unsupported_input_shape)
{
const PartialShape arg_shape{1, 6};
const vector<int64_t> output_shape{1};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{}, output_shape);
EXPECT_THROW(make_shared<op::v8::AdaptiveMaxPool>(data, out_shape), NodeValidationFailure);
}
TEST(type_prop, adaptive_max_pool_wrong_out_shape)
{
const PartialShape arg_shape{1, 6, 8, 9};
const vector<int64_t> output_shape{5, 7, 8};
auto data = make_shared<op::Parameter>(element::f32, arg_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{3}, output_shape);
EXPECT_THROW(make_shared<op::v8::AdaptiveMaxPool>(data, out_shape), NodeValidationFailure);
}


@@ -0,0 +1,27 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/opsets/opset8.hpp"
#include "util/visitor.hpp"
using namespace std;
using namespace ngraph;
using ngraph::test::NodeBuilder;
TEST(attributes, adaptive_avg_pool_op)
{
NodeBuilder::get_ops().register_factory<opset8::AdaptiveAvgPool>();
const auto A = make_shared<op::Parameter>(element::f32, Shape{1, 3, 5, 4});
const auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {4, 3});
const auto adaptive_pool = make_shared<opset8::AdaptiveAvgPool>(A, out_shape);
NodeBuilder builder(adaptive_pool);
const auto expected_attr_count = 0;
EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
}


@@ -0,0 +1,29 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/opsets/opset8.hpp"
#include "util/visitor.hpp"
using namespace std;
using namespace ngraph;
using ngraph::test::NodeBuilder;
TEST(attributes, adaptive_max_pool_op)
{
NodeBuilder::get_ops().register_factory<opset8::AdaptiveMaxPool>();
const auto A = make_shared<op::Parameter>(element::f32, Shape{1, 3, 5, 4});
const auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {4, 3});
const auto adaptive_pool = make_shared<opset8::AdaptiveMaxPool>(A, out_shape);
NodeBuilder builder(adaptive_pool);
auto g_adaptive_pool = as_type_ptr<opset8::AdaptiveMaxPool>(builder.create());
const auto expected_attr_count = 1;
EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
EXPECT_EQ(g_adaptive_pool->get_index_element_type(), adaptive_pool->get_index_element_type());
}