Revise space_to_batch (#6034)

This commit is contained in:
Bartek Szmelczynski
2021-06-14 12:18:22 +02:00
committed by GitHub
parent dc415573d4
commit 826638e523
8 changed files with 349 additions and 44 deletions

View File

@@ -0,0 +1,38 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/single_layer/space_to_batch.hpp"
#include <vector>
using namespace LayerTestsDefinitions;
namespace {
// Smoke test: round-trips the generated SpaceToBatch network through
// serialization (provided by SpaceToBatchLayerTest::Serialize()).
TEST_P(SpaceToBatchLayerTest, Serialize) {
Serialize();
}
// 4D test data: block shapes, begin/end pads and input data shapes that
// are combined into individual parameterized test cases below.
const std::vector<std::vector<int64_t>> blockShapes4D{{1, 1, 2, 2}};
const std::vector<std::vector<int64_t>> padsBegins4D{{0, 0, 0, 0},
{0, 0, 0, 2}};
const std::vector<std::vector<int64_t>> padsEnds4D{{0, 0, 0, 0}, {0, 0, 0, 2}};
// Input tensor shapes fed to the layer under test.
const std::vector<std::vector<size_t>> dataShapes4D{
{1, 1, 2, 2}, {1, 3, 2, 2}, {1, 1, 4, 4}, {2, 1, 2, 4}};
// Cartesian product of the vectors above with fixed FP32 precisions,
// ANY layouts and the CPU device.
const auto SpaceToBatch4D = ::testing::Combine(
::testing::ValuesIn(blockShapes4D), ::testing::ValuesIn(padsBegins4D),
::testing::ValuesIn(padsEnds4D), ::testing::ValuesIn(dataShapes4D),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU));
INSTANTIATE_TEST_CASE_P(smoke_spacetobatch4D_Serialization,
SpaceToBatchLayerTest, SpaceToBatch4D,
SpaceToBatchLayerTest::getTestCaseName);
} // namespace

View File

@@ -11,47 +11,64 @@ using namespace LayerTestsDefinitions;
namespace {
spaceToBatchParamsTuple stb_only_test_cases[] = {
spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {1, 1, 2, 2},
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Layout::ANY,
InferenceEngine::Layout::ANY,
CommonTestUtils::DEVICE_CPU),
spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {1, 3, 2, 2},
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Layout::ANY,
InferenceEngine::Layout::ANY,
CommonTestUtils::DEVICE_CPU),
spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {1, 1, 4, 4},
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Layout::ANY,
InferenceEngine::Layout::ANY,
CommonTestUtils::DEVICE_CPU),
spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 2}, {0, 0, 0, 0}, {2, 1, 2, 4},
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Layout::ANY,
InferenceEngine::Layout::ANY,
CommonTestUtils::DEVICE_CPU),
spaceToBatchParamsTuple({1, 1, 3, 2, 2}, {0, 0, 1, 0, 3}, {0, 0, 2, 0, 0}, {1, 1, 3, 2, 1},
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Layout::ANY,
InferenceEngine::Layout::ANY,
CommonTestUtils::DEVICE_CPU),
// Test data for the 4D SpaceToBatch CPU smoke tests: every combination
// of the vectors below becomes one test case via ::testing::Combine.
const std::vector<std::vector<int64_t >> blockShapes4D {
{1, 1, 2, 2}
};
const std::vector<std::vector<int64_t >> padsBegins4D {
{0, 0, 0, 0}, {0, 0, 0, 2}
};
const std::vector<std::vector<int64_t >> padsEnds4D {
{0, 0, 0, 0}, {0, 0, 0, 2}
};
// Input tensor shapes fed to the layer under test.
const std::vector<std::vector<size_t >> dataShapes4D {
{1, 1, 2, 2}, {1, 3, 2, 2}, {1, 1, 4, 4}, {2, 1, 2, 4}
};
// Cartesian product of the vectors above with fixed FP32 precisions,
// ANY layouts and the CPU device.
const auto SpaceToBatch4D = ::testing::Combine(
::testing::ValuesIn(blockShapes4D),
::testing::ValuesIn(padsBegins4D),
::testing::ValuesIn(padsEnds4D),
::testing::ValuesIn(dataShapes4D),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU)
);
INSTANTIATE_TEST_CASE_P(
smoke_MKLDNN, SpaceToBatchLayerTest, ::testing::ValuesIn(stb_only_test_cases),
smoke_spacetobatch4D, SpaceToBatchLayerTest, SpaceToBatch4D,
SpaceToBatchLayerTest::getTestCaseName);
// 5D variant: a single configuration with non-trivial block sizes and
// asymmetric begin/end padding.
const std::vector<std::vector<int64_t >> blockShapes5D {
{1, 1, 3, 2, 2}
};
const std::vector<std::vector<int64_t >> padsBegins5D {
{0, 0, 1, 0, 3}
};
const std::vector<std::vector<int64_t >> padsEnds5D {
{0, 0, 2, 0, 0}
};
const std::vector<std::vector<size_t >> dataShapes5D {
{1, 1, 3, 2, 1}
};
// Same fixed FP32/ANY/CPU environment as the 4D suite above.
const auto SpaceToBatch5D = ::testing::Combine(
::testing::ValuesIn(blockShapes5D),
::testing::ValuesIn(padsBegins5D),
::testing::ValuesIn(padsEnds5D),
::testing::ValuesIn(dataShapes5D),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU)
);
INSTANTIATE_TEST_CASE_P(
smoke_spacetobatch5D, SpaceToBatchLayerTest, SpaceToBatch5D,
SpaceToBatchLayerTest::getTestCaseName);
} // namespace

View File

@@ -4,8 +4,7 @@
#pragma once
#include "ngraph/node.hpp"
#include "ngraph/op/util/fused_op.hpp"
#include "ngraph/op/op.hpp"
namespace ngraph
{
@@ -27,8 +26,8 @@ namespace ngraph
class NGRAPH_API SpaceToBatch : public Op
{
public:
static constexpr NodeTypeInfo type_info{"SpaceToBatch", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
NGRAPH_RTTI_DECLARATION;
SpaceToBatch() = default;
/// \brief Constructs a SpaceToBatch operation.

View File

@@ -21,7 +21,7 @@
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::v1::SpaceToBatch::type_info;
NGRAPH_RTTI_DEFINITION(op::v1::SpaceToBatch, "SpaceToBatch", 1);
ngraph::op::v1::SpaceToBatch::SpaceToBatch(const ngraph::Output<ngraph::Node>& data,
const ngraph::Output<ngraph::Node>& block_shape,
@@ -49,13 +49,13 @@ void op::v1::SpaceToBatch::validate_and_infer_types()
NODE_VALIDATION_CHECK(this,
pads_begin_type.is_integral_number(),
"crops_begin must be an integral number but got (",
"pads_begin must be an integral number but got (",
pads_begin_type,
").");
NODE_VALIDATION_CHECK(this,
pads_end_type.is_integral_number(),
"crops_end must be an integral number but got (",
"pads_end must be an integral number but got (",
pads_end_type,
").");

View File

@@ -275,6 +275,7 @@ set(SRC
visitors/op/selu.cpp
visitors/op/shuffle_channels.cpp
visitors/op/softmax.cpp
visitors/op/space_to_batch.cpp
visitors/op/space_to_depth.cpp
visitors/op/split.cpp
visitors/op/squared_difference.cpp
@@ -445,6 +446,7 @@ set(MULTI_TEST_SRC
backend/sin.in.cpp
backend/sinh.in.cpp
backend/softmax.in.cpp
backend/space_to_batch.in.cpp
backend/split.in.cpp
backend/sqrt.in.cpp
backend/squared_difference.in.cpp

View File

@@ -0,0 +1,115 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/engine/test_engines.hpp"
#include "util/test_case.hpp"
#include "util/test_control.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
// Helper: builds a one-node Function wrapping op::v1::SpaceToBatch,
// with block shape and pads supplied as i64 constants, then runs it on
// the test engine and checks the result against the expected tensor.
// - inputs / inputs_shape:            f32 data tensor values and shape
// - block_shapes / blocks_shape:      per-axis block sizes (1D constant)
// - pads_begins, pads_ends / pads_shape: per-axis padding (1D constants;
//   both share pads_shape)
// - outputs / outputs_shape:          expected result values and shape
static void SpaceToBatchTest(const std::vector<float>& inputs,
const Shape inputs_shape,
const std::vector<int64_t>& block_shapes,
const Shape blocks_shape,
const std::vector<int64_t>& pads_begins,
const std::vector<int64_t>& pads_ends,
const Shape pads_shape,
const std::vector<float>& outputs,
const Shape outputs_shape)
{
auto inputs_param = make_shared<op::Parameter>(element::f32, inputs_shape);
auto block_shapes_param = make_shared<op::Constant>(element::i64, blocks_shape, block_shapes);
auto pads_begins_param = make_shared<op::Constant>(element::i64, pads_shape, pads_begins);
auto pads_ends_param = make_shared<op::Constant>(element::i64, pads_shape, pads_ends);
auto space_to_batch = make_shared<op::v1::SpaceToBatch>(
inputs_param, block_shapes_param, pads_begins_param, pads_ends_param);
// Only the data input is a Parameter; blocks/pads are folded constants.
auto f = make_shared<Function>(space_to_batch, ParameterVector{inputs_param});
auto test_case = test::TestCase<TestEngine>(f);
test_case.add_input<float>(inputs);
test_case.add_expected_output<float>(outputs_shape, outputs);
test_case.run();
}
// Degenerate case: unit block sizes and zero padding on every axis, so
// SpaceToBatch must return the 1x1x2x2 input tensor unchanged.
NGRAPH_TEST(${BACKEND_NAME}, space_to_batch_4D)
{
    const Shape inputs_shape{1, 1, 2, 2};
    const Shape outputs_shape{1, 1, 2, 2};
    const Shape blocks_shape{4};
    const Shape pads_shape{4};

    const std::vector<float> inputs(4, 1.0f);
    const std::vector<int64_t> block_shapes(4, 1);
    const std::vector<int64_t> pads_begins(4, 0);
    const std::vector<int64_t> pads_ends(4, 0);
    // Identity transform: expected output equals the input.
    const std::vector<float> outputs = inputs;

    SpaceToBatchTest(inputs, inputs_shape, block_shapes, blocks_shape, pads_begins,
                     pads_ends, pads_shape, outputs, outputs_shape);
}
// 5D case with non-trivial block sizes and asymmetric padding.
// Batch grows by prod(block_shapes) = 1*1*3*2*2 = 12; each padded
// spatial dim is divided by its block size:
//   D: (3 + 1 + 2) / 3 = 2,  H: (2 + 0 + 0) / 2 = 1,  W: (1 + 3 + 0) / 2 = 2
// giving the 12x1x2x1x2 output shape. The zeros in the expected output
// come from the padding; the ones are the relocated input elements.
NGRAPH_TEST(${BACKEND_NAME}, space_to_batch_5D)
{
const Shape inputs_shape{1, 1, 3, 2, 1};
const std::vector<float> inputs{1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f};
const Shape blocks_shape{5};
const std::vector<int64_t> block_shapes{1, 1, 3, 2, 2};
const Shape pads_shape{5};
const std::vector<int64_t> pads_begins{0, 0 ,1, 0, 3};
const std::vector<int64_t> pads_ends{0, 0, 2, 0, 0};
const Shape outputs_shape{12, 1, 2, 1, 2};
const std::vector<float> outputs{0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f};
SpaceToBatchTest(inputs, inputs_shape, block_shapes, blocks_shape, pads_begins,
pads_ends, pads_shape, outputs, outputs_shape);
}
// A 4x4 identity matrix with unit block sizes and one row of leading
// padding on the height axis: the result is the same matrix with a zero
// row prepended, i.e. shape 1x1x5x4.
NGRAPH_TEST(${BACKEND_NAME}, space_to_batch_4x4)
{
    const Shape inputs_shape{1, 1, 4, 4};
    const Shape outputs_shape{1, 1, 5, 4};
    const Shape blocks_shape{4};
    const Shape pads_shape{4};

    // 4x4 identity matrix.
    const std::vector<float> inputs{1.0f, 0.0f, 0.0f, 0.0f,
                                    0.0f, 1.0f, 0.0f, 0.0f,
                                    0.0f, 0.0f, 1.0f, 0.0f,
                                    0.0f, 0.0f, 0.0f, 1.0f};
    const std::vector<int64_t> block_shapes(4, 1);
    const std::vector<int64_t> pads_begins{0, 0, 1, 0};
    const std::vector<int64_t> pads_ends(4, 0);

    // One zero row from the padding, then the untouched identity rows.
    std::vector<float> outputs(4, 0.0f);
    outputs.insert(outputs.end(), inputs.begin(), inputs.end());

    SpaceToBatchTest(inputs, inputs_shape, block_shapes, blocks_shape, pads_begins,
                     pads_ends, pads_shape, outputs, outputs_shape);
}

View File

@@ -110,3 +110,104 @@ TEST(type_prop, space_to_batch_dynamic_shape_dynamic_rank)
ASSERT_EQ(space_to_batch->get_element_type(), element::f32);
ASSERT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape::dynamic());
}
// block_shape must have an integral element type; f32 must be rejected.
TEST(type_prop, space_to_batch_invalid_element_type_block_shape)
{
    auto data = make_shared<op::Parameter>(element::f32, Shape{2, 128});
    // Deliberately wrong: f32 element type for block_shape.
    auto block_shape = make_shared<op::Constant>(element::f32, Shape{2}, vector<int64_t>{1, 5});
    // Fixed: pads_begin was initialized from vector<float> although the
    // constant's element type is i64 (copy-paste from the pads_begin
    // test). The non-target inputs should be well-formed here.
    auto pads_begin = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 2});
    auto pads_end = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 0});
    try
    {
        auto space_to_batch =
            make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
        // Construction must fail: block_shape element type is float32.
        FAIL() << "Invalid f32 element type for block_shape not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), "block_shape must be an integral number");
    }
    catch (...)
    {
        FAIL() << "Integral element type node validation check failed for unexpected reason";
    }
}
// pads_begin must have an integral element type; f32 must be rejected.
TEST(type_prop, space_to_batch_invalid_element_type_pads_begin)
{
    const auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 128});
    const auto blocks = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 5});
    // pads_begin deliberately carries a floating-point element type.
    const auto begin_pads = make_shared<op::Constant>(element::f32, Shape{2}, vector<float>{0, 2});
    const auto end_pads = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 0});

    try
    {
        const auto stb = make_shared<op::v1::SpaceToBatch>(arg, blocks, begin_pads, end_pads);
        // Reaching this line means validation accepted the f32 pads_begin.
        FAIL() << "Invalid f32 element type for pads_begin not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), "pads_begin must be an integral number but got");
    }
    catch (...)
    {
        FAIL() << "Integral element type node validation check failed for unexpected reason";
    }
}
// pads_end must have an integral element type; f32 must be rejected.
TEST(type_prop, space_to_batch_invalid_element_type_pads_end)
{
    auto data = make_shared<op::Parameter>(element::f32, Shape{2, 128});
    // Fixed: block_shape used element::i16 here while every sibling test
    // uses i64 (likely a copy-paste slip). i16 is also integral and would
    // pass validation, but i64 keeps the fixture uniform.
    auto block_shape = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 5});
    auto pads_begin = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 2});
    // Deliberately wrong: f32 element type for pads_end.
    auto pads_end = make_shared<op::Constant>(element::f32, Shape{2}, vector<float>{0, 0});
    try
    {
        auto space_to_batch =
            make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
        // Construction must fail: pads_end element type is float32.
        FAIL() << "Invalid f32 element type for pads_end not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), "pads_end must be an integral number but got");
    }
    catch (...)
    {
        FAIL() << "Integral element type node validation check failed for unexpected reason";
    }
}
// block_shape values must be strictly positive; negatives are rejected.
TEST(type_prop, space_to_batch_invalid_value_block_shape)
{
    auto data = make_shared<op::Parameter>(element::f32, Shape{2, 128});
    // Deliberately wrong: negative block sizes.
    auto block_shape = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{-1, -5});
    auto pads_begin = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 2});
    // Fixed: pads_end was initialized from vector<float> although the
    // constant's element type is i64. The pads inputs should be
    // well-formed: only the block_shape values are under test here.
    auto pads_end = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 0});
    try
    {
        auto space_to_batch =
            make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
        // Construction must fail: block_shape contains negative values.
        FAIL() << "Invalid block_shape value not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), "block_shape values must be greater than 0");
    }
    catch (...)
    {
        FAIL() << "block_shape value node validation check failed for unexpected reason";
    }
}

View File

@@ -0,0 +1,33 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/opsets/opset2.hpp"
#include "util/visitor.hpp"
using namespace std;
using namespace ngraph;
using ngraph::test::NodeBuilder;
// SpaceToBatch exposes no serializable attributes — block shape and pads
// arrive as node inputs — so the visitor must record an empty value map.
TEST(attributes, space_to_batch_op)
{
    using namespace opset2;
    NodeBuilder::get_ops().register_factory<SpaceToBatch>();

    const auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 128});
    const auto blocks = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 5});
    const auto begin_pads = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 2});
    const auto end_pads = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 0});

    const auto stb = make_shared<SpaceToBatch>(arg, blocks, begin_pads, end_pads);
    NodeBuilder builder(stb);

    const auto expected_attr_count = 0;
    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
}