Merge remote-tracking branch 'upstream/master'

Steve Yoo 2021-06-15 08:24:23 +09:00
commit 84bc851ecf
73 changed files with 3889 additions and 1792 deletions


@ -147,21 +147,29 @@ def get_wrong_commits(pull):
# import pprint; pprint.pprint(commit.raw_data) # import pprint; pprint.pprint(commit.raw_data)
print("Commit SHA:", commit.sha) print("Commit SHA:", commit.sha)
# Use raw data because commit author can be non GitHub user # Use raw data because commit author can be non GitHub user
commit_email = (commit.raw_data["commit"]["author"]["email"] or "").lower() commit_author_email = (commit.raw_data["commit"]["author"]["email"] or "").lower()
print(" Commit email:", commit_email) commit_committer_email = (commit.raw_data["commit"]["committer"]["email"] or "").lower()
print(" Commit author email:", commit_author_email)
print(" Commit committer email:", commit_committer_email)
if not github_api.is_valid_user(commit.author): if not github_api.is_valid_user(commit.author):
print( print(
" ERROR: User with the commit email is absent in GitHub:", " ERROR: User with the commit author email is absent in GitHub:",
commit.raw_data["commit"]["author"]["name"], commit.raw_data["commit"]["author"]["name"],
) )
wrong_commits.add(commit.sha) wrong_commits.add(commit.sha)
if not github_api.is_valid_user(commit.committer):
print(
" ERROR: User with the commit committer email is absent in GitHub:",
commit.raw_data["commit"]["committer"]["name"],
)
wrong_commits.add(commit.sha)
if not commit.raw_data["commit"]["verification"]["verified"]: if not commit.raw_data["commit"]["verification"]["verified"]:
print( print(
" WARNING: The commit is not verified. Reason:", " WARNING: The commit is not verified. Reason:",
commit.raw_data["commit"]["verification"]["reason"], commit.raw_data["commit"]["verification"]["reason"],
) )
if pr_author_email != commit_email: if pr_author_email != commit_author_email or pr_author_email != commit_committer_email:
print(" WARNING: Commit email and GitHub PR author public email are differnt") print(" WARNING: Commit emails and GitHub PR author public email are differnt")
return wrong_commits return wrong_commits


@ -277,8 +277,8 @@ int main(int argc, char* argv[]) {
setThroughputStreams(); setThroughputStreams();
if ((device_name.find("MULTI") != std::string::npos) && (device_name.find("CPU") != std::string::npos)) { if ((device_name.find("MULTI") != std::string::npos) && (device_name.find("CPU") != std::string::npos)) {
slog::warn << "Turn on GPU trottling. Multi-device execution with " slog::warn << "Turn on GPU throttling. Multi-device execution with "
"the CPU + GPU performs best with GPU trottling hint," "the CPU + GPU performs best with GPU throttling hint, "
<< "which releases another CPU thread (that is otherwise " << "which releases another CPU thread (that is otherwise "
"used by the GPU driver for active polling)" "used by the GPU driver for active polling)"
<< slog::endl; << slog::endl;


@ -120,10 +120,10 @@ bool NormalizeL2Transformation::transform(TransformationContext &context, ngraph
} }
auto newNormalize = std::make_shared<op::TypeRelaxed<opset1::NormalizeL2>>( auto newNormalize = std::make_shared<op::TypeRelaxed<opset1::NormalizeL2>>(
std::vector<ngraph::element::Type>{ element::f32, element::f32 }, std::vector<ngraph::element::Type>{ element::f32, axes->output(0).get_element_type() },
std::vector<ngraph::element::Type>{deqPrecision}, std::vector<ngraph::element::Type>{deqPrecision},
ngraph::op::TemporaryReplaceOutputType(dequantization.subtract == nullptr ? dequantization.data : dequantization.subtract, element::f32).get(), ngraph::op::TemporaryReplaceOutputType(dequantization.subtract == nullptr ? dequantization.data : dequantization.subtract, element::f32).get(),
ngraph::op::TemporaryReplaceOutputType(axes->clone_with_new_inputs({}), element::f32).get(), axes,
normalize->get_eps(), normalize->get_eps(),
normalize->get_eps_mode()); normalize->get_eps_mode());
NetworkHelper::copyInfo(normalize, newNormalize); NetworkHelper::copyInfo(normalize, newNormalize);


@ -0,0 +1,38 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/single_layer/space_to_batch.hpp"
#include <vector>
using namespace LayerTestsDefinitions;
namespace {
TEST_P(SpaceToBatchLayerTest, Serialize) {
Serialize();
}
const std::vector<std::vector<int64_t>> blockShapes4D{{1, 1, 2, 2}};
const std::vector<std::vector<int64_t>> padsBegins4D{{0, 0, 0, 0},
{0, 0, 0, 2}};
const std::vector<std::vector<int64_t>> padsEnds4D{{0, 0, 0, 0}, {0, 0, 0, 2}};
const std::vector<std::vector<size_t>> dataShapes4D{
{1, 1, 2, 2}, {1, 3, 2, 2}, {1, 1, 4, 4}, {2, 1, 2, 4}};
const auto SpaceToBatch4D = ::testing::Combine(
::testing::ValuesIn(blockShapes4D), ::testing::ValuesIn(padsBegins4D),
::testing::ValuesIn(padsEnds4D), ::testing::ValuesIn(dataShapes4D),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU));
INSTANTIATE_TEST_CASE_P(smoke_spacetobatch4D_Serialization,
SpaceToBatchLayerTest, SpaceToBatch4D,
SpaceToBatchLayerTest::getTestCaseName);
} // namespace
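For orientation, SpaceToBatch multiplies the batch dimension by the product of block_shape and divides each padded spatial dimension by its block size. A self-contained sketch checking that rule for one of the cases above (data {1, 1, 4, 4}, block {1, 1, 2, 2}, zero pads); illustrative only, not test code:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main()
    {
        const std::vector<std::size_t> data{1, 1, 4, 4};        // entry from dataShapes4D
        const std::vector<std::size_t> block{1, 1, 2, 2};       // blockShapes4D entry
        const std::vector<std::size_t> pads_begin{0, 0, 0, 0};
        const std::vector<std::size_t> pads_end{0, 0, 0, 0};

        std::size_t block_volume = 1;
        for (std::size_t b : block)
            block_volume *= b;

        std::vector<std::size_t> out(data.size());
        out[0] = data[0] * block_volume;                        // batch grows by the block volume
        for (std::size_t i = 1; i < data.size(); ++i)
            out[i] = (data[i] + pads_begin[i] + pads_end[i]) / block[i];

        for (std::size_t d : out)
            std::cout << d << ' ';                              // prints: 4 1 2 2
        std::cout << '\n';
    }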


@ -30,7 +30,7 @@ TEST(TransformationTests, MVNFusionTestOutside) {
auto sub2 = std::make_shared<ngraph::opset6::Subtract>(input, mean2); auto sub2 = std::make_shared<ngraph::opset6::Subtract>(input, mean2);
auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 });
auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub2, const_2); auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub2, const_2);
auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 });
auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes); auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes);
auto const_0_5 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 0.5 }); auto const_0_5 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 0.5 });
auto power_sqrt = std::make_shared<ngraph::opset6::Power>(mean3, const_0_5); auto power_sqrt = std::make_shared<ngraph::opset6::Power>(mean3, const_0_5);
@ -70,7 +70,7 @@ TEST(TransformationTests, MVNFusionTestReuseSub) {
auto sub1 = std::make_shared<ngraph::opset6::Subtract>(input, mean1); auto sub1 = std::make_shared<ngraph::opset6::Subtract>(input, mean1);
auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 });
auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub1, const_2); auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub1, const_2);
auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 });
auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes); auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes);
auto const_0_5 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 0.5 }); auto const_0_5 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 0.5 });
auto power_sqrt = std::make_shared<ngraph::opset6::Power>(mean3, const_0_5); auto power_sqrt = std::make_shared<ngraph::opset6::Power>(mean3, const_0_5);
@ -111,7 +111,7 @@ TEST(TransformationTests, MVNFusionTestWithConvert) {
auto cast = std::make_shared<ngraph::opset6::Convert>(sub1, ngraph::element::f32); auto cast = std::make_shared<ngraph::opset6::Convert>(sub1, ngraph::element::f32);
auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 });
auto power_sqr = std::make_shared<ngraph::opset6::Power>(cast, const_2); auto power_sqr = std::make_shared<ngraph::opset6::Power>(cast, const_2);
auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 });
auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes); auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes);
auto const_0_5 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 0.5 }); auto const_0_5 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 0.5 });
auto power_sqrt = std::make_shared<ngraph::opset6::Power>(mean3, const_0_5); auto power_sqrt = std::make_shared<ngraph::opset6::Power>(mean3, const_0_5);
@ -151,7 +151,7 @@ TEST(TransformationTests, MVNFusionTestSqrt) {
auto sub1 = std::make_shared<ngraph::opset6::Subtract>(input, mean1); auto sub1 = std::make_shared<ngraph::opset6::Subtract>(input, mean1);
auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 });
auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub1, const_2); auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub1, const_2);
auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 });
auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes); auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes);
auto power_sqrt = std::make_shared<ngraph::opset6::Sqrt>(mean3); auto power_sqrt = std::make_shared<ngraph::opset6::Sqrt>(mean3);
auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 }); auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 });
@ -190,7 +190,7 @@ TEST(TransformationTests, MVNFusionTestAltDiv) {
auto sub1 = std::make_shared<ngraph::opset6::Subtract>(input, mean1); auto sub1 = std::make_shared<ngraph::opset6::Subtract>(input, mean1);
auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 });
auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub1, const_2); auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub1, const_2);
auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 });
auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes); auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes);
auto const_0_5 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 0.5 }); auto const_0_5 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 0.5 });
auto power_sqrt = std::make_shared<ngraph::opset6::Power>(mean3, const_0_5); auto power_sqrt = std::make_shared<ngraph::opset6::Power>(mean3, const_0_5);
@ -231,7 +231,7 @@ TEST(TransformationTests, MVNFusionTestInsideSqrt) {
auto sub2 = std::make_shared<ngraph::opset6::Subtract>(input, mean2); auto sub2 = std::make_shared<ngraph::opset6::Subtract>(input, mean2);
auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 });
auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub2, const_2); auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub2, const_2);
auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 });
auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes); auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes);
auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 }); auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 });
auto add_eps = std::make_shared<ngraph::opset6::Add>(mean3, eps); auto add_eps = std::make_shared<ngraph::opset6::Add>(mean3, eps);
@ -271,7 +271,7 @@ TEST(TransformationTests, MVNFusionTestReuseSubInsideSqrt) {
auto sub1 = std::make_shared<ngraph::opset6::Subtract>(input, mean1); auto sub1 = std::make_shared<ngraph::opset6::Subtract>(input, mean1);
auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 });
auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub1, const_2); auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub1, const_2);
auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 });
auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes); auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes);
auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 }); auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 });
auto add_eps = std::make_shared<ngraph::opset6::Add>(mean3, eps); auto add_eps = std::make_shared<ngraph::opset6::Add>(mean3, eps);
@ -312,7 +312,7 @@ TEST(TransformationTests, MVNFusionTestWithConvertInsideSqrt) {
auto cast = std::make_shared<ngraph::opset6::Convert>(sub1, ngraph::element::f32); auto cast = std::make_shared<ngraph::opset6::Convert>(sub1, ngraph::element::f32);
auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 });
auto power_sqr = std::make_shared<ngraph::opset6::Power>(cast, const_2); auto power_sqr = std::make_shared<ngraph::opset6::Power>(cast, const_2);
auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 });
auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes); auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes);
auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 }); auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 });
auto add_eps = std::make_shared<ngraph::opset6::Add>(mean3, eps); auto add_eps = std::make_shared<ngraph::opset6::Add>(mean3, eps);
@ -352,7 +352,7 @@ TEST(TransformationTests, MVNFusionTestSqrtInsideSqrt) {
auto sub1 = std::make_shared<ngraph::opset6::Subtract>(input, mean1); auto sub1 = std::make_shared<ngraph::opset6::Subtract>(input, mean1);
auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 });
auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub1, const_2); auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub1, const_2);
auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 });
auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes); auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes);
auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 }); auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 });
auto add_eps = std::make_shared<ngraph::opset6::Add>(mean3, eps); auto add_eps = std::make_shared<ngraph::opset6::Add>(mean3, eps);
@ -391,7 +391,7 @@ TEST(TransformationTests, MVNFusionTestAltDivInsideSqrt) {
auto sub1 = std::make_shared<ngraph::opset6::Subtract>(input, mean1); auto sub1 = std::make_shared<ngraph::opset6::Subtract>(input, mean1);
auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 });
auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub1, const_2); auto power_sqr = std::make_shared<ngraph::opset6::Power>(sub1, const_2);
auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 });
auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes); auto mean3 = std::make_shared<ngraph::opset6::ReduceMean>(power_sqr, mean3_axes);
auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 }); auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 });
auto add_eps = std::make_shared<ngraph::opset6::Add>(mean3, eps); auto add_eps = std::make_shared<ngraph::opset6::Add>(mean3, eps);
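The only change repeated through these tests is the ReduceMean axes constant switching from element::f32 to element::i32: reduction axes are indices, so an integer element type is what the op expects. A minimal sketch of the corrected pattern, assuming the standard ngraph opset6 headers (not lifted verbatim from the tests):

    #include <ngraph/ngraph.hpp>
    #include <ngraph/opsets/opset6.hpp>

    using namespace ngraph;

    int main()
    {
        auto input = std::make_shared<opset6::Parameter>(element::f32, Shape{1, 3, 224, 224});
        // Axes are index data: build the constant as i32 (or i64), not f32.
        auto axes = opset6::Constant::create(element::i32, Shape{3}, {1, 2, 3});
        auto mean = std::make_shared<opset6::ReduceMean>(input, axes, true /* keep_dims */);
        auto f = std::make_shared<Function>(NodeVector{mean}, ParameterVector{input});
        return f != nullptr ? 0 : 1;
    }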


@ -11,47 +11,64 @@ using namespace LayerTestsDefinitions;
namespace { namespace {
spaceToBatchParamsTuple stb_only_test_cases[] = { const std::vector<std::vector<int64_t >> blockShapes4D {
spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {1, 1, 2, 2}, {1, 1, 2, 2}
InferenceEngine::Precision::FP32, };
InferenceEngine::Precision::UNSPECIFIED, const std::vector<std::vector<int64_t >> padsBegins4D {
InferenceEngine::Precision::UNSPECIFIED, {0, 0, 0, 0}, {0, 0, 0, 2}
InferenceEngine::Layout::ANY, };
InferenceEngine::Layout::ANY, const std::vector<std::vector<int64_t >> padsEnds4D {
CommonTestUtils::DEVICE_CPU), {0, 0, 0, 0}, {0, 0, 0, 2}
spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {1, 3, 2, 2}, };
InferenceEngine::Precision::FP32, const std::vector<std::vector<size_t >> dataShapes4D {
InferenceEngine::Precision::UNSPECIFIED, {1, 1, 2, 2}, {1, 3, 2, 2}, {1, 1, 4, 4}, {2, 1, 2, 4}
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Layout::ANY,
InferenceEngine::Layout::ANY,
CommonTestUtils::DEVICE_CPU),
spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {1, 1, 4, 4},
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Layout::ANY,
InferenceEngine::Layout::ANY,
CommonTestUtils::DEVICE_CPU),
spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 2}, {0, 0, 0, 0}, {2, 1, 2, 4},
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Layout::ANY,
InferenceEngine::Layout::ANY,
CommonTestUtils::DEVICE_CPU),
spaceToBatchParamsTuple({1, 1, 3, 2, 2}, {0, 0, 1, 0, 3}, {0, 0, 2, 0, 0}, {1, 1, 3, 2, 1},
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Precision::UNSPECIFIED,
InferenceEngine::Layout::ANY,
InferenceEngine::Layout::ANY,
CommonTestUtils::DEVICE_CPU),
}; };
const auto SpaceToBatch4D = ::testing::Combine(
::testing::ValuesIn(blockShapes4D),
::testing::ValuesIn(padsBegins4D),
::testing::ValuesIn(padsEnds4D),
::testing::ValuesIn(dataShapes4D),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU)
);
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(
smoke_MKLDNN, SpaceToBatchLayerTest, ::testing::ValuesIn(stb_only_test_cases), smoke_spacetobatch4D, SpaceToBatchLayerTest, SpaceToBatch4D,
SpaceToBatchLayerTest::getTestCaseName); SpaceToBatchLayerTest::getTestCaseName);
const std::vector<std::vector<int64_t >> blockShapes5D {
{1, 1, 3, 2, 2}
};
const std::vector<std::vector<int64_t >> padsBegins5D {
{0, 0, 1, 0, 3}
};
const std::vector<std::vector<int64_t >> padsEnds5D {
{0, 0, 2, 0, 0}
};
const std::vector<std::vector<size_t >> dataShapes5D {
{1, 1, 3, 2, 1}
};
const auto SpaceToBatch5D = ::testing::Combine(
::testing::ValuesIn(blockShapes5D),
::testing::ValuesIn(padsBegins5D),
::testing::ValuesIn(padsEnds5D),
::testing::ValuesIn(dataShapes5D),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU)
);
INSTANTIATE_TEST_CASE_P(
smoke_spacetobatch5D, SpaceToBatchLayerTest, SpaceToBatch5D,
SpaceToBatchLayerTest::getTestCaseName);
} // namespace } // namespace
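The hand-written spaceToBatchParamsTuple array is replaced by value vectors expanded through ::testing::Combine, so the 4D suite now covers the full Cartesian product (1 block shape x 2 pads_begin x 2 pads_end x 4 data shapes = 16 cases) instead of four hand-picked tuples. A minimal, self-contained gtest sketch of how Combine expands such lists (hypothetical fixture, not the OpenVINO test):

    #include <gtest/gtest.h>
    #include <tuple>

    // Hypothetical fixture: parameters are a (count, label) pair.
    class ComboTest : public ::testing::TestWithParam<std::tuple<int, char>> {};

    TEST_P(ComboTest, Runs)
    {
        int n;
        char c;
        std::tie(n, c) = GetParam();
        EXPECT_GE(n, 1);
        (void)c;
    }

    // Combine builds the Cartesian product: 2 x 3 = 6 instantiated cases.
    INSTANTIATE_TEST_CASE_P(Smoke,
                            ComboTest,
                            ::testing::Combine(::testing::Values(1, 2),
                                               ::testing::Values('a', 'b', 'c')));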


@ -36,7 +36,7 @@ std::shared_ptr<ngraph::Function> NormalizeL2Function::getOriginal(
fakeQuantize->set_friendly_name("fakeQuantize"); fakeQuantize->set_friendly_name("fakeQuantize");
const auto axesNode = std::make_shared<ngraph::op::Constant>(ngraph::element::u64, ngraph::Shape{ axes.size() }, axes); const auto axesNode = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{ axes.size() }, axes);
axesNode->set_friendly_name("axes"); axesNode->set_friendly_name("axes");
const auto normalizeL2 = std::make_shared<ngraph::opset1::NormalizeL2>(fakeQuantize->output(0), axesNode, 1e-6, ngraph::op::EpsMode::ADD); const auto normalizeL2 = std::make_shared<ngraph::opset1::NormalizeL2>(fakeQuantize->output(0), axesNode, 1e-6, ngraph::op::EpsMode::ADD);
normalizeL2->set_friendly_name("normalizeL2"); normalizeL2->set_friendly_name("normalizeL2");
@ -104,10 +104,10 @@ std::shared_ptr<ngraph::Function> NormalizeL2Function::getReference(
const auto axesNode = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{ axes.size() }, axes); const auto axesNode = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{ axes.size() }, axes);
const auto normalizeL2 = std::make_shared<ngraph::op::TypeRelaxed<ngraph::opset1::NormalizeL2>>( const auto normalizeL2 = std::make_shared<ngraph::op::TypeRelaxed<ngraph::opset1::NormalizeL2>>(
std::vector<ngraph::element::Type>{ element::f32, element::f32 }, std::vector<ngraph::element::Type>{ element::f32, axesNode->output(0).get_element_type() },
std::vector<ngraph::element::Type>{dequantizationAfter.empty() ? precision : element::f32}, std::vector<ngraph::element::Type>{dequantizationAfter.empty() ? precision : element::f32},
ngraph::op::TemporaryReplaceOutputType(deqBefore, element::f32).get(), ngraph::op::TemporaryReplaceOutputType(deqBefore, element::f32).get(),
ngraph::op::TemporaryReplaceOutputType(axesNode, element::f32).get(), axesNode,
1e-6, 1e-6,
epsMode); epsMode);
auto& rtInfo = normalizeL2->get_rt_info(); auto& rtInfo = normalizeL2->get_rt_info();


@ -137,7 +137,7 @@ std::shared_ptr<Node> TransformationsAfterSplitFunction::getLayerByTransformatio
} }
if (transformationName == "NormalizeL2Transformation") { if (transformationName == "NormalizeL2Transformation") {
const auto dequantization = makeDequantization(parent, { {element::f32}, {}, { 0.1f } }); const auto dequantization = makeDequantization(parent, { {element::f32}, {}, { 0.1f } });
const auto axesNode = opset1::Constant::create(element::u64, ngraph::Shape{ 3 }, { 1, 2, 3 }); const auto axesNode = opset1::Constant::create(element::i64, ngraph::Shape{ 3 }, { 1, 2, 3 });
return std::make_shared<ngraph::opset1::NormalizeL2>(dequantization, axesNode, 1e-6, ngraph::op::EpsMode::ADD); return std::make_shared<ngraph::opset1::NormalizeL2>(dequantization, axesNode, 1e-6, ngraph::op::EpsMode::ADD);
} }
if (transformationName == "PReluTransformation") { if (transformationName == "PReluTransformation") {


@ -6,10 +6,6 @@
#include "ngraph/node.hpp" #include "ngraph/node.hpp"
#include "ngraph/op/op.hpp" #include "ngraph/op/op.hpp"
#include "ngraph/op/util/fused_op.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
namespace ngraph namespace ngraph
{ {
namespace op namespace op
@ -17,12 +13,12 @@ namespace ngraph
namespace v0 namespace v0
{ {
/// \brief Performs a SELU activation function on all elements of the input node /// \brief Performs a SELU activation function on all elements of the input node
class NGRAPH_API Selu : public ngraph::op::util::FusedOp class NGRAPH_API Selu : public ngraph::op::Op
{ {
public: public:
static constexpr NodeTypeInfo type_info{"Selu", 0}; NGRAPH_RTTI_DECLARATION;
const NodeTypeInfo& get_type_info() const override { return type_info; }
Selu(); Selu() = default;
/// \brief Constructs a Selu node. /// \brief Constructs a Selu node.
/// ///
/// \param data - Node producing the input tensor /// \param data - Node producing the input tensor
@ -31,9 +27,10 @@ namespace ngraph
Selu(const Output<Node>& data, Selu(const Output<Node>& data,
const Output<Node>& alpha, const Output<Node>& alpha,
const Output<Node>& lambda); const Output<Node>& lambda);
virtual void pre_validate_and_infer_types() override;
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override; bool visit_attributes(AttributeVisitor& visitor) override;
virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node> virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override; clone_with_new_inputs(const OutputVector& new_args) const override;
@ -42,5 +39,3 @@ namespace ngraph
using v0::Selu; using v0::Selu;
} // namespace op } // namespace op
} // namespace ngraph } // namespace ngraph
NGRAPH_SUPPRESS_DEPRECATED_END
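With the FusedOp base (and decompose_op()) gone, Selu becomes a plain Op that declares its RTTI and validates its own output type and shape. A hedged sketch of what the matching .cpp side of such a refactor typically looks like; this is not copied from the commit, and the real implementation may differ in details:

    #include "ngraph/op/selu.hpp"

    using namespace ngraph;

    NGRAPH_RTTI_DEFINITION(op::v0::Selu, "Selu", 0);

    op::v0::Selu::Selu(const Output<Node>& data,
                       const Output<Node>& alpha,
                       const Output<Node>& lambda)
        : Op({data, alpha, lambda})
    {
        constructor_validate_and_infer_types();
    }

    void op::v0::Selu::validate_and_infer_types()
    {
        // No decomposition any more: the op simply propagates its input
        // element type and (partial) shape to the single output.
        set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
    }

    bool op::v0::Selu::visit_attributes(AttributeVisitor& visitor) { return true; }

    std::shared_ptr<Node> op::v0::Selu::clone_with_new_inputs(const OutputVector& new_args) const
    {
        check_new_args_count(this, new_args);
        return std::make_shared<Selu>(new_args.at(0), new_args.at(1), new_args.at(2));
    }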


@ -4,8 +4,7 @@
#pragma once #pragma once
#include "ngraph/node.hpp" #include "ngraph/op/op.hpp"
#include "ngraph/op/util/fused_op.hpp"
namespace ngraph namespace ngraph
{ {
@ -27,8 +26,8 @@ namespace ngraph
class NGRAPH_API SpaceToBatch : public Op class NGRAPH_API SpaceToBatch : public Op
{ {
public: public:
static constexpr NodeTypeInfo type_info{"SpaceToBatch", 1}; NGRAPH_RTTI_DECLARATION;
const NodeTypeInfo& get_type_info() const override { return type_info; }
SpaceToBatch() = default; SpaceToBatch() = default;
/// \brief Constructs a SpaceToBatch operation. /// \brief Constructs a SpaceToBatch operation.


@ -5,6 +5,7 @@
#pragma once #pragma once
#include "ngraph/op/op.hpp" #include "ngraph/op/op.hpp"
#include "ngraph/op/util/reduction_base.hpp"
namespace ngraph namespace ngraph
{ {
@ -15,7 +16,7 @@ namespace ngraph
/// \brief Abstract base class for arithmetic reduction operations, i.e., operations /// \brief Abstract base class for arithmetic reduction operations, i.e., operations
/// where chosen axes of the input tensors are eliminated (reduced out) by /// where chosen axes of the input tensors are eliminated (reduced out) by
/// repeated application of a particular binary arithmetic operation. /// repeated application of a particular binary arithmetic operation.
class NGRAPH_API ArithmeticReduction : public Op class NGRAPH_API ArithmeticReduction : public ReductionBase
{ {
protected: protected:
/// \brief Constructs an arithmetic reduction operation. /// \brief Constructs an arithmetic reduction operation.


@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#pragma once
#include "ngraph/axis_set.hpp" #include "ngraph/axis_set.hpp"
#include "ngraph/descriptor/tensor.hpp" #include "ngraph/descriptor/tensor.hpp"
#include "ngraph/util.hpp" #include "ngraph/util.hpp"
@ -18,10 +20,5 @@ namespace ngraph
/// \return Normalized (positive only) axes as an AxisSet object. /// \return Normalized (positive only) axes as an AxisSet object.
AxisSet get_normalized_axes_from_tensor(const HostTensorPtr tensor, AxisSet get_normalized_axes_from_tensor(const HostTensorPtr tensor,
const ngraph::Rank& rank, const ngraph::Rank& rank,
const std::string& node_description) const std::string& node_description);
{
const auto axes_vector = host_tensor_2_vector<int64_t>(tensor);
const auto normalized_axes = ngraph::normalize_axes(node_description, axes_vector, rank);
return AxisSet{normalized_axes};
}
} // namespace ngraph } // namespace ngraph
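Only the declaration of get_normalized_axes_from_tensor stays in the header; the inline body presumably moves to the matching .cpp. For reference, the definition as it stood in the deleted lines (the header already pulls in ngraph/util.hpp, which provides host_tensor_2_vector):

    AxisSet get_normalized_axes_from_tensor(const HostTensorPtr tensor,
                                            const ngraph::Rank& rank,
                                            const std::string& node_description)
    {
        const auto axes_vector = host_tensor_2_vector<int64_t>(tensor);
        const auto normalized_axes = ngraph::normalize_axes(node_description, axes_vector, rank);
        return AxisSet{normalized_axes};
    }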


@ -5,6 +5,7 @@
#pragma once #pragma once
#include "ngraph/op/op.hpp" #include "ngraph/op/op.hpp"
#include "ngraph/op/util/reduction_base.hpp"
namespace ngraph namespace ngraph
{ {
@ -15,7 +16,7 @@ namespace ngraph
/// \brief Abstract base class for logical reduction operations, i.e., operations where /// \brief Abstract base class for logical reduction operations, i.e., operations where
/// chosen axes of the input tensors are eliminated (reduced out) by repeated /// chosen axes of the input tensors are eliminated (reduced out) by repeated
/// application of a particular binary logical operation. /// application of a particular binary logical operation.
class NGRAPH_API LogicalReduction : public Op class NGRAPH_API LogicalReduction : public ReductionBase
{ {
protected: protected:
/// \brief Constructs a logical reduction operation. /// \brief Constructs a logical reduction operation.


@ -0,0 +1,39 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ngraph/op/op.hpp"
namespace ngraph
{
namespace op
{
namespace util
{
class NGRAPH_API ReductionBase : public Op
{
protected:
/// \brief Constructs a reduction operation.
ReductionBase();
/// \brief Constructs a reduction operation.
///
/// \param arg Output that produces the first input tensor.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
ReductionBase(const Output<Node>& arg, const Output<Node>& reduction_axes);
/// \brief Infers reduction operations output shape.
///
/// \param[in] keep_dims Reduction operation keeps dimensions.
///
/// \return Partial shape of the output.
PartialShape infer_reduction_output_shape(const bool keep_dims);
public:
NGRAPH_RTTI_DECLARATION;
};
} // namespace util
} // namespace op
} // namespace ngraph


@ -18,8 +18,6 @@
#include "ngraph/runtime/reference/split.hpp" #include "ngraph/runtime/reference/split.hpp"
#include "ngraph/util.hpp" #include "ngraph/util.hpp"
// can't be removed currently due to arm-plugin dependency
#include "ngraph/runtime/reference/convolution_backprop_data.hpp"
namespace ngraph namespace ngraph
{ {
namespace runtime namespace runtime
@ -42,15 +40,18 @@ namespace ngraph
std::vector<int> dilation; std::vector<int> dilation;
std::vector<int> pads_begin; std::vector<int> pads_begin;
std::vector<int> pads_end; std::vector<int> pads_end;
std::vector<int> output_padding;
ConvolutionParams(const Strides& strides_, ConvolutionParams(const Strides& strides_,
const Strides& dilation_, const Strides& dilation_,
const CoordinateDiff& pads_begin_, const CoordinateDiff& pads_begin_,
const CoordinateDiff& pads_end_) const CoordinateDiff& pads_end_,
const CoordinateDiff& output_padding_ = {0, 0, 0})
: strides{strides_.begin(), strides_.end()} : strides{strides_.begin(), strides_.end()}
, dilation{dilation_.begin(), dilation_.end()} , dilation{dilation_.begin(), dilation_.end()}
, pads_begin{pads_begin_.begin(), pads_begin_.end()} , pads_begin{pads_begin_.begin(), pads_begin_.end()}
, pads_end{pads_end_.begin(), pads_end_.end()} {}; , pads_end{pads_end_.begin(), pads_end_.end()}
, output_padding{output_padding_.begin(), output_padding_.end()} {};
}; };
template <typename Int> template <typename Int>
@ -86,15 +87,18 @@ namespace ngraph
const size_t filter_channel_size = shape_size(filter_channel_shape); const size_t filter_channel_size = shape_size(filter_channel_shape);
for (int i_z = -p.pads_begin[0]; for (int i_z = -p.pads_begin[0];
i_z <= (p.pads_end[0] + input_size_z - dilated_filter_size_z); i_z <= (p.pads_end[0] + input_size_z - dilated_filter_size_z +
p.output_padding[0]);
i_z += p.strides[0]) i_z += p.strides[0])
{ {
for (int i_y = -p.pads_begin[1]; for (int i_y = -p.pads_begin[1];
i_y <= (p.pads_end[1] + input_size_y - dilated_filter_size_y); i_y <= (p.pads_end[1] + input_size_y - dilated_filter_size_y +
p.output_padding[1]);
i_y += p.strides[1]) i_y += p.strides[1])
{ {
for (int i_x = -p.pads_begin[2]; for (int i_x = -p.pads_begin[2];
i_x <= (p.pads_end[2] + input_size_x - dilated_filter_size_x); i_x <= (p.pads_end[2] + input_size_x - dilated_filter_size_x +
p.output_padding[2]);
i_x += p.strides[2]) i_x += p.strides[2])
{ {
auto input_channel = batch; auto input_channel = batch;
@ -154,6 +158,8 @@ namespace ngraph
std::prev(p.pads_begin.end(), spatial_rank), missing_dims, 0); std::prev(p.pads_begin.end(), spatial_rank), missing_dims, 0);
p.pads_end.insert( p.pads_end.insert(
std::prev(p.pads_end.end(), spatial_rank), missing_dims, 0); std::prev(p.pads_end.end(), spatial_rank), missing_dims, 0);
p.output_padding.insert(
std::prev(p.output_padding.end(), spatial_rank), missing_dims, 0);
in_shape.insert(std::next(in_shape.end(), -spatial_rank), missing_dims, 1); in_shape.insert(std::next(in_shape.end(), -spatial_rank), missing_dims, 1);
filter_shape.insert( filter_shape.insert(
std::prev(filter_shape.end(), spatial_rank), missing_dims, 1); std::prev(filter_shape.end(), spatial_rank), missing_dims, 1);
@ -324,3 +330,6 @@ namespace ngraph
} // namespace reference } // namespace reference
} // namespace runtime } // namespace runtime
} // namespace ngraph } // namespace ngraph
// can't be removed currently due to arm-plugin dependency
#include "ngraph/runtime/reference/convolution_backprop_data.hpp"


@ -10,11 +10,7 @@
#include <numeric> #include <numeric>
#include "ngraph/axis_vector.hpp" #include "ngraph/axis_vector.hpp"
#include "ngraph/coordinate_transform.hpp" #include "ngraph/runtime/reference/convolution.hpp"
#include "ngraph/runtime/reference/concat.hpp"
#include "ngraph/runtime/reference/helpers.hpp"
#include "ngraph/runtime/reference/reverse.hpp"
#include "ngraph/runtime/reference/split.hpp"
#include "ngraph/util.hpp" #include "ngraph/util.hpp"
namespace ngraph namespace ngraph
@ -23,217 +19,302 @@ namespace ngraph
{ {
namespace reference namespace reference
{ {
// in: NC_I... namespace
// filter: C_OC_I... {
// out: NC_O... constexpr size_t filter_input_ch_axis = 0;
template <typename INPUT,
typename FILTER, template <typename T>
typename OUTPUT, void extend_with_zeros(const Strides& strides,
typename ACCUMULATION = typename widen<OUTPUT>::type> const Shape& input_shape,
void convolution_backprop_impl(const INPUT* in, const T* in,
const FILTER* filter, Shape& output_shape,
OUTPUT* out, std::vector<T>& input_zeros)
{
std::vector<int> input_3d(3, 1);
std::vector<int> strides_3d(3, 1);
std::vector<int> output_3d(3, 1);
for (size_t i = 0; i < strides.size(); ++i)
{
output_shape[i + 2] =
input_shape[i + 2] + (strides[i] - 1) * (input_shape[i + 2] - 1);
input_3d[input_3d.size() - strides.size() + i] = input_shape[i + 2];
strides_3d[strides_3d.size() - strides.size() + i] = strides[i];
output_3d[output_3d.size() - strides.size() + i] = output_shape[i + 2];
}
const size_t input_size = shape_size(input_3d);
if (input_size == 1)
{
for (size_t i = 0; i < shape_size(input_shape); ++i)
{
input_zeros.push_back(in[i]);
}
}
else
{
for (size_t batch = 0; batch < input_shape[0]; ++batch)
{
const auto offset_batch = batch * input_size * input_shape[1];
for (size_t channel = 0; channel < input_shape[1]; ++channel)
{
const auto offset_channel = offset_batch + channel * input_size;
for (int i_z = 0; i_z < input_3d[0]; ++i_z)
{
const auto offset_i_z = i_z * input_3d[2] * input_3d[1];
for (int i_y = 0; i_y < input_3d[1]; ++i_y)
{
const auto offset_i_y = i_y * input_3d[2];
for (int i_x = 0; i_x < input_3d[2]; ++i_x)
{
input_zeros.push_back(
in[offset_channel + i_x + offset_i_y + offset_i_z]);
if (i_x < input_3d[2] - 1)
{
for (int k = 0; k < strides_3d[2] - 1; k++)
{
input_zeros.push_back(0);
}
}
}
if (i_y < input_3d[1] - 1)
{
const auto new_size =
output_3d[2] * (strides_3d[1] - 1);
input_zeros.insert(input_zeros.begin() +
input_zeros.size(),
new_size,
0);
}
}
if (i_z < input_3d[0] - 1)
{
const auto new_size =
output_3d[1] * output_3d[2] * (strides_3d[0] - 1);
input_zeros.insert(
input_zeros.begin() + input_zeros.size(), new_size, 0);
}
}
}
}
}
}
void infer_forward_convbackprop_output_shape(const Shape& in_spatial_shape,
const Shape& f_spatial_shape,
const Shape& out_spatial_shape,
Shape& infer_spatial_shape,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& output_padding)
{
for (size_t idx = 0; idx < in_spatial_shape.size(); idx++)
{
int total_padding = strides[idx] * (in_spatial_shape[idx] - 1) +
dilations[idx] * (f_spatial_shape[idx] - 1) + 1 -
out_spatial_shape[idx] + output_padding[idx];
size_t padded_dim = std::max<size_t>(total_padding, 0);
size_t filter_dilated_dim = dilations[idx] * (f_spatial_shape[idx] - 1) + 1;
size_t out_spatial_dim = (in_spatial_shape[idx] - 1) * strides[idx] +
filter_dilated_dim - padded_dim +
output_padding[idx];
infer_spatial_shape.push_back(out_spatial_dim);
}
}
void validate_convolution_backprop_parameters(const Shape& in_shape,
const Shape& f_shape,
const Shape& out_shape,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const CoordinateDiff& output_padding)
{
// this implementation supports 1D, 2D and 3D convolutions
NGRAPH_CHECK(in_shape.size() >= 3 && in_shape.size() <= 5,
"Unsupported input rank: ",
in_shape);
NGRAPH_CHECK(in_shape.size() == f_shape.size(),
"Incompatible input ranks: ",
in_shape.size(),
" and ",
f_shape.size());
NGRAPH_CHECK(in_shape[in_channel_axis] == f_shape[filter_input_ch_axis],
"Incompatible input channels in data batch and filters shapes: ",
in_shape[in_channel_axis],
" and ",
f_shape[filter_input_ch_axis]);
NGRAPH_CHECK(in_shape.size() == out_shape.size(),
"Incompatible input and output ranks: ",
in_shape.size(),
" and ",
out_shape.size());
const auto spatial_dims = in_shape.size() - 2;
NGRAPH_CHECK(strides.size() == spatial_dims,
"Strides not definied for all and only spatial dimensions.");
NGRAPH_CHECK(dilations.size() == spatial_dims,
"Dilations not defined for all and only spatial dimensions.");
NGRAPH_CHECK((pads_begin.size() == pads_end.size()) &&
(pads_begin.size() == spatial_dims),
"Pads not defined for all and only spatial dimensions.");
NGRAPH_CHECK(!output_padding.empty() && output_padding.size() == spatial_dims,
"Output padding not defined for all and only spatial dimensions.");
Shape out_spatial_shape{std::next(out_shape.begin(), 2), std::end(out_shape)};
Shape infered_out_spatial_shape{};
infer_forward_convbackprop_output_shape(
Shape{std::next(in_shape.begin(), 2), std::end(in_shape)},
Shape{std::next(f_shape.begin(), 2), std::end(f_shape)},
Shape{std::next(out_shape.begin(), 2), std::end(out_shape)},
infered_out_spatial_shape,
strides,
dilations,
output_padding);
NGRAPH_CHECK(out_spatial_shape == infered_out_spatial_shape,
"Incorrect output shape provided");
}
} // namespace
template <typename T>
void convolution_backprop_impl(const T* in,
const T* f,
T* out,
const Shape& in_shape,
const Shape& f_shape,
const Shape& out_shape,
const Strides& strides,
const Strides& dilation,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const CoordinateDiff& output_padding)
{
// here we are converting all param types to ints to avoid arithmetic issues
// (e.g. signed + unsigned) in index calculations later
ConvolutionParams params{strides, dilation, pads_begin, pads_end, output_padding};
// here we are extending spatial dimensions to 3D, because we are going to use the 3D
// convolution implementation to convolve in the 1D and 2D cases as well
Shape input_shape{in_shape};
Shape filters_shape{f_shape};
if (in_shape.size() < 5)
{
extend_to_3D(params, input_shape, filters_shape);
}
for (size_t i = 0; i < input_shape.size() - 2; ++i)
{
if (input_shape[i + 2] > 1 || filters_shape[i + 2] > 1)
{
params.pads_begin[i] = filters_shape[i + 2] - params.pads_begin[i] - 1;
params.pads_end[i] = filters_shape[i + 2] - params.pads_end[i] - 1;
}
else
{
params.pads_begin[i] = 0;
params.pads_end[i] = 0;
}
}
// convert output shape to 3D; it contains only the spatial dimensions
Shape out_shape_3d{out_shape.begin() + 2, out_shape.end()};
int out_shape_rank = out_shape.size() - 2;
if (out_shape_rank < 3)
{
int missing_dims = 3 - out_shape_rank;
out_shape_3d.insert(
std::prev(out_shape_3d.end(), out_shape_rank), missing_dims, 1);
}
// modify params.pads_end when output_shape was provided in ctor in order to
// calculate expected number of output elements
for (size_t i = 0; i < out_shape_3d.size(); i++)
{
if (out_shape_3d[i] > 1)
{
// expected_dim = (in - 1)* strides + filter - 2*padding + out_padding
// strides is already applied (through 0's extension in input)
// padding = pads_begin + pads_end, formula below is using
// params.pad_begin/params.pads_end:
const size_t expected_dim =
out_shape_3d[i] - ((input_shape[i + 2] - 1) - filters_shape[i + 2] +
params.pads_begin[i] + params.pads_end[i] + 2 +
params.output_padding[i]);
params.pads_end[i] += expected_dim;
}
}
const size_t filters_count = filters_shape[filter_out_ch_axis];
const Shape filter_shape(++filters_shape.begin(), filters_shape.end());
const size_t filter_size = shape_size(filter_shape);
const size_t batches_count = input_shape[in_batch_axis];
Shape batch_shape(++input_shape.begin(), input_shape.end());
const size_t batch_size = shape_size(batch_shape);
auto batch = in;
for (size_t batch_idx = 0; batch_idx < batches_count; ++batch_idx)
{
auto filter = f;
for (size_t f_idx = 0; f_idx < filters_count; ++f_idx)
{
convolve_3D_channels(params, batch, batch_shape, filter, filter_shape, out);
filter += filter_size;
}
batch += batch_size;
}
}
template <typename T>
void convolution_backprop_in(const T* delta_in,
const T* filter,
T* delta_out,
const Shape& in_shape, const Shape& in_shape,
const Shape& filter_shape, const Shape& filter_shape,
const Shape& out_shape, const Shape& out_shape,
const Strides& stride,
const Strides& filter_dilation,
const CoordinateDiff& in_pad_below,
const CoordinateDiff& in_pad_above,
const Strides& in_dilation,
size_t in_batch_axis,
size_t in_channel_axis,
size_t filter_out_channel_axis,
size_t filter_in_channel_axis,
size_t out_batch_axis,
size_t out_channel_axis)
{
auto old_mode = std::fegetround();
std::fesetround(FE_TONEAREST);
// Comments throughout assume without loss of generality that:
//
// * batch axes for both in and out are 0
// * in channel axes for both in and filter are 1
// * out channel axes for filter is 0
// * out channel axis for out is 1
// At the outermost level we will walk over every out coordinate O.
CoordinateTransform out_transform(out_shape);
for (const Coordinate& out_coord : out_transform)
{
// Our out coordinate O will have the form:
//
// (N,chan_out,i_1,...,i_n)
size_t batch_index = out_coord[out_batch_axis];
size_t out_channel = out_coord[out_channel_axis];
// For the in we need to iterate the coordinate:
//
// I:
//
// over the range (noninclusive on the right):
//
// (N,0,s_1*i_1,s_2*i_2,...,s_n*i_n) ->
//
// (N+1,
// chans_in_count,
// s_1*i_1+ l_1*filter_dims_1,
/// ...,
/// s_n*i_n +l_n*filter_dims_n)
//
// with strides:
//
// (1,l_1,...,l_n).
//
// Note that we are iterating within the *padded* and *dilated* in batch, so
// further down we must check the current coordinate is in the pad or dilation
// gap.
size_t n_spatial_dimensions = in_shape.size() - 2;
size_t n_in_channels = in_shape[in_channel_axis];
Coordinate in_transform_start(2 + n_spatial_dimensions);
Coordinate in_transform_end(2 + n_spatial_dimensions);
Strides in_transform_movement_strides(2 + n_spatial_dimensions, 1);
CoordinateDiff in_transform_pad_below(2 + n_spatial_dimensions, 0);
CoordinateDiff in_transform_pad_above(2 + n_spatial_dimensions, 0);
Strides in_transform_dilation_strides(2 + n_spatial_dimensions, 1);
in_transform_start[in_batch_axis] = batch_index;
in_transform_end[in_batch_axis] = batch_index + 1;
in_transform_start[in_channel_axis] = 0;
in_transform_end[in_channel_axis] = 1;
for (size_t i = 2; i < n_spatial_dimensions + 2; i++)
{
size_t filter_dilation_stride = filter_dilation[i - 2];
size_t filter_movement_stride = stride[i - 2];
std::ptrdiff_t below_pad = in_pad_below[i - 2];
std::ptrdiff_t above_pad = in_pad_above[i - 2];
size_t in_dilation_stride = in_dilation[i - 2];
in_transform_start[i] = filter_movement_stride * out_coord[i];
in_transform_end[i] = in_transform_start[i] +
(filter_shape[i] - 1) * filter_dilation_stride + 1;
in_transform_movement_strides[i] = filter_dilation_stride;
in_transform_pad_below[i] = below_pad;
in_transform_pad_above[i] = above_pad;
in_transform_dilation_strides[i] = in_dilation_stride;
}
AxisVector in_transform_axis_order(2 + n_spatial_dimensions);
for (size_t i = 0; i < in_transform_axis_order.size(); i++)
{
in_transform_axis_order[i] = i;
}
CoordinateTransform in_transform(in_shape,
in_transform_start,
in_transform_end,
in_transform_movement_strides,
in_transform_axis_order,
in_transform_pad_below,
in_transform_pad_above,
in_transform_dilation_strides);
// Simultaneously with iterating I, for the filter we need to iterate the
// coordinate:
//
// F
//
// over the range (noninclusive on the right):
//
// (chan_out,0,0,...,0) ->
// (chan_out+1,
// chans_in_count,
// filter_dims_1,
// ...,
// filter_dims_n)
//
// with unit stride.
Shape filter_transform_start(2 + n_spatial_dimensions);
Shape filter_transform_end(2 + n_spatial_dimensions);
filter_transform_start[filter_out_channel_axis] = out_channel;
filter_transform_end[filter_out_channel_axis] = out_channel + 1;
filter_transform_start[filter_in_channel_axis] = 0;
filter_transform_end[filter_in_channel_axis] = 1;
for (size_t i = 2; i < n_spatial_dimensions + 2; i++)
{
filter_transform_start[i] = 0;
filter_transform_end[i] = filter_shape[i];
}
CoordinateTransform filter_transform(
filter_shape, filter_transform_start, filter_transform_end);
// As we go, we sum up:
//
// out[O] += in[I] * filter[F].
ACCUMULATION result = 0;
CoordinateTransform::Iterator in_it = in_transform.begin();
CoordinateTransform::Iterator filter_it = filter_transform.begin();
CoordinateTransform::Iterator in_it_end = in_transform.end();
CoordinateTransform::Iterator filter_it_end = filter_transform.end();
size_t in_channel_stride = row_major_strides(in_shape).at(in_channel_axis);
size_t filter_in_channel_stride =
row_major_strides(filter_shape).at(filter_in_channel_axis);
while (in_it != in_it_end && filter_it != filter_it_end)
{
const Coordinate& in_coord = *in_it;
if (in_transform.has_source_coordinate(in_coord))
{
size_t in_idx = in_transform.index(in_coord);
const Coordinate& filter_coord = *filter_it;
size_t filter_idx = filter_transform.index(filter_coord);
for (size_t in_channel = 0; in_channel < n_in_channels; ++in_channel)
{
ACCUMULATION in_v = static_cast<ACCUMULATION>(in[in_idx]);
ACCUMULATION f_v = static_cast<ACCUMULATION>(filter[filter_idx]);
result += in_v * f_v;
in_idx += in_channel_stride;
filter_idx += filter_in_channel_stride;
}
}
++in_it;
++filter_it;
}
out[out_transform.index(out_coord)] = result;
}
std::fesetround(old_mode);
}
template <typename OUTPUT,
typename FILTER,
typename INPUT,
typename ACCUMULATION = typename widen<INPUT>::type>
void convolution_backprop_in(const OUTPUT* delta_out,
const FILTER* filter,
INPUT* delta_in,
const Shape& out_shape,
const Shape& filter_shape,
const Shape& in_shape,
const Strides& in_dilation, const Strides& in_dilation,
const Strides& filter_dilation, const Strides& filter_dilation,
const CoordinateDiff& forward_in_pad_bellow, const CoordinateDiff& forward_in_pad_bellow,
const CoordinateDiff& forward_in_pad_above, const CoordinateDiff& forward_in_pad_above,
const Strides& stride) const Strides& stride,
const CoordinateDiff& output_padding)
{ {
std::vector<T> extended_input;
std::vector<T> extended_filter;
AxisSet reverse_axes;
Shape conv_input_shape = in_shape;
Shape conv_filter_shape = filter_shape;
Strides conv_stride = stride;
Strides conv_filter_dilation = filter_dilation;
auto conv_input_data = delta_in;
validate_convolution_backprop_parameters(in_shape,
filter_shape,
out_shape,
stride,
filter_dilation,
forward_in_pad_bellow,
forward_in_pad_above,
output_padding);
// Note that we only reverse the spatial dimensions here (loop // Note that we only reverse the spatial dimensions here (loop
// starts at 2) // starts at 2)
std::vector<INPUT> reversed(shape_size(filter_shape)); std::vector<T> reversed(shape_size(filter_shape));
AxisSet reverse_axes; for (size_t i = 2; i < filter_shape.size(); ++i)
size_t reverse_axes_start = 2;
for (size_t i = reverse_axes_start; i < filter_shape.size(); ++i)
{ {
reverse_axes.insert(i); reverse_axes.insert(i);
} }
@ -242,55 +323,109 @@ namespace ngraph
filter_shape, filter_shape,
filter_shape, filter_shape,
reverse_axes, reverse_axes,
sizeof(FILTER)); sizeof(T));
size_t filter_out_channel_axis = 1;
size_t filter_in_channel_axis = 0;
// Compute backward pad out pad bellow auto conv_filter_data = &reversed[0];
size_t spatial_dim_count = in_shape.size() - 2;
CoordinateDiff backward_delta_out_pad_below; // if channel number for output is > 1 then reverse layout of filter coefficients as
backward_delta_out_pad_below.resize(spatial_dim_count); // it is required by convolve_3D_channels() function.
// Current layout:
for (size_t i = 0; i < spatial_dim_count; i++) // batch0_ch0|batch0_ch1|...|batch0_chN|...|batch1_ch0|batch1_ch1|...|batch1_chN|...
// Expected layout:
// batch0_ch0|batch1_ch0|...|batchN_ch0|...|batch0_ch1|batch1_ch1|...|batch1_chN|...
if (filter_shape[1] > 1)
{ {
backward_delta_out_pad_below[i] = std::vector<T> temp_reversed(reversed);
(static_cast<ptrdiff_t>(filter_shape[i + 2]) - 1) * filter_dilation[i] - const Shape filter_dim_shape(filter_shape.begin() + 2, filter_shape.end());
forward_in_pad_bellow[i]; const size_t filter_size = shape_size(filter_dim_shape);
for (size_t i = 0; i < filter_shape[1]; i++)
{
for (size_t j = 0; j < filter_shape[0]; j++)
{
const auto delta = temp_reversed.begin() +
j * filter_shape[1] * filter_size + i * filter_size;
const auto out = reversed.begin() + i * filter_shape[0] * filter_size +
j * filter_size;
std::copy(delta, delta + filter_size, out);
}
} }
// Compute backward pad out pad above
CoordinateDiff backward_delta_out_pad_above;
backward_delta_out_pad_above.resize(spatial_dim_count);
for (size_t i = 0; i < spatial_dim_count; i++)
{
backward_delta_out_pad_above[i] =
(static_cast<ptrdiff_t>(filter_shape[i + 2]) - 1) * filter_dilation[i] +
((forward_in_pad_bellow[i] + ((in_shape[i + 2]) - 1) * in_dilation[i] +
forward_in_pad_above[i] -
(static_cast<ptrdiff_t>(filter_shape[i + 2]) - 1) * filter_dilation[i]) %
stride[i]) -
forward_in_pad_above[i];
} }
convolution_backprop_impl<OUTPUT, FILTER, INPUT, ACCUMULATION>( // swap filter batch and channels
delta_out, std::iter_swap(conv_filter_shape.begin(), conv_filter_shape.begin() + 1);
&reversed[0],
delta_in, // extend stride and filter inputs with zero padding for stride and filter_dilation
out_shape, // > 1, after that set stride and filter params to 1.
const size_t stride_dim =
std::accumulate(stride.begin(), stride.end(), 1, std::multiplies<size_t>());
if (stride_dim >= 2)
{
extend_with_zeros(stride, in_shape, delta_in, conv_input_shape, extended_input);
std::fill(conv_stride.begin(), conv_stride.end(), 1);
conv_input_data = &extended_input[0];
}
const size_t dilation_dim = std::accumulate(
filter_dilation.begin(), filter_dilation.end(), 1, std::multiplies<size_t>());
if (dilation_dim >= 2)
{
extend_with_zeros<T>(filter_dilation,
filter_shape, filter_shape,
reinterpret_cast<const T*>(&reversed[0]),
conv_filter_shape,
extended_filter);
std::fill(conv_filter_dilation.begin(), conv_filter_dilation.end(), 1);
conv_filter_data = &extended_filter[0];
}
convolution_backprop_impl(conv_input_data,
conv_filter_data,
delta_out,
conv_input_shape,
conv_filter_shape,
out_shape,
conv_stride,
conv_filter_dilation,
forward_in_pad_bellow,
forward_in_pad_above,
output_padding);
}
// DEPRECATED, can't be removed currently due to arm-plugin dependency
template <typename OUTPUT,
typename FILTER,
typename INPUT,
typename ACCUMULATION = typename widen<INPUT>::type>
NGRAPH_DEPRECATED(
"convolution_backprop_in function with 4 template types is deprecated, use "
"function with 1 template and output_padding parameter.")
void convolution_backprop_in(const INPUT* delta_in,
const FILTER* filter,
OUTPUT* delta_out,
const Shape& in_shape,
const Shape& filter_shape,
const Shape& out_shape,
const Strides& in_dilation,
const Strides& filter_dilation,
const CoordinateDiff& forward_in_pad_bellow,
const CoordinateDiff& forward_in_pad_above,
const Strides& stride)
{
const ngraph::CoordinateDiff output_padding(in_shape.size() - 2, 0);
convolution_backprop_in(delta_in,
filter,
delta_out,
in_shape, in_shape,
filter_shape,
out_shape,
in_dilation, in_dilation,
filter_dilation, filter_dilation,
backward_delta_out_pad_below, forward_in_pad_bellow,
backward_delta_out_pad_above, forward_in_pad_above,
stride, stride,
0, output_padding);
1,
filter_out_channel_axis,
filter_in_channel_axis,
0,
1);
} }
} // namespace reference } // namespace reference
} // namespace runtime } // namespace runtime
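The new validate_convolution_backprop_parameters() helper checks the provided output shape against the usual ConvolutionBackpropData size relation, which for explicit pads reads out = (in - 1) * stride + dilation * (filter - 1) + 1 - (pads_begin + pads_end) + output_padding. A small stand-alone check of that relation with example numbers (not library code):

    #include <cstddef>
    #include <iostream>

    int main()
    {
        const std::ptrdiff_t in = 4, filter = 3, stride = 2, dilation = 1;
        const std::ptrdiff_t pads_begin = 1, pads_end = 1, output_padding = 1;

        const std::ptrdiff_t out = (in - 1) * stride + dilation * (filter - 1) + 1
                                   - (pads_begin + pads_end) + output_padding;

        std::cout << "expected spatial output dim: " << out << '\n'; // prints 8
    }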


@ -178,13 +178,13 @@ namespace ngraph
const size_t group_out_size = shape_size(group_out_shape); const size_t group_out_size = shape_size(group_out_shape);
Strides in_dilation(in_shape.size(), 1); Strides in_dilation(in_shape.size(), 1);
const ngraph::CoordinateDiff output_padding(in_shape.size() - 2, 0);
for (size_t batch_idx = 0; batch_idx < in_shape[in_batch_axis]; ++batch_idx) for (size_t batch_idx = 0; batch_idx < in_shape[in_batch_axis]; ++batch_idx)
{ {
group_filter = f; group_filter = f;
for (size_t group_idx = 0; group_idx < group_count; ++group_idx) for (size_t group_idx = 0; group_idx < group_count; ++group_idx)
{ {
runtime::reference::convolution_backprop_in<INPUT, FILTER, OUTPUT, ACCU>( runtime::reference::convolution_backprop_in(group_batch,
group_batch,
group_filter, group_filter,
group_out, group_out,
group_batch_shape, group_batch_shape,
@ -194,7 +194,8 @@ namespace ngraph
dilation, dilation,
pads_begin, pads_begin,
pads_end, pads_end,
strides); strides,
output_padding);
group_batch += group_batch_size; group_batch += group_batch_size;
group_filter += group_filter_size; group_filter += group_filter_size;
group_out += group_out_size; group_out += group_out_size;


@ -35,7 +35,7 @@ namespace ngraph
arg[transform.index(coord)] - temp_max[temp_transform.index(temp_coord)]); arg[transform.index(coord)] - temp_max[temp_transform.index(temp_coord)]);
} }
sum(out, temp_sum.data(), shape, axes, true); sum(out, temp_sum.data(), shape, axes);
for (const Coordinate& coord : transform) for (const Coordinate& coord : transform)
{ {


@ -42,7 +42,7 @@ namespace ngraph
} }
std::vector<T> sum_data(shape_size(reduce_shape)); std::vector<T> sum_data(shape_size(reduce_shape));
sum(sqr_data.data(), sum_data.data(), data_shape, axes, true); sum(sqr_data.data(), sum_data.data(), data_shape, axes);
autobroadcast_binop(data, autobroadcast_binop(data,
sum_data.data(), sum_data.data(),
out, out,


@ -5,6 +5,7 @@
 #pragma once
 #include <cmath>
+#include <numeric>
 #include "ngraph/coordinate_transform.hpp"
 #include "ngraph/shape_util.hpp"
@ -16,29 +17,27 @@ namespace ngraph
    namespace reference
    {
        template <typename T>
-       void product(const T* arg,
-                    T* out,
-                    const Shape& in_shape,
-                    const AxisSet& reduction_axes,
-                    bool keep_dims)
+       void product(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes)
        {
-           auto out_shape = reduce(in_shape, reduction_axes, keep_dims);
-           CoordinateTransform output_transform(out_shape);
-           for (const Coordinate& output_coord : output_transform)
-           {
-               out[output_transform.index(output_coord)] = 1;
-           }
-           CoordinateTransform input_transform(in_shape);
+           constexpr bool dont_keep_dims_in_output = false;
+           const auto out_shape = reduce(in_shape, reduction_axes, dont_keep_dims_in_output);
+           std::fill(out, out + shape_size(out_shape), 1);
+
+           const auto in_strides = row_major_strides(in_shape);
+           const auto out_strides = row_major_strides(out_shape);
+
+           CoordinateTransformBasic input_transform(in_shape);
            for (const Coordinate& input_coord : input_transform)
            {
-               Coordinate output_coord = reduce(input_coord, reduction_axes, keep_dims);
-               size_t output_index = output_transform.index(output_coord);
-               out[output_index] = out[output_index] * arg[input_transform.index(input_coord)];
+               const Coordinate output_coord =
+                   reduce(input_coord, reduction_axes, dont_keep_dims_in_output);
+
+               const size_t in_idx = std::inner_product(
+                   input_coord.begin(), input_coord.end(), in_strides.begin(), 0);
+               const size_t out_idx = std::inner_product(
+                   output_coord.begin(), output_coord.end(), out_strides.begin(), 0);
+
+               out[out_idx] = out[out_idx] * arg[in_idx];
            }
        }
    } // namespace reference
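The rewritten kernel maps every input coordinate to a flat input index and a flat output index by taking the inner product of the coordinate with the row-major strides of the corresponding shape, instead of indexing through a second CoordinateTransform. A standalone sketch of that index mapping under the same keep_dims == false convention; the helper names here are illustrative, not the ngraph ones:

// Sketch only: product-reduce a 2x3 array over axis 1 using stride-based indexing.
#include <cstddef>
#include <iostream>
#include <numeric>
#include <set>
#include <vector>

using Shape = std::vector<std::size_t>;

// Row-major strides: stride[i] is the product of the dimensions to the right of i.
static std::vector<std::size_t> row_major_strides(const Shape& s)
{
    std::vector<std::size_t> strides(s.size(), 1);
    for (std::size_t i = s.size(); i-- > 1;)
        strides[i - 1] = strides[i] * s[i];
    return strides;
}

// Drop the reduced axes from a coordinate (keep_dims == false).
static std::vector<std::size_t> reduce_coord(const std::vector<std::size_t>& c,
                                             const std::set<std::size_t>& axes)
{
    std::vector<std::size_t> out;
    for (std::size_t i = 0; i < c.size(); ++i)
        if (axes.count(i) == 0)
            out.push_back(c[i]);
    return out;
}

int main()
{
    const Shape in_shape{2, 3};
    const std::set<std::size_t> axes{1}; // reduce over the columns
    const auto in_strides = row_major_strides(in_shape);
    const auto out_strides = row_major_strides(Shape{2});

    const std::vector<int> arg{1, 2, 3, 4, 5, 6};
    std::vector<int> out(2, 1); // product identity

    for (std::size_t i = 0; i < in_shape[0]; ++i)
        for (std::size_t j = 0; j < in_shape[1]; ++j)
        {
            const std::vector<std::size_t> in_coord{i, j};
            const auto out_coord = reduce_coord(in_coord, axes);
            const std::size_t in_idx = std::inner_product(
                in_coord.begin(), in_coord.end(), in_strides.begin(), std::size_t{0});
            const std::size_t out_idx = std::inner_product(
                out_coord.begin(), out_coord.end(), out_strides.begin(), std::size_t{0});
            out[out_idx] *= arg[in_idx];
        }

    std::cout << out[0] << " " << out[1] << "\n"; // 6 120
}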


@ -34,7 +34,7 @@ namespace ngraph
                        arg[transform.index(coord)] - temp_ptr[temp_transform.index(temp_coord)]);
                }
-               sum(out, temp_ptr, shape, axes, true);
+               sum(out, temp_ptr, shape, axes);
                for (const Coordinate& coord : transform)
                {


@ -5,6 +5,7 @@
 #pragma once
 #include <cmath>
+#include <numeric>
 #include "ngraph/coordinate_transform.hpp"
 #include "ngraph/shape_util.hpp"
@ -41,34 +42,34 @@ namespace ngraph
        }
        template <typename T>
-       void sum(const T* arg,
-                T* out,
-                const Shape& in_shape,
-                const AxisSet& reduction_axes,
-                bool keep_dims)
+       void sum(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes)
        {
-           auto out_shape = reduce(in_shape, reduction_axes, keep_dims);
-           CoordinateTransform output_transform(out_shape);
-           std::vector<T> cs(shape_size(out_shape));
-           for (const Coordinate& output_coord : output_transform)
-           {
-               out[output_transform.index(output_coord)] = 0;
-               cs[output_transform.index(output_coord)] = 0;
-           }
-           CoordinateTransform input_transform(in_shape);
+           constexpr bool dont_keep_dims_in_output = false;
+           const auto out_shape = reduce(in_shape, reduction_axes, dont_keep_dims_in_output);
+
+           std::vector<T> cs(shape_size(out_shape), 0);
+           std::fill(out, out + shape_size(out_shape), 0);
+
+           const auto in_strides = row_major_strides(in_shape);
+           const auto out_strides = row_major_strides(out_shape);
+
+           CoordinateTransformBasic input_transform(in_shape);
            for (const Coordinate& input_coord : input_transform)
            {
-               Coordinate output_coord = reduce(input_coord, reduction_axes, keep_dims);
-               T x = arg[input_transform.index(input_coord)];
-               T& z = out[output_transform.index(output_coord)];
+               const Coordinate output_coord =
+                   reduce(input_coord, reduction_axes, dont_keep_dims_in_output);
+
+               const size_t in_idx = std::inner_product(
+                   input_coord.begin(), input_coord.end(), in_strides.begin(), 0);
+               const size_t out_idx = std::inner_product(
+                   output_coord.begin(), output_coord.end(), out_strides.begin(), 0);
+               T x = arg[in_idx];
+               T& z = out[out_idx];
                if (is_finite(x) && is_finite(z))
                {
-                   T& c = cs[output_transform.index(output_coord)];
+                   T& c = cs[out_idx];
                    T t = z + (x - c);
                    c = (t - z) - (x - c);
                    z = t;
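The per-output-slot `cs` buffer is the compensation term of Kahan (compensated) summation: `c` tracks the rounding error of the previous addition to the running sum `z` and folds it back into the next one. A minimal standalone sketch of the same update, with illustrative names rather than the ngraph ones:

// Sketch only: compare naive accumulation with Kahan summation on a sum that
// mixes one large value with many values far below its rounding unit.
#include <iostream>
#include <vector>

double kahan_sum(const std::vector<double>& values)
{
    double z = 0.0; // running sum
    double c = 0.0; // running compensation for lost low-order bits
    for (double x : values)
    {
        double t = z + (x - c); // add the compensated term
        c = (t - z) - (x - c);  // recover what was rounded away
        z = t;
    }
    return z;
}

int main()
{
    // 1e8 followed by a million 1e-9 terms: each small term is below half an
    // ULP of the running sum, so naive accumulation drops them entirely.
    std::vector<double> values(1000000, 1e-9);
    values.insert(values.begin(), 1e8);

    double naive = 0.0;
    for (double v : values)
        naive += v;

    std::cout.precision(17);
    std::cout << "naive: " << naive << "\n";            // 100000000 exactly
    std::cout << "kahan: " << kahan_sum(values) << "\n"; // ~100000000.001
}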


@ -436,8 +436,7 @@ namespace ngraph
                    ngraph::runtime::reference::sum<T>(input_ptr->get_data_ptr<T>(),
                                                       output_ptr->get_data_ptr<T>(),
                                                       input_shape,
-                                                      reduced_axes,
-                                                      false);
+                                                      reduced_axes);
                    // update a vector of inputs and input subscripts
                    inputs[input_ind] = output_ptr;
@ -743,8 +742,7 @@ namespace ngraph
                    ngraph::runtime::reference::sum<T>(mul_output->get_data_ptr<T>(),
                                                       result->get_data_ptr<T>(),
                                                       mul_output->get_shape(),
-                                                      reduced_axes,
-                                                      false);
+                                                      reduced_axes);
                    inputs[input_ind] = result;
                    input_subscripts[input_ind] = resultant_subscript;
                }


@ -6,6 +6,7 @@
 #include <ngraph/validation_util.hpp>
 #include "itt.hpp"
 #include "ngraph/graph_util.hpp"
+#include "ngraph/op/util/evaluate_helpers.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/runtime/reference/product.hpp"
 #include "ngraph/shape_util.hpp"
@ -45,7 +46,7 @@ namespace reduce_prod
        {
            out->set_shape(reduce(arg->get_shape(), axes, keep_dims));
            runtime::reference::product(
-               arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes, keep_dims);
+               arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes);
            return true;
        }
@ -75,8 +76,11 @@ bool op::v1::ReduceProd::evaluate(const HostTensorVector& outputs,
    NGRAPH_OP_SCOPE(v1_ReduceProd_evaluate);
    NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2));
    NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
-   return reduce_prod::evaluate_product(
-       inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
+
+   const auto reduction_axes = get_normalized_axes_from_tensor(
+       inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name());
+
+   return reduce_prod::evaluate_product(inputs[0], outputs[0], reduction_axes, get_keep_dims());
 }
 bool op::v1::ReduceProd::has_evaluate() const


@ -3,14 +3,15 @@
 //
 #include "ngraph/op/reduce_sum.hpp"
+#include <ngraph/validation_util.hpp>
 #include "itt.hpp"
 #include "ngraph/graph_util.hpp"
 #include "ngraph/op/broadcast.hpp"
+#include "ngraph/op/util/evaluate_helpers.hpp"
 #include "ngraph/op/util/op_types.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/runtime/reference/sum.hpp"
 #include "ngraph/shape_util.hpp"
-#include "util/evaluate_helpers.hpp"
 using namespace std;
 using namespace ngraph;
@ -47,7 +48,7 @@ namespace reduce_sum
        {
            out->set_shape(reduce(arg->get_shape(), axes, keep_dims));
            runtime::reference::sum(
-               arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes, keep_dims);
+               arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes);
            return true;
        }
@ -75,13 +76,11 @@ bool op::v1::ReduceSum::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
 {
    NGRAPH_OP_SCOPE(v1_ReduceSum_evaluate);
-   NGRAPH_CHECK(inputs.size() == 2,
-                "The ReduceSum operation expects 2 input tensors. Got: ",
-                inputs.size());
+   NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2));
+   NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
    const auto reduction_axes = get_normalized_axes_from_tensor(
-       inputs[1], get_input_partial_shape(0).rank(), get_friendly_name());
+       inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name());
    return reduce_sum::evaluate_sum(inputs[0], outputs[0], reduction_axes, get_keep_dims());
 }


@ -5,65 +5,53 @@
 #include "ngraph/op/selu.hpp"
 #include "itt.hpp"
-#include "ngraph/op/add.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/exp.hpp"
-#include "ngraph/op/maximum.hpp"
-#include "ngraph/op/minimum.hpp"
-#include "ngraph/op/multiply.hpp"
-#include "ngraph/op/subtract.hpp"
 using namespace std;
 using namespace ngraph;
-NGRAPH_SUPPRESS_DEPRECATED_START
-constexpr NodeTypeInfo op::v0::Selu::type_info;
-op::v0::Selu::Selu()
-    : FusedOp()
-{
-}
+NGRAPH_RTTI_DEFINITION(op::v0::Selu, "Selu", 0);
 op::v0::Selu::Selu(const Output<Node>& data, const Output<Node>& alpha, const Output<Node>& lambda)
-    : FusedOp({data, alpha, lambda})
+    : Op({data, alpha, lambda})
 {
     constructor_validate_and_infer_types();
 }
-void ngraph::op::v0::Selu::pre_validate_and_infer_types()
+void op::v0::Selu::validate_and_infer_types()
 {
-    set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
+    NGRAPH_OP_SCOPE(v0_Selu_validate_and_infer_types);
+    auto data_et = get_input_element_type(0);
+    auto alpha_et = get_input_element_type(1);
+    auto lambda_et = get_input_element_type(2);
+    auto result_et = element::dynamic;
+
+    NODE_VALIDATION_CHECK(this,
+                          element::Type::merge(result_et, result_et, data_et) &&
+                              element::Type::merge(result_et, result_et, alpha_et) &&
+                              element::Type::merge(result_et, result_et, lambda_et),
+                          "Input element types do not match : ",
+                          data_et,
+                          " and ",
+                          alpha_et,
+                          " and ",
+                          lambda_et);
+
+    NODE_VALIDATION_CHECK(this,
+                          result_et.is_dynamic() || result_et.is_real(),
+                          "Input element types must be floating-point. Got: ",
+                          result_et);
+
+    set_output_type(0, result_et, get_input_partial_shape(0));
 }
-bool ngraph::op::v0::Selu::visit_attributes(AttributeVisitor& visitor)
+bool op::v0::Selu::visit_attributes(AttributeVisitor& visitor)
 {
     NGRAPH_OP_SCOPE(v0_Selu_visit_attributes);
     return true;
 }
-OutputVector op::v0::Selu::decompose_op() const
-{
-    const auto data = input_value(0);
-    const auto alpha = input_value(1);
-    const auto lambda = input_value(2);
-    const auto zero_node = op::Constant::create(data.get_element_type(), Shape{1}, {0});
-
-    // lambda * ((max(data, 0) + (alpha * exp(min(data, 0)) - alpha))
-    return {std::make_shared<op::v1::Multiply>(
-        lambda,
-        std::make_shared<op::v1::Add>(
-            std::make_shared<op::v1::Maximum>(data, zero_node),
-            std::make_shared<op::v1::Subtract>(
-                std::make_shared<op::v1::Multiply>(
-                    alpha,
-                    std::make_shared<op::Exp>(std::make_shared<op::v1::Minimum>(data, zero_node))),
-                alpha)))};
-}
 shared_ptr<Node> op::v0::Selu::clone_with_new_inputs(const OutputVector& new_args) const
 {
     NGRAPH_OP_SCOPE(v0_Selu_clone_with_new_inputs);
     check_new_args_count(this, new_args);
-    return make_shared<v0::Selu>(new_args.at(0), new_args.at(1), new_args.at(2));
+    return make_shared<op::v0::Selu>(new_args.at(0), new_args.at(1), new_args.at(2));
 }
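Even though the decomposition is gone, the value Selu computes is unchanged: lambda * (max(x, 0) + alpha * (exp(min(x, 0)) - 1)), with alpha and lambda supplied as explicit inputs. A standalone numeric sketch of that formula in plain C++, not the ngraph evaluator:

// Sketch only: elementwise SELU over a small vector.
#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

std::vector<double> selu(const std::vector<double>& x, double alpha, double lambda)
{
    std::vector<double> y(x.size());
    for (std::size_t i = 0; i < x.size(); ++i)
    {
        const double pos = std::max(x[i], 0.0);
        const double neg = alpha * (std::exp(std::min(x[i], 0.0)) - 1.0);
        y[i] = lambda * (pos + neg);
    }
    return y;
}

int main()
{
    // Commonly quoted SELU constants; the op itself takes them as inputs.
    const double alpha = 1.6732632423543772;
    const double lambda = 1.0507009873554805;
    for (double v : selu({-1.0, 0.0, 2.0}, alpha, lambda))
        std::cout << v << " ";
    std::cout << "\n";
}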


@ -21,7 +21,7 @@
 using namespace std;
 using namespace ngraph;
-constexpr NodeTypeInfo op::v1::SpaceToBatch::type_info;
+NGRAPH_RTTI_DEFINITION(op::v1::SpaceToBatch, "SpaceToBatch", 1);
 ngraph::op::v1::SpaceToBatch::SpaceToBatch(const ngraph::Output<ngraph::Node>& data,
                                            const ngraph::Output<ngraph::Node>& block_shape,
@ -49,13 +49,13 @@ void op::v1::SpaceToBatch::validate_and_infer_types()
    NODE_VALIDATION_CHECK(this,
                          pads_begin_type.is_integral_number(),
-                         "crops_begin must be an integral number but got (",
+                         "pads_begin must be an integral number but got (",
                          pads_begin_type,
                          ").");
    NODE_VALIDATION_CHECK(this,
                          pads_end_type.is_integral_number(),
-                         "crops_end must be an integral number but got (",
+                         "pads_end must be an integral number but got (",
                          pads_end_type,
                          ").");


@ -16,7 +16,7 @@ op::util::ArithmeticReduction::ArithmeticReduction() {}
 op::util::ArithmeticReduction::ArithmeticReduction(const Output<Node>& arg,
                                                    const Output<Node>& reduction_axes)
-    : Op({arg, reduction_axes})
+    : ReductionBase(arg, reduction_axes)
 {
 }
@ -49,51 +49,15 @@ void op::util::ArithmeticReduction::set_reduction_axes(const AxisSet& reduction_
 void op::util::ArithmeticReduction::validate_and_infer_types()
 {
     NGRAPH_OP_SCOPE(util_ArithmeticReduction_validate_and_infer_types);
-    auto input_shape = get_input_partial_shape(0);
-    const auto input_rank = input_shape.rank();
-    PartialShape result_shape{PartialShape::dynamic()};
-    auto axes = get_constant_from_source(input_value(1));
-    if (input_rank.is_static() && axes)
-    {
-        AxisSet reduction_axes;
-        const auto reduction_axes_val = axes->cast_vector<int64_t>();
-        for (auto axis : reduction_axes_val)
-        {
-            try
-            {
-                axis = normalize_axis(this, axis, input_rank);
-            }
-            catch (const ngraph_error&)
-            {
-                NODE_VALIDATION_CHECK(this,
-                                      false,
-                                      "Reduction axis (",
-                                      axis,
-                                      ") is out of bounds ",
-                                      "(argument shape: ",
-                                      input_shape,
-                                      ", reduction axes: ",
-                                      reduction_axes,
-                                      ")");
-            }
-            reduction_axes.insert(axis);
-        }
-        std::vector<Dimension> dims;
-        for (int64_t i = 0; i < input_rank.get_length(); i++)
-        {
-            if (reduction_axes.count(i) == 0)
-            {
-                dims.push_back(input_shape[i]);
-            }
-        }
-        result_shape = PartialShape(dims);
-    }
+    const PartialShape& axes_shape = get_input_partial_shape(1);
+    const Rank axes_rank = axes_shape.rank();
+    NODE_VALIDATION_CHECK(this,
+                          axes_rank.compatible(0) || axes_rank.compatible(1),
+                          "Axes input must be a scalar or 1D input. Got: ",
+                          axes_shape);
+
+    PartialShape result_shape = infer_reduction_output_shape(false);
     set_input_is_relevant_to_shape(1);
     set_output_type(0, get_input_element_type(0), result_shape);
 }


@ -32,61 +32,28 @@ bool ngraph::op::util::ArithmeticReductionKeepDims::visit_attributes(AttributeVi
 void op::util::ArithmeticReductionKeepDims::validate_and_infer_types()
 {
     NGRAPH_OP_SCOPE(v0_util_ArithmeticReductionKeepDims_validate_and_infer_types);
-    if (m_keep_dims)
-    {
-        auto input_shape = get_input_partial_shape(0);
-        auto input_rank = input_shape.rank();
-        PartialShape result_shape{PartialShape::dynamic()};
-        if (input_rank.is_static())
-            result_shape = PartialShape::dynamic(input_rank);
-        const auto& axes = get_constant_from_source(input_value(1));
-        if (input_rank.is_static() && axes)
-        {
-            AxisSet reduction_axes;
-            auto reduction_axes_val = axes->cast_vector<int64_t>();
-            for (auto axis : reduction_axes_val)
-            {
-                try
-                {
-                    axis = normalize_axis(this, axis, input_rank);
-                }
-                catch (const ngraph_error&)
-                {
-                    NODE_VALIDATION_CHECK(this,
-                                          false,
-                                          "Reduction axis (",
-                                          axis,
-                                          ") is out of bounds ",
-                                          "(argument shape: ",
-                                          input_shape,
-                                          ", reduction axes: ",
-                                          reduction_axes,
-                                          ")");
-                }
-                reduction_axes.insert(axis);
-            }
-            std::vector<Dimension> dims;
-            for (int64_t i = 0; i < input_rank.get_length(); i++)
-            {
-                if (reduction_axes.count(i) == 0)
-                {
-                    dims.push_back(input_shape[i]);
-                }
-                else
-                {
-                    dims.emplace_back(Dimension{1});
-                }
-            }
-            result_shape = PartialShape(dims);
-        }
-        set_input_is_relevant_to_shape(1);
-        set_output_type(0, get_input_element_type(0), result_shape);
-    }
-    else
-    {
-        ArithmeticReduction::validate_and_infer_types();
-    }
+
+    const element::Type& data_et = get_input_element_type(0);
+    const PartialShape& axes_shape = get_input_partial_shape(1);
+    const element::Type& axes_et = get_input_element_type(1);
+
+    NODE_VALIDATION_CHECK(this,
+                          data_et.is_real() || data_et.is_integral_number(),
+                          "Element type of data input must be numeric. Got: ",
+                          data_et);
+
+    NODE_VALIDATION_CHECK(this,
+                          axes_et.is_integral_number(),
+                          "Element type of axes input must be integer. Got: ",
+                          axes_et);
+
+    const Rank axes_rank = axes_shape.rank();
+    NODE_VALIDATION_CHECK(this,
+                          axes_rank.compatible(0) || axes_rank.compatible(1),
+                          "Axes input must be a scalar or 1D input. Got: ",
+                          axes_shape);
+
+    PartialShape result_shape = infer_reduction_output_shape(m_keep_dims);
+    set_input_is_relevant_to_shape(1);
+    set_output_type(0, data_et, result_shape);
 }


@ -0,0 +1,17 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/util/evaluate_helpers.hpp"
namespace ngraph
{
AxisSet get_normalized_axes_from_tensor(const HostTensorPtr tensor,
const ngraph::Rank& rank,
const std::string& node_description)
{
const auto axes_vector = host_tensor_2_vector<int64_t>(tensor);
const auto normalized_axes = ngraph::normalize_axes(node_description, axes_vector, rank);
return AxisSet{normalized_axes};
}
} // namespace ngraph
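The helper turns the raw axes tensor into an AxisSet by normalizing each value against the data rank, so negative indices are accepted and out-of-range ones are reported against the node's friendly name. A standalone sketch of the normalization rule itself, assuming a static rank and plain exceptions instead of ngraph's error reporting; the names are illustrative:

// Sketch only: wrap negative axes by the rank and reject out-of-range values.
#include <cstdint>
#include <iostream>
#include <set>
#include <stdexcept>
#include <string>
#include <vector>

std::set<std::size_t> normalize_axes(const std::vector<std::int64_t>& axes, std::int64_t rank)
{
    std::set<std::size_t> normalized;
    for (std::int64_t axis : axes)
    {
        const std::int64_t shifted = axis < 0 ? axis + rank : axis;
        if (shifted < 0 || shifted >= rank)
            throw std::out_of_range("axis out of range for rank " + std::to_string(rank));
        normalized.insert(static_cast<std::size_t>(shifted));
    }
    return normalized;
}

int main()
{
    // For a rank-4 tensor, axes {-1, 1} normalize to {1, 3}.
    for (std::size_t a : normalize_axes({-1, 1}, 4))
        std::cout << a << " ";
    std::cout << "\n";
}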


@ -15,17 +15,17 @@ NGRAPH_RTTI_DEFINITION(op::util::LogicalReduction, "LogicalReduction", 1);
 op::util::LogicalReduction::LogicalReduction() {}
 op::util::LogicalReduction::LogicalReduction(const Output<Node>& arg, const AxisSet& reduction_axes)
-    : Op({arg,
+    : ReductionBase(arg,
          op::Constant::create(
              element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector())
-              ->output(0)})
+              ->output(0))
 {
     add_provenance_group_member(input_value(1).get_node_shared_ptr());
 }
 op::util::LogicalReduction::LogicalReduction(const Output<Node>& arg,
                                              const Output<Node>& reduction_axes)
-    : Op({arg, reduction_axes})
+    : ReductionBase(arg, reduction_axes)
 {
 }
@ -54,59 +54,20 @@ void op::util::LogicalReduction::set_reduction_axes(const AxisSet& reduction_axe
 void op::util::LogicalReduction::validate_and_infer_types()
 {
     NGRAPH_OP_SCOPE(util_LogicalReduction_validate_and_infer_types);
-    auto input_shape = get_input_partial_shape(0);
-    auto input_rank = input_shape.rank();
-    PartialShape result_shape{PartialShape::dynamic()};
+    const element::Type& data_et = get_input_element_type(0);
+    const PartialShape& axes_shape = get_input_partial_shape(1);
+
+    NODE_VALIDATION_CHECK(
+        this, data_et.compatible(element::boolean), "Element type of data input must be boolean.");
+
+    const Rank axes_rank = axes_shape.rank();
+    NODE_VALIDATION_CHECK(this,
+                          axes_rank.compatible(0) || axes_rank.compatible(1),
+                          "Axes input must be a scalar or 1D input. Got: ",
+                          axes_shape);
+
+    PartialShape result_shape = infer_reduction_output_shape(false);
     set_input_is_relevant_to_shape(1);
-    NODE_VALIDATION_CHECK(this,
-                          get_input_element_type(0).compatible(element::boolean),
-                          "Input element type must be boolean.");
-    set_output_type(0, element::boolean, result_shape);
-    if (input_rank.is_dynamic())
-        return;
-    if (const auto axes_const = get_constant_from_source(input_value(1)))
-    {
-        AxisSet reduction_axes;
-        auto reduction_axes_val = axes_const->cast_vector<int64_t>();
-        for (auto axis : reduction_axes_val)
-        {
-            try
-            {
-                axis = normalize_axis(this, axis, input_rank);
-            }
-            catch (const ngraph_error&)
-            {
-                NODE_VALIDATION_CHECK(this,
-                                      false,
-                                      "Reduction axis (",
-                                      axis,
-                                      ") is out of bounds ",
-                                      "(argument shape: ",
-                                      input_shape,
-                                      ", reduction axes: ",
-                                      reduction_axes,
-                                      ")");
-            }
-            reduction_axes.insert(axis);
-        }
-        std::vector<Dimension> dims;
-        for (int64_t i = 0; i < input_rank.get_length(); i++)
-        {
-            if (reduction_axes.count(i) == 0)
-            {
-                dims.push_back(input_shape[i]);
-            }
-        }
-        result_shape = PartialShape(dims);
-    }
-    set_output_type(0, element::boolean, result_shape);
+    set_output_type(0, data_et, result_shape);
 }


@ -32,63 +32,26 @@ bool ngraph::op::util::LogicalReductionKeepDims::visit_attributes(AttributeVisit
 void op::util::LogicalReductionKeepDims::validate_and_infer_types()
 {
     NGRAPH_OP_SCOPE(v0_util_LogicalReductionKeepDims_validate_and_infer_types);
-    if (m_keep_dims)
-    {
-        const auto input_shape = get_input_partial_shape(0);
-        const auto input_rank = input_shape.rank();
-        PartialShape result_shape{PartialShape::dynamic(input_rank)};
-        set_input_is_relevant_to_shape(1);
-        set_output_type(0, get_input_element_type(0), result_shape);
-        if (input_shape.is_dynamic())
-            return;
-        if (auto axes_const = get_constant_from_source(input_value(1)))
-        {
-            AxisSet reduction_axes;
-            auto reduction_axes_val = axes_const->cast_vector<int64_t>();
-            for (auto axis : reduction_axes_val)
-            {
-                try
-                {
-                    axis = normalize_axis(this, axis, input_rank);
-                }
-                catch (const ngraph_error&)
-                {
-                    NODE_VALIDATION_CHECK(this,
-                                          false,
-                                          "Reduction axis (",
-                                          axis,
-                                          ") is out of bounds ",
-                                          "(argument shape: ",
-                                          input_shape,
-                                          ", reduction axes: ",
-                                          reduction_axes,
-                                          ")");
-                }
-                reduction_axes.insert(axis);
-            }
-            std::vector<Dimension> dims;
-            for (int64_t i = 0; i < input_rank.get_length(); i++)
-            {
-                if (reduction_axes.count(i) == 0)
-                {
-                    dims.push_back(input_shape[i]);
-                }
-                else
-                {
-                    dims.emplace_back(Dimension{1});
-                }
-            }
-            result_shape = PartialShape(dims);
-        }
-        set_output_type(0, get_input_element_type(0), result_shape);
-    }
-    else
-    {
-        LogicalReduction::validate_and_infer_types();
-    }
+
+    const element::Type& data_et = get_input_element_type(0);
+    const PartialShape& axes_shape = get_input_partial_shape(1);
+    const element::Type& axes_et = get_input_element_type(1);
+
+    NODE_VALIDATION_CHECK(
+        this, data_et.compatible(element::boolean), "Element type of data input must be boolean.");
+
+    NODE_VALIDATION_CHECK(this,
+                          axes_et.is_integral_number(),
+                          "Element type of axes input must be integer. Got: ",
+                          axes_et);
+
+    const Rank axes_rank = axes_shape.rank();
+    NODE_VALIDATION_CHECK(this,
+                          axes_rank.compatible(0) || axes_rank.compatible(1),
+                          "Axes input must be a scalar or 1D input. Got: ",
+                          axes_shape);
+
+    PartialShape result_shape = infer_reduction_output_shape(m_keep_dims);
+    set_input_is_relevant_to_shape(1);
+    set_output_type(0, data_et, result_shape);
 }


@ -0,0 +1,74 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/util/reduction_base.hpp"
#include "itt.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::util::ReductionBase, "ReductionBase", 0);
op::util::ReductionBase::ReductionBase() {}
op::util::ReductionBase::ReductionBase(const Output<Node>& arg, const Output<Node>& reduction_axes)
: Op({arg, reduction_axes})
{
}
PartialShape op::util::ReductionBase::infer_reduction_output_shape(const bool keep_dims)
{
const PartialShape& data_ps = get_input_partial_shape(0);
PartialShape result_ps{PartialShape::dynamic()};
Rank data_rank = data_ps.rank();
if (data_rank.is_static() && keep_dims)
{
result_ps = PartialShape::dynamic(data_rank);
}
const auto& axes = get_constant_from_source(input_value(1));
if (data_rank.is_static() && axes)
{
AxisSet reduction_axes;
auto reduction_axes_val = axes->cast_vector<int64_t>();
for (auto axis : reduction_axes_val)
{
try
{
axis = normalize_axis(this, axis, data_rank);
}
catch (const ngraph_error&)
{
NODE_VALIDATION_CHECK(this,
false,
"Reduction axis (",
axis,
") is out of bounds ",
"(argument shape: ",
data_ps,
", reduction axes: ",
reduction_axes,
")");
}
reduction_axes.insert(axis);
}
std::vector<Dimension> dims;
for (int64_t i = 0; i < data_rank.get_length(); i++)
{
if (reduction_axes.count(i) == 0)
{
dims.push_back(data_ps[i]);
}
else if (keep_dims)
{
dims.emplace_back(Dimension{1});
}
}
result_ps = PartialShape(dims);
}
return result_ps;
}
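infer_reduction_output_shape applies the usual reduction shape rule once the axes are known: reduced axes are dropped when keep_dims is false and kept as size-1 dimensions when it is true, with a fall-back to a dynamic shape when the rank or the axes are not statically known. A standalone sketch of the static-shape case, using an illustrative helper rather than the ngraph method:

// Sketch only: static-shape version of the reduction output-shape rule.
#include <cstddef>
#include <iostream>
#include <set>
#include <vector>

std::vector<std::size_t> reduced_shape(const std::vector<std::size_t>& in_shape,
                                       const std::set<std::size_t>& axes,
                                       bool keep_dims)
{
    std::vector<std::size_t> out;
    for (std::size_t i = 0; i < in_shape.size(); ++i)
    {
        if (axes.count(i) == 0)
            out.push_back(in_shape[i]); // untouched dimension
        else if (keep_dims)
            out.push_back(1); // reduced dimension kept as 1
        // reduced dimension dropped when keep_dims == false
    }
    return out;
}

int main()
{
    const std::vector<std::size_t> shape{2, 3, 4};
    for (std::size_t d : reduced_shape(shape, {1}, false))
        std::cout << d << " "; // 2 4
    std::cout << "| ";
    for (std::size_t d : reduced_shape(shape, {1}, true))
        std::cout << d << " "; // 2 1 4
    std::cout << "\n";
}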


@ -19,19 +19,6 @@ namespace ngraph
        {
            OutputVector global_average_pool(const Node& node)
            {
-               auto data = node.get_ng_inputs()[0];
-               auto data_rank = data.get_partial_shape().rank();
-
-               NGRAPH_CHECK(data_rank.is_static(),
-                            "The input data tensor's rank has to be known (static)");
-
-               auto data_rank_value = data_rank.get_length();
-
-               NGRAPH_CHECK(data_rank_value > 2,
-                            "The input data tensor's rank has to be greater than 2."
-                            "Provided data rank is: ",
-                            data_rank_value);
-
                // Generate axes for reduce operation which contain all spatial dims indexes.
                // Examples:
                // Input shape: [N, C, H, W]
@ -41,11 +28,22 @@ namespace ngraph
                // Input shape: [N, C, H, W, D]
                // Input spatial dimensions are H, W and D
                // Expected spatial dims indexes: [2, 3, 4]
-               size_t data_spatial_rank = data_rank_value - 2;
-               auto reduce_axes_vector = std::vector<std::int64_t>(data_spatial_rank);
-               std::iota(reduce_axes_vector.begin(), reduce_axes_vector.end(), 2);
-               auto reduce_axes = default_opset::Constant::create(
-                   element::i64, Shape{data_spatial_rank}, reduce_axes_vector);
+               auto data = node.get_ng_inputs()[0];
+
+               const auto zero_node =
+                   default_opset::Constant::create(element::i64, Shape{}, {0});
+               const auto one_node =
+                   default_opset::Constant::create(element::i64, Shape{}, {1});
+               const auto two_node =
+                   default_opset::Constant::create(element::i64, Shape{}, {2});
+
+               const auto data_shape = std::make_shared<default_opset::ShapeOf>(data);
+               const auto data_rank = std::make_shared<default_opset::ShapeOf>(data_shape);
+               const auto data_rank_as_scalar =
+                   std::make_shared<default_opset::Squeeze>(data_rank);
+
+               const auto reduce_axes = std::make_shared<default_opset::Range>(
+                   two_node, data_rank_as_scalar, one_node, element::i64);
+
                return {std::make_shared<default_opset::ReduceMean>(data, reduce_axes, true)};
            }
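The axes both this importer and the global_max_pool importer below need are the indexes of all spatial dimensions, i.e. [2, 3, ..., rank-1] for an [N, C, spatial...] layout. The old code built that list on the host with std::iota, which requires a static rank; the new code builds the same sequence inside the graph with ShapeOf + Range so dynamic ranks also work. A standalone sketch of the host-side version of that axis computation (illustrative only):

// Sketch only: spatial reduce axes for a rank-r tensor are [2, r).
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

std::vector<std::int64_t> spatial_axes(std::int64_t rank)
{
    // Equivalent of Range(2, rank, 1): indexes of all spatial dimensions.
    std::vector<std::int64_t> axes(rank > 2 ? rank - 2 : 0);
    std::iota(axes.begin(), axes.end(), 2);
    return axes;
}

int main()
{
    for (std::int64_t a : spatial_axes(4))
        std::cout << a << " "; // 2 3
    std::cout << "| ";
    for (std::int64_t a : spatial_axes(5))
        std::cout << a << " "; // 2 3 4
    std::cout << "\n";
}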


@ -19,19 +19,6 @@ namespace ngraph
        {
            OutputVector global_max_pool(const Node& node)
            {
-               auto data = node.get_ng_inputs()[0];
-               auto data_rank = data.get_partial_shape().rank();
-
-               NGRAPH_CHECK(data_rank.is_static(),
-                            "The input data tensor's rank has to be known (static)");
-
-               auto data_rank_value = data_rank.get_length();
-
-               NGRAPH_CHECK(data_rank_value > 2,
-                            "The input data tensor's rank has to be greater than 2."
-                            "Provided data rank is: ",
-                            data_rank_value);
-
                // Generate axes for reduce operation which contain all spatial dims indexes.
                // Examples:
                // Input shape: [N, C, H, W]
@ -41,11 +28,22 @@ namespace ngraph
                // Input shape: [N, C, H, W, D]
                // Input spatial dimensions are H, W and D
                // Expected spatial dims indexes: [2, 3, 4]
-               size_t data_spatial_rank = data_rank_value - 2;
-               auto reduce_axes_vector = std::vector<std::int64_t>(data_spatial_rank);
-               std::iota(reduce_axes_vector.begin(), reduce_axes_vector.end(), 2);
-               auto reduce_axes = default_opset::Constant::create(
-                   element::i64, Shape{data_spatial_rank}, reduce_axes_vector);
+               auto data = node.get_ng_inputs()[0];
+
+               const auto zero_node =
+                   default_opset::Constant::create(element::i64, Shape{}, {0});
+               const auto one_node =
+                   default_opset::Constant::create(element::i64, Shape{}, {1});
+               const auto two_node =
+                   default_opset::Constant::create(element::i64, Shape{}, {2});
+
+               const auto data_shape = std::make_shared<default_opset::ShapeOf>(data);
+               const auto data_rank = std::make_shared<default_opset::ShapeOf>(data_shape);
+               const auto data_rank_as_scalar =
+                   std::make_shared<default_opset::Squeeze>(data_rank);
+
+               const auto reduce_axes = std::make_shared<default_opset::Range>(
+                   two_node, data_rank_as_scalar, one_node, element::i64);
+
                return {std::make_shared<default_opset::ReduceMax>(data, reduce_axes, true)};
            }
} }


@ -73,6 +73,8 @@ set(SRC
    op_eval/non_zero.cpp
    op_eval/reduce_l1.cpp
    op_eval/reduce_l2.cpp
+   op_eval/reduce_prod.cpp
+   op_eval/reduce_sum.cpp
    op_eval/roi_align.cpp
    op_eval/roi_pooling.cpp
    op_eval/round.cpp
@ -172,6 +174,13 @@ set(SRC
    type_prop/read_value.cpp
    type_prop/reduce_l1.cpp
    type_prop/reduce_l2.cpp
+   type_prop/reduce_logical_and.cpp
+   type_prop/reduce_logical_or.cpp
+   type_prop/reduce_max.cpp
+   type_prop/reduce_mean.cpp
+   type_prop/reduce_min.cpp
+   type_prop/reduce_prod.cpp
+   type_prop/reduce_sum.cpp
    type_prop/reorg_yolo.cpp
    type_prop/reshape.cpp
    type_prop/result.cpp
@ -188,6 +197,7 @@ set(SRC
    type_prop/scatter_nd_update.cpp
    type_prop/scatter_update.cpp
    type_prop/select.cpp
+   type_prop/selu.cpp
    type_prop/shape_of.cpp
    type_prop/shuffle_channels.cpp
    type_prop/softmax.cpp
@ -198,9 +208,6 @@ set(SRC
    type_prop/squared_difference.cpp
    type_prop/squeeze.cpp
    type_prop/swish.cpp
-   type_prop/reduce_mean.cpp
-   type_prop/reduce_prod.cpp
-   type_prop/reduce_sum.cpp
    type_prop/ti.cpp
    type_prop/tile.cpp
    type_prop/top_k.cpp
@ -265,8 +272,10 @@ set(SRC
    visitors/op/reverse_sequence.cpp
    visitors/op/rnn_cell.cpp
    visitors/op/roi_pooling.cpp
+   visitors/op/selu.cpp
    visitors/op/shuffle_channels.cpp
    visitors/op/softmax.cpp
+   visitors/op/space_to_batch.cpp
    visitors/op/space_to_depth.cpp
    visitors/op/split.cpp
    visitors/op/squared_difference.cpp
@ -347,6 +356,7 @@ set(MULTI_TEST_SRC
    backend/constant.in.cpp
    backend/convert.in.cpp
    backend/convert_like.in.cpp
+   backend/convolution_backprop.in.cpp
    backend/convolution.in.cpp
    backend/binary_convolution.in.cpp
    backend/clamp.in.cpp
@ -430,12 +440,14 @@ set(MULTI_TEST_SRC
    backend/round.in.cpp
    backend/scatter_nd_update.in.cpp
    backend/select.in.cpp
+   backend/selu.in.cpp
    backend/shape_of.in.cpp
    backend/sigmoid.in.cpp
    backend/sign.in.cpp
    backend/sin.in.cpp
    backend/sinh.in.cpp
    backend/softmax.in.cpp
+   backend/space_to_batch.in.cpp
    backend/split.in.cpp
    backend/sqrt.in.cpp
    backend/squared_difference.in.cpp

(File diff suppressed because it is too large.)


@ -80,95 +80,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows)
    EXPECT_TRUE(test::all_close_f((vector<float>{2, 12, 30}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows_zero)
{
Shape shape_a{3, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_cols_zero)
{
// Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
Shape shape_a{0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1, 1}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_vector_zero)
{
Shape shape_a{0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_to_scalar_zero_by_zero)
{
Shape shape_a{0, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{};
auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_matrix_most_sig)
{
    Shape shape_a{3, 3, 3};
@ -283,31 +194,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_scalar)
                                  read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_eliminate_zero_dim)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1, 1, 1, 1}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_2d_to_scalar_int32)
{
    Shape shape_a{3, 3};
@ -433,95 +319,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_rows)
    EXPECT_TRUE(test::all_close_f((vector<float>{2, 12, 30}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_rows_zero)
{
Shape shape_a{3, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_cols_zero)
{
// Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
Shape shape_a{0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1, 1}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_vector_zero)
{
Shape shape_a{0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_to_scalar_zero_by_zero)
{
Shape shape_a{0, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_matrix_most_sig)
{
    Shape shape_a{3, 3, 3};
@ -636,31 +433,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_scalar)
                                  read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_eliminate_zero_dim)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 1, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1, 1, 1, 1}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_2d_to_scalar_int32)
{
    Shape shape_a{3, 3};


@ -151,95 +151,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows)
    EXPECT_TRUE(test::all_close_f((vector<float>{3, 7, 11}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows_zero)
{
Shape shape_a{3, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0, 0}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_cols_zero)
{
// Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
Shape shape_a{0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_vector_zero)
{
Shape shape_a{0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_to_scalar_zero_by_zero)
{
Shape shape_a{0, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{};
auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_matrix_most_sig)
{
    Shape shape_a{3, 3, 3};
@ -376,56 +287,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_scalar_int32)
                              read_vector<int32_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_eliminate_zero_dim)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0, 0, 0, 0, 0}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_eliminate_zero_dim_int32)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::i32, shape_a);
Shape shape_rt{3, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape_a);
copy_data(a, vector<int32_t>{});
auto result = backend->create_tensor(element::i32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<int32_t>{2112, 2112, 2112, 2112, 2112, 2112});
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int32_t>{0, 0, 0, 0, 0, 0}), read_vector<int32_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_5d_to_scalar)
{
    Shape shape_a{3, 3, 3, 3, 3};
@ -489,27 +350,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_2d_to_scalar_int8)
    EXPECT_EQ(std::vector<int8_t>{45}, read_vector<int8_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_trivial_in_double)
{
Shape shape{4, 3};
Shape rshape{3};
auto A = make_shared<op::Parameter>(element::f64, shape);
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f64, shape);
copy_data(a, vector<double>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
auto result = backend->create_tensor(element::f64, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<double>{30, 22, 26}), read_vector<double>(result)));
}
#if NGRAPH_INTERPRETER_ENABLE
#ifndef _WIN32
@ -548,39 +388,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_acc)
}
#endif
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_acc_double)
{
std::string backend_name = "${BACKEND_NAME}";
if (backend_name == "INTERPRETER")
{
return;
}
Shape shape_a{10, 10, 20, 300};
auto A = make_shared<op::Parameter>(element::f64, shape_a);
Shape shape_rt{10};
auto axes = make_shared<op::Constant>(element::i32, Shape{3}, vector<int32_t>{1, 2, 3});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
test::Uniform<double> rng(1000000000.0L, 1000000000.001L, 2112);
vector<vector<double>> args;
for (shared_ptr<op::Parameter> param : f->get_parameters())
{
vector<double> tensor_val(shape_size(param->get_shape()));
rng.initialize(tensor_val);
args.push_back(tensor_val);
}
auto ref_func = clone_function(*f);
auto bk_func = clone_function(*f);
auto ref_results = execute(ref_func, args, "INTERPRETER");
auto bk_results = execute(bk_func, args, "${BACKEND_NAME}");
EXPECT_TRUE(test::all_close(ref_results.at(0), bk_results.at(0), 0.0, 1e-5));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_simple_float)
{
    std::string backend_name = "${BACKEND_NAME}";
@ -611,106 +418,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_simple_float)
        test::all_close_f(ref_results.at(0), bk_results.at(0), DEFAULT_FLOAT_TOLERANCE_BITS - 1));
}
#ifndef _WIN32
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_simple_double)
{
std::string backend_name = "${BACKEND_NAME}";
if (backend_name == "INTERPRETER")
{
return;
}
Shape shape_a{20};
auto A = make_shared<op::Parameter>(element::f64, shape_a);
Shape shape_rt{};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
vector<vector<double>> args;
args.push_back(vector<double>{10000000000000000.0L,
0.2L,
0.3L,
0.4L,
0.5L,
0.6L,
0.7L,
0.8L,
0.9L,
0.7L,
0.9L,
0.7L,
0.3L,
0.6L,
0.8L,
0.4L,
0.6L,
0.5L,
0.8L,
0.7L});
auto ref_func = clone_function(*f);
auto bk_func = clone_function(*f);
auto ref_results = execute(ref_func, args, "INTERPRETER");
auto bk_results = execute(bk_func, args, "${BACKEND_NAME}");
EXPECT_TRUE(test::all_close(ref_results.at(0), bk_results.at(0), 0.0, 2.0));
}
#endif
#endif
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_dynamic)
{
// Create a graph for f(x,axes:int32) = Sum(x,Convert<int64>(axes)).
auto x = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto axes = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
auto axes_i64 = make_shared<op::Convert>(axes, element::i64);
auto sum = make_shared<op::v1::ReduceSum>(x, axes_i64, false);
ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic());
auto f = make_shared<Function>(NodeVector{sum}, ParameterVector{x, axes});
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
auto ex = backend->compile(f);
auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
std::vector<Shape> x_shapes{
Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}};
std::vector<std::vector<int32_t>> axeses{{}, {0}, {1}, {0, 1}, {}, {0}};
std::vector<std::vector<float>> inputs{{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5},
{1, 2, 3, 4, 5}};
std::vector<Shape> expected_result_shapes{
Shape{2, 3}, Shape{3}, Shape{2}, Shape{}, Shape{5}, Shape{}};
std::vector<std::vector<float>> expected_results{
{1, 2, 3, 4, 5, 6}, {5, 7, 9}, {6, 15}, {21}, {1, 2, 3, 4, 5}, {15}};
for (size_t i = 0; i < x_shapes.size(); i++)
{
auto t_x = backend->create_tensor(element::f32, x_shapes[i]);
auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()});
copy_data(t_x, inputs[i]);
copy_data(t_axes, axeses[i]);
ex->call_with_validate({t_r}, {t_x, t_axes});
ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);
auto results = read_vector<float>(t_r);
ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS));
}
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_inf)
{
    Shape shape{7, 4};
@ -874,95 +583,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_rows)
    EXPECT_TRUE(test::all_close_f((vector<float>{3, 7, 11}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_rows_zero)
{
Shape shape_a{3, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0, 0}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_cols_zero)
{
// Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
Shape shape_a{0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_vector_zero)
{
Shape shape_a{0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_to_scalar_zero_by_zero)
{
Shape shape_a{0, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_matrix_most_sig)
{
    Shape shape_a{3, 3, 3};
@ -1099,56 +719,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_scalar_int32)
                              read_vector<int32_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_eliminate_zero_dim)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 1, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0, 0, 0, 0, 0}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_eliminate_zero_dim_int32)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::i32, shape_a);
Shape shape_rt{3, 1, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape_a);
copy_data(a, vector<int32_t>{});
auto result = backend->create_tensor(element::i32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<int32_t>{2112, 2112, 2112, 2112, 2112, 2112});
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int32_t>{0, 0, 0, 0, 0, 0}), read_vector<int32_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_5d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_5d_to_scalar)
{ {
Shape shape_a{3, 3, 3, 3, 3}; Shape shape_a{3, 3, 3, 3, 3};
@ -1212,27 +782,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_2d_to_scalar_int8)
EXPECT_EQ(std::vector<int8_t>{45}, read_vector<int8_t>(result)); EXPECT_EQ(std::vector<int8_t>{45}, read_vector<int8_t>(result));
} }
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_trivial_in_double)
{
Shape shape{4, 3};
Shape rshape{1, 3};
auto A = make_shared<op::Parameter>(element::f64, shape);
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f64, shape);
copy_data(a, vector<double>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
auto result = backend->create_tensor(element::f64, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<double>{30, 22, 26}), read_vector<double>(result)));
}
#if NGRAPH_INTERPRETER_ENABLE #if NGRAPH_INTERPRETER_ENABLE
#ifndef _WIN32 #ifndef _WIN32
@ -1271,38 +820,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_acc)
} }
#endif #endif
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_acc_double)
{
std::string backend_name = "${BACKEND_NAME}";
if (backend_name == "INTERPRETER")
{
return;
}
Shape shape_a{10, 10, 20, 300};
auto A = make_shared<op::Parameter>(element::f64, shape_a);
Shape shape_rt{10, 1, 1, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{3}, vector<int32_t>{1, 2, 3});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
test::Uniform<double> rng(1000000000.0L, 1000000000.001L, 2112);
vector<vector<double>> args;
for (shared_ptr<op::Parameter> param : f->get_parameters())
{
vector<double> tensor_val(shape_size(param->get_shape()));
rng.initialize(tensor_val);
args.push_back(tensor_val);
}
auto ref_func = clone_function(*f);
auto bk_func = clone_function(*f);
auto ref_results = execute(ref_func, args, "INTERPRETER");
auto bk_results = execute(bk_func, args, "${BACKEND_NAME}");
EXPECT_TRUE(test::all_close(ref_results.at(0), bk_results.at(0), 0.0, 1e-5));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_simple_float) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_simple_float)
{ {
@ -1334,106 +851,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_simple_float)
test::all_close_f(ref_results.at(0), bk_results.at(0), DEFAULT_FLOAT_TOLERANCE_BITS - 1)); test::all_close_f(ref_results.at(0), bk_results.at(0), DEFAULT_FLOAT_TOLERANCE_BITS - 1));
} }
#ifndef _WIN32
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_simple_double)
{
std::string backend_name = "${BACKEND_NAME}";
if (backend_name == "INTERPRETER")
{
return;
}
Shape shape_a{20};
auto A = make_shared<op::Parameter>(element::f64, shape_a);
Shape shape_rt{1};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
vector<vector<double>> args;
args.push_back(vector<double>{10000000000000000.0L,
0.2L,
0.3L,
0.4L,
0.5L,
0.6L,
0.7L,
0.8L,
0.9L,
0.7L,
0.9L,
0.7L,
0.3L,
0.6L,
0.8L,
0.4L,
0.6L,
0.5L,
0.8L,
0.7L});
auto ref_func = clone_function(*f);
auto bk_func = clone_function(*f);
auto ref_results = execute(ref_func, args, "INTERPRETER");
auto bk_results = execute(bk_func, args, "${BACKEND_NAME}");
EXPECT_TRUE(test::all_close(ref_results.at(0), bk_results.at(0), 0.0, 2.0));
}
#endif #endif
#endif
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_dynamic)
{
// Create a graph for f(x,axes:int32) = Sum(x,Convert<int64>(axes)).
auto x = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto axes = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
auto axes_i64 = make_shared<op::Convert>(axes, element::i64);
auto sum = make_shared<op::v1::ReduceSum>(x, axes_i64, true);
ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic());
auto f = make_shared<Function>(NodeVector{sum}, ParameterVector{x, axes});
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
auto ex = backend->compile(f);
auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
std::vector<Shape> x_shapes{
Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}};
std::vector<std::vector<int32_t>> axeses{{}, {0}, {1}, {0, 1}, {}, {0}};
std::vector<std::vector<float>> inputs{{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5},
{1, 2, 3, 4, 5}};
std::vector<Shape> expected_result_shapes{
Shape{2, 3}, Shape{1, 3}, Shape{2, 1}, Shape{1, 1}, Shape{5}, Shape{1}};
std::vector<std::vector<float>> expected_results{
{1, 2, 3, 4, 5, 6}, {5, 7, 9}, {6, 15}, {21}, {1, 2, 3, 4, 5}, {15}};
for (size_t i = 0; i < x_shapes.size(); i++)
{
auto t_x = backend->create_tensor(element::f32, x_shapes[i]);
auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()});
copy_data(t_x, inputs[i]);
copy_data(t_axes, axeses[i]);
ex->call_with_validate({t_r}, {t_x, t_axes});
ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);
auto results = read_vector<float>(t_r);
ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS));
}
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_inf) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_inf)
{ {
Shape shape{7, 4}; Shape shape{7, 4};

View File

@ -0,0 +1,99 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/engine/test_engines.hpp"
#include "util/test_case.hpp"
#include "util/test_control.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
NGRAPH_TEST(${BACKEND_NAME}, selu_2Dfprop)
{
Shape rt_shape{2};
Shape c_shape{1};
element::Type et = element::f32;
auto input = make_shared<op::Parameter>(et, rt_shape);
auto alpha = op::Constant::create(et, c_shape, {1.67326324});
auto lambda = op::Constant::create(et, c_shape, {1.05070098});
auto selu = make_shared<op::v0::Selu>(input, alpha, lambda);
auto f = make_shared<Function>(selu, ParameterVector{input});
vector<float> input_data{-1, 3};
vector<float> expected_out{-1.1113307, 3.152103};
auto test_case = test::TestCase<TestEngine>(f);
test_case.add_input<float>(rt_shape, input_data);
test_case.add_expected_output(rt_shape, expected_out);
test_case.run();
}
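// Reference values above follow the standard SELU definition,
//   Selu(x) = lambda * x                    for x > 0
//   Selu(x) = lambda * alpha * (exp(x) - 1) for x <= 0,
// so with alpha = 1.67326324 and lambda = 1.05070098:
//   Selu(-1) = 1.05070098 * 1.67326324 * (e^-1 - 1) ~= -1.1113307
//   Selu(3)  = 1.05070098 * 3                       ~=  3.152103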
NGRAPH_TEST(${BACKEND_NAME}, selu_4Dfprop)
{
Shape in_shape{4};
Shape c_shape{1};
element::Type et = element::f32;
auto input = make_shared<op::Parameter>(et, in_shape);
auto alpha = op::Constant::create(et, c_shape, {1.67326324});
auto lambda = op::Constant::create(et, c_shape, {1.05070098});
auto selu = make_shared<op::v0::Selu>(input, alpha, lambda);
auto f = make_shared<Function>(selu, ParameterVector{input});
vector<float> in_vec{-1.0, 0.0, 1.0, 2.0};
vector<float> out_vec{-1.1113307, 0., 1.050701, 2.101402};
auto test_case = test::TestCase<TestEngine>(f);
test_case.add_input<float>(in_shape, in_vec);
test_case.add_expected_output<float>(in_shape, out_vec);
test_case.run_with_tolerance_as_fp(1e-4f);
}
NGRAPH_TEST(${BACKEND_NAME}, selu_1Dfprop)
{
Shape in_shape{1};
Shape c_shape{1};
element::Type et = element::f32;
auto input = make_shared<op::Parameter>(et, in_shape);
auto alpha = op::Constant::create(et, c_shape, {1.67326324});
auto lambda = op::Constant::create(et, c_shape, {1.05070098});
auto selu = make_shared<op::v0::Selu>(input, alpha, lambda);
auto f = make_shared<Function>(selu, ParameterVector{input});
vector<float> in_vec{112.0};
vector<float> out_vec{117.67851};
auto test_case = test::TestCase<TestEngine>(f);
test_case.add_input<float>(in_shape, in_vec);
test_case.add_expected_output<float>(in_shape, out_vec);
test_case.run_with_tolerance_as_fp(1e-4f);
}
NGRAPH_TEST(${BACKEND_NAME}, selu_3Dfprop_negative)
{
Shape in_shape{3};
Shape c_shape{1};
element::Type et = element::f32;
auto input = make_shared<op::Parameter>(et, in_shape);
auto alpha = op::Constant::create(et, c_shape, {1.67326324});
auto lambda = op::Constant::create(et, c_shape, {1.05070098});
auto selu = make_shared<op::v0::Selu>(input, alpha, lambda);
auto f = make_shared<Function>(selu, ParameterVector{input});
vector<float> in_vec{-3.0, -12.5, -7.0};
vector<float> out_vec{-1.6705687, -1.7580928, -1.7564961};
auto test_case = test::TestCase<TestEngine>(f);
test_case.add_input<float>(in_shape, in_vec);
test_case.add_expected_output<float>(in_shape, out_vec);
test_case.run_with_tolerance_as_fp(1e-4f);
}
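// For strongly negative inputs Selu saturates at -lambda * alpha ~= -1.7581,
// which is why -12.5 and -7.0 map to -1.7580928 and -1.7564961 above.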

View File

@ -0,0 +1,115 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/engine/test_engines.hpp"
#include "util/test_case.hpp"
#include "util/test_control.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
static void SpaceToBatchTest(const std::vector<float>& inputs,
const Shape inputs_shape,
const std::vector<int64_t>& block_shapes,
const Shape blocks_shape,
const std::vector<int64_t>& pads_begins,
const std::vector<int64_t>& pads_ends,
const Shape pads_shape,
const std::vector<float>& outputs,
const Shape outputs_shape)
{
auto inputs_param = make_shared<op::Parameter>(element::f32, inputs_shape);
auto block_shapes_param = make_shared<op::Constant>(element::i64, blocks_shape, block_shapes);
auto pads_begins_param = make_shared<op::Constant>(element::i64, pads_shape, pads_begins);
auto pads_ends_param = make_shared<op::Constant>(element::i64, pads_shape, pads_ends);
auto space_to_batch = make_shared<op::v1::SpaceToBatch>(
inputs_param, block_shapes_param, pads_begins_param, pads_ends_param);
auto f = make_shared<Function>(space_to_batch, ParameterVector{inputs_param});
auto test_case = test::TestCase<TestEngine>(f);
test_case.add_input<float>(inputs);
test_case.add_expected_output<float>(outputs_shape, outputs);
test_case.run();
}
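// Shape rule exercised by the cases below: each dimension is first padded with
// pads_begin/pads_end, then divided by the matching block_shape value, and the
// quotients are folded into the batch dimension:
//   out_batch  = in_batch * prod(block_shape)
//   out_dim[i] = (pads_begin[i] + in_dim[i] + pads_end[i]) / block_shape[i]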
NGRAPH_TEST(${BACKEND_NAME}, space_to_batch_4D)
{
const Shape inputs_shape{1, 1, 2, 2};
const std::vector<float> inputs{1.0f, 1.0f,
1.0f, 1.0f};
const Shape blocks_shape{4};
const std::vector<int64_t> block_shapes{1, 1, 1, 1};
const Shape pads_shape{4};
const std::vector<int64_t> pads_begins{0, 0, 0, 0};
const std::vector<int64_t> pads_ends{0, 0, 0, 0};
const Shape outputs_shape{1, 1, 2, 2};
const std::vector<float> outputs{1.0f, 1.0f,
1.0f, 1.0f};
SpaceToBatchTest(inputs, inputs_shape, block_shapes, blocks_shape, pads_begins,
pads_ends, pads_shape, outputs, outputs_shape);
}
NGRAPH_TEST(${BACKEND_NAME}, space_to_batch_5D)
{
const Shape inputs_shape{1, 1, 3, 2, 1};
const std::vector<float> inputs{1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f};
const Shape blocks_shape{5};
const std::vector<int64_t> block_shapes{1, 1, 3, 2, 2};
const Shape pads_shape{5};
const std::vector<int64_t> pads_begins{0, 0, 1, 0, 3};
const std::vector<int64_t> pads_ends{0, 0, 2, 0, 0};
const Shape outputs_shape{12, 1, 2, 1, 2};
const std::vector<float> outputs{0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f};
SpaceToBatchTest(inputs, inputs_shape, block_shapes, blocks_shape, pads_begins,
pads_ends, pads_shape, outputs, outputs_shape);
}
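// Worked shape for this case: padding {1, 1, 3, 2, 1} with pads_begins
// {0, 0, 1, 0, 3} and pads_ends {0, 0, 2, 0, 0} gives {1, 1, 6, 2, 4};
// dividing by block_shapes {1, 1, 3, 2, 2} gives {1, 1, 2, 1, 2}, and the
// batch becomes 1 * (1 * 1 * 3 * 2 * 2) = 12, i.e. the expected {12, 1, 2, 1, 2}.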
NGRAPH_TEST(${BACKEND_NAME}, space_to_batch_4x4)
{
const Shape inputs_shape{1, 1, 4, 4};
const std::vector<float> inputs{1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f};
const Shape blocks_shape{4};
const std::vector<int64_t> block_shapes{1, 1, 1, 1};
const Shape pads_shape{4};
const std::vector<int64_t> pads_begins{0, 0, 1, 0};
const std::vector<int64_t> pads_ends{0, 0, 0, 0};
const Shape outputs_shape{1, 1, 5, 4};
const std::vector<float> outputs{0.0f, 0.0f, 0.0f, 0.0f,
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f};
SpaceToBatchTest(inputs, inputs_shape, block_shapes, blocks_shape, pads_begins,
pads_ends, pads_shape, outputs, outputs_shape);
}
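// With all block_shapes equal to 1 nothing is moved into the batch dimension;
// the single pads_begins entry of 1 on the height axis merely prepends a row of
// zeros, turning the 4x4 identity pattern into the 1x1x5x4 output above.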

View File

@ -0,0 +1,244 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/test_control.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
TEST(op_eval, reduce_product_matrix_rows_zero)
{
Shape shape_a{3, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1}), read_vector<float>(result)));
}
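// Reducing along a zero-length axis makes every output element a product over
// an empty set, i.e. the multiplicative identity 1 -- hence the all-ones
// expectation even though the result tensor was pre-filled with 3s.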
TEST(op_eval, reduce_product_matrix_cols_zero)
{
// Product over the zero-length row axis of a 0x2 matrix (axes={0}).
Shape shape_a{0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1, 1}), read_vector<float>(result)));
}
TEST(op_eval, reduce_product_vector_zero)
{
Shape shape_a{0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1}), read_vector<float>(result)));
}
TEST(op_eval, reduce_product_matrix_to_scalar_zero_by_zero)
{
Shape shape_a{0, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{};
auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1}), read_vector<float>(result)));
}
TEST(op_eval, reduce_product_3d_eliminate_zero_dim)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1, 1, 1, 1}), read_vector<float>(result)));
}
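// Axis 1 has length 0, so all 3 * 2 = 6 outputs are again the empty-product
// identity; the 2112 pre-fill only guards against stale memory being read back.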
TEST(op_eval, reduce_product_keep_matrix_rows_zero)
{
Shape shape_a{3, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1}), read_vector<float>(result)));
}
TEST(op_eval, reduce_product_keep_matrix_cols_zero)
{
// Product over the zero-length row axis of a 0x2 matrix (axes={0}), keep_dims=true.
Shape shape_a{0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1, 1}), read_vector<float>(result)));
}
TEST(op_eval, reduce_product_keep_vector_zero)
{
Shape shape_a{0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1}), read_vector<float>(result)));
}
TEST(op_eval, reduce_product_keep_matrix_to_scalar_zero_by_zero)
{
Shape shape_a{0, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1}), read_vector<float>(result)));
}
TEST(op_eval, reduce_product_keep_3d_eliminate_zero_dim)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 1, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1, 1, 1, 1}), read_vector<float>(result)));
}

View File

@ -0,0 +1,395 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/test_control.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
TEST(op_eval, reduce_sum_matrix_rows_zero)
{
Shape shape_a{3, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0, 0}), read_vector<float>(result)));
}
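// The sum counterpart of the ReduceProd cases above: a reduction over an empty
// axis returns the additive identity 0 for every output element.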
TEST(op_eval, reduce_sum_vector_zero)
{
Shape shape_a{0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0}), read_vector<float>(result)));
}
TEST(op_eval, reduce_sum_matrix_cols_zero)
{
// Sum over the zero-length row axis of a 0x2 matrix (axes={0}).
Shape shape_a{0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0}), read_vector<float>(result)));
}
TEST(op_eval, reduce_sum_matrix_to_scalar_zero_by_zero)
{
Shape shape_a{0, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{};
auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0}), read_vector<float>(result)));
}
TEST(op_eval, reduce_sum_3d_eliminate_zero_dim)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0, 0, 0, 0, 0}), read_vector<float>(result)));
}
TEST(op_eval, reduce_sum_3d_eliminate_zero_dim_int32)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::i32, shape_a);
Shape shape_rt{3, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape_a);
copy_data(a, vector<int32_t>{});
auto result = backend->create_tensor(element::i32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<int32_t>{2112, 2112, 2112, 2112, 2112, 2112});
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int32_t>{0, 0, 0, 0, 0, 0}), read_vector<int32_t>(result));
}
TEST(op_eval, reduce_sum_dynamic)
{
// Create a graph for f(x,axes:int32) = Sum(x,Convert<int64>(axes)).
auto x = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto axes = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
auto axes_i64 = make_shared<op::Convert>(axes, element::i64);
auto sum = make_shared<op::v1::ReduceSum>(x, axes_i64, false);
ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic());
auto f = make_shared<Function>(NodeVector{sum}, ParameterVector{x, axes});
auto backend = runtime::Backend::create("INTERPRETER", true);
auto ex = backend->compile(f);
auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
std::vector<Shape> x_shapes{
Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}};
std::vector<std::vector<int32_t>> axeses{{}, {0}, {1}, {0, 1}, {}, {0}};
std::vector<std::vector<float>> inputs{{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5},
{1, 2, 3, 4, 5}};
std::vector<Shape> expected_result_shapes{
Shape{2, 3}, Shape{3}, Shape{2}, Shape{}, Shape{5}, Shape{}};
std::vector<std::vector<float>> expected_results{
{1, 2, 3, 4, 5, 6}, {5, 7, 9}, {6, 15}, {21}, {1, 2, 3, 4, 5}, {15}};
for (size_t i = 0; i < x_shapes.size(); i++)
{
auto t_x = backend->create_tensor(element::f32, x_shapes[i]);
auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()});
copy_data(t_x, inputs[i]);
copy_data(t_axes, axeses[i]);
ex->call_with_validate({t_r}, {t_x, t_axes});
ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);
auto results = read_vector<float>(t_r);
ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS));
}
}
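// Example iteration: for x_shape {2, 3}, input {1, 2, 3, 4, 5, 6} and axes {0}
// the rows (1, 2, 3) and (4, 5, 6) are summed column-wise, giving {5, 7, 9}
// with shape {3}; with axes {} nothing is reduced and the input passes through.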
TEST(op_eval, reduce_sum_keep_matrix_rows_zero)
{
Shape shape_a{3, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0, 0}), read_vector<float>(result)));
}
TEST(op_eval, reduce_sum_keep_matrix_cols_zero)
{
// Sum over the zero-length row axis of a 0x2 matrix (axes={0}), keep_dims=true.
Shape shape_a{0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0}), read_vector<float>(result)));
}
TEST(op_eval, reduce_sum_keep_vector_zero)
{
Shape shape_a{0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0}), read_vector<float>(result)));
}
TEST(op_eval, reduce_sum_keep_matrix_to_scalar_zero_by_zero)
{
Shape shape_a{0, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0}), read_vector<float>(result)));
}
TEST(op_eval, reduce_sum_keep_3d_eliminate_zero_dim)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 1, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0, 0, 0, 0, 0}), read_vector<float>(result)));
}
TEST(op_eval, reduce_sum_keep_3d_eliminate_zero_dim_int32)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::i32, shape_a);
Shape shape_rt{3, 1, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape_a);
copy_data(a, vector<int32_t>{});
auto result = backend->create_tensor(element::i32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<int32_t>{2112, 2112, 2112, 2112, 2112, 2112});
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int32_t>{0, 0, 0, 0, 0, 0}), read_vector<int32_t>(result));
}
TEST(op_eval, reduce_sum_keep_dynamic)
{
// Create a graph for f(x,axes:int32) = Sum(x,Convert<int64>(axes)).
auto x = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto axes = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
auto axes_i64 = make_shared<op::Convert>(axes, element::i64);
auto sum = make_shared<op::v1::ReduceSum>(x, axes_i64, true);
ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic());
auto f = make_shared<Function>(NodeVector{sum}, ParameterVector{x, axes});
auto backend = runtime::Backend::create("INTERPRETER", true);
auto ex = backend->compile(f);
auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
std::vector<Shape> x_shapes{
Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}};
std::vector<std::vector<int32_t>> axeses{{}, {0}, {1}, {0, 1}, {}, {0}};
std::vector<std::vector<float>> inputs{{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5},
{1, 2, 3, 4, 5}};
std::vector<Shape> expected_result_shapes{
Shape{2, 3}, Shape{1, 3}, Shape{2, 1}, Shape{1, 1}, Shape{5}, Shape{1}};
std::vector<std::vector<float>> expected_results{
{1, 2, 3, 4, 5, 6}, {5, 7, 9}, {6, 15}, {21}, {1, 2, 3, 4, 5}, {15}};
for (size_t i = 0; i < x_shapes.size(); i++)
{
auto t_x = backend->create_tensor(element::f32, x_shapes[i]);
auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()});
copy_data(t_x, inputs[i]);
copy_data(t_axes, axeses[i]);
ex->call_with_validate({t_r}, {t_x, t_axes});
ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);
auto results = read_vector<float>(t_r);
ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS));
}
}
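// Same data as the non-keep_dims dynamic test earlier in this file, but with
// keep_dims = true each reduced axis is retained with length 1, e.g. {2, 3}
// reduced over axes {0, 1} produces shape {1, 1} instead of the scalar shape {}.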

View File

@ -368,36 +368,18 @@ any_2x0_to_scalar
all_trivial all_trivial
all_2x0_to_scalar all_2x0_to_scalar
all_dynamic all_dynamic
reduce_sum_keep_dynamic
reduce_sum_keep_stable_simple_double
reduce_sum_keep_stable_acc_double
reduce_sum_keep_stable_acc
reduce_sum_keep_3d_eliminate_zero_dim
reduce_sum_keep_vector_zero
reduce_sum_keep_matrix_rows_zero
reduce_sum_dynamic
reduce_sum_3d_eliminate_zero_dim_int32
reduce_sum_keep_3d_eliminate_zero_dim_int32
reduce_sum_keep_trivial_in_double
reduce_sum_3d_eliminate_zero_dim
reduce_sum_matrix_to_scalar_zero_by_zero
reduce_sum_vector_zero
reduce_sum_matrix_cols_zero
reduce_sum_matrix_rows_zero
reduce_sum_keep_matrix_to_scalar_zero_by_zero
reduce_sum_keep_matrix_cols_zero
reduce_product_matrix_columns
# Reduce ops disabled/accuracy: 56520
# disabled reference implementation
reduce_sum_keep_2d_to_scalar_int8
reduce_sum_2d_to_scalar_int8
reduce_product_to_scalar_int8
reduce_product_keep_to_scalar_int8
# accuracy # accuracy
reduce_sum_keep_stable_acc reduce_sum_keep_stable_acc
reduce_sum_keep_2d_to_scalar_int8
reduce_sum_keep_3d_to_scalar_int32 reduce_sum_keep_3d_to_scalar_int32
reduce_sum_keep_large_1d_to_scalar reduce_sum_keep_large_1d_to_scalar
reduce_sum_stable_simple_double
reduce_sum_stable_acc_double
reduce_sum_stable_acc reduce_sum_stable_acc
reduce_sum_trivial_in_double
reduce_sum_2d_to_scalar_int8
reduce_sum_3d_to_scalar_int32 reduce_sum_3d_to_scalar_int32
reduce_sum_large_1d_to_scalar reduce_sum_large_1d_to_scalar
@ -478,17 +460,6 @@ onnx_dyn_shapes_model_tile_static
gather_4d_indices_axis_0_uint8 gather_4d_indices_axis_0_uint8
tensor_constant_with_op tensor_constant_with_op
constant_equality_bool constant_equality_bool
reduce_product_matrix_rows
reduce_product_3d_to_matrix_most_sig
reduce_product_3d_to_matrix_least_sig
reduce_product_keep_matrix_columns
reduce_product_keep_matrix_rows
reduce_product_keep_3d_to_matrix_most_sig
reduce_product_keep_3d_to_matrix_least_sig
reduce_product_matrix_columns_dynamic
reduce_product_matrix_rows_dynamic
reduce_product_keep_matrix_columns_dynamic
reduce_product_keep_matrix_rows_dynamic
reduce_min_matrix_columns reduce_min_matrix_columns
reduce_min_matrix_rows reduce_min_matrix_rows
reduce_min_matrix_rows_int32 reduce_min_matrix_rows_int32
@ -505,18 +476,6 @@ reduce_min_keep_matrix_columns_dynamic
reduce_min_keep_matrix_rows_dynamic reduce_min_keep_matrix_rows_dynamic
# zero dimension / result mismatch # zero dimension / result mismatch
reduce_product_matrix_rows_zero
reduce_product_matrix_cols_zero
reduce_product_vector_zero
reduce_product_matrix_to_scalar_zero_by_zero
reduce_product_3d_eliminate_zero_dim
reduce_product_to_scalar_int8
reduce_product_keep_matrix_rows_zero
reduce_product_keep_matrix_cols_zero
reduce_product_keep_vector_zero
reduce_product_keep_matrix_to_scalar_zero_by_zero
reduce_product_keep_3d_eliminate_zero_dim
reduce_product_keep_to_scalar_int8
reduce_min_to_scalar_int8 reduce_min_to_scalar_int8
reduce_min_matrix_rows_zero reduce_min_matrix_rows_zero
reduce_min_matrix_cols_zero reduce_min_matrix_cols_zero

View File

@ -283,7 +283,8 @@ namespace
op->get_dilations(), op->get_dilations(),
op->get_pads_begin(), op->get_pads_begin(),
op->get_pads_end(), op->get_pads_end(),
op->get_strides()); op->get_strides(),
op->get_output_padding());
return true; return true;
} }

View File

@ -3,6 +3,7 @@ fake_quantize_pdpd
INTERPRETER.onnx_model_quant_conv_linear INTERPRETER.onnx_model_quant_conv_linear
INTERPRETER.onnx_top_k_opset_10 INTERPRETER.onnx_top_k_opset_10
# Failing on Ubuntu 18 i386
reduce_sum_large_1d_to_scalar reduce_sum_large_1d_to_scalar
reduce_sum_keep_large_1d_to_scalar reduce_sum_keep_large_1d_to_scalar
@ -31,11 +32,8 @@ INTERPRETER.onnx_model_matmul_integer_4d_no_zero_point
# Disabled tests for disabled reference implementations # Disabled tests for disabled reference implementations
INTERPRETER.onnx_dyn_shapes_expand_uint16_dyn_shape INTERPRETER.onnx_dyn_shapes_expand_uint16_dyn_shape
INTERPRETER.sum_2d_to_scalar_int8 INTERPRETER.sum_2d_to_scalar_int8
INTERPRETER.sum_trivial_in_double
INTERPRETER.reduce_sum_2d_to_scalar_int8 INTERPRETER.reduce_sum_2d_to_scalar_int8
INTERPRETER.reduce_sum_trivial_in_double
INTERPRETER.reduce_sum_keep_2d_to_scalar_int8 INTERPRETER.reduce_sum_keep_2d_to_scalar_int8
INTERPRETER.reduce_sum_keep_trivial_in_double
INTERPRETER.reduce_product_to_scalar_int8 INTERPRETER.reduce_product_to_scalar_int8
INTERPRETER.reduce_product_keep_to_scalar_int8 INTERPRETER.reduce_product_keep_to_scalar_int8
INTERPRETER.reduce_min_to_scalar_int8 INTERPRETER.reduce_min_to_scalar_int8

View File

@ -2,47 +2,8 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "gtest/gtest.h" #include "reduce_ops.hpp"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std; using Type = ::testing::Types<op::v4::ReduceL1>;
using namespace ngraph; INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_l1, ReduceTest, Type);
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_l1_et, ReduceArithmeticTest, Type);
TEST(type_prop, reduce_l1_v4_axis_out_of_range)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{2, 3});
try
{
auto reduce_sum = make_shared<op::v4::ReduceL1>(arg, axes);
// Should have thrown, so fail if it didn't
FAIL() << "Incorrect axes values exception not thrown";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis ("));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, reduce_l1_v4_shape_if_keep_dims)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = true;
auto reduce_prod = make_shared<op::v4::ReduceL1>(arg, axes, keep_dims);
ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1}));
}
TEST(type_prop, reduce_l1_v4_shape_if_not_keep_dims)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = false;
auto reduce_prod = make_shared<op::v4::ReduceL1>(arg, axes, keep_dims);
ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3}));
}

View File

@ -2,47 +2,8 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "gtest/gtest.h" #include "reduce_ops.hpp"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std; using Type = ::testing::Types<op::v4::ReduceL2>;
using namespace ngraph; INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_l2, ReduceTest, Type);
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_l2_et, ReduceArithmeticTest, Type);
TEST(type_prop, reduce_l2_v4_axis_out_of_range)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{2, 3});
try
{
auto reduce_sum = make_shared<op::v4::ReduceL2>(arg, axes);
// Should have thrown, so fail if it didn't
FAIL() << "Incorrect axes values exception not thrown";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis ("));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, reduce_l2_v4_shape_if_keep_dims)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = true;
auto reduce_prod = make_shared<op::v4::ReduceL2>(arg, axes, keep_dims);
ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1}));
}
TEST(type_prop, reduce_l2_v4_shape_if_not_keep_dims)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = false;
auto reduce_prod = make_shared<op::v4::ReduceL2>(arg, axes, keep_dims);
ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3}));
}

View File

@ -0,0 +1,9 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "reduce_ops.hpp"
using Type = ::testing::Types<op::v1::ReduceLogicalAnd>;
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_logical_and, ReduceTest, Type);
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_logical_and_et, ReduceLogicalTest, Type);

View File

@ -0,0 +1,9 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "reduce_ops.hpp"
using Type = ::testing::Types<op::v1::ReduceLogicalOr>;
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_logical_or, ReduceTest, Type);
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_logical_or_et, ReduceLogicalTest, Type);

View File

@ -0,0 +1,9 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "reduce_ops.hpp"
using Type = ::testing::Types<op::v1::ReduceMax>;
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_max, ReduceTest, Type);
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_max_et, ReduceArithmeticTest, Type);

View File

@ -2,71 +2,8 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "gtest/gtest.h" #include "reduce_ops.hpp"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std; using Type = ::testing::Types<op::v1::ReduceMean>;
using namespace ngraph; INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_mean, ReduceTest, Type);
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_mean_et, ReduceArithmeticTest, Type);
TEST(type_prop, reduce_mean_v1_axis_out_of_range)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{2, 3});
try
{
auto reduce_sum = make_shared<op::v1::ReduceMean>(arg, axes);
// Should have thrown, so fail if it didn't
FAIL() << "Incorrect axes values exception not thrown";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis ("));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, reduce_mean_v1_shape_if_keep_dims)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = true;
auto reduce_prod = make_shared<op::v1::ReduceMean>(arg, axes, keep_dims);
ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1}));
}
TEST(type_prop, reduce_mean_v1_shape_if_not_keep_dims)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = false;
auto reduce_prod = make_shared<op::v1::ReduceMean>(arg, axes, keep_dims);
ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3}));
}
TEST(type_prop, reduce_mean_dynamic_shape)
{
auto arg =
make_shared<op::Parameter>(element::f32, PartialShape{3, 4, 5, Dimension::dynamic()});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = true;
auto reduce_prod = make_shared<op::v1::ReduceMean>(arg, axes, keep_dims);
ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(
PartialShape{3, 1, 1, Dimension::dynamic()}));
}
TEST(type_prop, reduce_mean_reduce_dynamic_shape)
{
auto arg =
make_shared<op::Parameter>(element::f32, PartialShape{3, 4, 5, Dimension::dynamic()});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 3});
auto keep_dims = true;
auto reduce_prod = make_shared<op::v1::ReduceMean>(arg, axes, keep_dims);
ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(
PartialShape{3, 1, 5, Dimension::dynamic()}));
}

View File

@ -0,0 +1,9 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "reduce_ops.hpp"
using Type = ::testing::Types<op::v1::ReduceMin>;
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_min, ReduceTest, Type);
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_min_et, ReduceArithmeticTest, Type);

View File

@ -0,0 +1,388 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
struct ReduceParams
{
PartialShape data_ps;
element::Type data_et;
Shape axes_ps;
std::vector<int64_t> axes;
element::Type axes_et;
bool keep_dims;
};
template<class T>
static std::shared_ptr<Node> makeReduceOp(const ReduceParams& p, bool axes_as_param = false)
{
auto in_data = make_shared<op::Parameter>(p.data_et, p.data_ps);
shared_ptr<Node> in_axes;
if (axes_as_param)
{
in_axes = make_shared<op::Parameter>(p.axes_et, p.axes_ps);
}
else
{
if (shape_size(p.axes_ps) != p.axes.size())
{
throw ngraph_error("Axes shape does not match with axes elements");
}
in_axes = make_shared<op::Constant>(p.axes_et, p.axes_ps, p.axes);
}
return make_shared<T>(in_data, in_axes, p.keep_dims);
}
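// When axes_as_param is true the axes are supplied as a Parameter, so their
// values are unknown at type-propagation time; otherwise they are baked into a
// Constant and must match axes_ps element-for-element (checked above).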
template<class T>
class ReduceTest : public testing::Test
{
};
TYPED_TEST_CASE_P(ReduceTest);
TYPED_TEST_P(ReduceTest, reduce_basic_shape_infer)
{
PartialShape data_ps{3, 4, 5};
element::Type data_et = element::dynamic;
Shape axes_ps{2};
element::Type axes_et = element::i64;
std::vector<int64_t> axes{1, 2};
bool keep_dims = false;
PartialShape out_ps{3};
const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims};
auto reduce_op = makeReduceOp<TypeParam>(params);
ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps));
}
TYPED_TEST_P(ReduceTest, reduce_basic_shape_infer_keep_dims)
{
PartialShape data_ps{3, 4, 5};
element::Type data_et = element::dynamic;
Shape axes_ps{2};
element::Type axes_et = element::i64;
std::vector<int64_t> axes{1, 2};
bool keep_dims = true;
PartialShape out_ps{3, 1, 1};
const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims};
auto reduce_op = makeReduceOp<TypeParam>(params);
ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps));
}
TYPED_TEST_P(ReduceTest, reduce_basic_shape_infer_scalar_axis)
{
PartialShape data_ps{3, 4, 5};
element::Type data_et = element::dynamic;
Shape axes_ps{};
element::Type axes_et = element::i64;
std::vector<int64_t> axes{1};
bool keep_dims = false;
PartialShape out_ps{3, 5};
const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims};
auto reduce_op = makeReduceOp<TypeParam>(params);
ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps));
}
TYPED_TEST_P(ReduceTest, reduce_basic_shape_infer_axes_as_param)
{
PartialShape data_ps{3, 4, 5};
element::Type data_et = element::dynamic;
Shape axes_ps{2};
element::Type axes_et = element::i32;
std::vector<int64_t> axes;
bool keep_dims = false;
PartialShape out_ps{PartialShape::dynamic()};
const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims};
bool axes_as_param = true;
auto reduce_op = makeReduceOp<TypeParam>(params, axes_as_param);
ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps));
}
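// With the axes passed as a runtime Parameter the set of reduced dimensions is
// unknown, so the only output shape that can be inferred is a fully dynamic one.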
TYPED_TEST_P(ReduceTest, reduce_dynamic_shape_reduced_axes_static)
{
PartialShape data_ps{3, 4, 5, Dimension::dynamic()};
element::Type data_et = element::dynamic;
Shape axes_ps{2};
element::Type axes_et = element::i64;
std::vector<int64_t> axes{1, 2};
bool keep_dims = false;
PartialShape out_ps{3, Dimension::dynamic()};
const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims};
auto reduce_op = makeReduceOp<TypeParam>(params);
ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps));
}
TYPED_TEST_P(ReduceTest, reduce_dynamic_shape_reduced_axes_static_keep_dims)
{
PartialShape data_ps{3, 4, 5, Dimension::dynamic()};
element::Type data_et = element::dynamic;
Shape axes_ps{2};
element::Type axes_et = element::i64;
std::vector<int64_t> axes{1, 2};
bool keep_dims = true;
PartialShape out_ps{3, 1, 1, Dimension::dynamic()};
const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims};
auto reduce_op = makeReduceOp<TypeParam>(params);
ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps));
}
TYPED_TEST_P(ReduceTest, reduce_dynamic_shape_reduced_axes_not_static)
{
PartialShape data_ps{Dimension::dynamic(), 4, 5, Dimension::dynamic()};
element::Type data_et = element::dynamic;
Shape axes_ps{2};
element::Type axes_et = element::i64;
std::vector<int64_t> axes{2, 3};
bool keep_dims = false;
PartialShape out_ps{Dimension::dynamic(), 4};
const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims};
auto reduce_op = makeReduceOp<TypeParam>(params);
ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps));
}
TYPED_TEST_P(ReduceTest, reduce_dynamic_shape_reduced_axes_not_static_keep_dims)
{
PartialShape data_ps{Dimension::dynamic(), 4, 5, Dimension::dynamic()};
element::Type data_et = element::dynamic;
Shape axes_ps{2};
element::Type axes_et = element::i64;
std::vector<int64_t> axes{2, 3};
bool keep_dims = true;
PartialShape out_ps{Dimension::dynamic(), 4, 1, 1};
const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims};
auto reduce_op = makeReduceOp<TypeParam>(params);
ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps));
}
TYPED_TEST_P(ReduceTest, reduce_dynamic_shape_data)
{
PartialShape data_ps{PartialShape::dynamic()};
element::Type data_et = element::dynamic;
Shape axes_ps{2};
element::Type axes_et = element::i64;
std::vector<int64_t> axes{1, 2};
bool keep_dims = false;
PartialShape out_ps{PartialShape::dynamic()};
const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims};
auto reduce_op = makeReduceOp<TypeParam>(params);
ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps));
}
TYPED_TEST_P(ReduceTest, reduce_invalid_axis_out_of_range)
{
PartialShape data_ps{1, 2, 3};
element::Type data_et = element::dynamic;
Shape axes_ps{2};
element::Type axes_et = element::i64;
std::vector<int64_t> axes{2, 3};
bool keep_dims = false;
const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims};
try
{
auto reduce_op = makeReduceOp<TypeParam>(params);
FAIL() << "Invalid axes values not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Reduction axis (");
}
catch (...)
{
FAIL() << "Axes input values validation check failed for unexpected reason";
}
}
TYPED_TEST_P(ReduceTest, reduce_invalid_axes_shape)
{
PartialShape data_ps{1, 2, 3};
element::Type data_et = element::dynamic;
Shape axes_ps{2, 1};
element::Type axes_et = element::i64;
std::vector<int64_t> axes{0, 1};
bool keep_dims = true;
const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims};
try
{
auto reduce_op = makeReduceOp<TypeParam>(params);
FAIL() << "Invalid shape of axes input not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Axes input must be a scalar or 1D input.");
}
catch (...)
{
FAIL() << "Axes input shape validation check failed for unexpected reason";
}
}
TYPED_TEST_P(ReduceTest, reduce_invalid_axes_et)
{
element::Type data_et = element::dynamic;
PartialShape data_ps{1, 2, 3};
element::Type axes_et = element::f32;
Shape axes_ps{2};
std::vector<int64_t> axes{0, 1};
bool keep_dims = true;
const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims};
try
{
auto reduce_op = makeReduceOp<TypeParam>(params);
FAIL() << "Invalid element type of axes input not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Element type of axes input must be integer.");
}
catch (...)
{
FAIL() << "Axes input element type validation check failed for unexpected reason";
}
}
REGISTER_TYPED_TEST_CASE_P(
ReduceTest,
reduce_basic_shape_infer,
reduce_basic_shape_infer_keep_dims,
reduce_basic_shape_infer_scalar_axis,
reduce_basic_shape_infer_axes_as_param,
reduce_dynamic_shape_data,
reduce_dynamic_shape_reduced_axes_static,
reduce_dynamic_shape_reduced_axes_static_keep_dims,
reduce_dynamic_shape_reduced_axes_not_static,
reduce_dynamic_shape_reduced_axes_not_static_keep_dims,
reduce_invalid_axis_out_of_range,
reduce_invalid_axes_shape,
reduce_invalid_axes_et);
template<class T>
class ReduceArithmeticTest : public testing::Test
{
};
TYPED_TEST_CASE_P(ReduceArithmeticTest);
TYPED_TEST_P(ReduceArithmeticTest, reduce_arithmetic_invalid_data_et)
{
element::Type data_et = element::boolean;
PartialShape data_ps{1, 2, 3};
element::Type axes_et = element::i32;
Shape axes_ps{2};
std::vector<int64_t> axes{0, 1};
bool keep_dims = true;
const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims};
try
{
auto reduce_op = makeReduceOp<TypeParam>(params);
FAIL() << "Invalid element type of data input not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Element type of data input must be numeric.");
}
catch (...)
{
FAIL() << "Data input element type validation check failed for unexpected reason";
}
}
REGISTER_TYPED_TEST_CASE_P(
ReduceArithmeticTest,
reduce_arithmetic_invalid_data_et);
template<class T>
class ReduceLogicalTest : public testing::Test
{
};
TYPED_TEST_CASE_P(ReduceLogicalTest);
TYPED_TEST_P(ReduceLogicalTest, reduce_logical_invalid_data_et)
{
std::vector<element::Type> element_types{
element::f32,
element::i32,
element::u32};
PartialShape data_ps{1, 2, 3};
element::Type axes_et = element::i32;
Shape axes_ps{2};
std::vector<int64_t> axes{0, 1};
bool keep_dims = true;
for (const auto& data_et : element_types)
{
const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims};
try
{
auto reduce_op = makeReduceOp<TypeParam>(params);
FAIL() << "Invalid element type of data input not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Element type of data input must be boolean.");
}
catch (...)
{
FAIL() << "Data input element type validation check failed for unexpected reason";
}
}
}
REGISTER_TYPED_TEST_CASE_P(
ReduceLogicalTest,
reduce_logical_invalid_data_et);
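The shared typed suites above are designed to be instantiated once per reduction operation, as the ReduceProd and ReduceSum files later in this change do for the arithmetic case. As an illustrative sketch (the file name and operation choice are assumptions, not part of this diff), a logical reduction would pair ReduceTest with ReduceLogicalTest:

```cpp
// Hypothetical per-op test file (e.g. type_prop/reduce_logical_and.cpp):
// reuse the shared typed suites for op::v1::ReduceLogicalAnd.
#include "reduce_ops.hpp"

using Type = ::testing::Types<op::v1::ReduceLogicalAnd>;
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_logical_and, ReduceTest, Type);
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_logical_and_et, ReduceLogicalTest, Type);
```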

View File

@ -2,48 +2,8 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "gtest/gtest.h" #include "reduce_ops.hpp"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std; using Type = ::testing::Types<op::v1::ReduceProd>;
using namespace ngraph; INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_prod, ReduceTest, Type);
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_prod_et, ReduceArithmeticTest, Type);
TEST(type_prop, reduce_prod_v1_axis_out_of_range)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{2, 3});
try
{
auto reduce_prod = make_shared<op::v1::ReduceProd>(arg, axes);
// Should have thrown, so fail if it didn't
FAIL() << "Incorrect axes values exception not thrown";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis ("));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, reduce_prod_v1_shape_if_keep_dims)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = true;
auto reduce_prod = make_shared<op::v1::ReduceProd>(arg, axes, keep_dims);
ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1}));
}
TEST(type_prop, reduce_prod_v1_shape_if_not_keep_dims)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = false;
auto reduce_prod = make_shared<op::v1::ReduceProd>(arg, axes, keep_dims);
ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3}));
}

View File

@ -2,48 +2,8 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "gtest/gtest.h" #include "reduce_ops.hpp"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std; using Type = ::testing::Types<op::v1::ReduceSum>;
using namespace ngraph; INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_sum, ReduceTest, Type);
INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_sum_et, ReduceArithmeticTest, Type);
TEST(type_prop, reduce_sum_v1_axis_out_of_range)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{2, 3});
try
{
auto reduce_sum = make_shared<op::v1::ReduceSum>(arg, axes);
// Should have thrown, so fail if it didn't
FAIL() << "Incorrect axes values exception not thrown";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis ("));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, reduce_sum_v1_shape_if_keep_dims)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = true;
auto reduce_prod = make_shared<op::v1::ReduceSum>(arg, axes, keep_dims);
ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1}));
}
TEST(type_prop, reduce_sum_v1_shape_if_not_keep_dims)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = false;
auto reduce_prod = make_shared<op::v1::ReduceSum>(arg, axes, keep_dims);
ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3}));
}

View File

@ -0,0 +1,171 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(type_prop, selu_basic_inference_f32_3D)
{
const auto param = make_shared<op::Parameter>(element::f32, Shape{1, 32, 32});
const auto alpha = make_shared<op::Parameter>(element::f32, Shape{1});
const auto lambda = make_shared<op::Parameter>(element::f32, Shape{1});
const auto selu = make_shared<op::Selu>(param, alpha, lambda);
ASSERT_EQ(selu->get_element_type(), element::f32);
ASSERT_EQ(selu->get_shape(), (Shape{1, 32, 32}));
}
TEST(type_prop, selu_basic_inference_f16_3D)
{
const auto param = make_shared<op::Parameter>(element::f16, Shape{1, 32, 32});
const auto alpha = make_shared<op::Parameter>(element::f16, Shape{1});
const auto lambda = make_shared<op::Parameter>(element::f16, Shape{1});
const auto selu = make_shared<op::Selu>(param, alpha, lambda);
ASSERT_EQ(selu->get_element_type(), element::f16);
ASSERT_EQ(selu->get_shape(), (Shape{1, 32, 32}));
}
TEST(type_prop, selu_basic_inference_f32_5D)
{
const auto param = make_shared<op::Parameter>(element::f32, Shape{12, 135, 221, 31, 15});
const auto alpha = make_shared<op::Parameter>(element::f32, Shape{1});
const auto lambda = make_shared<op::Parameter>(element::f32, Shape{1});
const auto selu = make_shared<op::Selu>(param, alpha, lambda);
ASSERT_EQ(selu->get_element_type(), element::f32);
ASSERT_EQ(selu->get_shape(), (Shape{12, 135, 221, 31, 15}));
}
TEST(type_prop, selu_basic_inference_f16_5D)
{
const auto param = make_shared<op::Parameter>(element::f16, Shape{12, 135, 221, 31, 15});
const auto alpha = make_shared<op::Parameter>(element::f16, Shape{1});
const auto lambda = make_shared<op::Parameter>(element::f16, Shape{1});
const auto selu = make_shared<op::Selu>(param, alpha, lambda);
ASSERT_EQ(selu->get_element_type(), element::f16);
ASSERT_EQ(selu->get_shape(), (Shape{12, 135, 221, 31, 15}));
}
TEST(type_prop, selu_incompatible_input_type_boolean)
{
// Invalid data input element type
try
{
auto data = make_shared<op::Parameter>(element::boolean, Shape{1, 2, 3, 4});
const auto alpha = make_shared<op::Parameter>(element::boolean, Shape{1});
const auto lambda = make_shared<op::Parameter>(element::boolean, Shape{1});
auto selu = make_shared<op::Selu>(data, alpha, lambda);
// Data input expected to be of a floating-point type
FAIL() << "Invalid input type not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Input element types must be floating-point"));
}
catch (...)
{
FAIL() << "Input type check failed for unexpected reason";
}
}
TEST(type_prop, selu_incompatible_input_type_i32)
{
// Invalid data input element type
try
{
auto data = make_shared<op::Parameter>(element::i32, Shape{1, 2, 3, 4});
const auto alpha = make_shared<op::Parameter>(element::i32, Shape{1});
const auto lambda = make_shared<op::Parameter>(element::i32, Shape{1});
auto selu = make_shared<op::Selu>(data, alpha, lambda);
// Data input expected to be of a floating-point type
FAIL() << "Invalid input type not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Input element types must be floating-point"));
}
catch (...)
{
FAIL() << "Input type check failed for unexpected reason";
}
}
TEST(type_prop, selu_incompatible_input_type_u16)
{
// Invalid data input element type
try
{
auto data = make_shared<op::Parameter>(element::u16, Shape{1, 2, 3, 4});
const auto alpha = make_shared<op::Parameter>(element::u16, Shape{1});
const auto lambda = make_shared<op::Parameter>(element::u16, Shape{1});
auto selu = make_shared<op::Selu>(data, alpha, lambda);
// Data input expected to be of a floating-point type
FAIL() << "Invalid input type not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Input element types must be floating-point"));
}
catch (...)
{
FAIL() << "Input type check failed for unexpected reason";
}
}
TEST(type_prop, selu_incompatible_input_types)
{
// Mismatched input element types
try
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
const auto alpha = make_shared<op::Parameter>(element::f32, Shape{1});
const auto lambda = make_shared<op::Parameter>(element::u16, Shape{1});
auto selu = make_shared<op::Selu>(data, alpha, lambda);
// Input element types expected to match
FAIL() << "Invalid input types not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Input element types do not match"));
}
catch (...)
{
FAIL() << "Input type check failed for unexpected reason";
}
}
TEST(type_prop, selu_dynamic_rank_input_shape_2D)
{
const PartialShape param_shape{Dimension::dynamic(), 10};
const auto param = std::make_shared<op::Parameter>(element::f32, param_shape);
const auto alpha = make_shared<op::Parameter>(element::f32, Shape{2, 1});
const auto lambda = make_shared<op::Parameter>(element::f32, Shape{1});
const auto op = std::make_shared<op::Selu>(param, alpha, lambda);
ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(), 10}));
}
TEST(type_prop, selu_dynamic_rank_input_shape_3D)
{
const PartialShape param_shape{100, Dimension::dynamic(), 58};
const auto param = std::make_shared<op::Parameter>(element::f32, param_shape);
const auto alpha = make_shared<op::Parameter>(element::f32, Shape{1});
const auto lambda = make_shared<op::Parameter>(element::f32, Shape{1});
const auto op = std::make_shared<op::Selu>(param, alpha, lambda);
ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{100, Dimension(), 58}));
}
TEST(type_prop, selu_dynamic_rank_input_shape_full)
{
const auto param = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
const auto alpha = make_shared<op::Parameter>(element::f32, Shape{1});
const auto lambda = make_shared<op::Parameter>(element::f32, Shape{1});
const auto op = std::make_shared<op::Selu>(param, alpha, lambda);
ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
}

View File

@ -110,3 +110,104 @@ TEST(type_prop, space_to_batch_dynamic_shape_dynamic_rank)
ASSERT_EQ(space_to_batch->get_element_type(), element::f32); ASSERT_EQ(space_to_batch->get_element_type(), element::f32);
ASSERT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape::dynamic()); ASSERT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape::dynamic());
} }
TEST(type_prop, space_to_batch_invalid_element_type_block_shape)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 128});
auto block_shape = make_shared<op::Constant>(element::f32, Shape{2}, vector<int64_t>{1, 5});
auto pads_begin = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 2});
auto pads_end = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 0});
try
{
auto space_to_batch =
make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
// block_shape element type is f32 rather than integral
FAIL() << "Invalid f32 element type for block_shape not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "block_shape must be an integral number");
}
catch(...)
{
FAIL() << "Integral element type node validation check failed for unexpected reason";
}
}
TEST(type_prop, space_to_batch_invalid_element_type_pads_begin)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 128});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 5});
auto pads_begin = make_shared<op::Constant>(element::f32, Shape{2}, vector<float>{0, 2});
auto pads_end = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 0});
try
{
auto space_to_batch =
make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
// pads_begin element type is f32 rather than integral
FAIL() << "Invalid f32 element type for pads_begin not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "pads_begin must be an integral number but got");
}
catch(...)
{
FAIL() << "Integral element type node validation check failed for unexpected reason";
}
}
TEST(type_prop, space_to_batch_invalid_element_type_pads_end)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 128});
auto block_shape = make_shared<op::Constant>(element::i16, Shape{2}, vector<int64_t>{1, 5});
auto pads_begin = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 2});
auto pads_end = make_shared<op::Constant>(element::f32, Shape{2}, vector<float>{0, 0});
try
{
auto space_to_batch =
make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
// pads_end element type is f32 rather than integral
FAIL() << "Invalid f32 element type for pads_end not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "pads_end must be an integral number but got");
}
catch(...)
{
FAIL() << "Integral element type node validation check failed for unexpected reason";
}
}
TEST(type_prop, space_to_batch_invalid_value_block_shape)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 128});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{-1, -5});
auto pads_begin = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 2});
auto pads_end = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 0});
try
{
auto space_to_batch =
make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
// block_shape values are negative
FAIL() << "Invalid block_shape value not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "block_shape values must be greater than 0");
}
catch(...)
{
FAIL() << "block_shape value node validation check failed for unexpected reason";
}
}

View File

@ -24,7 +24,7 @@ TYPED_TEST_CASE_P(ReduceOpsAttrTest);
TYPED_TEST_P(ReduceOpsAttrTest, reduce_ops) TYPED_TEST_P(ReduceOpsAttrTest, reduce_ops)
{ {
Shape in_shape{3, 4, 5}; Shape in_shape{3, 4, 5};
element::Type in_et = element::f32; element::Type in_et = element::dynamic;
Shape axes_shape{2}; Shape axes_shape{2};
element::Type axes_et = element::i64; element::Type axes_et = element::i64;

View File

@ -0,0 +1,30 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "util/visitor.hpp"
using namespace std;
using namespace ngraph;
using ngraph::test::NodeBuilder;
TEST(attributes, selu_op)
{
NodeBuilder::get_ops().register_factory<opset1::Selu>();
const auto data_input = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
const auto alpha = make_shared<op::Parameter>(element::f32, Shape{1});
const auto lambda = make_shared<op::Parameter>(element::f32, Shape{1});
const auto op = make_shared<opset1::Selu>(data_input, alpha, lambda);
NodeBuilder builder(op);
const auto expected_attr_count = 0;
EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
}

View File

@ -0,0 +1,33 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/opsets/opset2.hpp"
#include "util/visitor.hpp"
using namespace std;
using namespace ngraph;
using ngraph::test::NodeBuilder;
TEST(attributes, space_to_batch_op)
{
using namespace opset2;
NodeBuilder::get_ops().register_factory<SpaceToBatch>();
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 128});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 5});
auto pads_begin = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 2});
auto pads_end = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 0});
auto op = make_shared<SpaceToBatch>(data, block_shape, pads_begin, pads_end);
NodeBuilder builder(op);
const auto expected_attr_count = 0;
EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
}

View File

@ -18,14 +18,72 @@ To run fuzzing you will need [LLVM](https://apt.llvm.org/) components:
- lld (linker) - lld (linker)
- libc++ - libc++
## Reproducing Failure Found by Fuzzing
1. Build `fuzz` test target: ## Building fuzz tests
1. Build openvino
Build openvino with the `ENABLE_FUZZING` and `ENABLE_SANITIZER` options enabled. It is
recommended to use the clang compiler.
```bash ```bash
cmake -DENABLE_TESTS=ON .. && ninja fuzz (\
mkdir -p build && cd build && \
CC=clang CXX=clang++ cmake .. -DENABLE_FUZZING=ON -DENABLE_SANITIZER=ON -DTREAT_WARNING_AS_ERROR=OFF && \
cmake --build . \
)
``` ```
2. Run fuzzing test passing a failure reproducer as a command-line argument: 2. Build fuzz tests
``` bash
./read_network-fuzzer crash-reproducer Build fuzz tests with options `ENABLE_FUZZING` and `ENABLE_SANITIZER` enabled.
You should use the same compiler as was used for the openvino build.
```bash
(\
mkdir -p tests/fuzz/build && cd tests/fuzz/build && \
CC=clang CXX=clang++ cmake .. -DENABLE_FUZZING=ON -DENABLE_SANITIZER=ON -DTREAT_WARNING_AS_ERROR=OFF -DInferenceEngine_DIR=$(pwd)/../../../build && \
cmake --build . \
)
``` ```
## Running fuzz tests
1. Prepare fuzzing corpus
The fuzzing engine needs a set of valid inputs to start fuzzing from. Those files
are called a fuzzing corpus. Place valid inputs for the fuzzing test into a
directory.
Intel employees can get the corpus as described here
https://wiki.ith.intel.com/x/2N42bg.
2. Run fuzzing
```bash
./read_network-fuzzer -max_total_time=600 ./read_network-corpus
```
Consider adding these useful command-line options:
- `-jobs=$(nproc)` runs multiple fuzzing jobs in parallel.
- `-rss_limit_mb=0` ignores out-of-memory issues.
## Analyzing fuzzing quality
### Explore code coverage
To build a coverage report after fuzz test execution, run:
```bash
llvm-profdata merge -sparse *.profraw -o default.profdata && \
llvm-cov show ./read_network-fuzzer -instr-profile=default.profdata -format=html -output-dir=read_network-coverage
```
## Reproducing findings
A fuzzing run halts on the first issue identified, prints the issue details to stdout, and saves the data needed to reproduce the issue as a file in the current folder. To debug the issue, pass the reproducer as a command-line argument to the fuzz test:
```bash
./read_network-fuzzer crash-409b5eeed46a8445b7f7b7a2ce5b60a9ad895e3b
```
It is recommended, but not required, to use binaries built for fuzzing to debug the issues. Binaries built without the `ENABLE_FUZZING` option can also be used to reproduce and debug them.

View File

@ -6,7 +6,7 @@ set(TARGET_NAME fuzz-testhelper)
file( file(
GLOB SRC_FILES GLOB SRC_FILES
${CMAKE_CURRENT_SOURCE_DIR}/*-testhelper.cc) ${CMAKE_CURRENT_SOURCE_DIR}/*.cc)
add_library( add_library(
${TARGET_NAME} STATIC ${TARGET_NAME} STATIC

View File

@ -0,0 +1,40 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "fuzz-utils.h"
#include <stdexcept>
#include <stdlib.h>
#include <string.h>
#include <string>
#ifndef _WIN32
#include <unistd.h>
#endif // _WIN32
MemoryFile::MemoryFile(const void *data, size_t size) {
#ifdef _WIN32
throw std::runtime_error("MemoryFile is not implemented for Windows");
#else // _WIN32
m_name = strdup("/dev/shm/fuzz-XXXXXX");
if (!m_name)
throw std::bad_alloc();
int fd = mkstemp(m_name);
if (size) {
size_t nbytes = write(fd, data, size);
if (nbytes != size) {
close(fd);
// Build the error message before releasing m_name to avoid a use-after-free.
std::string message = "Failed to write " + std::to_string(size) +
" bytes to " + m_name;
free(m_name);
throw std::runtime_error(message);
}
}
close(fd);
#endif // _WIN32
}
MemoryFile::~MemoryFile() {
#ifndef _WIN32
unlink(m_name);
free(m_name);
#endif // _WIN32
}

View File

@ -0,0 +1,19 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <stddef.h>
class MemoryFile {
public:
/// Create a memory backed file
MemoryFile(const void *data, size_t size);
/// Delete memory backed file
~MemoryFile();
/// Get the path to the file.
const char *name() { return m_name; }
private:
char *m_name;
};
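For context, here is a minimal usage sketch (illustrative only, not a file in this change) of how a fuzz target feeds in-memory bytes to an API that only accepts file paths by going through MemoryFile:

```cpp
// Illustrative sketch: round-trip fuzzer input through a MemoryFile so that
// file-path-based APIs can consume in-memory data. Reading the file back with
// std::ifstream stands in for the real API under test.
#include <fstream>
#include <iterator>
#include <string>
#include <stddef.h>
#include <stdint.h>
#include "fuzz-utils.h"

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    MemoryFile file(data, size);  // writes the bytes to a temporary file
    std::ifstream stream(file.name(), std::ios::binary);
    std::string contents((std::istreambuf_iterator<char>(stream)),
                         std::istreambuf_iterator<char>());
    (void)contents;  // a real target would pass file.name() to the parser under test
    return 0;        // the temporary file is removed when `file` goes out of scope
}
```

The npy_load fuzzer below uses the same pattern with cnpy::npy_load().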

View File

@ -9,11 +9,14 @@ add_custom_target(fuzz)
# Fuzz test target name is source file name without extension. # Fuzz test target name is source file name without extension.
FILE(GLOB tests "*-fuzzer.cc") FILE(GLOB tests "*-fuzzer.cc")
add_subdirectory(../../../thirdparty/cnpy ${CMAKE_CURRENT_BINARY_DIR}/cnpy)
add_subdirectory(../../../thirdparty/zlib ${CMAKE_CURRENT_BINARY_DIR}/zlib)
foreach(test_source ${tests}) foreach(test_source ${tests})
get_filename_component(test_name ${test_source} NAME_WE) get_filename_component(test_name ${test_source} NAME_WE)
add_fuzzer(${test_name} ${test_source}) add_fuzzer(${test_name} ${test_source})
target_link_libraries(${test_name} PRIVATE IE::inference_engine) target_link_libraries(${test_name} PRIVATE IE::inference_engine cnpy zlib)
add_dependencies(fuzz ${test_name}) add_dependencies(fuzz ${test_name})
endforeach() endforeach()

View File

@ -0,0 +1,21 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <stdio.h>
#include <cnpy.h>
#include "fuzz-utils.h"
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * inputData, size_t inputSize) {
MemoryFile file(inputData, inputSize);
try {
cnpy::NpyArray array = cnpy::npy_load(file.name());
}
catch (const std::exception&) {
return 0; // fail gracefully on expected exceptions
}
return 0;
}

View File

@ -67,6 +67,7 @@ def metadata_from_manifest(manifest):
'commit_sha': repo_trigger['revision'], 'commit_sha': repo_trigger['revision'],
'commit_date': repo_trigger['commit_time'], 'commit_date': repo_trigger['commit_time'],
'repo_url': repo_trigger['url'], 'repo_url': repo_trigger['url'],
'branch': repo_trigger['branch'],
'target_branch': repo_trigger['target_branch'], 'target_branch': repo_trigger['target_branch'],
'event_type': manifest['components'][PRODUCT_NAME]['build_event'].lower(), 'event_type': manifest['components'][PRODUCT_NAME]['build_event'].lower(),
f'{PRODUCT_NAME}_version': manifest['components'][PRODUCT_NAME]['version'], f'{PRODUCT_NAME}_version': manifest['components'][PRODUCT_NAME]['version'],

View File

@ -325,10 +325,11 @@ def manifest_metadata(request):
"repo_url": {"type": "string"}, "repo_url": {"type": "string"},
"commit_sha": {"type": "string"}, "commit_sha": {"type": "string"},
"commit_date": {"type": "string"}, "commit_date": {"type": "string"},
"branch": {"type": "string"},
"target_branch": {"type": "string"}, "target_branch": {"type": "string"},
"version": {"type": "string"} "version": {"type": "string"}
}, },
"required": ["product_type", "repo_url", "commit_sha", "commit_date", "target_branch", "version"], "required": ["product_type", "repo_url", "commit_sha", "commit_date", "branch", "target_branch", "version"],
"additionalProperties": false "additionalProperties": false
} }
""" """

View File

@ -57,7 +57,8 @@ def metadata_from_manifest(manifest: Path):
'commit_sha': repo_trigger['revision'], 'commit_sha': repo_trigger['revision'],
'commit_date': repo_trigger['commit_time'], 'commit_date': repo_trigger['commit_time'],
'repo_url': repo_trigger['url'], 'repo_url': repo_trigger['url'],
'target_branch': repo_trigger['branch'], 'branch': repo_trigger['branch'],
'target_branch': repo_trigger['target_branch'],
'version': manifest['components'][PRODUCT_NAME]['version'] 'version': manifest['components'][PRODUCT_NAME]['version']
} }

View File

@ -90,7 +90,9 @@ void cnpy::parse_npy_header(unsigned char* buffer,size_t& word_size, std::vector
//byte order code | stands for not applicable. //byte order code | stands for not applicable.
//not sure when this applies except for byte array //not sure when this applies except for byte array
loc1 = header.find("descr")+9; loc1 = header.find("descr")+9;
bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false); bool littleEndian = false;
if (loc1 < header.size())
littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false);
assert(littleEndian); assert(littleEndian);
//char type = header[loc1+1]; //char type = header[loc1+1];
@ -148,7 +150,9 @@ void cnpy::parse_npy_header(FILE* fp, size_t& word_size, std::vector<size_t>& sh
if (loc1 == std::string::npos) if (loc1 == std::string::npos)
throw std::runtime_error("parse_npy_header: failed to find header keyword: 'descr'"); throw std::runtime_error("parse_npy_header: failed to find header keyword: 'descr'");
loc1 += 9; loc1 += 9;
bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false); bool littleEndian = false;
if (loc1 < header.size())
littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false);
assert(littleEndian); assert(littleEndian);
//char type = header[loc1+1]; //char type = header[loc1+1];

View File

@ -27,6 +27,11 @@ namespace cnpy {
{ {
num_vals = 1; num_vals = 1;
for(size_t i = 0;i < shape.size();i++) num_vals *= shape[i]; for(size_t i = 0;i < shape.size();i++) num_vals *= shape[i];
if (word_size &&
num_vals > std::vector<char>().max_size() / word_size)
throw std::length_error("NpyArray of " + std::to_string(num_vals) +
"*" + std::to_string(word_size) +
" elements is too big.");
data_holder = std::shared_ptr<std::vector<char>>( data_holder = std::shared_ptr<std::vector<char>>(
new std::vector<char>(num_vals * word_size)); new std::vector<char>(num_vals * word_size));
} }

View File

@ -150,7 +150,7 @@ def run(args):
set_throughput_streams() set_throughput_streams()
if MULTI_DEVICE_NAME in device_name and CPU_DEVICE_NAME in device_name: if MULTI_DEVICE_NAME in device_name and CPU_DEVICE_NAME in device_name:
logger.warning("Turn on GPU trottling. Multi-device execution with the CPU + GPU performs best with GPU trottling hint, " + logger.warning("Turn on GPU throttling. Multi-device execution with the CPU + GPU performs best with GPU throttling hint, " +
"which releases another CPU thread (that is otherwise used by the GPU driver for active polling)") "which releases another CPU thread (that is otherwise used by the GPU driver for active polling)")
config[device]['GPU_PLUGIN_THROTTLE'] = '1' config[device]['GPU_PLUGIN_THROTTLE'] = '1'
elif device == MYRIAD_DEVICE_NAME: elif device == MYRIAD_DEVICE_NAME: