diff --git a/.github/org_control/check_pr.py b/.github/org_control/check_pr.py index 7bb8f89dd60..0e23251e157 100644 --- a/.github/org_control/check_pr.py +++ b/.github/org_control/check_pr.py @@ -147,21 +147,29 @@ def get_wrong_commits(pull): # import pprint; pprint.pprint(commit.raw_data) print("Commit SHA:", commit.sha) # Use raw data because commit author can be non GitHub user - commit_email = (commit.raw_data["commit"]["author"]["email"] or "").lower() - print(" Commit email:", commit_email) + commit_author_email = (commit.raw_data["commit"]["author"]["email"] or "").lower() + commit_committer_email = (commit.raw_data["commit"]["committer"]["email"] or "").lower() + print(" Commit author email:", commit_author_email) + print(" Commit committer email:", commit_committer_email) if not github_api.is_valid_user(commit.author): print( - " ERROR: User with the commit email is absent in GitHub:", + " ERROR: User with the commit author email is absent in GitHub:", commit.raw_data["commit"]["author"]["name"], ) wrong_commits.add(commit.sha) + if not github_api.is_valid_user(commit.committer): + print( + " ERROR: User with the commit committer email is absent in GitHub:", + commit.raw_data["commit"]["committer"]["name"], + ) + wrong_commits.add(commit.sha) if not commit.raw_data["commit"]["verification"]["verified"]: print( " WARNING: The commit is not verified. Reason:", commit.raw_data["commit"]["verification"]["reason"], ) - if pr_author_email != commit_email: - print(" WARNING: Commit email and GitHub PR author public email are differnt") + if pr_author_email != commit_author_email or pr_author_email != commit_committer_email: + print(" WARNING: Commit emails and GitHub PR author public email are different") return wrong_commits diff --git a/inference-engine/samples/benchmark_app/main.cpp b/inference-engine/samples/benchmark_app/main.cpp index cd7ddc641dc..2a5252ba443 100644 --- a/inference-engine/samples/benchmark_app/main.cpp +++ b/inference-engine/samples/benchmark_app/main.cpp @@ -277,8 +277,8 @@ int main(int argc, char* argv[]) { setThroughputStreams(); if ((device_name.find("MULTI") != std::string::npos) && (device_name.find("CPU") != std::string::npos)) { - slog::warn << "Turn on GPU trottling. Multi-device execution with " - "the CPU + GPU performs best with GPU trottling hint," + slog::warn << "Turn on GPU throttling. Multi-device execution with " + "the CPU + GPU performs best with GPU throttling hint, " << "which releases another CPU thread (that is otherwise " "used by the GPU driver for active polling)" << slog::endl; diff --git a/inference-engine/src/low_precision_transformations/src/normalize_l2.cpp b/inference-engine/src/low_precision_transformations/src/normalize_l2.cpp index 4368a48075f..93dcdf8afbe 100644 --- a/inference-engine/src/low_precision_transformations/src/normalize_l2.cpp +++ b/inference-engine/src/low_precision_transformations/src/normalize_l2.cpp @@ -120,10 +120,10 @@ bool NormalizeL2Transformation::transform(TransformationContext &context, ngraph } auto newNormalize = std::make_shared>( - std::vector{ element::f32, element::f32 }, + std::vector{ element::f32, axes->output(0).get_element_type() }, std::vector{deqPrecision}, ngraph::op::TemporaryReplaceOutputType(dequantization.subtract == nullptr ?
dequantization.data : dequantization.subtract, element::f32).get(), - ngraph::op::TemporaryReplaceOutputType(axes->clone_with_new_inputs({}), element::f32).get(), + axes, normalize->get_eps(), normalize->get_eps_mode()); NetworkHelper::copyInfo(normalize, newNormalize); diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/space_to_batch.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/space_to_batch.cpp new file mode 100644 index 00000000000..1b55e2fbac7 --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/space_to_batch.cpp @@ -0,0 +1,38 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_layer/space_to_batch.hpp" + +#include + +using namespace LayerTestsDefinitions; + +namespace { + +TEST_P(SpaceToBatchLayerTest, Serialize) { + Serialize(); +} + +const std::vector> blockShapes4D{{1, 1, 2, 2}}; +const std::vector> padsBegins4D{{0, 0, 0, 0}, + {0, 0, 0, 2}}; +const std::vector> padsEnds4D{{0, 0, 0, 0}, {0, 0, 0, 2}}; +const std::vector> dataShapes4D{ + {1, 1, 2, 2}, {1, 3, 2, 2}, {1, 1, 4, 4}, {2, 1, 2, 4}}; + +const auto SpaceToBatch4D = ::testing::Combine( + ::testing::ValuesIn(blockShapes4D), ::testing::ValuesIn(padsBegins4D), + ::testing::ValuesIn(padsEnds4D), ::testing::ValuesIn(dataShapes4D), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(CommonTestUtils::DEVICE_CPU)); + +INSTANTIATE_TEST_CASE_P(smoke_spacetobatch4D_Serialization, + SpaceToBatchLayerTest, SpaceToBatch4D, + SpaceToBatchLayerTest::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/inference_engine/transformations/mvn_fusion_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/mvn_fusion_test.cpp index e45258a63e6..ecad9adff3d 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/mvn_fusion_test.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/mvn_fusion_test.cpp @@ -30,7 +30,7 @@ TEST(TransformationTests, MVNFusionTestOutside) { auto sub2 = std::make_shared(input, mean2); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto power_sqr = std::make_shared(sub2, const_2); - auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); + auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3 = std::make_shared(power_sqr, mean3_axes); auto const_0_5 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 0.5 }); auto power_sqrt = std::make_shared(mean3, const_0_5); @@ -70,7 +70,7 @@ TEST(TransformationTests, MVNFusionTestReuseSub) { auto sub1 = std::make_shared(input, mean1); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto power_sqr = std::make_shared(sub1, const_2); - auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); + auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3 = std::make_shared(power_sqr, mean3_axes); auto const_0_5 = 
ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 0.5 }); auto power_sqrt = std::make_shared(mean3, const_0_5); @@ -111,7 +111,7 @@ TEST(TransformationTests, MVNFusionTestWithConvert) { auto cast = std::make_shared(sub1, ngraph::element::f32); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto power_sqr = std::make_shared(cast, const_2); - auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); + auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3 = std::make_shared(power_sqr, mean3_axes); auto const_0_5 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 0.5 }); auto power_sqrt = std::make_shared(mean3, const_0_5); @@ -151,7 +151,7 @@ TEST(TransformationTests, MVNFusionTestSqrt) { auto sub1 = std::make_shared(input, mean1); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto power_sqr = std::make_shared(sub1, const_2); - auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); + auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3 = std::make_shared(power_sqr, mean3_axes); auto power_sqrt = std::make_shared(mean3); auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 }); @@ -190,7 +190,7 @@ TEST(TransformationTests, MVNFusionTestAltDiv) { auto sub1 = std::make_shared(input, mean1); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto power_sqr = std::make_shared(sub1, const_2); - auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); + auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3 = std::make_shared(power_sqr, mean3_axes); auto const_0_5 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 0.5 }); auto power_sqrt = std::make_shared(mean3, const_0_5); @@ -231,7 +231,7 @@ TEST(TransformationTests, MVNFusionTestInsideSqrt) { auto sub2 = std::make_shared(input, mean2); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto power_sqr = std::make_shared(sub2, const_2); - auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); + auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3 = std::make_shared(power_sqr, mean3_axes); auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 }); auto add_eps = std::make_shared(mean3, eps); @@ -271,7 +271,7 @@ TEST(TransformationTests, MVNFusionTestReuseSubInsideSqrt) { auto sub1 = std::make_shared(input, mean1); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto power_sqr = std::make_shared(sub1, const_2); - auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); + auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3 = std::make_shared(power_sqr, mean3_axes); auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 }); auto add_eps = std::make_shared(mean3, eps); @@ -312,7 
+312,7 @@ TEST(TransformationTests, MVNFusionTestWithConvertInsideSqrt) { auto cast = std::make_shared(sub1, ngraph::element::f32); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto power_sqr = std::make_shared(cast, const_2); - auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); + auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3 = std::make_shared(power_sqr, mean3_axes); auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 }); auto add_eps = std::make_shared(mean3, eps); @@ -352,7 +352,7 @@ TEST(TransformationTests, MVNFusionTestSqrtInsideSqrt) { auto sub1 = std::make_shared(input, mean1); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto power_sqr = std::make_shared(sub1, const_2); - auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); + auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3 = std::make_shared(power_sqr, mean3_axes); auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 }); auto add_eps = std::make_shared(mean3, eps); @@ -391,7 +391,7 @@ TEST(TransformationTests, MVNFusionTestAltDivInsideSqrt) { auto sub1 = std::make_shared(input, mean1); auto const_2 = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 2 }); auto power_sqr = std::make_shared(sub1, const_2); - auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{ 3 }, { 1, 2, 3 }); + auto mean3_axes = ngraph::opset6::Constant::create(ngraph::element::i32, ngraph::Shape{ 3 }, { 1, 2, 3 }); auto mean3 = std::make_shared(power_sqr, mean3_axes); auto eps = ngraph::opset6::Constant::create(ngraph::element::f32, ngraph::Shape{}, { 1e-9 }); auto add_eps = std::make_shared(mean3, eps); diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/space_to_batch.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/space_to_batch.cpp index ac4801df822..884440c7bde 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/space_to_batch.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/space_to_batch.cpp @@ -11,47 +11,64 @@ using namespace LayerTestsDefinitions; namespace { -spaceToBatchParamsTuple stb_only_test_cases[] = { - spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {1, 1, 2, 2}, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, - CommonTestUtils::DEVICE_CPU), - spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {1, 3, 2, 2}, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, - CommonTestUtils::DEVICE_CPU), - spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {1, 1, 4, 4}, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, - CommonTestUtils::DEVICE_CPU), 
- spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 2}, {0, 0, 0, 0}, {2, 1, 2, 4}, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, - CommonTestUtils::DEVICE_CPU), - spaceToBatchParamsTuple({1, 1, 3, 2, 2}, {0, 0, 1, 0, 3}, {0, 0, 2, 0, 0}, {1, 1, 3, 2, 1}, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, - CommonTestUtils::DEVICE_CPU), +const std::vector> blockShapes4D { + {1, 1, 2, 2} +}; +const std::vector> padsBegins4D { + {0, 0, 0, 0}, {0, 0, 0, 2} +}; +const std::vector> padsEnds4D { + {0, 0, 0, 0}, {0, 0, 0, 2} +}; +const std::vector> dataShapes4D { + {1, 1, 2, 2}, {1, 3, 2, 2}, {1, 1, 4, 4}, {2, 1, 2, 4} }; +const auto SpaceToBatch4D = ::testing::Combine( + ::testing::ValuesIn(blockShapes4D), + ::testing::ValuesIn(padsBegins4D), + ::testing::ValuesIn(padsEnds4D), + ::testing::ValuesIn(dataShapes4D), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + INSTANTIATE_TEST_CASE_P( - smoke_MKLDNN, SpaceToBatchLayerTest, ::testing::ValuesIn(stb_only_test_cases), + smoke_spacetobatch4D, SpaceToBatchLayerTest, SpaceToBatch4D, SpaceToBatchLayerTest::getTestCaseName); +const std::vector> blockShapes5D { + {1, 1, 3, 2, 2} +}; +const std::vector> padsBegins5D { + {0, 0, 1, 0, 3} +}; +const std::vector> padsEnds5D { + {0, 0, 2, 0, 0} +}; +const std::vector> dataShapes5D { + {1, 1, 3, 2, 1} +}; + +const auto SpaceToBatch5D = ::testing::Combine( + ::testing::ValuesIn(blockShapes5D), + ::testing::ValuesIn(padsBegins5D), + ::testing::ValuesIn(padsEnds5D), + ::testing::ValuesIn(dataShapes5D), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +INSTANTIATE_TEST_CASE_P( + smoke_spacetobatch5D, SpaceToBatchLayerTest, SpaceToBatch5D, + SpaceToBatchLayerTest::getTestCaseName); } // namespace \ No newline at end of file diff --git a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/normalize_l2_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/normalize_l2_function.cpp index 553ec6575ec..4d1ef00a598 100644 --- a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/normalize_l2_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/normalize_l2_function.cpp @@ -36,7 +36,7 @@ std::shared_ptr NormalizeL2Function::getOriginal( fakeQuantize->set_friendly_name("fakeQuantize"); - const auto axesNode = std::make_shared(ngraph::element::u64, ngraph::Shape{ axes.size() }, axes); + const auto axesNode = std::make_shared(ngraph::element::i64, ngraph::Shape{ axes.size() }, axes); axesNode->set_friendly_name("axes"); const auto normalizeL2 = std::make_shared(fakeQuantize->output(0), axesNode, 1e-6, ngraph::op::EpsMode::ADD); normalizeL2->set_friendly_name("normalizeL2"); @@ -104,10 +104,10 @@ std::shared_ptr NormalizeL2Function::getReference( 
const auto axesNode = std::make_shared(ngraph::element::i64, ngraph::Shape{ axes.size() }, axes); const auto normalizeL2 = std::make_shared>( - std::vector{ element::f32, element::f32 }, + std::vector{ element::f32, axesNode->output(0).get_element_type() }, std::vector{dequantizationAfter.empty() ? precision : element::f32}, ngraph::op::TemporaryReplaceOutputType(deqBefore, element::f32).get(), - ngraph::op::TemporaryReplaceOutputType(axesNode, element::f32).get(), + axesNode, 1e-6, epsMode); auto& rtInfo = normalizeL2->get_rt_info(); diff --git a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/transformations_after_split_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/transformations_after_split_function.cpp index 8b2010abb76..ad8fd671592 100644 --- a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/transformations_after_split_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/transformations_after_split_function.cpp @@ -137,7 +137,7 @@ std::shared_ptr TransformationsAfterSplitFunction::getLayerByTransformatio } if (transformationName == "NormalizeL2Transformation") { const auto dequantization = makeDequantization(parent, { {element::f32}, {}, { 0.1f } }); - const auto axesNode = opset1::Constant::create(element::u64, ngraph::Shape{ 3 }, { 1, 2, 3 }); + const auto axesNode = opset1::Constant::create(element::i64, ngraph::Shape{ 3 }, { 1, 2, 3 }); return std::make_shared(dequantization, axesNode, 1e-6, ngraph::op::EpsMode::ADD); } if (transformationName == "PReluTransformation") { diff --git a/ngraph/core/include/ngraph/op/selu.hpp b/ngraph/core/include/ngraph/op/selu.hpp index 9d71f71936b..9bc1c71acd6 100644 --- a/ngraph/core/include/ngraph/op/selu.hpp +++ b/ngraph/core/include/ngraph/op/selu.hpp @@ -6,10 +6,6 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" -#include "ngraph/op/util/fused_op.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - namespace ngraph { namespace op @@ -17,12 +13,12 @@ namespace ngraph namespace v0 { /// \brief Performs a SELU activation function on all elements of the input node - class NGRAPH_API Selu : public ngraph::op::util::FusedOp + class NGRAPH_API Selu : public ngraph::op::Op { public: - static constexpr NodeTypeInfo type_info{"Selu", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - Selu(); + NGRAPH_RTTI_DECLARATION; + + Selu() = default; /// \brief Constructs a Selu node. 
/// /// \param data - Node producing the input tensor @@ -31,9 +27,10 @@ namespace ngraph Selu(const Output& data, const Output& alpha, const Output& lambda); - virtual void pre_validate_and_infer_types() override; + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; - virtual OutputVector decompose_op() const override; virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; @@ -42,5 +39,3 @@ namespace ngraph using v0::Selu; } // namespace op } // namespace ngraph - -NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/ngraph/core/include/ngraph/op/space_to_batch.hpp b/ngraph/core/include/ngraph/op/space_to_batch.hpp index 35587652384..9f6220b3293 100644 --- a/ngraph/core/include/ngraph/op/space_to_batch.hpp +++ b/ngraph/core/include/ngraph/op/space_to_batch.hpp @@ -4,8 +4,7 @@ #pragma once -#include "ngraph/node.hpp" -#include "ngraph/op/util/fused_op.hpp" +#include "ngraph/op/op.hpp" namespace ngraph { @@ -27,8 +26,8 @@ namespace ngraph class NGRAPH_API SpaceToBatch : public Op { public: - static constexpr NodeTypeInfo type_info{"SpaceToBatch", 1}; - const NodeTypeInfo& get_type_info() const override { return type_info; } + NGRAPH_RTTI_DECLARATION; + SpaceToBatch() = default; /// \brief Constructs a SpaceToBatch operation. diff --git a/ngraph/core/include/ngraph/op/util/arithmetic_reduction.hpp b/ngraph/core/include/ngraph/op/util/arithmetic_reduction.hpp index 2d433e340d2..6f9484ab340 100644 --- a/ngraph/core/include/ngraph/op/util/arithmetic_reduction.hpp +++ b/ngraph/core/include/ngraph/op/util/arithmetic_reduction.hpp @@ -5,6 +5,7 @@ #pragma once #include "ngraph/op/op.hpp" +#include "ngraph/op/util/reduction_base.hpp" namespace ngraph { @@ -15,7 +16,7 @@ namespace ngraph /// \brief Abstract base class for arithmetic reduction operations, i.e., operations /// where chosen axes of the input tensors are eliminated (reduced out) by /// repeated application of a particular binary arithmetic operation. - class NGRAPH_API ArithmeticReduction : public Op + class NGRAPH_API ArithmeticReduction : public ReductionBase { protected: /// \brief Constructs an arithmetic reduction operation. diff --git a/ngraph/core/src/op/util/evaluate_helpers.hpp b/ngraph/core/include/ngraph/op/util/evaluate_helpers.hpp similarity index 79% rename from ngraph/core/src/op/util/evaluate_helpers.hpp rename to ngraph/core/include/ngraph/op/util/evaluate_helpers.hpp index d67a212f157..abcd8729a98 100644 --- a/ngraph/core/src/op/util/evaluate_helpers.hpp +++ b/ngraph/core/include/ngraph/op/util/evaluate_helpers.hpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +#pragma once + #include "ngraph/axis_set.hpp" #include "ngraph/descriptor/tensor.hpp" #include "ngraph/util.hpp" @@ -18,10 +20,5 @@ namespace ngraph /// \return Normalized (positive only) axes as an AxisSet object. 
AxisSet get_normalized_axes_from_tensor(const HostTensorPtr tensor, const ngraph::Rank& rank, - const std::string& node_description) - { - const auto axes_vector = host_tensor_2_vector(tensor); - const auto normalized_axes = ngraph::normalize_axes(node_description, axes_vector, rank); - return AxisSet{normalized_axes}; - } + const std::string& node_description); } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/util/logical_reduction.hpp b/ngraph/core/include/ngraph/op/util/logical_reduction.hpp index 0fa85369b31..1c8953ba1fd 100644 --- a/ngraph/core/include/ngraph/op/util/logical_reduction.hpp +++ b/ngraph/core/include/ngraph/op/util/logical_reduction.hpp @@ -5,6 +5,7 @@ #pragma once #include "ngraph/op/op.hpp" +#include "ngraph/op/util/reduction_base.hpp" namespace ngraph { @@ -15,7 +16,7 @@ namespace ngraph /// \brief Abstract base class for logical reduction operations, i.e., operations where /// chosen axes of the input tensors are eliminated (reduced out) by repeated /// application of a particular binary logical operation. - class NGRAPH_API LogicalReduction : public Op + class NGRAPH_API LogicalReduction : public ReductionBase { protected: /// \brief Constructs a logical reduction operation. diff --git a/ngraph/core/include/ngraph/op/util/reduction_base.hpp b/ngraph/core/include/ngraph/op/util/reduction_base.hpp new file mode 100644 index 00000000000..5cc10b0be7e --- /dev/null +++ b/ngraph/core/include/ngraph/op/util/reduction_base.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/op.hpp" + +namespace ngraph +{ + namespace op + { + namespace util + { + class NGRAPH_API ReductionBase : public Op + { + protected: + /// \brief Constructs a reduction operation. + ReductionBase(); + + /// \brief Constructs a reduction operation. + /// + /// \param arg Output that produces the first input tensor. + /// \param reduction_axes The axis positions (0-based) to be eliminated. + ReductionBase(const Output& arg, const Output& reduction_axes); + + /// \brief Infers reduction operations output shape. + /// + /// \param[in] keep_dims Reduction operation keeps dimensions. + /// + /// \return Partial shape of the output. 
+ PartialShape infer_reduction_output_shape(const bool keep_dims); + + public: + NGRAPH_RTTI_DECLARATION; + }; + } // namespace util + } // namespace op +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/convolution.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/convolution.hpp index e5e12bac9ac..adee512d975 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/convolution.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/convolution.hpp @@ -18,8 +18,6 @@ #include "ngraph/runtime/reference/split.hpp" #include "ngraph/util.hpp" -// can't be removed currently due to arm-plugin dependency -#include "ngraph/runtime/reference/convolution_backprop_data.hpp" namespace ngraph { namespace runtime @@ -42,15 +40,18 @@ namespace ngraph std::vector dilation; std::vector pads_begin; std::vector pads_end; + std::vector output_padding; ConvolutionParams(const Strides& strides_, const Strides& dilation_, const CoordinateDiff& pads_begin_, - const CoordinateDiff& pads_end_) + const CoordinateDiff& pads_end_, + const CoordinateDiff& output_padding_ = {0, 0, 0}) : strides{strides_.begin(), strides_.end()} , dilation{dilation_.begin(), dilation_.end()} , pads_begin{pads_begin_.begin(), pads_begin_.end()} - , pads_end{pads_end_.begin(), pads_end_.end()} {}; + , pads_end{pads_end_.begin(), pads_end_.end()} + , output_padding{output_padding_.begin(), output_padding_.end()} {}; }; template @@ -86,15 +87,18 @@ namespace ngraph const size_t filter_channel_size = shape_size(filter_channel_shape); for (int i_z = -p.pads_begin[0]; - i_z <= (p.pads_end[0] + input_size_z - dilated_filter_size_z); + i_z <= (p.pads_end[0] + input_size_z - dilated_filter_size_z + + p.output_padding[0]); i_z += p.strides[0]) { for (int i_y = -p.pads_begin[1]; - i_y <= (p.pads_end[1] + input_size_y - dilated_filter_size_y); + i_y <= (p.pads_end[1] + input_size_y - dilated_filter_size_y + + p.output_padding[1]); i_y += p.strides[1]) { for (int i_x = -p.pads_begin[2]; - i_x <= (p.pads_end[2] + input_size_x - dilated_filter_size_x); + i_x <= (p.pads_end[2] + input_size_x - dilated_filter_size_x + + p.output_padding[2]); i_x += p.strides[2]) { auto input_channel = batch; @@ -154,6 +158,8 @@ namespace ngraph std::prev(p.pads_begin.end(), spatial_rank), missing_dims, 0); p.pads_end.insert( std::prev(p.pads_end.end(), spatial_rank), missing_dims, 0); + p.output_padding.insert( + std::prev(p.output_padding.end(), spatial_rank), missing_dims, 0); in_shape.insert(std::next(in_shape.end(), -spatial_rank), missing_dims, 1); filter_shape.insert( std::prev(filter_shape.end(), spatial_rank), missing_dims, 1); @@ -324,3 +330,6 @@ namespace ngraph } // namespace reference } // namespace runtime } // namespace ngraph + +// can't be removed currently due to arm-plugin dependency +#include "ngraph/runtime/reference/convolution_backprop_data.hpp" diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/convolution_backprop_data.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/convolution_backprop_data.hpp index 3fa325f7726..1c755198163 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/convolution_backprop_data.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/convolution_backprop_data.hpp @@ -10,11 +10,7 @@ #include #include "ngraph/axis_vector.hpp" -#include "ngraph/coordinate_transform.hpp" -#include "ngraph/runtime/reference/concat.hpp" -#include "ngraph/runtime/reference/helpers.hpp" -#include 
"ngraph/runtime/reference/reverse.hpp" -#include "ngraph/runtime/reference/split.hpp" +#include "ngraph/runtime/reference/convolution.hpp" #include "ngraph/util.hpp" namespace ngraph @@ -23,217 +19,302 @@ namespace ngraph { namespace reference { - // in: NC_I... - // filter: C_OC_I... - // out: NC_O... - template ::type> - void convolution_backprop_impl(const INPUT* in, - const FILTER* filter, - OUTPUT* out, - const Shape& in_shape, - const Shape& filter_shape, - const Shape& out_shape, - const Strides& stride, - const Strides& filter_dilation, - const CoordinateDiff& in_pad_below, - const CoordinateDiff& in_pad_above, - const Strides& in_dilation, - size_t in_batch_axis, - size_t in_channel_axis, - size_t filter_out_channel_axis, - size_t filter_in_channel_axis, - size_t out_batch_axis, - size_t out_channel_axis) + namespace { - auto old_mode = std::fegetround(); - std::fesetround(FE_TONEAREST); - // Comments throughout assume without loss of generality that: - // - // * batch axes for both in and out are 0 - // * in channel axes for both in and filter are 1 - // * out channel axes for filter is 0 - // * out channel axis for out is 1 + constexpr size_t filter_input_ch_axis = 0; - // At the outermost level we will walk over every out coordinate O. - CoordinateTransform out_transform(out_shape); - - for (const Coordinate& out_coord : out_transform) + template + void extend_with_zeros(const Strides& strides, + const Shape& input_shape, + const T* in, + Shape& output_shape, + std::vector& input_zeros) { - // Our out coordinate O will have the form: - // - // (N,chan_out,i_1,...,i_n) + std::vector input_3d(3, 1); + std::vector strides_3d(3, 1); + std::vector output_3d(3, 1); - size_t batch_index = out_coord[out_batch_axis]; - size_t out_channel = out_coord[out_channel_axis]; - - // For the in we need to iterate the coordinate: - // - // I: - // - // over the range (noninclusive on the right): - // - // (N,0,s_1*i_1,s_2*i_2,...,s_n*i_n) -> - // - // (N+1, - // chans_in_count, - // s_1*i_1+ l_1*filter_dims_1, - /// ..., - /// s_n*i_n +l_n*filter_dims_n) - // - // with strides: - // - // (1,l_1,...,l_n). - // - // Note that we are iterating within the *padded* and *dilated* in batch, so - // further down we must check the current coordinate is in the pad or dilation - // gap. 
- - size_t n_spatial_dimensions = in_shape.size() - 2; - size_t n_in_channels = in_shape[in_channel_axis]; - - Coordinate in_transform_start(2 + n_spatial_dimensions); - Coordinate in_transform_end(2 + n_spatial_dimensions); - Strides in_transform_movement_strides(2 + n_spatial_dimensions, 1); - CoordinateDiff in_transform_pad_below(2 + n_spatial_dimensions, 0); - CoordinateDiff in_transform_pad_above(2 + n_spatial_dimensions, 0); - Strides in_transform_dilation_strides(2 + n_spatial_dimensions, 1); - - in_transform_start[in_batch_axis] = batch_index; - in_transform_end[in_batch_axis] = batch_index + 1; - in_transform_start[in_channel_axis] = 0; - in_transform_end[in_channel_axis] = 1; - - for (size_t i = 2; i < n_spatial_dimensions + 2; i++) + for (size_t i = 0; i < strides.size(); ++i) { - size_t filter_dilation_stride = filter_dilation[i - 2]; - size_t filter_movement_stride = stride[i - 2]; - std::ptrdiff_t below_pad = in_pad_below[i - 2]; - std::ptrdiff_t above_pad = in_pad_above[i - 2]; - size_t in_dilation_stride = in_dilation[i - 2]; - - in_transform_start[i] = filter_movement_stride * out_coord[i]; - in_transform_end[i] = in_transform_start[i] + - (filter_shape[i] - 1) * filter_dilation_stride + 1; - in_transform_movement_strides[i] = filter_dilation_stride; - in_transform_pad_below[i] = below_pad; - in_transform_pad_above[i] = above_pad; - in_transform_dilation_strides[i] = in_dilation_stride; + output_shape[i + 2] = + input_shape[i + 2] + (strides[i] - 1) * (input_shape[i + 2] - 1); + input_3d[input_3d.size() - strides.size() + i] = input_shape[i + 2]; + strides_3d[strides_3d.size() - strides.size() + i] = strides[i]; + output_3d[output_3d.size() - strides.size() + i] = output_shape[i + 2]; } - AxisVector in_transform_axis_order(2 + n_spatial_dimensions); - for (size_t i = 0; i < in_transform_axis_order.size(); i++) + const size_t input_size = shape_size(input_3d); + if (input_size == 1) { - in_transform_axis_order[i] = i; - } - CoordinateTransform in_transform(in_shape, - in_transform_start, - in_transform_end, - in_transform_movement_strides, - in_transform_axis_order, - in_transform_pad_below, - in_transform_pad_above, - in_transform_dilation_strides); - - // Simultaneously with iterating I, for the filter we need to iterate the - // coordinate: - // - // F - // - // over the range (noninclusive on the right): - // - // (chan_out,0,0,...,0) -> - // (chan_out+1, - // chans_in_count, - // filter_dims_1, - // ..., - // filter_dims_n) - // - // with unit stride. - - Shape filter_transform_start(2 + n_spatial_dimensions); - Shape filter_transform_end(2 + n_spatial_dimensions); - - filter_transform_start[filter_out_channel_axis] = out_channel; - filter_transform_end[filter_out_channel_axis] = out_channel + 1; - filter_transform_start[filter_in_channel_axis] = 0; - filter_transform_end[filter_in_channel_axis] = 1; - - for (size_t i = 2; i < n_spatial_dimensions + 2; i++) - { - filter_transform_start[i] = 0; - filter_transform_end[i] = filter_shape[i]; - } - - CoordinateTransform filter_transform( - filter_shape, filter_transform_start, filter_transform_end); - - // As we go, we sum up: - // - // out[O] += in[I] * filter[F]. 
- - ACCUMULATION result = 0; - - CoordinateTransform::Iterator in_it = in_transform.begin(); - CoordinateTransform::Iterator filter_it = filter_transform.begin(); - CoordinateTransform::Iterator in_it_end = in_transform.end(); - CoordinateTransform::Iterator filter_it_end = filter_transform.end(); - - size_t in_channel_stride = row_major_strides(in_shape).at(in_channel_axis); - size_t filter_in_channel_stride = - row_major_strides(filter_shape).at(filter_in_channel_axis); - - while (in_it != in_it_end && filter_it != filter_it_end) - { - const Coordinate& in_coord = *in_it; - if (in_transform.has_source_coordinate(in_coord)) + for (size_t i = 0; i < shape_size(input_shape); ++i) { - size_t in_idx = in_transform.index(in_coord); - const Coordinate& filter_coord = *filter_it; - size_t filter_idx = filter_transform.index(filter_coord); - for (size_t in_channel = 0; in_channel < n_in_channels; ++in_channel) + input_zeros.push_back(in[i]); + } + } + else + { + for (size_t batch = 0; batch < input_shape[0]; ++batch) + { + const auto offset_batch = batch * input_size * input_shape[1]; + for (size_t channel = 0; channel < input_shape[1]; ++channel) { - ACCUMULATION in_v = static_cast(in[in_idx]); - ACCUMULATION f_v = static_cast(filter[filter_idx]); + const auto offset_channel = offset_batch + channel * input_size; + for (int i_z = 0; i_z < input_3d[0]; ++i_z) + { + const auto offset_i_z = i_z * input_3d[2] * input_3d[1]; + for (int i_y = 0; i_y < input_3d[1]; ++i_y) + { + const auto offset_i_y = i_y * input_3d[2]; + for (int i_x = 0; i_x < input_3d[2]; ++i_x) + { + input_zeros.push_back( + in[offset_channel + i_x + offset_i_y + offset_i_z]); - result += in_v * f_v; - in_idx += in_channel_stride; - filter_idx += filter_in_channel_stride; + if (i_x < input_3d[2] - 1) + { + for (int k = 0; k < strides_3d[2] - 1; k++) + { + input_zeros.push_back(0); + } + } + } + + if (i_y < input_3d[1] - 1) + { + const auto new_size = + output_3d[2] * (strides_3d[1] - 1); + input_zeros.insert(input_zeros.begin() + + input_zeros.size(), + new_size, + 0); + } + } + + if (i_z < input_3d[0] - 1) + { + const auto new_size = + output_3d[1] * output_3d[2] * (strides_3d[0] - 1); + input_zeros.insert( + input_zeros.begin() + input_zeros.size(), new_size, 0); + } + } } } - ++in_it; - ++filter_it; } - - out[out_transform.index(out_coord)] = result; } - std::fesetround(old_mode); + + void infer_forward_convbackprop_output_shape(const Shape& in_spatial_shape, + const Shape& f_spatial_shape, + const Shape& out_spatial_shape, + Shape& infer_spatial_shape, + const Strides& strides, + const Strides& dilations, + const CoordinateDiff& output_padding) + { + for (size_t idx = 0; idx < in_spatial_shape.size(); idx++) + { + int total_padding = strides[idx] * (in_spatial_shape[idx] - 1) + + dilations[idx] * (f_spatial_shape[idx] - 1) + 1 - + out_spatial_shape[idx] + output_padding[idx]; + size_t padded_dim = std::max(total_padding, 0); + size_t filter_dilated_dim = dilations[idx] * (f_spatial_shape[idx] - 1) + 1; + size_t out_spatial_dim = (in_spatial_shape[idx] - 1) * strides[idx] + + filter_dilated_dim - padded_dim + + output_padding[idx]; + infer_spatial_shape.push_back(out_spatial_dim); + } + } + + void validate_convolution_backprop_parameters(const Shape& in_shape, + const Shape& f_shape, + const Shape& out_shape, + const Strides& strides, + const Strides& dilations, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const CoordinateDiff& output_padding) + { + // this implementation supports 1D, 2D and 3D 
convolutions + NGRAPH_CHECK(in_shape.size() >= 3 && in_shape.size() <= 5, + "Unsupported input rank: ", + in_shape); + + NGRAPH_CHECK(in_shape.size() == f_shape.size(), + "Incompatible input ranks: ", + in_shape.size(), + " and ", + f_shape.size()); + + NGRAPH_CHECK(in_shape[in_channel_axis] == f_shape[filter_input_ch_axis], + "Incompatible input channels in data batch and filters shapes: ", + in_shape[in_channel_axis], + " and ", + f_shape[filter_input_ch_axis]); + + NGRAPH_CHECK(in_shape.size() == out_shape.size(), + "Incompatible input and output ranks: ", + in_shape.size(), + " and ", + out_shape.size()); + + const auto spatial_dims = in_shape.size() - 2; + NGRAPH_CHECK(strides.size() == spatial_dims, + "Strides not defined for all and only spatial dimensions."); + + NGRAPH_CHECK(dilations.size() == spatial_dims, + "Dilations not defined for all and only spatial dimensions."); + + NGRAPH_CHECK((pads_begin.size() == pads_end.size()) && + (pads_begin.size() == spatial_dims), + "Pads not defined for all and only spatial dimensions."); + + NGRAPH_CHECK(!output_padding.empty() && output_padding.size() == spatial_dims, + "Output padding not defined for all and only spatial dimensions."); + + Shape out_spatial_shape{std::next(out_shape.begin(), 2), std::end(out_shape)}; + Shape inferred_out_spatial_shape{}; + infer_forward_convbackprop_output_shape( + Shape{std::next(in_shape.begin(), 2), std::end(in_shape)}, + Shape{std::next(f_shape.begin(), 2), std::end(f_shape)}, + Shape{std::next(out_shape.begin(), 2), std::end(out_shape)}, + inferred_out_spatial_shape, + strides, + dilations, + output_padding); + NGRAPH_CHECK(out_spatial_shape == inferred_out_spatial_shape, + "Incorrect output shape provided"); + } + } // namespace + + template <typename T> + void convolution_backprop_impl(const T* in, + const T* f, + T* out, + const Shape& in_shape, + const Shape& f_shape, + const Shape& out_shape, + const Strides& strides, + const Strides& dilation, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const CoordinateDiff& output_padding) + + { + // here we are converting all param types to ints to avoid arithmetic issues + // (e.g. signed + unsigned) in index calculation later + ConvolutionParams params{strides, dilation, pads_begin, pads_end, output_padding}; + + // here we are extending spatial dimensions to 3D, because we are going to use 3D + // convolution implementation to convolve also in 1D & 2D case + Shape input_shape{in_shape}; + Shape filters_shape{f_shape}; + if (in_shape.size() < 5) + { + extend_to_3D(params, input_shape, filters_shape); + } + + for (size_t i = 0; i < input_shape.size() - 2; ++i) + { + if (input_shape[i + 2] > 1 || filters_shape[i + 2] > 1) + { + params.pads_begin[i] = filters_shape[i + 2] - params.pads_begin[i] - 1; + params.pads_end[i] = filters_shape[i + 2] - params.pads_end[i] - 1; + } + else + { + params.pads_begin[i] = 0; + params.pads_end[i] = 0; + } + } + + // convert output shape to 3D, contains only spatial dimensions + Shape out_shape_3d{out_shape.begin() + 2, out_shape.end()}; + + int out_shape_rank = out_shape.size() - 2; + if (out_shape_rank < 3) + { + int missing_dims = 3 - out_shape_rank; + out_shape_3d.insert( + std::prev(out_shape_3d.end(), out_shape_rank), missing_dims, 1); + } + + // modify params.pads_end when output_shape was provided in ctor in order to + // calculate expected number of output elements + for (size_t i = 0; i < out_shape_3d.size(); i++) + { + if (out_shape_3d[i] > 1) + { + // expected_dim = (in - 1)* strides + filter - 2*padding +
out_padding + // strides is already applied (through 0's extension in input) + // padding = pads_begin + pads_end, formula below is using + // params.pad_begin/params.pads_end: + const size_t expected_dim = + out_shape_3d[i] - ((input_shape[i + 2] - 1) - filters_shape[i + 2] + + params.pads_begin[i] + params.pads_end[i] + 2 + + params.output_padding[i]); + params.pads_end[i] += expected_dim; + } + } + + const size_t filters_count = filters_shape[filter_out_ch_axis]; + const Shape filter_shape(++filters_shape.begin(), filters_shape.end()); + const size_t filter_size = shape_size(filter_shape); + + const size_t batches_count = input_shape[in_batch_axis]; + Shape batch_shape(++input_shape.begin(), input_shape.end()); + const size_t batch_size = shape_size(batch_shape); + + auto batch = in; + + for (size_t batch_idx = 0; batch_idx < batches_count; ++batch_idx) + { + auto filter = f; + for (size_t f_idx = 0; f_idx < filters_count; ++f_idx) + { + convolve_3D_channels(params, batch, batch_shape, filter, filter_shape, out); + filter += filter_size; + } + batch += batch_size; + } } - template ::type> - void convolution_backprop_in(const OUTPUT* delta_out, - const FILTER* filter, - INPUT* delta_in, - const Shape& out_shape, - const Shape& filter_shape, + template + void convolution_backprop_in(const T* delta_in, + const T* filter, + T* delta_out, const Shape& in_shape, + const Shape& filter_shape, + const Shape& out_shape, const Strides& in_dilation, const Strides& filter_dilation, const CoordinateDiff& forward_in_pad_bellow, const CoordinateDiff& forward_in_pad_above, - const Strides& stride) + const Strides& stride, + const CoordinateDiff& output_padding) { + std::vector extended_input; + std::vector extended_filter; + AxisSet reverse_axes; + + Shape conv_input_shape = in_shape; + Shape conv_filter_shape = filter_shape; + Strides conv_stride = stride; + Strides conv_filter_dilation = filter_dilation; + auto conv_input_data = delta_in; + + validate_convolution_backprop_parameters(in_shape, + filter_shape, + out_shape, + stride, + filter_dilation, + forward_in_pad_bellow, + forward_in_pad_above, + output_padding); + // Note that we only reverse the spatial dimensions here (loop // starts at 2) - std::vector reversed(shape_size(filter_shape)); - AxisSet reverse_axes; - size_t reverse_axes_start = 2; - for (size_t i = reverse_axes_start; i < filter_shape.size(); ++i) + std::vector reversed(shape_size(filter_shape)); + for (size_t i = 2; i < filter_shape.size(); ++i) { reverse_axes.insert(i); } @@ -242,55 +323,109 @@ namespace ngraph filter_shape, filter_shape, reverse_axes, - sizeof(FILTER)); - size_t filter_out_channel_axis = 1; - size_t filter_in_channel_axis = 0; + sizeof(T)); - // Compute backward pad out pad bellow - size_t spatial_dim_count = in_shape.size() - 2; + auto conv_filter_data = &reversed[0]; - CoordinateDiff backward_delta_out_pad_below; - backward_delta_out_pad_below.resize(spatial_dim_count); - - for (size_t i = 0; i < spatial_dim_count; i++) + // if channel number for output is > 1 then reverse layout of filter coefficients as + // it is required by convolve_3D_channels() function. + // Current layout: + // batch0_ch0|batch0_ch1|...|batch0_chN|...|batch1_ch0|batch1_ch1|...|batch1_chN|... + // Expected layout: + // batch0_ch0|batch1_ch0|...|batchN_ch0|...|batch0_ch1|batch1_ch1|...|batch1_chN|... 
+ if (filter_shape[1] > 1) { - backward_delta_out_pad_below[i] = - (static_cast(filter_shape[i + 2]) - 1) * filter_dilation[i] - - forward_in_pad_bellow[i]; - } - // Compute backward pad out pad above - CoordinateDiff backward_delta_out_pad_above; - backward_delta_out_pad_above.resize(spatial_dim_count); + std::vector temp_reversed(reversed); + const Shape filter_dim_shape(filter_shape.begin() + 2, filter_shape.end()); + const size_t filter_size = shape_size(filter_dim_shape); - for (size_t i = 0; i < spatial_dim_count; i++) - { - backward_delta_out_pad_above[i] = - (static_cast(filter_shape[i + 2]) - 1) * filter_dilation[i] + - ((forward_in_pad_bellow[i] + ((in_shape[i + 2]) - 1) * in_dilation[i] + - forward_in_pad_above[i] - - (static_cast(filter_shape[i + 2]) - 1) * filter_dilation[i]) % - stride[i]) - - forward_in_pad_above[i]; + for (size_t i = 0; i < filter_shape[1]; i++) + { + for (size_t j = 0; j < filter_shape[0]; j++) + { + const auto delta = temp_reversed.begin() + + j * filter_shape[1] * filter_size + i * filter_size; + const auto out = reversed.begin() + i * filter_shape[0] * filter_size + + j * filter_size; + std::copy(delta, delta + filter_size, out); + } + } } - convolution_backprop_impl( - delta_out, - &reversed[0], - delta_in, - out_shape, - filter_shape, - in_shape, - in_dilation, - filter_dilation, - backward_delta_out_pad_below, - backward_delta_out_pad_above, - stride, - 0, - 1, - filter_out_channel_axis, - filter_in_channel_axis, - 0, - 1); + // swap filter batch and channels + std::iter_swap(conv_filter_shape.begin(), conv_filter_shape.begin() + 1); + + // extend stride and filter inputs with zero padding for stride and filter_dilation + // > 1, after that set stride and filter params to 1. + const size_t stride_dim = + std::accumulate(stride.begin(), stride.end(), 1, std::multiplies()); + if (stride_dim >= 2) + { + extend_with_zeros(stride, in_shape, delta_in, conv_input_shape, extended_input); + std::fill(conv_stride.begin(), conv_stride.end(), 1); + conv_input_data = &extended_input[0]; + } + + const size_t dilation_dim = std::accumulate( + filter_dilation.begin(), filter_dilation.end(), 1, std::multiplies()); + if (dilation_dim >= 2) + { + extend_with_zeros(filter_dilation, + filter_shape, + reinterpret_cast(&reversed[0]), + conv_filter_shape, + extended_filter); + std::fill(conv_filter_dilation.begin(), conv_filter_dilation.end(), 1); + conv_filter_data = &extended_filter[0]; + } + + convolution_backprop_impl(conv_input_data, + conv_filter_data, + delta_out, + conv_input_shape, + conv_filter_shape, + out_shape, + conv_stride, + conv_filter_dilation, + forward_in_pad_bellow, + forward_in_pad_above, + output_padding); + } + + // DEPRECATED, can't be removed currently due to arm-plugin dependency + template ::type> + NGRAPH_DEPRECATED( + "convolution_backprop_in function with 4 template types is deprecated, use " + "function with 1 template and output_padding parameter.") + void convolution_backprop_in(const INPUT* delta_in, + const FILTER* filter, + OUTPUT* delta_out, + const Shape& in_shape, + const Shape& filter_shape, + const Shape& out_shape, + const Strides& in_dilation, + const Strides& filter_dilation, + const CoordinateDiff& forward_in_pad_bellow, + const CoordinateDiff& forward_in_pad_above, + const Strides& stride) + { + const ngraph::CoordinateDiff output_padding(in_shape.size() - 2, 0); + + convolution_backprop_in(delta_in, + filter, + delta_out, + in_shape, + filter_shape, + out_shape, + in_dilation, + filter_dilation, + forward_in_pad_bellow, + 
forward_in_pad_above, + stride, + output_padding); } } // namespace reference } // namespace runtime diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/group_convolution_backprop_data.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/group_convolution_backprop_data.hpp index b70c0d3ed9a..306ddf047bf 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/group_convolution_backprop_data.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/group_convolution_backprop_data.hpp @@ -178,23 +178,24 @@ namespace ngraph const size_t group_out_size = shape_size(group_out_shape); Strides in_dilation(in_shape.size(), 1); + const ngraph::CoordinateDiff output_padding(in_shape.size() - 2, 0); for (size_t batch_idx = 0; batch_idx < in_shape[in_batch_axis]; ++batch_idx) { group_filter = f; for (size_t group_idx = 0; group_idx < group_count; ++group_idx) { - runtime::reference::convolution_backprop_in( - group_batch, - group_filter, - group_out, - group_batch_shape, - group_filter_shape, - group_out_shape, - in_dilation, - dilation, - pads_begin, - pads_end, - strides); + runtime::reference::convolution_backprop_in(group_batch, + group_filter, + group_out, + group_batch_shape, + group_filter_shape, + group_out_shape, + in_dilation, + dilation, + pads_begin, + pads_end, + strides, + output_padding); group_batch += group_batch_size; group_filter += group_filter_size; group_out += group_out_size; diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/log_softmax.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/log_softmax.hpp index 11b9092e6e6..27198f465b3 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/log_softmax.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/log_softmax.hpp @@ -35,7 +35,7 @@ namespace ngraph arg[transform.index(coord)] - temp_max[temp_transform.index(temp_coord)]); } - sum(out, temp_sum.data(), shape, axes, true); + sum(out, temp_sum.data(), shape, axes); for (const Coordinate& coord : transform) { diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/normalize_l2.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/normalize_l2.hpp index 5da76763465..c32ec70488e 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/normalize_l2.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/normalize_l2.hpp @@ -42,7 +42,7 @@ namespace ngraph } std::vector sum_data(shape_size(reduce_shape)); - sum(sqr_data.data(), sum_data.data(), data_shape, axes, true); + sum(sqr_data.data(), sum_data.data(), data_shape, axes); autobroadcast_binop(data, sum_data.data(), out, diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/product.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/product.hpp index 14615dc8b7c..addd17eb82a 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/product.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/product.hpp @@ -5,6 +5,7 @@ #pragma once #include +#include #include "ngraph/coordinate_transform.hpp" #include "ngraph/shape_util.hpp" @@ -16,29 +17,27 @@ namespace ngraph namespace reference { template - void product(const T* arg, - T* out, - const Shape& in_shape, - const AxisSet& reduction_axes, - bool keep_dims) + void product(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) { - auto out_shape = reduce(in_shape, reduction_axes, keep_dims); - CoordinateTransform output_transform(out_shape); + constexpr bool dont_keep_dims_in_output = 
false; + const auto out_shape = reduce(in_shape, reduction_axes, dont_keep_dims_in_output); + std::fill(out, out + shape_size(out_shape), 1); - for (const Coordinate& output_coord : output_transform) - { - out[output_transform.index(output_coord)] = 1; - } - - CoordinateTransform input_transform(in_shape); + const auto in_strides = row_major_strides(in_shape); + const auto out_strides = row_major_strides(out_shape); + CoordinateTransformBasic input_transform(in_shape); for (const Coordinate& input_coord : input_transform) { - Coordinate output_coord = reduce(input_coord, reduction_axes, keep_dims); + const Coordinate output_coord = + reduce(input_coord, reduction_axes, dont_keep_dims_in_output); - size_t output_index = output_transform.index(output_coord); + const size_t in_idx = std::inner_product( + input_coord.begin(), input_coord.end(), in_strides.begin(), 0); + const size_t out_idx = std::inner_product( + output_coord.begin(), output_coord.end(), out_strides.begin(), 0); - out[output_index] = out[output_index] * arg[input_transform.index(input_coord)]; + out[out_idx] = out[out_idx] * arg[in_idx]; } } } // namespace reference diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/softmax.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/softmax.hpp index c31c901012c..94cb0549b73 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/softmax.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/softmax.hpp @@ -34,7 +34,7 @@ namespace ngraph arg[transform.index(coord)] - temp_ptr[temp_transform.index(temp_coord)]); } - sum(out, temp_ptr, shape, axes, true); + sum(out, temp_ptr, shape, axes); for (const Coordinate& coord : transform) { diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/sum.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/sum.hpp index 0c20eaf431a..d73ec441f7b 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/sum.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/sum.hpp @@ -5,6 +5,7 @@ #pragma once #include +#include #include "ngraph/coordinate_transform.hpp" #include "ngraph/shape_util.hpp" @@ -41,34 +42,34 @@ namespace ngraph } template - void sum(const T* arg, - T* out, - const Shape& in_shape, - const AxisSet& reduction_axes, - bool keep_dims) + void sum(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) { - auto out_shape = reduce(in_shape, reduction_axes, keep_dims); - CoordinateTransform output_transform(out_shape); - std::vector cs(shape_size(out_shape)); + constexpr bool dont_keep_dims_in_output = false; + const auto out_shape = reduce(in_shape, reduction_axes, dont_keep_dims_in_output); - for (const Coordinate& output_coord : output_transform) - { - out[output_transform.index(output_coord)] = 0; - cs[output_transform.index(output_coord)] = 0; - } + std::vector cs(shape_size(out_shape), 0); + std::fill(out, out + shape_size(out_shape), 0); - CoordinateTransform input_transform(in_shape); + const auto in_strides = row_major_strides(in_shape); + const auto out_strides = row_major_strides(out_shape); + CoordinateTransformBasic input_transform(in_shape); for (const Coordinate& input_coord : input_transform) { - Coordinate output_coord = reduce(input_coord, reduction_axes, keep_dims); + const Coordinate output_coord = + reduce(input_coord, reduction_axes, dont_keep_dims_in_output); - T x = arg[input_transform.index(input_coord)]; - T& z = out[output_transform.index(output_coord)]; + const size_t in_idx = std::inner_product( + 
input_coord.begin(), input_coord.end(), in_strides.begin(), 0); + const size_t out_idx = std::inner_product( + output_coord.begin(), output_coord.end(), out_strides.begin(), 0); + + T x = arg[in_idx]; + T& z = out[out_idx]; if (is_finite(x) && is_finite(z)) { - T& c = cs[output_transform.index(output_coord)]; + T& c = cs[out_idx]; T t = z + (x - c); c = (t - z) - (x - c); z = t; diff --git a/ngraph/core/reference/src/runtime/reference/einsum.cpp b/ngraph/core/reference/src/runtime/reference/einsum.cpp index e1356788c5e..51352ddadac 100644 --- a/ngraph/core/reference/src/runtime/reference/einsum.cpp +++ b/ngraph/core/reference/src/runtime/reference/einsum.cpp @@ -436,8 +436,7 @@ namespace ngraph ngraph::runtime::reference::sum(input_ptr->get_data_ptr(), output_ptr->get_data_ptr(), input_shape, - reduced_axes, - false); + reduced_axes); // update a vector of inputs and input subscripts inputs[input_ind] = output_ptr; @@ -743,8 +742,7 @@ namespace ngraph ngraph::runtime::reference::sum(mul_output->get_data_ptr(), result->get_data_ptr(), mul_output->get_shape(), - reduced_axes, - false); + reduced_axes); inputs[input_ind] = result; input_subscripts[input_ind] = resultant_subscript; } diff --git a/ngraph/core/src/op/reduce_prod.cpp b/ngraph/core/src/op/reduce_prod.cpp index bbacf23cc7e..c7a78beb51f 100644 --- a/ngraph/core/src/op/reduce_prod.cpp +++ b/ngraph/core/src/op/reduce_prod.cpp @@ -6,6 +6,7 @@ #include #include "itt.hpp" #include "ngraph/graph_util.hpp" +#include "ngraph/op/util/evaluate_helpers.hpp" #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/product.hpp" #include "ngraph/shape_util.hpp" @@ -45,7 +46,7 @@ namespace reduce_prod { out->set_shape(reduce(arg->get_shape(), axes, keep_dims)); runtime::reference::product( - arg->get_data_ptr(), out->get_data_ptr(), arg->get_shape(), axes, keep_dims); + arg->get_data_ptr(), out->get_data_ptr(), arg->get_shape(), axes); return true; } @@ -75,8 +76,11 @@ bool op::v1::ReduceProd::evaluate(const HostTensorVector& outputs, NGRAPH_OP_SCOPE(v1_ReduceProd_evaluate); NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2)); NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); - return reduce_prod::evaluate_product( - inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); + + const auto reduction_axes = get_normalized_axes_from_tensor( + inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name()); + + return reduce_prod::evaluate_product(inputs[0], outputs[0], reduction_axes, get_keep_dims()); } bool op::v1::ReduceProd::has_evaluate() const diff --git a/ngraph/core/src/op/reduce_sum.cpp b/ngraph/core/src/op/reduce_sum.cpp index 70c2b39ddc0..50b87763e84 100644 --- a/ngraph/core/src/op/reduce_sum.cpp +++ b/ngraph/core/src/op/reduce_sum.cpp @@ -3,14 +3,15 @@ // #include "ngraph/op/reduce_sum.hpp" +#include #include "itt.hpp" #include "ngraph/graph_util.hpp" #include "ngraph/op/broadcast.hpp" +#include "ngraph/op/util/evaluate_helpers.hpp" #include "ngraph/op/util/op_types.hpp" #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/sum.hpp" #include "ngraph/shape_util.hpp" -#include "util/evaluate_helpers.hpp" using namespace std; using namespace ngraph; @@ -47,7 +48,7 @@ namespace reduce_sum { out->set_shape(reduce(arg->get_shape(), axes, keep_dims)); runtime::reference::sum( - arg->get_data_ptr(), out->get_data_ptr(), arg->get_shape(), axes, keep_dims); + arg->get_data_ptr(), out->get_data_ptr(), arg->get_shape(), axes); return true; } @@ -75,13 +76,11 @@ bool 
op::v1::ReduceSum::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v1_ReduceSum_evaluate); - - NGRAPH_CHECK(inputs.size() == 2, - "The ReduceSum operation expects 2 input tensors. Got: ", - inputs.size()); + NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2)); + NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); const auto reduction_axes = get_normalized_axes_from_tensor( - inputs[1], get_input_partial_shape(0).rank(), get_friendly_name()); + inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name()); return reduce_sum::evaluate_sum(inputs[0], outputs[0], reduction_axes, get_keep_dims()); } diff --git a/ngraph/core/src/op/selu.cpp b/ngraph/core/src/op/selu.cpp index 680b67fc012..fd9aae0182a 100644 --- a/ngraph/core/src/op/selu.cpp +++ b/ngraph/core/src/op/selu.cpp @@ -5,65 +5,53 @@ #include "ngraph/op/selu.hpp" #include "itt.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/exp.hpp" -#include "ngraph/op/maximum.hpp" -#include "ngraph/op/minimum.hpp" -#include "ngraph/op/multiply.hpp" -#include "ngraph/op/subtract.hpp" - using namespace std; using namespace ngraph; -NGRAPH_SUPPRESS_DEPRECATED_START - -constexpr NodeTypeInfo op::v0::Selu::type_info; - -op::v0::Selu::Selu() - : FusedOp() -{ -} +NGRAPH_RTTI_DEFINITION(op::v0::Selu, "Selu", 0); op::v0::Selu::Selu(const Output& data, const Output& alpha, const Output& lambda) - : FusedOp({data, alpha, lambda}) + : Op({data, alpha, lambda}) { constructor_validate_and_infer_types(); } -void ngraph::op::v0::Selu::pre_validate_and_infer_types() +void op::v0::Selu::validate_and_infer_types() { - set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); + NGRAPH_OP_SCOPE(v0_Selu_validate_and_infer_types); + auto data_et = get_input_element_type(0); + auto alpha_et = get_input_element_type(1); + auto lambda_et = get_input_element_type(2); + auto result_et = element::dynamic; + + NODE_VALIDATION_CHECK(this, + element::Type::merge(result_et, result_et, data_et) && + element::Type::merge(result_et, result_et, alpha_et) && + element::Type::merge(result_et, result_et, lambda_et), + "Input element types do not match : ", + data_et, + " and ", + alpha_et, + " and ", + lambda_et); + + NODE_VALIDATION_CHECK(this, + result_et.is_dynamic() || result_et.is_real(), + "Input element types must be floating-point. 
Got: ", + result_et); + + set_output_type(0, result_et, get_input_partial_shape(0)); } -bool ngraph::op::v0::Selu::visit_attributes(AttributeVisitor& visitor) +bool op::v0::Selu::visit_attributes(AttributeVisitor& visitor) { NGRAPH_OP_SCOPE(v0_Selu_visit_attributes); return true; } -OutputVector op::v0::Selu::decompose_op() const -{ - const auto data = input_value(0); - const auto alpha = input_value(1); - const auto lambda = input_value(2); - const auto zero_node = op::Constant::create(data.get_element_type(), Shape{1}, {0}); - - // lambda * ((max(data, 0) + (alpha * exp(min(data, 0)) - alpha)) - return {std::make_shared( - lambda, - std::make_shared( - std::make_shared(data, zero_node), - std::make_shared( - std::make_shared( - alpha, - std::make_shared(std::make_shared(data, zero_node))), - alpha)))}; -} - shared_ptr op::v0::Selu::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_Selu_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); + return make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); } diff --git a/ngraph/core/src/op/space_to_batch.cpp b/ngraph/core/src/op/space_to_batch.cpp index 75c6ad435b0..88522853652 100644 --- a/ngraph/core/src/op/space_to_batch.cpp +++ b/ngraph/core/src/op/space_to_batch.cpp @@ -21,7 +21,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::v1::SpaceToBatch::type_info; +NGRAPH_RTTI_DEFINITION(op::v1::SpaceToBatch, "SpaceToBatch", 1); ngraph::op::v1::SpaceToBatch::SpaceToBatch(const ngraph::Output& data, const ngraph::Output& block_shape, @@ -49,13 +49,13 @@ void op::v1::SpaceToBatch::validate_and_infer_types() NODE_VALIDATION_CHECK(this, pads_begin_type.is_integral_number(), - "crops_begin must be an integral number but got (", + "pads_begin must be an integral number but got (", pads_begin_type, ")."); NODE_VALIDATION_CHECK(this, pads_end_type.is_integral_number(), - "crops_end must be an integral number but got (", + "pads_end must be an integral number but got (", pads_end_type, ")."); diff --git a/ngraph/core/src/op/util/arithmetic_reduction.cpp b/ngraph/core/src/op/util/arithmetic_reduction.cpp index beb40801ea9..67b7abeaf20 100644 --- a/ngraph/core/src/op/util/arithmetic_reduction.cpp +++ b/ngraph/core/src/op/util/arithmetic_reduction.cpp @@ -16,7 +16,7 @@ op::util::ArithmeticReduction::ArithmeticReduction() {} op::util::ArithmeticReduction::ArithmeticReduction(const Output& arg, const Output& reduction_axes) - : Op({arg, reduction_axes}) + : ReductionBase(arg, reduction_axes) { } @@ -49,51 +49,15 @@ void op::util::ArithmeticReduction::set_reduction_axes(const AxisSet& reduction_ void op::util::ArithmeticReduction::validate_and_infer_types() { NGRAPH_OP_SCOPE(util_ArithmeticReduction_validate_and_infer_types); - auto input_shape = get_input_partial_shape(0); - const auto input_rank = input_shape.rank(); - PartialShape result_shape{PartialShape::dynamic()}; - - auto axes = get_constant_from_source(input_value(1)); - if (input_rank.is_static() && axes) - { - AxisSet reduction_axes; - const auto reduction_axes_val = axes->cast_vector(); - for (auto axis : reduction_axes_val) - { - try - { - axis = normalize_axis(this, axis, input_rank); - } - catch (const ngraph_error&) - { - NODE_VALIDATION_CHECK(this, - false, - "Reduction axis (", - axis, - ") is out of bounds ", - "(argument shape: ", - input_shape, - ", reduction axes: ", - reduction_axes, - ")"); - } - reduction_axes.insert(axis); - } - - std::vector 
dims; - for (int64_t i = 0; i < input_rank.get_length(); i++) - { - if (reduction_axes.count(i) == 0) - { - dims.push_back(input_shape[i]); - } - } - - result_shape = PartialShape(dims); - } + const PartialShape& axes_shape = get_input_partial_shape(1); + const Rank axes_rank = axes_shape.rank(); + NODE_VALIDATION_CHECK(this, + axes_rank.compatible(0) || axes_rank.compatible(1), + "Axes input must be a scalar or 1D input. Got: ", + axes_shape); + PartialShape result_shape = infer_reduction_output_shape(false); set_input_is_relevant_to_shape(1); - set_output_type(0, get_input_element_type(0), result_shape); } diff --git a/ngraph/core/src/op/util/arithmetic_reductions_keep_dims.cpp b/ngraph/core/src/op/util/arithmetic_reductions_keep_dims.cpp index dcb420c0838..0a7a5eeb792 100644 --- a/ngraph/core/src/op/util/arithmetic_reductions_keep_dims.cpp +++ b/ngraph/core/src/op/util/arithmetic_reductions_keep_dims.cpp @@ -32,61 +32,28 @@ bool ngraph::op::util::ArithmeticReductionKeepDims::visit_attributes(AttributeVi void op::util::ArithmeticReductionKeepDims::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_util_ArithmeticReductionKeepDims_validate_and_infer_types); - if (m_keep_dims) - { - auto input_shape = get_input_partial_shape(0); - auto input_rank = input_shape.rank(); - PartialShape result_shape{PartialShape::dynamic()}; - if (input_rank.is_static()) - result_shape = PartialShape::dynamic(input_rank); + const element::Type& data_et = get_input_element_type(0); + const PartialShape& axes_shape = get_input_partial_shape(1); + const element::Type& axes_et = get_input_element_type(1); - const auto& axes = get_constant_from_source(input_value(1)); - if (input_rank.is_static() && axes) - { - AxisSet reduction_axes; - auto reduction_axes_val = axes->cast_vector(); - for (auto axis : reduction_axes_val) - { - try - { - axis = normalize_axis(this, axis, input_rank); - } - catch (const ngraph_error&) - { - NODE_VALIDATION_CHECK(this, - false, - "Reduction axis (", - axis, - ") is out of bounds ", - "(argument shape: ", - input_shape, - ", reduction axes: ", - reduction_axes, - ")"); - } - reduction_axes.insert(axis); - } + NODE_VALIDATION_CHECK(this, + data_et.is_real() || data_et.is_integral_number(), + "Element type of data input must be numeric. Got: ", + data_et); - std::vector dims; - for (int64_t i = 0; i < input_rank.get_length(); i++) - { - if (reduction_axes.count(i) == 0) - { - dims.push_back(input_shape[i]); - } - else - { - dims.emplace_back(Dimension{1}); - } - } - result_shape = PartialShape(dims); - } - set_input_is_relevant_to_shape(1); - set_output_type(0, get_input_element_type(0), result_shape); - } - else - { - ArithmeticReduction::validate_and_infer_types(); - } + NODE_VALIDATION_CHECK(this, + axes_et.is_integral_number(), + "Element type of axes input must be integer. Got: ", + axes_et); + + const Rank axes_rank = axes_shape.rank(); + NODE_VALIDATION_CHECK(this, + axes_rank.compatible(0) || axes_rank.compatible(1), + "Axes input must be a scalar or 1D input. 
Got: ", + axes_shape); + + PartialShape result_shape = infer_reduction_output_shape(m_keep_dims); + set_input_is_relevant_to_shape(1); + set_output_type(0, data_et, result_shape); } diff --git a/ngraph/core/src/op/util/evaluate_helpers.cpp b/ngraph/core/src/op/util/evaluate_helpers.cpp new file mode 100644 index 00000000000..ac55bb5fe21 --- /dev/null +++ b/ngraph/core/src/op/util/evaluate_helpers.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ngraph/op/util/evaluate_helpers.hpp" + +namespace ngraph +{ + AxisSet get_normalized_axes_from_tensor(const HostTensorPtr tensor, + const ngraph::Rank& rank, + const std::string& node_description) + { + const auto axes_vector = host_tensor_2_vector(tensor); + const auto normalized_axes = ngraph::normalize_axes(node_description, axes_vector, rank); + return AxisSet{normalized_axes}; + } +} // namespace ngraph diff --git a/ngraph/core/src/op/util/logical_reduction.cpp b/ngraph/core/src/op/util/logical_reduction.cpp index bd3895834d8..5d5ad8222ea 100644 --- a/ngraph/core/src/op/util/logical_reduction.cpp +++ b/ngraph/core/src/op/util/logical_reduction.cpp @@ -15,17 +15,17 @@ NGRAPH_RTTI_DEFINITION(op::util::LogicalReduction, "LogicalReduction", 1); op::util::LogicalReduction::LogicalReduction() {} op::util::LogicalReduction::LogicalReduction(const Output& arg, const AxisSet& reduction_axes) - : Op({arg, - op::Constant::create( - element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()) - ->output(0)}) + : ReductionBase(arg, + op::Constant::create( + element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()) + ->output(0)) { add_provenance_group_member(input_value(1).get_node_shared_ptr()); } op::util::LogicalReduction::LogicalReduction(const Output& arg, const Output& reduction_axes) - : Op({arg, reduction_axes}) + : ReductionBase(arg, reduction_axes) { } @@ -54,59 +54,20 @@ void op::util::LogicalReduction::set_reduction_axes(const AxisSet& reduction_axe void op::util::LogicalReduction::validate_and_infer_types() { NGRAPH_OP_SCOPE(util_LogicalReduction_validate_and_infer_types); - auto input_shape = get_input_partial_shape(0); - auto input_rank = input_shape.rank(); - PartialShape result_shape{PartialShape::dynamic()}; + const element::Type& data_et = get_input_element_type(0); + const PartialShape& axes_shape = get_input_partial_shape(1); - set_input_is_relevant_to_shape(1); + NODE_VALIDATION_CHECK( + this, data_et.compatible(element::boolean), "Element type of data input must be boolean."); + const Rank axes_rank = axes_shape.rank(); NODE_VALIDATION_CHECK(this, - get_input_element_type(0).compatible(element::boolean), - "Input element type must be boolean."); + axes_rank.compatible(0) || axes_rank.compatible(1), + "Axes input must be a scalar or 1D input. 
Got: ", + axes_shape); - set_output_type(0, element::boolean, result_shape); - - if (input_rank.is_dynamic()) - return; - - if (const auto axes_const = get_constant_from_source(input_value(1))) - { - AxisSet reduction_axes; - auto reduction_axes_val = axes_const->cast_vector(); - for (auto axis : reduction_axes_val) - { - try - { - axis = normalize_axis(this, axis, input_rank); - } - catch (const ngraph_error&) - { - NODE_VALIDATION_CHECK(this, - false, - "Reduction axis (", - axis, - ") is out of bounds ", - "(argument shape: ", - input_shape, - ", reduction axes: ", - reduction_axes, - ")"); - } - reduction_axes.insert(axis); - } - - std::vector dims; - for (int64_t i = 0; i < input_rank.get_length(); i++) - { - if (reduction_axes.count(i) == 0) - { - dims.push_back(input_shape[i]); - } - } - - result_shape = PartialShape(dims); - } - - set_output_type(0, element::boolean, result_shape); + PartialShape result_shape = infer_reduction_output_shape(false); + set_input_is_relevant_to_shape(1); + set_output_type(0, data_et, result_shape); } diff --git a/ngraph/core/src/op/util/logical_reduction_keep_dims.cpp b/ngraph/core/src/op/util/logical_reduction_keep_dims.cpp index ea7188eaa84..f2d366e0a97 100644 --- a/ngraph/core/src/op/util/logical_reduction_keep_dims.cpp +++ b/ngraph/core/src/op/util/logical_reduction_keep_dims.cpp @@ -32,63 +32,26 @@ bool ngraph::op::util::LogicalReductionKeepDims::visit_attributes(AttributeVisit void op::util::LogicalReductionKeepDims::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_util_LogicalReductionKeepDims_validate_and_infer_types); - if (m_keep_dims) - { - const auto input_shape = get_input_partial_shape(0); - const auto input_rank = input_shape.rank(); - PartialShape result_shape{PartialShape::dynamic(input_rank)}; - set_input_is_relevant_to_shape(1); - set_output_type(0, get_input_element_type(0), result_shape); + const element::Type& data_et = get_input_element_type(0); + const PartialShape& axes_shape = get_input_partial_shape(1); + const element::Type& axes_et = get_input_element_type(1); - if (input_shape.is_dynamic()) - return; + NODE_VALIDATION_CHECK( + this, data_et.compatible(element::boolean), "Element type of data input must be boolean."); - if (auto axes_const = get_constant_from_source(input_value(1))) - { - AxisSet reduction_axes; - auto reduction_axes_val = axes_const->cast_vector(); - for (auto axis : reduction_axes_val) - { - try - { - axis = normalize_axis(this, axis, input_rank); - } - catch (const ngraph_error&) - { - NODE_VALIDATION_CHECK(this, - false, - "Reduction axis (", - axis, - ") is out of bounds ", - "(argument shape: ", - input_shape, - ", reduction axes: ", - reduction_axes, - ")"); - } - reduction_axes.insert(axis); - } + NODE_VALIDATION_CHECK(this, + axes_et.is_integral_number(), + "Element type of axes input must be integer. Got: ", + axes_et); - std::vector dims; - for (int64_t i = 0; i < input_rank.get_length(); i++) - { - if (reduction_axes.count(i) == 0) - { - dims.push_back(input_shape[i]); - } - else - { - dims.emplace_back(Dimension{1}); - } - } - result_shape = PartialShape(dims); - } + const Rank axes_rank = axes_shape.rank(); + NODE_VALIDATION_CHECK(this, + axes_rank.compatible(0) || axes_rank.compatible(1), + "Axes input must be a scalar or 1D input. 
Got: ", + axes_shape); - set_output_type(0, get_input_element_type(0), result_shape); - } - else - { - LogicalReduction::validate_and_infer_types(); - } + PartialShape result_shape = infer_reduction_output_shape(m_keep_dims); + set_input_is_relevant_to_shape(1); + set_output_type(0, data_et, result_shape); } diff --git a/ngraph/core/src/op/util/reduction_base.cpp b/ngraph/core/src/op/util/reduction_base.cpp new file mode 100644 index 00000000000..bdfead647fb --- /dev/null +++ b/ngraph/core/src/op/util/reduction_base.cpp @@ -0,0 +1,74 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ngraph/op/util/reduction_base.hpp" +#include "itt.hpp" +#include "ngraph/op/constant.hpp" +#include "ngraph/validation_util.hpp" + +using namespace std; +using namespace ngraph; + +NGRAPH_RTTI_DEFINITION(op::util::ReductionBase, "ReductionBase", 0); + +op::util::ReductionBase::ReductionBase() {} + +op::util::ReductionBase::ReductionBase(const Output& arg, const Output& reduction_axes) + : Op({arg, reduction_axes}) +{ +} + +PartialShape op::util::ReductionBase::infer_reduction_output_shape(const bool keep_dims) +{ + const PartialShape& data_ps = get_input_partial_shape(0); + PartialShape result_ps{PartialShape::dynamic()}; + Rank data_rank = data_ps.rank(); + + if (data_rank.is_static() && keep_dims) + { + result_ps = PartialShape::dynamic(data_rank); + } + + const auto& axes = get_constant_from_source(input_value(1)); + if (data_rank.is_static() && axes) + { + AxisSet reduction_axes; + auto reduction_axes_val = axes->cast_vector(); + for (auto axis : reduction_axes_val) + { + try + { + axis = normalize_axis(this, axis, data_rank); + } + catch (const ngraph_error&) + { + NODE_VALIDATION_CHECK(this, + false, + "Reduction axis (", + axis, + ") is out of bounds ", + "(argument shape: ", + data_ps, + ", reduction axes: ", + reduction_axes, + ")"); + } + reduction_axes.insert(axis); + } + std::vector dims; + for (int64_t i = 0; i < data_rank.get_length(); i++) + { + if (reduction_axes.count(i) == 0) + { + dims.push_back(data_ps[i]); + } + else if (keep_dims) + { + dims.emplace_back(Dimension{1}); + } + } + result_ps = PartialShape(dims); + } + return result_ps; +} diff --git a/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp b/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp index 2d99e69da5d..d0a7ea4020f 100644 --- a/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp +++ b/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp @@ -19,19 +19,6 @@ namespace ngraph { OutputVector global_average_pool(const Node& node) { - auto data = node.get_ng_inputs()[0]; - auto data_rank = data.get_partial_shape().rank(); - - NGRAPH_CHECK(data_rank.is_static(), - "The input data tensor's rank has to be known (static)"); - - auto data_rank_value = data_rank.get_length(); - - NGRAPH_CHECK(data_rank_value > 2, - "The input data tensor's rank has to be greater than 2." - "Provided data rank is: ", - data_rank_value); - // Generate axes for reduce operation which contain all spatial dims indexes. 
// Examples: // Input shape: [N, C, H, W] @@ -41,11 +28,22 @@ namespace ngraph // Input shape: [N, C, H, W, D] // Input spatial dimensions are H, W and D // Expected spatial dims indexes: [2, 3, 4] - size_t data_spatial_rank = data_rank_value - 2; - auto reduce_axes_vector = std::vector(data_spatial_rank); - std::iota(reduce_axes_vector.begin(), reduce_axes_vector.end(), 2); - auto reduce_axes = default_opset::Constant::create( - element::i64, Shape{data_spatial_rank}, reduce_axes_vector); + auto data = node.get_ng_inputs()[0]; + + const auto zero_node = + default_opset::Constant::create(element::i64, Shape{}, {0}); + const auto one_node = + default_opset::Constant::create(element::i64, Shape{}, {1}); + const auto two_node = + default_opset::Constant::create(element::i64, Shape{}, {2}); + + const auto data_shape = std::make_shared(data); + const auto data_rank = std::make_shared(data_shape); + const auto data_rank_as_scalar = + std::make_shared(data_rank); + + const auto reduce_axes = std::make_shared( + two_node, data_rank_as_scalar, one_node, element::i64); return {std::make_shared(data, reduce_axes, true)}; } diff --git a/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp b/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp index 866b7ac0201..49fec836f21 100644 --- a/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp +++ b/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp @@ -19,19 +19,6 @@ namespace ngraph { OutputVector global_max_pool(const Node& node) { - auto data = node.get_ng_inputs()[0]; - auto data_rank = data.get_partial_shape().rank(); - - NGRAPH_CHECK(data_rank.is_static(), - "The input data tensor's rank has to be known (static)"); - - auto data_rank_value = data_rank.get_length(); - - NGRAPH_CHECK(data_rank_value > 2, - "The input data tensor's rank has to be greater than 2." - "Provided data rank is: ", - data_rank_value); - // Generate axes for reduce operation which contain all spatial dims indexes. 
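Both global pooling importers now derive the reduction axes from a ShapeOf -> rank -> Range(2, rank, 1) subgraph, so the static-rank checks above could be removed. The axes that subgraph produces are the same spatial indexes the old std::iota code built; a plain sketch of that sequence with illustrative naming:

#include <cstdint>
#include <numeric>
#include <vector>

// Rank 4 ([N, C, H, W]) gives {2, 3}; rank 5 ([N, C, H, W, D]) gives {2, 3, 4}.
std::vector<int64_t> spatial_axes(int64_t rank)
{
    std::vector<int64_t> axes(rank > 2 ? rank - 2 : 0);
    std::iota(axes.begin(), axes.end(), 2); // equivalent to Range(2, rank, step = 1)
    return axes;
}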
// Examples: // Input shape: [N, C, H, W] @@ -41,11 +28,22 @@ namespace ngraph // Input shape: [N, C, H, W, D] // Input spatial dimensions are H, W and D // Expected spatial dims indexes: [2, 3, 4] - size_t data_spatial_rank = data_rank_value - 2; - auto reduce_axes_vector = std::vector(data_spatial_rank); - std::iota(reduce_axes_vector.begin(), reduce_axes_vector.end(), 2); - auto reduce_axes = default_opset::Constant::create( - element::i64, Shape{data_spatial_rank}, reduce_axes_vector); + auto data = node.get_ng_inputs()[0]; + + const auto zero_node = + default_opset::Constant::create(element::i64, Shape{}, {0}); + const auto one_node = + default_opset::Constant::create(element::i64, Shape{}, {1}); + const auto two_node = + default_opset::Constant::create(element::i64, Shape{}, {2}); + + const auto data_shape = std::make_shared(data); + const auto data_rank = std::make_shared(data_shape); + const auto data_rank_as_scalar = + std::make_shared(data_rank); + + const auto reduce_axes = std::make_shared( + two_node, data_rank_as_scalar, one_node, element::i64); return {std::make_shared(data, reduce_axes, true)}; } diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index e25a40de184..c3c5ab80405 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -73,6 +73,8 @@ set(SRC op_eval/non_zero.cpp op_eval/reduce_l1.cpp op_eval/reduce_l2.cpp + op_eval/reduce_prod.cpp + op_eval/reduce_sum.cpp op_eval/roi_align.cpp op_eval/roi_pooling.cpp op_eval/round.cpp @@ -172,6 +174,13 @@ set(SRC type_prop/read_value.cpp type_prop/reduce_l1.cpp type_prop/reduce_l2.cpp + type_prop/reduce_logical_and.cpp + type_prop/reduce_logical_or.cpp + type_prop/reduce_max.cpp + type_prop/reduce_mean.cpp + type_prop/reduce_min.cpp + type_prop/reduce_prod.cpp + type_prop/reduce_sum.cpp type_prop/reorg_yolo.cpp type_prop/reshape.cpp type_prop/result.cpp @@ -188,6 +197,7 @@ set(SRC type_prop/scatter_nd_update.cpp type_prop/scatter_update.cpp type_prop/select.cpp + type_prop/selu.cpp type_prop/shape_of.cpp type_prop/shuffle_channels.cpp type_prop/softmax.cpp @@ -198,9 +208,6 @@ set(SRC type_prop/squared_difference.cpp type_prop/squeeze.cpp type_prop/swish.cpp - type_prop/reduce_mean.cpp - type_prop/reduce_prod.cpp - type_prop/reduce_sum.cpp type_prop/ti.cpp type_prop/tile.cpp type_prop/top_k.cpp @@ -265,8 +272,10 @@ set(SRC visitors/op/reverse_sequence.cpp visitors/op/rnn_cell.cpp visitors/op/roi_pooling.cpp + visitors/op/selu.cpp visitors/op/shuffle_channels.cpp visitors/op/softmax.cpp + visitors/op/space_to_batch.cpp visitors/op/space_to_depth.cpp visitors/op/split.cpp visitors/op/squared_difference.cpp @@ -347,6 +356,7 @@ set(MULTI_TEST_SRC backend/constant.in.cpp backend/convert.in.cpp backend/convert_like.in.cpp + backend/convolution_backprop.in.cpp backend/convolution.in.cpp backend/binary_convolution.in.cpp backend/clamp.in.cpp @@ -430,12 +440,14 @@ set(MULTI_TEST_SRC backend/round.in.cpp backend/scatter_nd_update.in.cpp backend/select.in.cpp + backend/selu.in.cpp backend/shape_of.in.cpp backend/sigmoid.in.cpp backend/sign.in.cpp backend/sin.in.cpp backend/sinh.in.cpp backend/softmax.in.cpp + backend/space_to_batch.in.cpp backend/split.in.cpp backend/sqrt.in.cpp backend/squared_difference.in.cpp diff --git a/ngraph/test/backend/convolution_backprop.in.cpp b/ngraph/test/backend/convolution_backprop.in.cpp new file mode 100644 index 00000000000..04269995956 --- /dev/null +++ b/ngraph/test/backend/convolution_backprop.in.cpp @@ -0,0 +1,1221 @@ 
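The expected shapes in the transposed-convolution tests below follow the usual ConvolutionBackpropData relation per spatial dimension, out = stride * (in - 1) + dilation * (filter - 1) + 1 - pads_begin - pads_end + output_padding. The helper below is only an illustrative way to check the expectations against that formula and is not part of the test suite:

#include <cstdint>

constexpr int64_t deconv_out_dim(int64_t in, int64_t filter, int64_t stride,
                                 int64_t dilation, int64_t pads_begin,
                                 int64_t pads_end, int64_t output_padding)
{
    return stride * (in - 1) + dilation * (filter - 1) + 1 - pads_begin - pads_end +
           output_padding;
}

// Matches the 1D base case (input 4, filter 3 -> output 6) and the strided
// 1D case (input 2, filter 3, stride 2 -> output 5) in the tests below.
static_assert(deconv_out_dim(4, 3, 1, 1, 0, 0, 0) == 6, "1D base case");
static_assert(deconv_out_dim(2, 3, 2, 1, 0, 0, 0) == 5, "1D strided case");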
+//***************************************************************************** +// Copyright 2017-2021 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "ngraph/runtime/tensor.hpp" +#include "runtime/backend.hpp" +#include "util/all_close.hpp" +#include "util/all_close_f.hpp" +#include "util/engine/test_engines.hpp" +#include "util/known_element_types.hpp" +#include "util/ndarray.hpp" +#include "util/test_case.hpp" +#include "util/test_control.hpp" +#include "util/test_tools.hpp" + +using namespace std; +using namespace ngraph; + +static string s_manifest = "${MANIFEST}"; +using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); + +static void ConvolutionBackpropTest(const std::vector& inputs, + const Shape inputs_shape, + const std::vector& filters, + const Shape filter_shape, + const std::vector& outputs, + const Shape outputs_shape, + const Strides& strides, + const CoordinateDiff& padding, + const Strides& dilations, + const CoordinateDiff& output_padding) +{ + const CoordinateDiff pads_begin{padding}; + const CoordinateDiff pads_end{padding}; + const op::PadType auto_pad{op::PadType::EXPLICIT}; + const CoordinateDiff out_padding{output_padding}; + + auto inputs_param = make_shared(element::f32, inputs_shape); + auto filters_param = make_shared(element::f32, filter_shape); + auto conv = make_shared(inputs_param, + filters_param, + strides, + pads_begin, + pads_end, + dilations, + auto_pad, + out_padding); + auto f = make_shared(conv, ParameterVector{inputs_param, filters_param}); + + auto test_case = test::TestCase(f); + test_case.add_input(inputs); + test_case.add_input(filters); + test_case.add_expected_output(outputs_shape, outputs); + test_case.run(); +} + +// --------------------- 1D convolution ------------------------------------------ +// clang-format off +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_1D_1batch_1channel) +{ + const Strides strides{1}; + const CoordinateDiff padding{0}; + const Strides dilations{1}; + const CoordinateDiff output_padding{0}; + + const Shape inputs_shape{1, 1, 4}; + const std::vector inputs{5.0f, 6.0f, 7.0f, 2.0f}; + + const Shape filter_shape{1, 1, 3}; + const std::vector filters{2.0f, 0.0f, 1.0f}; + + const Shape outputs_shape{1, 1, 6}; + const std::vector outputs{10.0f, 12.0f, 19.0f, 10.0f, 7.0f, 2.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_1D_1batch_1channel_padding) +{ + const Strides strides{1}; + const CoordinateDiff padding{1}; + const Strides dilations{1}; + const CoordinateDiff output_padding{0}; + + const Shape inputs_shape{1, 1, 4}; + const std::vector inputs{5.0f, 6.0f, 7.0f, 2.0f}; + + const Shape filter_shape{1, 1, 3}; + const std::vector filters{2.0f, 0.0f, 1.0f}; + + const Shape 
outputs_shape{1, 1, 4}; + const std::vector outputs{12.0f, 19.0f, 10.0f, 7.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_1D_1batch_1channel_stride) +{ + const Strides strides{2}; + const CoordinateDiff padding{0}; + const Strides dilations{1}; + const CoordinateDiff output_padding{0}; + + const Shape inputs_shape{1, 1, 2}; + const std::vector inputs{5.0f, 7.0f}; + + const Shape filter_shape{1, 1, 3}; + const std::vector filters{2.0f, 0.0f, 1.0f}; + + const Shape outputs_shape{1, 1, 5}; + const std::vector outputs{10.0f, 0.0f, 19.0f, 0.0f, 7.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_1D_1batch_1channel_output_padding) +{ + const Strides strides{1}; + const CoordinateDiff padding{1}; + const Strides dilations{1}; + const CoordinateDiff output_padding{1}; + + const Shape inputs_shape{1, 1, 4}; + const std::vector inputs{5.0f, 6.0f, 7.0f, 2.0f}; + + const Shape filter_shape{1, 1, 3}; + const std::vector filters{2.0f, 0.0f, 1.0f}; + + const Shape outputs_shape{1, 1, 5}; + const std::vector outputs{12.0f, 19.0f, 10.0f, 7.0f, 2.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_1D_1batch_1channel_dilation) +{ + const Strides strides{1}; + const CoordinateDiff padding{0}; + const Strides dilations{2}; + const CoordinateDiff output_padding{0}; + + const Shape inputs_shape{1, 1, 3}; + const std::vector inputs{8.0f, 5.0f, 1.0f}; + + const Shape filter_shape{1, 1, 3}; + const std::vector filters{2.0f, 0.0f, 1.0f}; + + const Shape outputs_shape{1, 1, 7}; + const std::vector outputs{16.0f, 10.0f, 2.0f, 0.0f, 8.0f, 5.0f, 1.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_1D_1batch_1channel_padding_stride_dilation) +{ + const Strides strides{2}; + const CoordinateDiff padding{2}; + const Strides dilations{2}; + const CoordinateDiff output_padding{0}; + + const Shape inputs_shape{1, 1, 4}; + const std::vector inputs{3.0f, 9.0f, 1.0f, 2.0f}; + + const Shape filter_shape{1, 1, 3}; + const std::vector filters{2.0f, 0.0f, 1.0f}; + + const Shape outputs_shape{1, 1, 7}; + const std::vector outputs{18.0f, 0.0f, 5.0f, 0.0f, 13.0f, 0.0f, 1.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_1D_1batch_2channel) +{ + const Strides strides{1}; + const CoordinateDiff padding{0}; + const Strides dilations{1}; + const CoordinateDiff output_padding{0}; + + const Shape inputs_shape{1, 1, 2}; + const std::vector inputs{10.0f, 3.0f}; + + const Shape filter_shape{1, 2, 3}; + const std::vector filters{ + // channel 1 + 2.0f, 0.0f, 1.0f, + // channel 2 + 1.0f, 0.0f, 2.0f}; + + const Shape outputs_shape{1, 2, 4}; + const std::vector outputs{ + // channel 1 + 20.0f, 6.0f, 10.0f, 3.0f, + // channel 2 + 10.0f, 3.0f, 20.0f, 6.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, 
output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_1D_1batch_2filter) +{ + const Strides strides{1}; + const CoordinateDiff padding{0}; + const Strides dilations{1}; + const CoordinateDiff output_padding{0}; + + const Shape inputs_shape{1, 2, 2}; + const std::vector inputs{ + // channel 1 + 4.0f, 7.0f, + // channel 2 + 5.0f, 5.0f}; + + const Shape filter_shape{2, 1, 3}; + const std::vector filters{ + // filter 1 + 2.0f, 0.0f, 1.0f, + // filter 2 + 1.0f, 0.0f, 2.0f}; + + const Shape outputs_shape{1, 1, 4}; + const std::vector outputs{13.0f, 19.0f, 14.0f, 17.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_1D_2batch_1channel) +{ + const Strides strides{1}; + const CoordinateDiff padding{0}; + const Strides dilations{1}; + const CoordinateDiff output_padding{0}; + + const Shape inputs_shape{2, 1, 2}; + const std::vector inputs{ + // batch 1 + 1.0f, 3.0f, + // batch 2 + 2.0f, 2.0f}; + + const Shape filter_shape{1, 1, 3}; + const std::vector filters{2.0f, 0.0f, 1.0f}; + + const Shape outputs_shape{2, 1, 4}; + const std::vector outputs{ + // batch 1 + 2.0f, 6.0f, 1.0f, 3.0f, + // batch 2 + 4.0f, 4.0f, 2.0f, 2.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +// --------------------- 2D convolution ------------------------------------------ +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_2D_1batch_1channel) +{ + const Strides strides{1, 1}; + const CoordinateDiff padding{0, 0}; + const Strides dilations{1, 1}; + const CoordinateDiff output_padding{0, 0}; + + const Shape inputs_shape{1, 1, 2, 2}; + const std::vector inputs{1.0f, 3.0f, + 7.0f, 5.0f}; + + const Shape filter_shape{1, 1, 3, 3}; + const std::vector filters{1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 3.0f, 2.0f, 1.0f}; + + const Shape outputs_shape{1, 1, 4, 4}; + const std::vector outputs{1.0f, 5.0f, 9.0f, 9.0f, + 7.0f, 20.0f, 34.0f, 15.0f, + 3.0f, 18.0f, 12.0f, 3.0f, + 21.0f, 29.0f, 17.0f, 5.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_2D_1batch_1channel_output_padding) +{ + const Strides strides{1, 1}; + const CoordinateDiff padding{1, 1}; + const Strides dilations{1, 1}; + const CoordinateDiff output_padding{1, 1}; + + const Shape inputs_shape{1, 1, 2, 2}; + const std::vector inputs{1.0f, 3.0f, + 7.0f, 5.0f}; + + const Shape filter_shape{1, 1, 3, 3}; + const std::vector filters{1.0f, 2.0f, 3.0f, + 1.0f, 1.0f, 1.0f, + 3.0f, 2.0f, 1.0f}; + + const Shape outputs_shape{1, 1, 3, 3}; + const std::vector outputs{23.0f, 35.0f, 18.0f, + 23.0f, 19.0f, 8.0f, + 29.0f, 17.0f, 5.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_2D_1batch_1channel_padding) +{ + const Strides strides{1, 1}; + const CoordinateDiff padding{1, 1}; + const Strides dilations{1, 1}; + const CoordinateDiff output_padding{0, 0}; + + const Shape inputs_shape{1, 1, 4, 4}; + const std::vector inputs{1.0f, 3.0f, 5.0f, 7.0f, + 7.0f, 5.0f, 3.0f, 1.0f, + 2.0f, 4.0f, 6.0f, 8.0f, + 8.0f, 6.0f, 4.0f, 2.0f}; + + const Shape filter_shape{1, 1, 3, 3}; + const std::vector filters{1.0f, 2.0f, 3.0f, 
+ 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f}; + + const Shape outputs_shape{1, 1, 4, 4}; + const std::vector outputs{20.0f, 37.0f, 27.0f, 18.0f, + 22.0f, 40.0f, 60.0f, 52.0f, + 41.0f, 69.0f, 49.0f, 31.0f, + 18.0f, 26.0f, 34.0f, 22.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_2D_1batch_1channel_stride) +{ + const Strides strides{2, 2}; + const CoordinateDiff padding{0, 0}; + const Strides dilations{1, 1}; + const CoordinateDiff output_padding{0, 0}; + + const Shape inputs_shape{1, 1, 2, 2}; + const std::vector inputs{2.0f, 5.0f, + 4.0f, 3.0f}; + + const Shape filter_shape{1, 1, 3, 3}; + const std::vector filters{1.0f, 2.0f, 3.0f, + 1.0f, 1.0f, 1.0f, + 3.0f, 2.0f, 1.0f}; + + const Shape outputs_shape{1, 1, 5, 5}; + const std::vector outputs{2.0f, 4.0f, 11.0f, 10.0f, 15.0f, + 2.0f, 2.0f, 7.0f, 5.0f, 5.0f, + 10.0f, 12.0f, 32.0f, 16.0f, 14.0f, + 4.0f, 4.0f, 7.0f, 3.0f, 3.0f, + 12.0f, 8.0f, 13.0f, 6.0f, 3.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_2D_1batch_1channel_dilation) +{ + const Strides strides{1, 1}; + const CoordinateDiff padding{0, 0}; + const Strides dilations{2, 2}; + const CoordinateDiff output_padding{0, 0}; + + const Shape inputs_shape{1, 1, 2, 2}; + const std::vector inputs{2.0f, 3.0f, + 4.0f, 3.0f}; + + const Shape filter_shape{1, 1, 3, 3}; + const std::vector filters{1.0f, 2.0f, 3.0f, + 1.0f, 1.0f, 1.0f, + 3.0f, 2.0f, 1.0f}; + + const Shape outputs_shape{1, 1, 6, 6}; + const std::vector outputs{2.f, 3.f, 4.f, 6.f, 6.f, 9.f, + 4.f, 3.f, 8.f, 6.f, 12.f, 9.f, + 2.f, 3.f, 2.f, 3.f, 2.f, 3.f, + 4.f, 3.f, 4.f, 3.f, 4.f, 3.f, + 6.f, 9.f, 4.f, 6.f, 2.f, 3.f, + 12.f, 9.f, 8.f, 6.f, 4.f, 3.f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_2D_1batch_1channel_padding_strides_dilation) +{ + const Strides strides{2, 2}; + const CoordinateDiff padding{2, 2}; + const Strides dilations{2, 2}; + const CoordinateDiff output_padding{0, 0}; + + const Shape inputs_shape{1, 1, 3, 3}; + const std::vector inputs{1.0f, 3.0f, 5.0f, + 7.0f, 5.0f, 3.0f, + 2.0f, 4.0f, 6.0f}; + + const Shape filter_shape{1, 1, 3, 3}; + const std::vector filters{1.0f, 2.0f, 3.0f, + 1.0f, 1.0f, 1.0f, + 3.0f, 2.0f, 1.0f}; + + const Shape outputs_shape{1, 1, 5, 5}; + const std::vector outputs{23.0f, 0.0f, 43.0f, 0.0f, 29.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 31.0f, 0.0f, 57.0f, 0.0f, 45.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 35.0f, 0.0f, 38.0f, 0.0f, 21.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_2D_1batch_2channel) +{ + const Strides strides{1, 1}; + const CoordinateDiff padding{0, 0}; + const Strides dilations{1, 1}; + const CoordinateDiff output_padding{0, 0}; + + const Shape inputs_shape{1, 1, 2, 2}; + const std::vector inputs{1.0f, 3.0f, + 7.0f, 5.0f}; + + const Shape filter_shape{1, 2, 3, 3}; + const std::vector filters{ + // channel 1 + 5.0f, 3.0f, 5.0f, + 1.0f, 3.0f, 1.0f, + 4.0f, 2.0f, 4.0f, + // channel 2 + -5.0f, 3.0f, 5.0f, + 1.0f, -3.0f, 1.0f, + 4.0f, 2.0f, -4.0f}; + + const Shape 
outputs_shape{1, 2, 4, 4}; + const std::vector outputs{ + // channel 1 + 5.0f, 18.0f, 14.0f, 15.0f, + 36.0f, 52.0f, 60.0f, 28.0f, + 11.0f, 40.0f, 32.0f, 17.0f, + 28.0f, 34.0f, 38.0f, 20.0f, + // channel 2 + -5.0f, -12.0f, 14.0f, 15.0f, + -34.0f, -4.0f, 42.0f, 28.0f, + 11.0f, -2.0f, -6.0f, -7.0f, + 28.0f, 34.0f, -18.0f, -20.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_2D_1batch_2filter) +{ + const Strides strides{1, 1}; + const CoordinateDiff padding{0, 0}; + const Strides dilations{1, 1}; + const CoordinateDiff output_padding{0, 0}; + + const Shape inputs_shape{1, 2, 2, 2}; + const std::vector inputs{ + // channel 1 + 1.0f, 3.0f, + 7.0f, 5.0f, + // channel 2 + 2.0f, 4.0f, + 8.0f, 6.0f}; + + const Shape filter_shape{2, 1, 3, 3}; + const std::vector filters{ + // channel 1 + 5.0f, 3.0f, 5.0f, + 1.0f, 3.0f, 1.0f, + 4.0f, 2.0f, 4.0f, + // channel 2 + -5.0f, 3.0f, 5.0f, + 1.0f, -3.0f, 1.0f, + 4.0f, 2.0f, -4.0f}; + + const Shape outputs_shape{1, 1, 4, 4}; + const std::vector outputs{ + -5.0f, 4.0f, 36.0f, 35.0f, + -2.0f, 44.0f, 108.0f, 62.0f, + 27.0f, 42.0f, 22.0f, 7.0f, + 60.0f, 74.0f, 18.0f, -4.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_2D_2batch_2filter) +{ + const Strides strides{1, 1}; + const CoordinateDiff padding{0, 0}; + const Strides dilations{1, 1}; + const CoordinateDiff output_padding{0, 0}; + + const Shape inputs_shape{1, 2, 1, 1}; + const std::vector inputs{ + // channel 1 + 2.0f, + // channel 2 + 3.0f}; + + const Shape filter_shape{2, 2, 2, 2}; + const std::vector filters{ + // batch 0 + // channel 1 + 5.0f, 3.0f, + 1.0f, 3.0f, + // channel 2 + -5.0f, 3.0f, + 1.0f, -3.0f, + // batch 1 + // channel 1 + 5.0f, 3.0f, + 1.0f, 3.0f, + // channel 2 + -5.0f, 3.0f, + 1.0f, -3.0f}; + + const Shape outputs_shape{1, 2, 2, 2}; + const std::vector outputs{ + 25.0f, 15.0f, 5.0f, 15.0f, -25.0f, 15.0f, 5.0f, -15.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_2D_2batch_1channel) +{ + const Strides strides{1, 1}; + const CoordinateDiff padding{0, 0}; + const Strides dilations{1, 1}; + const CoordinateDiff output_padding{0, 0}; + + const Shape inputs_shape{2, 1, 2, 2}; + const std::vector inputs{ + // batch 1 + 1.0f, 3.0f, + 1.0f, 3.0f, + // batch 2 + -1.0f, 3.0f, + 1.0f, 3.0f}; + + const Shape filter_shape{1, 1, 3, 3}; + const std::vector filters{-5.0f, 3.0f, 5.0f, + 1.0f, -3.0f, 1.0f, + 4.0f, 2.0f, -4.0f}; + + const Shape outputs_shape{2, 1, 4, 4}; + const std::vector outputs{ + // batch 1 + -5.0f, -12.0f, 14.0f, 15.0f, + -4.0f, -12.0f, 6.0f, 18.0f, + 5.0f, 14.0f, -6.0f, -9.0f, + 4.0f, 14.0f, 2.0f, -12.0f, + // batch 2 + 5.0f, -18.0f, 4.0f, 15.0f, + -6.0f, -6.0f, 4.0f, 18.0f, + -3.0f, 10.0f, 2.0f, -9.0f, + 4.0f, 14.0f, 2.0f, -12.0f}; + + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +// --------------------- 3D convolution ------------------------------------------ +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_3D_1batch_1channel) +{ + const Strides strides{1, 1, 1}; + const CoordinateDiff padding{0, 0, 0}; + const 
Strides dilations{1, 1, 1}; + const CoordinateDiff output_padding{0, 0, 0}; + + const Shape inputs_shape{1, 1, 2, 2, 2}; + const std::vector inputs{ + // depth: 1 + 15.0f, 3.0f, + 21.0f, 10.0f, + // depth: 2 + 10.0f, 13.0f, + 11.0f, 17.0f}; + + const Shape filter_shape{1, 1, 3, 3, 3}; + const std::vector filters{ + // depth: 1 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 2 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 3 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f}; + + const Shape outputs_shape{1, 1, 4, 4, 4}; + const std::vector outputs{ + // depth: 1 + 15.0f, 33.0f, 51.0f, 9.0f, + 21.0f, 67.0f, 86.0f, 30.0f, + 30.0f, 42.0f, 43.0f, 6.0f, + 42.0f, 41.0f, 52.0f, 20.0f, + // depth: 2 + 25.0f, 66.0f, 107.0f, 48.0f, + 32.0f, 116.0f, 166.0f, 81.0f, + 50.0f, 89.0f, 93.0f, 32.0f, + 64.0f, 86.0f, 91.0f, 54.0f, + // depth: 3 + 25.0f, 66.0f, 107.0f, 48.0f, + 32.0f, 116.0f, 166.0f, 81.0f, + 50.0f, 89.0f, 93.0f, 32.0f, + 64.0f, 86.0f, 91.0f, 54.0f, + // depth: 4 + 10.0f, 33.0f, 56.0f, 39.0f, + 11.0f, 49.0f, 80.0f, 51.0f, + 20.0f, 47.0f, 50.0f, 26.0f, + 22.0f, 45.0f, 39.0f, 34.0f + }; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_3D_1batch_1channel_output_padding) +{ + const Strides strides{1, 1, 1}; + const CoordinateDiff padding{1, 1, 1}; + const Strides dilations{1, 1, 1}; + const CoordinateDiff output_padding{1, 1, 1}; + + const Shape inputs_shape{1, 1, 2, 2, 2}; + const std::vector inputs{ + // depth: 1 + 15.0f, 3.0f, + 21.0f, 10.0f, + // depth: 2 + 10.0f, 13.0f, + 11.0f, 17.0f}; + + const Shape filter_shape{1, 1, 3, 3, 3}; + const std::vector filters{ + // depth: 1 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 2 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 3 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f}; + + const Shape outputs_shape{1, 1, 3, 3, 3}; + const std::vector outputs{ + // depth: 1 + 116.0f, 166.0f, 81.0f, + 89.0f, 93.0f, 32.0f, + 86.0f, 91.0f, 54.0f, + // depth: 2 + 116.0f, 166.0f, 81.0f, + 89.0f, 93.0f, 32.0f, + 86.0f, 91.0f, 54.0f, + // depth: 3 + 49.0f, 80.0f, 51.0f, + 47.0f, 50.0f, 26.0f, + 45.0f, 39.0f, 34.0f + }; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_3D_1batch_1channel_padding) +{ + const Strides strides{1, 1, 1}; + const CoordinateDiff padding{1, 1, 1}; + const Strides dilations{1, 1, 1}; + const CoordinateDiff output_padding{0, 0, 0}; + + const Shape inputs_shape{1, 1, 4, 4, 4}; + const std::vector inputs{ + // depth: 1 + 1.0f, 3.0f, 2.0f, 1.0f, + 1.0f, 3.0f, 3.0f, 1.0f, + 2.0f, 1.0f, 1.0f, 3.0f, + 3.0f, 2.0f, 3.0f, 3.0f, + // depth: 2 + 1.0f, 3.0f, 2.0f, 1.0f, + 1.0f, 3.0f, 3.0f, 1.0f, + 2.0f, 1.0f, 1.0f, 3.0f, + 3.0f, 2.0f, 3.0f, 3.0f, + // depth: 3 + 1.0f, 3.0f, 2.0f, 1.0f, + 1.0f, 3.0f, 3.0f, 1.0f, + 2.0f, 1.0f, 1.0f, 3.0f, + 3.0f, 2.0f, 3.0f, 3.0f, + // depth: 4 + 1.0f, 3.0f, 2.0f, 1.0f, + 1.0f, 3.0f, 3.0f, 1.0f, + 2.0f, 1.0f, 1.0f, 3.0f, + 3.0f, 2.0f, 3.0f, 3.0f + }; + + const Shape filter_shape{1, 1, 3, 3, 3}; + const std::vector filters{ + // depth: 1 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 2 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 3 + 1.0f, 2.0f, 3.0f, + 0.0f, 
1.0f, 0.0f, + 2.0f, 1.0f, 2.0f}; + + const Shape outputs_shape{1, 1, 4, 4, 4}; + const std::vector outputs{ + // depth: 1 + 12.0f, 30.0f, 36.0f, 24.0f, + 26.0f, 42.0f, 42.0f, 30.0f, + 34.0f, 56.0f, 54.0f, 50.0f, + 14.0f, 18.0f, 24.0f, 16.0f, + // depth: 2 + 18.0f, 45.0f, 54.0f, 36.0f, + 39.0f, 63.0f, 63.0f, 45.0f, + 51.0f, 84.0f, 81.0f, 75.0f, + 21.0f, 27.0f, 36.0f, 24.0f, + // depth: 3 + 18.0f, 45.0f, 54.0f, 36.0f, + 39.0f, 63.0f, 63.0f, 45.0f, + 51.0f, 84.0f, 81.0f, 75.0f, + 21.0f, 27.0f, 36.0f, 24.0f, + // depth: 4 + 12.0f, 30.0f, 36.0f, 24.0f, + 26.0f, 42.0f, 42.0f, 30.0f, + 34.0f, 56.0f, 54.0f, 50.0f, + 14.0f, 18.0f, 24.0f, 16.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_3D_1batch_1channel_stride) +{ + const Strides strides{2, 2, 2}; + const CoordinateDiff padding{0, 0, 0}; + const Strides dilations{1, 1, 1}; + const CoordinateDiff output_padding{0, 0, 0}; + + const Shape inputs_shape{1, 1, 2, 2, 2}; + const std::vector inputs{ + // depth: 1 + 15.0f, 3.0f, + 21.0f, 10.0f, + // depth: 2 + 10.0f, 13.0f, + 11.0f, 17.0f}; + + const Shape filter_shape{1, 1, 3, 3, 3}; + const std::vector filters{ + // depth: 1 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 2 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 3 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f}; + + const Shape outputs_shape{1, 1, 5, 5, 5}; + const std::vector outputs{ + // depth: 1 + 15.0f, 30.0f, 48.0f, 6.0f, 9.0f, + 0.0f, 15.0f, 0.0f, 3.0f, 0.0f, + 51.0f, 57.0f, 109.0f, 23.0f, 36.0f, + 0.0f, 21.0f, 0.0f, 10.0f, 0.0f, + 42.0f, 21.0f, 62.0f, 10.0f, 20.0f, + // depth: 2 + 15.0f, 30.0f, 48.0f, 6.0f, 9.0f, + 0.0f, 15.0f, 0.0f, 3.0f, 0.0f, + 51.0f, 57.0f, 109.0f, 23.0f, 36.0f, + 0.0f, 21.0f, 0.0f, 10.0f, 0.0f, + 42.0f, 21.0f, 62.0f, 10.0f, 20.0f, + // depth: 3 + 25.0f, 50.0f, 91.0f, 32.0f, 48.0f, + 0.0f, 25.0f, 0.0f, 16.0f, 0.0f, + 82.0f, 89.0f, 205.0f, 70.0f, 113.0f, + 0.0f, 32.0f, 0.0f, 27.0f, 0.0f, + 64.0f, 32.0f, 118.0f, 27.0f, 54.0f, + // depth: 4 + 10.0f, 20.0f, 43.0f, 26.0f, 39.0f, + 0.0f, 10.0f, 0.0f, 13.0f, 0.0f, + 31.0f, 32.0f, 96.0f, 47.0f, 77.0f, + 0.0f, 11.0f, 0.0f, 17.0f, 0.0f, + 22.0f, 11.0f, 56.0f, 17.0f, 34.0f, + // depth: 5 + 10.0f, 20.0f, 43.0f, 26.0f, 39.0f, + 0.0f, 10.0f, 0.0f, 13.0f, 0.0f, + 31.0f, 32.0f, 96.0f, 47.0f, 77.0f, + 0.0f, 11.0f, 0.0f, 17.0f, 0.0f, + 22.0f, 11.0f, 56.0f, 17.0f, 34.0f + }; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_3D_1batch_1channel_padding_strides_dilation) +{ + const Strides strides{2, 2, 2}; + const CoordinateDiff padding{2, 2, 2}; + const Strides dilations{2, 2, 2}; + const CoordinateDiff output_padding{0, 0, 0}; + + const Shape inputs_shape{1, 1, 4, 4, 4}; + const std::vector inputs{ + // depth: 1 + 1.0f, 3.0f, 2.0f, 1.0f, + 1.0f, 3.0f, 3.0f, 1.0f, + 2.0f, 1.0f, 1.0f, 3.0f, + 3.0f, 2.0f, 3.0f, 3.0f, + // depth: 2 + 1.0f, 3.0f, 2.0f, 1.0f, + 1.0f, 3.0f, 3.0f, 1.0f, + 2.0f, 1.0f, 1.0f, 3.0f, + 3.0f, 2.0f, 3.0f, 3.0f, + // depth: 3 + 1.0f, 3.0f, 2.0f, 1.0f, + 1.0f, 3.0f, 3.0f, 1.0f, + 2.0f, 1.0f, 1.0f, 3.0f, + 3.0f, 2.0f, 3.0f, 3.0f, + // depth: 4 + 1.0f, 3.0f, 2.0f, 1.0f, + 1.0f, 3.0f, 3.0f, 1.0f, + 2.0f, 1.0f, 1.0f, 3.0f, + 3.0f, 2.0f, 3.0f, 3.0f + }; + + const Shape filter_shape{1, 1, 3, 3, 3}; + const 
std::vector filters{ + // depth: 1 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 2 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 3 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f}; + + const Shape outputs_shape{1, 1, 7, 7, 7}; + const std::vector outputs{ + // depth: 1 + 12.0f, 0.0f, 30.0f, 0.0f, 36.0f, 0.0f, 24.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 26.0f, 0.0f, 42.0f, 0.0f, 42.0f, 0.0f, 30.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 34.0f, 0.0f, 56.0f, 0.0f, 54.0f, 0.0f, 50.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 14.0f, 0.0f, 18.0f, 0.0f, 24.0f, 0.0f, 16.0f, + // depth: 2 + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + // depth: 3 + 18.0f, 0.0f, 45.0f, 0.0f, 54.0f, 0.0f, 36.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 39.0f, 0.0f, 63.0f, 0.0f, 63.0f, 0.0f, 45.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 51.0f, 0.0f, 84.0f, 0.0f, 81.0f, 0.0f, 75.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 21.0f, 0.0f, 27.0f, 0.0f, 36.0f, 0.0f, 24.0f, + // depth: 4 + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + // depth: 5 + 18.0f, 0.0f, 45.0f, 0.0f, 54.0f, 0.0f, 36.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 39.0f, 0.0f, 63.0f, 0.0f, 63.0f, 0.0f, 45.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 51.0f, 0.0f, 84.0f, 0.0f, 81.0f, 0.0f, 75.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 21.0f, 0.0f, 27.0f, 0.0f, 36.0f, 0.0f, 24.0f, + // depth: 6 + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + // depth: 7 + 12.0f, 0.0f, 30.0f, 0.0f, 36.0f, 0.0f, 24.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 26.0f, 0.0f, 42.0f, 0.0f, 42.0f, 0.0f, 30.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 34.0f, 0.0f, 56.0f, 0.0f, 54.0f, 0.0f, 50.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 14.0f, 0.0f, 18.0f, 0.0f, 24.0f, 0.0f, 16.0f + }; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_3D_1batch_2channel) +{ + const Strides strides{1, 1, 1}; + const CoordinateDiff padding{0, 0, 0}; + const Strides dilations{1, 1, 1}; + const CoordinateDiff output_padding{0, 0, 0}; + + const Shape inputs_shape{1, 1, 2, 2, 2}; + const std::vector inputs{ + // depth: 1 + 1.0f, 8.0f, + 1.0f, 3.0f, + // depth: 2 + 1.0f, 7.0f, + 3.0f, 8.0f}; + + const Shape filter_shape{1, 2, 3, 3, 3}; + const std::vector filters{ + // -- channel 1 -- + // depth: 1 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 2 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 3 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // -- channel 2 -- + // depth: 1 + 1.0f, 2.0f, 3.0f, 
+ 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 2 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 3 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f + }; + + const Shape outputs_shape{1, 2, 4, 4, 4}; + const std::vector outputs{ + // -- channel 1 -- + // depth: 1 + 1.0f, 10.0f, 19.0f, 24.0f, + 1.0f, 6.0f, 17.0f, 9.0f, + 2.0f, 18.0f, 13.0f, 16.0f, + 2.0f, 7.0f, 5.0f, 6.0f, + // depth: 2 + 2.0f, 19.0f, 36.0f, 45.0f, + 4.0f, 21.0f, 49.0f, 33.0f, + 4.0f, 36.0f, 30.0f, 30.0f, + 8.0f, 26.0f, 19.0f, 22.0f, + // depth: 3 + 2.0f, 19.0f, 36.0f, 45.0f, + 4.0f, 21.0f, 49.0f, 33.0f, + 4.0f, 36.0f, 30.0f, 30.0f, + 8.0f, 26.0f, 19.0f, 22.0f, + // depth: 4 + 1.0f, 9.0f, 17.0f, 21.0f, + 3.0f, 15.0f, 32.0f, 24.0f, + 2.0f, 18.0f, 17.0f, 14.0f, + 6.0f, 19.0f, 14.0f, 16.0f, + // -- channel 2 -- + // depth: 1 + 1.0f, 10.0f, 19.0f, 24.0f, + 1.0f, 6.0f, 17.0f, 9.0f, + 2.0f, 18.0f, 13.0f, 16.0f, + 2.0f, 7.0f, 5.0f, 6.0f, + // depth: 2 + 2.0f, 19.0f, 36.0f, 45.0f, + 4.0f, 21.0f, 49.0f, 33.0f, + 4.0f, 36.0f, 30.0f, 30.0f, + 8.0f, 26.0f, 19.0f, 22.0f, + // depth: 3 + 2.0f, 19.0f, 36.0f, 45.0f, + 4.0f, 21.0f, 49.0f, 33.0f, + 4.0f, 36.0f, 30.0f, 30.0f, + 8.0f, 26.0f, 19.0f, 22.0f, + // depth: 4 + 1.0f, 9.0f, 17.0f, 21.0f, + 3.0f, 15.0f, 32.0f, 24.0f, + 2.0f, 18.0f, 17.0f, 14.0f, + 6.0f, 19.0f, 14.0f, 16.0f + }; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_3D_1batch_2filter) +{ + const Strides strides{1, 1, 1}; + const CoordinateDiff padding{0, 0, 0}; + const Strides dilations{1, 1, 1}; + const CoordinateDiff output_padding{0, 0, 0}; + + const Shape inputs_shape{1, 2, 2, 2, 2}; + const std::vector inputs{ + // -- in 1 -- + // depth: 1 + 1.0f, 3.0f, + 2.0f, 5.0f, + // depth: 2 + 1.0f, 0.0f, + 3.0f, 6.0f, + // -- in 2 -- + // depth: 1 + 1.0f, 3.0f, + 2.0f, 5.0f, + // depth: 2 + 3.0f, 0.0f, + 1.0f, 8.0f}; + + const Shape filter_shape{2, 1, 3, 3, 3}; + const std::vector filters{ + // -- filter 1 -- + // depth: 1 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 2 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 3 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // -- filter 2 -- + // depth: 1 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 2 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 3 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f + }; + + const Shape outputs_shape{1, 1, 4, 4, 4}; + const std::vector outputs{ + // depth: 1 + 2.0f, 10.0f, 18.0f, 18.0f, + 4.0f, 20.0f, 38.0f, 30.0f, + 4.0f, 18.0f, 20.0f, 12.0f, + 8.0f, 24.0f, 18.0f, 20.0f, + // depth: 2 + 6.0f, 18.0f, 30.0f, 18.0f, + 8.0f, 46.0f, 78.0f, 72.0f, + 12.0f, 26.0f, 42.0f, 12.0f, + 16.0f, 56.0f, 40.0f, 48.0f, + // depth: 3 + 6.0f, 18.0f, 30.0f, 18.0f, + 8.0f, 46.0f, 78.0f, 72.0f, + 12.0f, 26.0f, 42.0f, 12.0f, + 16.0f, 56.0f, 40.0f, 48.0f, + // depth: 4 + 4.0f, 8.0f, 12.0f, 0.0f, + 4.0f, 26.0f, 40.0f, 42.0f, + 8.0f, 8.0f, 22.0f, 0.0f, + 8.0f, 32.0f, 22.0f, 28.0f + }; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} + +NGRAPH_TEST(${BACKEND_NAME}, convolution_backprop_3D_2batch_1channel) +{ + const Strides strides{1, 1, 1}; + const CoordinateDiff padding{0, 0, 0}; + const Strides dilations{1, 1, 1}; + const CoordinateDiff output_padding{0, 0, 0}; + + const 
Shape inputs_shape{2, 1, 2, 2, 2}; + const std::vector inputs{ + // -- batch 1 -- + // depth: 1 + 1.0f, 3.0f, + 2.0f, 5.0f, + // depth: 2 + 1.0f, 0.0f, + 6.0f, 4.0f, + // -- batch 2 -- + // depth: 1 + 1.0f, 5.0f, + 2.0f, 8.0f, + // depth: 2 + 2.0f, 1.0f, + 0.0f, 5.0f}; + const Shape filter_shape{1, 1, 3, 3, 3}; + const std::vector filters{ + // depth: 1 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 2 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f, + // depth: 3 + 1.0f, 2.0f, 3.0f, + 0.0f, 1.0f, 0.0f, + 2.0f, 1.0f, 2.0f}; + + const Shape outputs_shape{2, 1, 4, 4, 4}; + const std::vector outputs{ + // -- batch 1 -- + // depth: 1 + 1.0f, 5.0f, 9.0f, 9.0f, + 2.0f, 10.0f, 19.0f, 15.0f, + 2.0f, 9.0f, 10.0f, 6.0f, + 4.0f, 12.0f, 9.0f, 10.0f, + // depth: 2 + 2.0f, 7.0f, 12.0f, 9.0f, + 8.0f, 27.0f, 45.0f, 27.0f, + 4.0f, 16.0f, 16.0f, 6.0f, + 16.0f, 26.0f, 25.0f, 18.0f, + // depth: 3 + 2.0f, 7.0f, 12.0f, 9.0f, + 8.0f, 27.0f, 45.0f, 27.0f, + 4.0f, 16.0f, 16.0f, 6.0f, + 16.0f, 26.0f, 25.0f, 18.0f, + // depth: 4 + 1.0f, 2.0f, 3.0f, 0.0f, + 6.0f, 17.0f, 26.0f, 12.0f, + 2.0f, 7.0f, 6.0f, 0.0f, + 12.0f, 14.0f, 16.0f, 8.0f, + // -- batch 2 -- + // depth: 1 + 1.0f, 7.0f, 13.0f, 15.0f, + 2.0f, 13.0f, 27.0f, 24.0f, + 2.0f, 13.0f, 15.0f, 10.0f, + 4.0f, 18.0f, 12.0f, 16.0f, + // depth: 2 + 3.0f, 12.0f, 21.0f, 18.0f, + 2.0f, 20.0f, 38.0f, 39.0f, + 6.0f, 17.0f, 25.0f, 12.0f, + 4.0f, 28.0f, 17.0f, 26.0f, + // depth: 3 + 3.0f, 12.0f, 21.0f, 18.0f, + 2.0f, 20.0f, 38.0f, 39.0f, + 6.0f, 17.0f, 25.0f, 12.0f, + 4.0f, 28.0f, 17.0f, 26.0f, + // depth: 4 + 2.0f, 5.0f, 8.0f, 3.0f, + 0.0f, 7.0f, 11.0f, 15.0f, + 4.0f, 4.0f, 10.0f, 2.0f, + 0.0f, 10.0f, 5.0f, 10.0f}; + + ConvolutionBackpropTest(inputs, inputs_shape, filters, filter_shape, outputs, outputs_shape, + strides, padding, dilations, output_padding); +} diff --git a/ngraph/test/backend/reduce_prod.in.cpp b/ngraph/test/backend/reduce_prod.in.cpp index 6f63089acaa..dbd7d3c743b 100644 --- a/ngraph/test/backend/reduce_prod.in.cpp +++ b/ngraph/test/backend/reduce_prod.in.cpp @@ -80,95 +80,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows) EXPECT_TRUE(test::all_close_f((vector{2, 12, 30}), read_vector(result))); } -NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows_zero) -{ - Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); - auto f = - make_shared(make_shared(A, axes, false), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3, 3, 3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{1, 1, 1}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_cols_zero) -{ - // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). 
- Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{2}; - auto axes = make_shared(element::i32, Shape{}, 0); - auto f = - make_shared(make_shared(A, axes, false), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3, 3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{1, 1}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, reduce_product_vector_zero) -{ - Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{}, 0); - auto f = - make_shared(make_shared(A, axes, false), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{1}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_to_scalar_zero_by_zero) -{ - Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); - auto f = - make_shared(make_shared(A, axes, false), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{1}), read_vector(result))); -} - NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; @@ -283,31 +194,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_scalar) read_vector(result))); } -NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_eliminate_zero_dim) -{ - Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{3, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); - auto f = - make_shared(make_shared(A, axes, false), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - - // Overwrite the initial result vector to make sure we're not just coincidentally getting the - // right value. 
- copy_data(result, vector{2112, 2112, 2112, 2112, 2112, 2112}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{1, 1, 1, 1, 1, 1}), read_vector(result))); -} - NGRAPH_TEST(${BACKEND_NAME}, reduce_product_2d_to_scalar_int32) { Shape shape_a{3, 3}; @@ -433,95 +319,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_rows) EXPECT_TRUE(test::all_close_f((vector{2, 12, 30}), read_vector(result))); } -NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_rows_zero) -{ - Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); - auto f = - make_shared(make_shared(A, axes, true), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3, 3, 3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{1, 1, 1}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_cols_zero) -{ - // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). - Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); - auto f = - make_shared(make_shared(A, axes, true), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3, 3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{1, 1}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_vector_zero) -{ - Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{1}; - auto axes = make_shared(element::i32, Shape{}, 0); - auto f = - make_shared(make_shared(A, axes, true), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{1}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_to_scalar_zero_by_zero) -{ - Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{1, 1}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); - auto f = - make_shared(make_shared(A, axes, true), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{1}), read_vector(result))); -} - 
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; @@ -636,31 +433,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_scalar) read_vector(result))); } -NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_eliminate_zero_dim) -{ - Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{3, 1, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); - auto f = - make_shared(make_shared(A, axes, true), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - - // Overwrite the initial result vector to make sure we're not just coincidentally getting the - // right value. - copy_data(result, vector{2112, 2112, 2112, 2112, 2112, 2112}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{1, 1, 1, 1, 1, 1}), read_vector(result))); -} - NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_2d_to_scalar_int32) { Shape shape_a{3, 3}; diff --git a/ngraph/test/backend/reduce_sum.in.cpp b/ngraph/test/backend/reduce_sum.in.cpp index 50560a07c6e..97bc81d98eb 100644 --- a/ngraph/test/backend/reduce_sum.in.cpp +++ b/ngraph/test/backend/reduce_sum.in.cpp @@ -151,95 +151,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows) EXPECT_TRUE(test::all_close_f((vector{3, 7, 11}), read_vector(result))); } -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows_zero) -{ - Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); - auto f = - make_shared(make_shared(A, axes, false), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3, 3, 3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{0, 0, 0}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_cols_zero) -{ - // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). 
- Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{2}; - auto axes = make_shared(element::i32, Shape{}, 0); - auto f = - make_shared(make_shared(A, axes, false), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3, 3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{0, 0}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_vector_zero) -{ - Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{}, 0); - auto f = - make_shared(make_shared(A, axes, false), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{0}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_to_scalar_zero_by_zero) -{ - Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); - auto f = - make_shared(make_shared(A, axes, false), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{0}), read_vector(result))); -} - NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; @@ -376,56 +287,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_scalar_int32) read_vector(result)); } -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_eliminate_zero_dim) -{ - Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{3, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); - auto f = - make_shared(make_shared(A, axes, false), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - - // Overwrite the initial result vector to make sure we're not just coincidentally getting the - // right value. 
- copy_data(result, vector{2112, 2112, 2112, 2112, 2112, 2112}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{0, 0, 0, 0, 0, 0}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_eliminate_zero_dim_int32) -{ - Shape shape_a{3, 0, 2}; - auto A = make_shared(element::i32, shape_a); - Shape shape_rt{3, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); - auto f = - make_shared(make_shared(A, axes, false), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::i32, shape_rt); - - // Overwrite the initial result vector to make sure we're not just coincidentally getting the - // right value. - copy_data(result, vector{2112, 2112, 2112, 2112, 2112, 2112}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{0, 0, 0, 0, 0, 0}), read_vector(result)); -} - NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_5d_to_scalar) { Shape shape_a{3, 3, 3, 3, 3}; @@ -489,27 +350,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_2d_to_scalar_int8) EXPECT_EQ(std::vector{45}, read_vector(result)); } -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_trivial_in_double) -{ - Shape shape{4, 3}; - Shape rshape{3}; - auto A = make_shared(element::f64, shape); - auto axes = make_shared(element::i32, Shape{}, 0); - auto f = - make_shared(make_shared(A, axes, false), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f64, shape); - copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result = backend->create_tensor(element::f64, rshape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{30, 22, 26}), read_vector(result))); -} - #if NGRAPH_INTERPRETER_ENABLE #ifndef _WIN32 @@ -548,39 +388,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_acc) } #endif -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_acc_double) -{ - std::string backend_name = "${BACKEND_NAME}"; - if (backend_name == "INTERPRETER") - { - return; - } - Shape shape_a{10, 10, 20, 300}; - auto A = make_shared(element::f64, shape_a); - - Shape shape_rt{10}; - auto axes = make_shared(element::i32, Shape{3}, vector{1, 2, 3}); - auto f = - make_shared(make_shared(A, axes, false), ParameterVector{A}); - - test::Uniform rng(1000000000.0L, 1000000000.001L, 2112); - vector> args; - for (shared_ptr param : f->get_parameters()) - { - vector tensor_val(shape_size(param->get_shape())); - rng.initialize(tensor_val); - args.push_back(tensor_val); - } - - auto ref_func = clone_function(*f); - auto bk_func = clone_function(*f); - - auto ref_results = execute(ref_func, args, "INTERPRETER"); - auto bk_results = execute(bk_func, args, "${BACKEND_NAME}"); - - EXPECT_TRUE(test::all_close(ref_results.at(0), bk_results.at(0), 0.0, 1e-5)); -} - NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_simple_float) { std::string backend_name = "${BACKEND_NAME}"; @@ -611,106 +418,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_simple_float) test::all_close_f(ref_results.at(0), bk_results.at(0), DEFAULT_FLOAT_TOLERANCE_BITS - 1)); } -#ifndef _WIN32 -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_simple_double) -{ - std::string backend_name = 
"${BACKEND_NAME}"; - if (backend_name == "INTERPRETER") - { - return; - } - Shape shape_a{20}; - auto A = make_shared(element::f64, shape_a); - - Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{}, 0); - auto f = - make_shared(make_shared(A, axes, false), ParameterVector{A}); - - vector> args; - args.push_back(vector{10000000000000000.0L, - 0.2L, - 0.3L, - 0.4L, - 0.5L, - 0.6L, - 0.7L, - 0.8L, - 0.9L, - 0.7L, - 0.9L, - 0.7L, - 0.3L, - 0.6L, - 0.8L, - 0.4L, - 0.6L, - 0.5L, - 0.8L, - 0.7L}); - - auto ref_func = clone_function(*f); - auto bk_func = clone_function(*f); - - auto ref_results = execute(ref_func, args, "INTERPRETER"); - auto bk_results = execute(bk_func, args, "${BACKEND_NAME}"); - - EXPECT_TRUE(test::all_close(ref_results.at(0), bk_results.at(0), 0.0, 2.0)); -} #endif -#endif - -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_dynamic) -{ - // Create a graph for f(x,axes:int32) = Sum(x,Convert(axes)). - auto x = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto axes_i64 = make_shared(axes, element::i64); - - auto sum = make_shared(x, axes_i64, false); - ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic()); - - auto f = make_shared(NodeVector{sum}, ParameterVector{x, axes}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}", true); - - auto ex = backend->compile(f); - - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); - - std::vector x_shapes{ - Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}}; - std::vector> axeses{{}, {0}, {1}, {0, 1}, {}, {0}}; - std::vector> inputs{{1, 2, 3, 4, 5, 6}, - {1, 2, 3, 4, 5, 6}, - {1, 2, 3, 4, 5, 6}, - {1, 2, 3, 4, 5, 6}, - {1, 2, 3, 4, 5}, - {1, 2, 3, 4, 5}}; - std::vector expected_result_shapes{ - Shape{2, 3}, Shape{3}, Shape{2}, Shape{}, Shape{5}, Shape{}}; - std::vector> expected_results{ - {1, 2, 3, 4, 5, 6}, {5, 7, 9}, {6, 15}, {21}, {1, 2, 3, 4, 5}, {15}}; - - for (size_t i = 0; i < x_shapes.size(); i++) - { - auto t_x = backend->create_tensor(element::f32, x_shapes[i]); - auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()}); - - copy_data(t_x, inputs[i]); - copy_data(t_axes, axeses[i]); - - ex->call_with_validate({t_r}, {t_x, t_axes}); - - ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]); - - auto results = read_vector(t_r); - - ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS)); - } -} - NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_inf) { Shape shape{7, 4}; @@ -874,95 +583,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_rows) EXPECT_TRUE(test::all_close_f((vector{3, 7, 11}), read_vector(result))); } -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_rows_zero) -{ - Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); - auto f = - make_shared(make_shared(A, axes, true), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3, 3, 3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{0, 0, 0}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_cols_zero) -{ - // 
Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). - Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); - auto f = - make_shared(make_shared(A, axes, true), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3, 3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{0, 0}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_vector_zero) -{ - Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{1}; - auto axes = make_shared(element::i32, Shape{}, 0); - auto f = - make_shared(make_shared(A, axes, true), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{0}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_to_scalar_zero_by_zero) -{ - Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{1, 1}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); - auto f = - make_shared(make_shared(A, axes, true), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{0}), read_vector(result))); -} - NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; @@ -1099,56 +719,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_scalar_int32) read_vector(result)); } -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_eliminate_zero_dim) -{ - Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); - Shape shape_rt{3, 1, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); - auto f = - make_shared(make_shared(A, axes, true), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - - // Overwrite the initial result vector to make sure we're not just coincidentally getting the - // right value. 
- copy_data(result, vector{2112, 2112, 2112, 2112, 2112, 2112}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{0, 0, 0, 0, 0, 0}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_eliminate_zero_dim_int32) -{ - Shape shape_a{3, 0, 2}; - auto A = make_shared(element::i32, shape_a); - Shape shape_rt{3, 1, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); - auto f = - make_shared(make_shared(A, axes, true), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::i32, shape_rt); - - // Overwrite the initial result vector to make sure we're not just coincidentally getting the - // right value. - copy_data(result, vector{2112, 2112, 2112, 2112, 2112, 2112}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{0, 0, 0, 0, 0, 0}), read_vector(result)); -} - NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_5d_to_scalar) { Shape shape_a{3, 3, 3, 3, 3}; @@ -1212,27 +782,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_2d_to_scalar_int8) EXPECT_EQ(std::vector{45}, read_vector(result)); } -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_trivial_in_double) -{ - Shape shape{4, 3}; - Shape rshape{1, 3}; - auto A = make_shared(element::f64, shape); - auto axes = make_shared(element::i32, Shape{}, 0); - auto f = - make_shared(make_shared(A, axes, true), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f64, shape); - copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result = backend->create_tensor(element::f64, rshape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{30, 22, 26}), read_vector(result))); -} - #if NGRAPH_INTERPRETER_ENABLE #ifndef _WIN32 @@ -1271,38 +820,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_acc) } #endif -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_acc_double) -{ - std::string backend_name = "${BACKEND_NAME}"; - if (backend_name == "INTERPRETER") - { - return; - } - Shape shape_a{10, 10, 20, 300}; - auto A = make_shared(element::f64, shape_a); - - Shape shape_rt{10, 1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{1, 2, 3}); - auto f = - make_shared(make_shared(A, axes, true), ParameterVector{A}); - - test::Uniform rng(1000000000.0L, 1000000000.001L, 2112); - vector> args; - for (shared_ptr param : f->get_parameters()) - { - vector tensor_val(shape_size(param->get_shape())); - rng.initialize(tensor_val); - args.push_back(tensor_val); - } - - auto ref_func = clone_function(*f); - auto bk_func = clone_function(*f); - - auto ref_results = execute(ref_func, args, "INTERPRETER"); - auto bk_results = execute(bk_func, args, "${BACKEND_NAME}"); - - EXPECT_TRUE(test::all_close(ref_results.at(0), bk_results.at(0), 0.0, 1e-5)); -} NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_simple_float) { @@ -1334,106 +851,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_simple_float) test::all_close_f(ref_results.at(0), bk_results.at(0), DEFAULT_FLOAT_TOLERANCE_BITS - 1)); } -#ifndef _WIN32 -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_simple_double) -{ - std::string backend_name = 
"${BACKEND_NAME}"; - if (backend_name == "INTERPRETER") - { - return; - } - Shape shape_a{20}; - auto A = make_shared(element::f64, shape_a); - - Shape shape_rt{1}; - auto axes = make_shared(element::i32, Shape{}, 0); - auto f = - make_shared(make_shared(A, axes, true), ParameterVector{A}); - - vector> args; - args.push_back(vector{10000000000000000.0L, - 0.2L, - 0.3L, - 0.4L, - 0.5L, - 0.6L, - 0.7L, - 0.8L, - 0.9L, - 0.7L, - 0.9L, - 0.7L, - 0.3L, - 0.6L, - 0.8L, - 0.4L, - 0.6L, - 0.5L, - 0.8L, - 0.7L}); - - auto ref_func = clone_function(*f); - auto bk_func = clone_function(*f); - - auto ref_results = execute(ref_func, args, "INTERPRETER"); - auto bk_results = execute(bk_func, args, "${BACKEND_NAME}"); - - EXPECT_TRUE(test::all_close(ref_results.at(0), bk_results.at(0), 0.0, 2.0)); -} #endif -#endif - -NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_dynamic) -{ - // Create a graph for f(x,axes:int32) = Sum(x,Convert(axes)). - auto x = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto axes_i64 = make_shared(axes, element::i64); - - auto sum = make_shared(x, axes_i64, true); - ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic()); - - auto f = make_shared(NodeVector{sum}, ParameterVector{x, axes}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}", true); - - auto ex = backend->compile(f); - - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); - - std::vector x_shapes{ - Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}}; - std::vector> axeses{{}, {0}, {1}, {0, 1}, {}, {0}}; - std::vector> inputs{{1, 2, 3, 4, 5, 6}, - {1, 2, 3, 4, 5, 6}, - {1, 2, 3, 4, 5, 6}, - {1, 2, 3, 4, 5, 6}, - {1, 2, 3, 4, 5}, - {1, 2, 3, 4, 5}}; - std::vector expected_result_shapes{ - Shape{2, 3}, Shape{1, 3}, Shape{2, 1}, Shape{1, 1}, Shape{5}, Shape{1}}; - std::vector> expected_results{ - {1, 2, 3, 4, 5, 6}, {5, 7, 9}, {6, 15}, {21}, {1, 2, 3, 4, 5}, {15}}; - - for (size_t i = 0; i < x_shapes.size(); i++) - { - auto t_x = backend->create_tensor(element::f32, x_shapes[i]); - auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()}); - - copy_data(t_x, inputs[i]); - copy_data(t_axes, axeses[i]); - - ex->call_with_validate({t_r}, {t_x, t_axes}); - - ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]); - - auto results = read_vector(t_r); - - ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS)); - } -} - NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_inf) { Shape shape{7, 4}; diff --git a/ngraph/test/backend/selu.in.cpp b/ngraph/test/backend/selu.in.cpp new file mode 100644 index 00000000000..2524ee045fd --- /dev/null +++ b/ngraph/test/backend/selu.in.cpp @@ -0,0 +1,99 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "util/engine/test_engines.hpp" +#include "util/test_case.hpp" +#include "util/test_control.hpp" + +using namespace std; +using namespace ngraph; + +static string s_manifest = "${MANIFEST}"; +using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); + +NGRAPH_TEST(${BACKEND_NAME}, selu_2Dfprop) +{ + Shape rt_shape{2}; + Shape c_shape{1}; + element::Type et = element::f32; + + auto input = make_shared(et, rt_shape); + auto alpha = op::Constant::create(et, c_shape, {1.67326324}); + auto lambda = op::Constant::create(et, c_shape, {1.05070098}); + auto selu = make_shared(input, alpha, lambda); + 
auto f = make_shared(selu, ParameterVector{input}); + + vector input_data{-1, 3}; + vector expected_out{-1.1113307, 3.152103}; + + auto test_case = test::TestCase(f); + test_case.add_input(rt_shape, input_data); + test_case.add_expected_output(rt_shape, expected_out); + test_case.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, selu_4Dfprop) +{ + Shape in_shape{4}; + Shape c_shape{1}; + element::Type et = element::f32; + + auto input = make_shared(et, in_shape); + auto alpha = op::Constant::create(et, c_shape, {1.67326324}); + auto lambda = op::Constant::create(et, c_shape, {1.05070098}); + auto selu = make_shared(input, alpha, lambda); + auto f = make_shared(selu, ParameterVector{input}); + + vector in_vec{-1.0, 0.0, 1.0, 2.0}; + vector out_vec{-1.1113307, 0., 1.050701, 2.101402}; + + auto test_case = test::TestCase(f); + test_case.add_input(in_shape, in_vec); + test_case.add_expected_output(in_shape, out_vec); + test_case.run_with_tolerance_as_fp(1e-4f); +} + +NGRAPH_TEST(${BACKEND_NAME}, selu_1Dfprop) +{ + Shape in_shape{1}; + Shape c_shape{1}; + element::Type et = element::f32; + + auto input = make_shared(et, in_shape); + auto alpha = op::Constant::create(et, c_shape, {1.67326324}); + auto lambda = op::Constant::create(et, c_shape, {1.05070098}); + auto selu = make_shared(input, alpha, lambda); + auto f = make_shared(selu, ParameterVector{input}); + + vector in_vec{112.0}; + vector out_vec{117.67851}; + + auto test_case = test::TestCase(f); + test_case.add_input(in_shape, in_vec); + test_case.add_expected_output(in_shape, out_vec); + test_case.run_with_tolerance_as_fp(1e-4f); +} + +NGRAPH_TEST(${BACKEND_NAME}, selu_3Dfprop_negative) +{ + Shape in_shape{3}; + Shape c_shape{1}; + element::Type et = element::f32; + + auto input = make_shared(et, in_shape); + auto alpha = op::Constant::create(et, c_shape, {1.67326324}); + auto lambda = op::Constant::create(et, c_shape, {1.05070098}); + auto selu = make_shared(input, alpha, lambda); + auto f = make_shared(selu, ParameterVector{input}); + + vector in_vec{-3.0, -12.5, -7.0}; + vector out_vec{-1.6705687, -1.7580928, -1.7564961}; + + auto test_case = test::TestCase(f); + test_case.add_input(in_shape, in_vec); + test_case.add_expected_output(in_shape, out_vec); + test_case.run_with_tolerance_as_fp(1e-4f); +} diff --git a/ngraph/test/backend/space_to_batch.in.cpp b/ngraph/test/backend/space_to_batch.in.cpp new file mode 100644 index 00000000000..6e9f2080434 --- /dev/null +++ b/ngraph/test/backend/space_to_batch.in.cpp @@ -0,0 +1,115 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "util/engine/test_engines.hpp" +#include "util/test_case.hpp" +#include "util/test_control.hpp" + +using namespace std; +using namespace ngraph; + +static string s_manifest = "${MANIFEST}"; +using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); + +static void SpaceToBatchTest(const std::vector& inputs, + const Shape inputs_shape, + const std::vector& block_shapes, + const Shape blocks_shape, + const std::vector& pads_begins, + const std::vector& pads_ends, + const Shape pads_shape, + const std::vector& outputs, + const Shape outputs_shape) +{ + auto inputs_param = make_shared(element::f32, inputs_shape); + auto block_shapes_param = make_shared(element::i64, blocks_shape, block_shapes); + auto pads_begins_param = make_shared(element::i64, pads_shape, pads_begins); + auto pads_ends_param = make_shared(element::i64, pads_shape, pads_ends); + + auto space_to_batch = 
make_shared( + inputs_param, block_shapes_param, pads_begins_param, pads_ends_param); + auto f = make_shared(space_to_batch, ParameterVector{inputs_param}); + + auto test_case = test::TestCase(f); + test_case.add_input(inputs); + test_case.add_expected_output(outputs_shape, outputs); + test_case.run(); +} + + +NGRAPH_TEST(${BACKEND_NAME}, space_to_batch_4D) +{ + const Shape inputs_shape{1, 1, 2, 2}; + const std::vector inputs{1.0f, 1.0f, + 1.0f, 1.0f}; + + const Shape blocks_shape{4}; + const std::vector block_shapes{1, 1, 1, 1}; + + const Shape pads_shape{4}; + const std::vector pads_begins{0, 0 ,0, 0}; + const std::vector pads_ends{0, 0, 0, 0}; + + const Shape outputs_shape{1, 1, 2, 2}; + const std::vector outputs{1.0f, 1.0f, + 1.0f, 1.0f}; + + + SpaceToBatchTest(inputs, inputs_shape, block_shapes, blocks_shape, pads_begins, + pads_ends, pads_shape, outputs, outputs_shape); +} + +NGRAPH_TEST(${BACKEND_NAME}, space_to_batch_5D) +{ + const Shape inputs_shape{1, 1, 3, 2, 1}; + const std::vector inputs{1.0f, 1.0f, 1.0f, + 1.0f, 1.0f, 1.0f}; + + const Shape blocks_shape{5}; + const std::vector block_shapes{1, 1, 3, 2, 2}; + + const Shape pads_shape{5}; + const std::vector pads_begins{0, 0 ,1, 0, 3}; + const std::vector pads_ends{0, 0, 2, 0, 0}; + + const Shape outputs_shape{12, 1, 2, 1, 2}; + const std::vector outputs{0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}; + + + SpaceToBatchTest(inputs, inputs_shape, block_shapes, blocks_shape, pads_begins, + pads_ends, pads_shape, outputs, outputs_shape); +} + +NGRAPH_TEST(${BACKEND_NAME}, space_to_batch_4x4) +{ + const Shape inputs_shape{1, 1, 4, 4}; + const std::vector inputs{1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 1.0f}; + + const Shape blocks_shape{4}; + const std::vector block_shapes{1, 1, 1, 1}; + + const Shape pads_shape{4}; + const std::vector pads_begins{0, 0, 1, 0}; + const std::vector pads_ends{0, 0, 0, 0}; + + const Shape outputs_shape{1, 1, 5, 4}; + const std::vector outputs{0.0f, 0.0f, 0.0f, 0.0f, + 1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 1.0f}; + + SpaceToBatchTest(inputs, inputs_shape, block_shapes, blocks_shape, pads_begins, + pads_ends, pads_shape, outputs, outputs_shape); +} diff --git a/ngraph/test/op_eval/reduce_prod.cpp b/ngraph/test/op_eval/reduce_prod.cpp new file mode 100644 index 00000000000..d2cd89888ea --- /dev/null +++ b/ngraph/test/op_eval/reduce_prod.cpp @@ -0,0 +1,244 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "util/test_control.hpp" +#include "util/all_close.hpp" +#include "util/all_close_f.hpp" +#include "util/ndarray.hpp" + + +using namespace std; +using namespace ngraph; + +static string s_manifest = "${MANIFEST}"; + +TEST(op_eval, reduce_product_matrix_rows_zero) +{ + Shape shape_a{3, 0}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{3}; + auto axes = make_shared(element::i32, Shape{}, 1); + auto f = + make_shared(make_shared(A, axes, false), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + 
copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3, 3, 3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{1, 1, 1}), read_vector(result))); +} + +TEST(op_eval, reduce_product_matrix_cols_zero) +{ + // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). + Shape shape_a{0, 2}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{2}; + auto axes = make_shared(element::i32, Shape{}, 0); + auto f = + make_shared(make_shared(A, axes, false), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3, 3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{1, 1}), read_vector(result))); +} + +TEST(op_eval, reduce_product_vector_zero) +{ + Shape shape_a{0}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{}; + auto axes = make_shared(element::i32, Shape{}, 0); + auto f = + make_shared(make_shared(A, axes, false), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{1}), read_vector(result))); +} + +TEST(op_eval, reduce_product_matrix_to_scalar_zero_by_zero) +{ + Shape shape_a{0, 0}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{}; + auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto f = + make_shared(make_shared(A, axes, false), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{1}), read_vector(result))); +} + +TEST(op_eval, reduce_product_3d_eliminate_zero_dim) +{ + Shape shape_a{3, 0, 2}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{3, 2}; + auto axes = make_shared(element::i32, Shape{}, 1); + auto f = + make_shared(make_shared(A, axes, false), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + + // Overwrite the initial result vector to make sure we're not just coincidentally getting the + // right value. 
+ copy_data(result, vector{2112, 2112, 2112, 2112, 2112, 2112}); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{1, 1, 1, 1, 1, 1}), read_vector(result))); +} + +TEST(op_eval, reduce_product_keep_matrix_rows_zero) +{ + Shape shape_a{3, 0}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{3, 1}; + auto axes = make_shared(element::i32, Shape{}, 1); + auto f = + make_shared(make_shared(A, axes, true), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3, 3, 3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{1, 1, 1}), read_vector(result))); +} + +TEST(op_eval, reduce_product_keep_matrix_cols_zero) +{ + // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). + Shape shape_a{0, 2}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{1, 2}; + auto axes = make_shared(element::i32, Shape{}, 0); + auto f = + make_shared(make_shared(A, axes, true), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3, 3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{1, 1}), read_vector(result))); +} + +TEST(op_eval, reduce_product_keep_vector_zero) +{ + Shape shape_a{0}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{1}; + auto axes = make_shared(element::i32, Shape{}, 0); + auto f = + make_shared(make_shared(A, axes, true), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{1}), read_vector(result))); +} + +TEST(op_eval, reduce_product_keep_matrix_to_scalar_zero_by_zero) +{ + Shape shape_a{0, 0}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{1, 1}; + auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto f = + make_shared(make_shared(A, axes, true), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{1}), read_vector(result))); +} + +TEST(op_eval, reduce_product_keep_3d_eliminate_zero_dim) +{ + Shape shape_a{3, 0, 2}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{3, 1, 2}; + auto axes = make_shared(element::i32, Shape{}, 1); + auto f = + make_shared(make_shared(A, axes, true), ParameterVector{A}); + + auto backend = 
runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + + // Overwrite the initial result vector to make sure we're not just coincidentally getting the + // right value. + copy_data(result, vector{2112, 2112, 2112, 2112, 2112, 2112}); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{1, 1, 1, 1, 1, 1}), read_vector(result))); +} diff --git a/ngraph/test/op_eval/reduce_sum.cpp b/ngraph/test/op_eval/reduce_sum.cpp new file mode 100644 index 00000000000..bad7aad21ed --- /dev/null +++ b/ngraph/test/op_eval/reduce_sum.cpp @@ -0,0 +1,395 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "util/test_control.hpp" +#include "util/all_close.hpp" +#include "util/all_close_f.hpp" +#include "util/ndarray.hpp" + + +using namespace std; +using namespace ngraph; + +static string s_manifest = "${MANIFEST}"; + +TEST(op_eval, reduce_sum_matrix_rows_zero) +{ + Shape shape_a{3, 0}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{3}; + auto axes = make_shared(element::i32, Shape{}, 1); + auto f = + make_shared(make_shared(A, axes, false), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3, 3, 3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0, 0, 0}), read_vector(result))); +} + +TEST(op_eval, reduce_sum_vector_zero) +{ + Shape shape_a{0}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{}; + auto axes = make_shared(element::i32, Shape{}, 0); + auto f = + make_shared(make_shared(A, axes, false), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0}), read_vector(result))); +} + + +TEST(op_eval, reduce_sum_matrix_cols_zero) +{ + // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). 
+ Shape shape_a{0, 2}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{2}; + auto axes = make_shared(element::i32, Shape{}, 0); + auto f = + make_shared(make_shared(A, axes, false), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3, 3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0, 0}), read_vector(result))); +} + +TEST(op_eval, reduce_sum_matrix_to_scalar_zero_by_zero) +{ + Shape shape_a{0, 0}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{}; + auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto f = + make_shared(make_shared(A, axes, false), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0}), read_vector(result))); +} + +TEST(op_eval, reduce_sum_3d_eliminate_zero_dim) +{ + Shape shape_a{3, 0, 2}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{3, 2}; + auto axes = make_shared(element::i32, Shape{}, 1); + auto f = + make_shared(make_shared(A, axes, false), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + + // Overwrite the initial result vector to make sure we're not just coincidentally getting the + // right value. + copy_data(result, vector{2112, 2112, 2112, 2112, 2112, 2112}); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0, 0, 0, 0, 0, 0}), read_vector(result))); +} + +TEST(op_eval, reduce_sum_3d_eliminate_zero_dim_int32) +{ + Shape shape_a{3, 0, 2}; + auto A = make_shared(element::i32, shape_a); + Shape shape_rt{3, 2}; + auto axes = make_shared(element::i32, Shape{}, 1); + auto f = + make_shared(make_shared(A, axes, false), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::i32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::i32, shape_rt); + + // Overwrite the initial result vector to make sure we're not just coincidentally getting the + // right value. + copy_data(result, vector{2112, 2112, 2112, 2112, 2112, 2112}); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_EQ((vector{0, 0, 0, 0, 0, 0}), read_vector(result)); +} + +TEST(op_eval, reduce_sum_dynamic) +{ + // Create a graph for f(x,axes:int32) = Sum(x,Convert(axes)). 
+ auto x = make_shared(element::f32, PartialShape::dynamic()); + auto axes = make_shared(element::i32, PartialShape{Dimension::dynamic()}); + auto axes_i64 = make_shared(axes, element::i64); + + auto sum = make_shared(x, axes_i64, false); + ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic()); + + auto f = make_shared(NodeVector{sum}, ParameterVector{x, axes}); + + auto backend = runtime::Backend::create("INTERPRETER", true); + + auto ex = backend->compile(f); + + auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + + std::vector x_shapes{ + Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}}; + std::vector> axeses{{}, {0}, {1}, {0, 1}, {}, {0}}; + std::vector> inputs{{1, 2, 3, 4, 5, 6}, + {1, 2, 3, 4, 5, 6}, + {1, 2, 3, 4, 5, 6}, + {1, 2, 3, 4, 5, 6}, + {1, 2, 3, 4, 5}, + {1, 2, 3, 4, 5}}; + std::vector expected_result_shapes{ + Shape{2, 3}, Shape{3}, Shape{2}, Shape{}, Shape{5}, Shape{}}; + std::vector> expected_results{ + {1, 2, 3, 4, 5, 6}, {5, 7, 9}, {6, 15}, {21}, {1, 2, 3, 4, 5}, {15}}; + + for (size_t i = 0; i < x_shapes.size(); i++) + { + auto t_x = backend->create_tensor(element::f32, x_shapes[i]); + auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()}); + + copy_data(t_x, inputs[i]); + copy_data(t_axes, axeses[i]); + + ex->call_with_validate({t_r}, {t_x, t_axes}); + + ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]); + + auto results = read_vector(t_r); + + ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS)); + } +} + +TEST(op_eval, reduce_sum_keep_matrix_rows_zero) +{ + Shape shape_a{3, 0}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{3, 1}; + auto axes = make_shared(element::i32, Shape{}, 1); + auto f = + make_shared(make_shared(A, axes, true), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3, 3, 3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0, 0, 0}), read_vector(result))); +} + +TEST(op_eval, reduce_sum_keep_matrix_cols_zero) +{ + // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). 
+ Shape shape_a{0, 2}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{1, 2}; + auto axes = make_shared(element::i32, Shape{}, 0); + auto f = + make_shared(make_shared(A, axes, true), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3, 3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0, 0}), read_vector(result))); +} + +TEST(op_eval, reduce_sum_keep_vector_zero) +{ + Shape shape_a{0}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{1}; + auto axes = make_shared(element::i32, Shape{}, 0); + auto f = + make_shared(make_shared(A, axes, true), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0}), read_vector(result))); +} + +TEST(op_eval, reduce_sum_keep_matrix_to_scalar_zero_by_zero) +{ + Shape shape_a{0, 0}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{1, 1}; + auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto f = + make_shared(make_shared(A, axes, true), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + copy_data(result, vector({3})); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0}), read_vector(result))); +} + +TEST(op_eval, reduce_sum_keep_3d_eliminate_zero_dim) +{ + Shape shape_a{3, 0, 2}; + auto A = make_shared(element::f32, shape_a); + Shape shape_rt{3, 1, 2}; + auto axes = make_shared(element::i32, Shape{}, 1); + auto f = + make_shared(make_shared(A, axes, true), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::f32, shape_rt); + + // Overwrite the initial result vector to make sure we're not just coincidentally getting the + // right value. 
+ copy_data(result, vector{2112, 2112, 2112, 2112, 2112, 2112}); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0, 0, 0, 0, 0, 0}), read_vector(result))); +} + +TEST(op_eval, reduce_sum_keep_3d_eliminate_zero_dim_int32) +{ + Shape shape_a{3, 0, 2}; + auto A = make_shared(element::i32, shape_a); + Shape shape_rt{3, 1, 2}; + auto axes = make_shared(element::i32, Shape{}, 1); + auto f = + make_shared(make_shared(A, axes, true), ParameterVector{A}); + + auto backend = runtime::Backend::create("INTERPRETER"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::i32, shape_a); + copy_data(a, vector{}); + auto result = backend->create_tensor(element::i32, shape_rt); + + // Overwrite the initial result vector to make sure we're not just coincidentally getting the + // right value. + copy_data(result, vector{2112, 2112, 2112, 2112, 2112, 2112}); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_EQ((vector{0, 0, 0, 0, 0, 0}), read_vector(result)); +} + +TEST(op_eval, reduce_sum_keep_dynamic) +{ + // Create a graph for f(x,axes:int32) = Sum(x,Convert(axes)). + auto x = make_shared(element::f32, PartialShape::dynamic()); + auto axes = make_shared(element::i32, PartialShape{Dimension::dynamic()}); + auto axes_i64 = make_shared(axes, element::i64); + + auto sum = make_shared(x, axes_i64, true); + ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic()); + + auto f = make_shared(NodeVector{sum}, ParameterVector{x, axes}); + + auto backend = runtime::Backend::create("INTERPRETER", true); + + auto ex = backend->compile(f); + + auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + + std::vector x_shapes{ + Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}}; + std::vector> axeses{{}, {0}, {1}, {0, 1}, {}, {0}}; + std::vector> inputs{{1, 2, 3, 4, 5, 6}, + {1, 2, 3, 4, 5, 6}, + {1, 2, 3, 4, 5, 6}, + {1, 2, 3, 4, 5, 6}, + {1, 2, 3, 4, 5}, + {1, 2, 3, 4, 5}}; + std::vector expected_result_shapes{ + Shape{2, 3}, Shape{1, 3}, Shape{2, 1}, Shape{1, 1}, Shape{5}, Shape{1}}; + std::vector> expected_results{ + {1, 2, 3, 4, 5, 6}, {5, 7, 9}, {6, 15}, {21}, {1, 2, 3, 4, 5}, {15}}; + + for (size_t i = 0; i < x_shapes.size(); i++) + { + auto t_x = backend->create_tensor(element::f32, x_shapes[i]); + auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()}); + + copy_data(t_x, inputs[i]); + copy_data(t_axes, axeses[i]); + + ex->call_with_validate({t_r}, {t_x, t_axes}); + + ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]); + + auto results = read_vector(t_r); + + ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS)); + } +} diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest index f95ce0d951e..80f2421fb41 100644 --- a/ngraph/test/runtime/ie/unit_test.manifest +++ b/ngraph/test/runtime/ie/unit_test.manifest @@ -368,36 +368,18 @@ any_2x0_to_scalar all_trivial all_2x0_to_scalar all_dynamic -reduce_sum_keep_dynamic -reduce_sum_keep_stable_simple_double -reduce_sum_keep_stable_acc_double -reduce_sum_keep_stable_acc -reduce_sum_keep_3d_eliminate_zero_dim -reduce_sum_keep_vector_zero -reduce_sum_keep_matrix_rows_zero -reduce_sum_dynamic -reduce_sum_3d_eliminate_zero_dim_int32 -reduce_sum_keep_3d_eliminate_zero_dim_int32 -reduce_sum_keep_trivial_in_double -reduce_sum_3d_eliminate_zero_dim 
-reduce_sum_matrix_to_scalar_zero_by_zero -reduce_sum_vector_zero -reduce_sum_matrix_cols_zero -reduce_sum_matrix_rows_zero -reduce_sum_keep_matrix_to_scalar_zero_by_zero -reduce_sum_keep_matrix_cols_zero -reduce_product_matrix_columns +# Reduce ops disabled/accuracy: 56520 +# disabled reference implementation +reduce_sum_keep_2d_to_scalar_int8 +reduce_sum_2d_to_scalar_int8 +reduce_product_to_scalar_int8 +reduce_product_keep_to_scalar_int8 # accuracy reduce_sum_keep_stable_acc -reduce_sum_keep_2d_to_scalar_int8 reduce_sum_keep_3d_to_scalar_int32 reduce_sum_keep_large_1d_to_scalar -reduce_sum_stable_simple_double -reduce_sum_stable_acc_double reduce_sum_stable_acc -reduce_sum_trivial_in_double -reduce_sum_2d_to_scalar_int8 reduce_sum_3d_to_scalar_int32 reduce_sum_large_1d_to_scalar @@ -478,17 +460,6 @@ onnx_dyn_shapes_model_tile_static gather_4d_indices_axis_0_uint8 tensor_constant_with_op constant_equality_bool -reduce_product_matrix_rows -reduce_product_3d_to_matrix_most_sig -reduce_product_3d_to_matrix_least_sig -reduce_product_keep_matrix_columns -reduce_product_keep_matrix_rows -reduce_product_keep_3d_to_matrix_most_sig -reduce_product_keep_3d_to_matrix_least_sig -reduce_product_matrix_columns_dynamic -reduce_product_matrix_rows_dynamic -reduce_product_keep_matrix_columns_dynamic -reduce_product_keep_matrix_rows_dynamic reduce_min_matrix_columns reduce_min_matrix_rows reduce_min_matrix_rows_int32 @@ -505,18 +476,6 @@ reduce_min_keep_matrix_columns_dynamic reduce_min_keep_matrix_rows_dynamic # zero dimension / result mismatch -reduce_product_matrix_rows_zero -reduce_product_matrix_cols_zero -reduce_product_vector_zero -reduce_product_matrix_to_scalar_zero_by_zero -reduce_product_3d_eliminate_zero_dim -reduce_product_to_scalar_int8 -reduce_product_keep_matrix_rows_zero -reduce_product_keep_matrix_cols_zero -reduce_product_keep_vector_zero -reduce_product_keep_matrix_to_scalar_zero_by_zero -reduce_product_keep_3d_eliminate_zero_dim -reduce_product_keep_to_scalar_int8 reduce_min_to_scalar_int8 reduce_min_matrix_rows_zero reduce_min_matrix_cols_zero diff --git a/ngraph/test/runtime/interpreter/evaluates_map.cpp b/ngraph/test/runtime/interpreter/evaluates_map.cpp index 61010117779..4446a4a2434 100644 --- a/ngraph/test/runtime/interpreter/evaluates_map.cpp +++ b/ngraph/test/runtime/interpreter/evaluates_map.cpp @@ -283,7 +283,8 @@ namespace op->get_dilations(), op->get_pads_begin(), op->get_pads_end(), - op->get_strides()); + op->get_strides(), + op->get_output_padding()); return true; } diff --git a/ngraph/test/runtime/interpreter/unit_test.manifest b/ngraph/test/runtime/interpreter/unit_test.manifest index b5003831265..9e123149152 100644 --- a/ngraph/test/runtime/interpreter/unit_test.manifest +++ b/ngraph/test/runtime/interpreter/unit_test.manifest @@ -3,6 +3,7 @@ fake_quantize_pdpd INTERPRETER.onnx_model_quant_conv_linear INTERPRETER.onnx_top_k_opset_10 +# Failed in ubuntu18 i386 reduce_sum_large_1d_to_scalar reduce_sum_keep_large_1d_to_scalar @@ -31,11 +32,8 @@ INTERPRETER.onnx_model_matmul_integer_4d_no_zero_point # Disabled tests for disabled reference implementations INTERPRETER.onnx_dyn_shapes_expand_uint16_dyn_shape INTERPRETER.sum_2d_to_scalar_int8 -INTERPRETER.sum_trivial_in_double INTERPRETER.reduce_sum_2d_to_scalar_int8 -INTERPRETER.reduce_sum_trivial_in_double INTERPRETER.reduce_sum_keep_2d_to_scalar_int8 -INTERPRETER.reduce_sum_keep_trivial_in_double INTERPRETER.reduce_product_to_scalar_int8 INTERPRETER.reduce_product_keep_to_scalar_int8 INTERPRETER.reduce_min_to_scalar_int8 
diff --git a/ngraph/test/type_prop/reduce_l1.cpp b/ngraph/test/type_prop/reduce_l1.cpp index 07947615747..03f22e20f82 100644 --- a/ngraph/test/type_prop/reduce_l1.cpp +++ b/ngraph/test/type_prop/reduce_l1.cpp @@ -2,47 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "util/type_prop.hpp" +#include "reduce_ops.hpp" -using namespace std; -using namespace ngraph; - -TEST(type_prop, reduce_l1_v4_axis_out_of_range) -{ - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto axes = make_shared(element::i64, Shape{2}, vector{2, 3}); - try - { - auto reduce_sum = make_shared(arg, axes); - // Should have thrown, so fail if it didn't - FAIL() << "Incorrect axes values exception not thrown"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis (")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, reduce_l1_v4_shape_if_keep_dims) -{ - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); - auto keep_dims = true; - auto reduce_prod = make_shared(arg, axes, keep_dims); - ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1})); -} - -TEST(type_prop, reduce_l1_v4_shape_if_not_keep_dims) -{ - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); - auto keep_dims = false; - auto reduce_prod = make_shared(arg, axes, keep_dims); - ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3})); -} +using Type = ::testing::Types; +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_l1, ReduceTest, Type); +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_l1_et, ReduceArithmeticTest, Type); diff --git a/ngraph/test/type_prop/reduce_l2.cpp b/ngraph/test/type_prop/reduce_l2.cpp index f2c2541b385..64a9c02fb87 100644 --- a/ngraph/test/type_prop/reduce_l2.cpp +++ b/ngraph/test/type_prop/reduce_l2.cpp @@ -2,47 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "util/type_prop.hpp" +#include "reduce_ops.hpp" -using namespace std; -using namespace ngraph; - -TEST(type_prop, reduce_l2_v4_axis_out_of_range) -{ - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto axes = make_shared(element::i64, Shape{2}, vector{2, 3}); - try - { - auto reduce_sum = make_shared(arg, axes); - // Should have thrown, so fail if it didn't - FAIL() << "Incorrect axes values exception not thrown"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis (")); - } - catch (...) 
- { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, reduce_l2_v4_shape_if_keep_dims) -{ - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); - auto keep_dims = true; - auto reduce_prod = make_shared(arg, axes, keep_dims); - ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1})); -} - -TEST(type_prop, reduce_l2_v4_shape_if_not_keep_dims) -{ - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); - auto keep_dims = false; - auto reduce_prod = make_shared(arg, axes, keep_dims); - ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3})); -} +using Type = ::testing::Types; +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_l2, ReduceTest, Type); +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_l2_et, ReduceArithmeticTest, Type); diff --git a/ngraph/test/type_prop/reduce_logical_and.cpp b/ngraph/test/type_prop/reduce_logical_and.cpp new file mode 100644 index 00000000000..f6e051bb50f --- /dev/null +++ b/ngraph/test/type_prop/reduce_logical_and.cpp @@ -0,0 +1,9 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "reduce_ops.hpp" + +using Type = ::testing::Types; +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_logical_and, ReduceTest, Type); +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_logical_and_et, ReduceLogicalTest, Type); diff --git a/ngraph/test/type_prop/reduce_logical_or.cpp b/ngraph/test/type_prop/reduce_logical_or.cpp new file mode 100644 index 00000000000..b73d1789a20 --- /dev/null +++ b/ngraph/test/type_prop/reduce_logical_or.cpp @@ -0,0 +1,9 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "reduce_ops.hpp" + +using Type = ::testing::Types; +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_logical_or, ReduceTest, Type); +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_logical_or_et, ReduceLogicalTest, Type); diff --git a/ngraph/test/type_prop/reduce_max.cpp b/ngraph/test/type_prop/reduce_max.cpp new file mode 100644 index 00000000000..d907d1066af --- /dev/null +++ b/ngraph/test/type_prop/reduce_max.cpp @@ -0,0 +1,9 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "reduce_ops.hpp" + +using Type = ::testing::Types; +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_max, ReduceTest, Type); +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_max_et, ReduceArithmeticTest, Type); diff --git a/ngraph/test/type_prop/reduce_mean.cpp b/ngraph/test/type_prop/reduce_mean.cpp index 5f184fab97d..607e8392f1e 100644 --- a/ngraph/test/type_prop/reduce_mean.cpp +++ b/ngraph/test/type_prop/reduce_mean.cpp @@ -2,71 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "util/type_prop.hpp" +#include "reduce_ops.hpp" -using namespace std; -using namespace ngraph; - -TEST(type_prop, reduce_mean_v1_axis_out_of_range) -{ - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto axes = make_shared(element::i64, Shape{2}, vector{2, 3}); - try - { - auto reduce_sum = make_shared(arg, axes); - - // Should have thrown, so fail if it didn't - FAIL() << "Incorrect axes values exception not thrown"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis (")); - } - catch (...) 
- { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, reduce_mean_v1_shape_if_keep_dims) -{ - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); - auto keep_dims = true; - auto reduce_prod = make_shared(arg, axes, keep_dims); - ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1})); -} - -TEST(type_prop, reduce_mean_v1_shape_if_not_keep_dims) -{ - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); - auto keep_dims = false; - auto reduce_prod = make_shared(arg, axes, keep_dims); - ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3})); -} - -TEST(type_prop, reduce_mean_dynamic_shape) -{ - auto arg = - make_shared(element::f32, PartialShape{3, 4, 5, Dimension::dynamic()}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); - auto keep_dims = true; - auto reduce_prod = make_shared(arg, axes, keep_dims); - ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible( - PartialShape{3, 1, 1, Dimension::dynamic()})); -} - -TEST(type_prop, reduce_mean_reduce_dynamic_shape) -{ - auto arg = - make_shared(element::f32, PartialShape{3, 4, 5, Dimension::dynamic()}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 3}); - auto keep_dims = true; - auto reduce_prod = make_shared(arg, axes, keep_dims); - - ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible( - PartialShape{3, 1, 5, Dimension::dynamic()})); -} +using Type = ::testing::Types; +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_mean, ReduceTest, Type); +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_mean_et, ReduceArithmeticTest, Type); diff --git a/ngraph/test/type_prop/reduce_min.cpp b/ngraph/test/type_prop/reduce_min.cpp new file mode 100644 index 00000000000..d02f12f8a0a --- /dev/null +++ b/ngraph/test/type_prop/reduce_min.cpp @@ -0,0 +1,9 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "reduce_ops.hpp" + +using Type = ::testing::Types; +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_min, ReduceTest, Type); +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_min_et, ReduceArithmeticTest, Type); diff --git a/ngraph/test/type_prop/reduce_ops.hpp b/ngraph/test/type_prop/reduce_ops.hpp new file mode 100644 index 00000000000..9aa7503e448 --- /dev/null +++ b/ngraph/test/type_prop/reduce_ops.hpp @@ -0,0 +1,388 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "util/type_prop.hpp" + +using namespace std; +using namespace ngraph; + +struct ReduceParams +{ + PartialShape data_ps; + element::Type data_et; + Shape axes_ps; + std::vector axes; + element::Type axes_et; + bool keep_dims; +}; + +template +static std::shared_ptr makeReduceOp(const ReduceParams& p, bool axes_as_param = false) +{ + auto in_data = make_shared(p.data_et, p.data_ps); + shared_ptr in_axes; + if (axes_as_param) + { + in_axes = make_shared(p.axes_et, p.axes_ps); + } + else + { + if (shape_size(p.axes_ps) != p.axes.size()) + { + throw ngraph_error("Axes shape does not match with axes elements"); + } + in_axes = make_shared(p.axes_et, p.axes_ps, p.axes); + } + return make_shared(in_data, in_axes, p.keep_dims); +} + +template +class ReduceTest : public testing::Test +{ +}; + +TYPED_TEST_CASE_P(ReduceTest); + +TYPED_TEST_P(ReduceTest, reduce_basic_shape_infer) 
+{ + PartialShape data_ps{3, 4, 5}; + element::Type data_et = element::dynamic; + + Shape axes_ps{2}; + element::Type axes_et = element::i64; + std::vector axes{1, 2}; + + bool keep_dims = false; + + PartialShape out_ps{3}; + + const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims}; + auto reduce_op = makeReduceOp(params); + ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps)); +} + +TYPED_TEST_P(ReduceTest, reduce_basic_shape_infer_keep_dims) +{ + PartialShape data_ps{3, 4, 5}; + element::Type data_et = element::dynamic; + + Shape axes_ps{2}; + element::Type axes_et = element::i64; + std::vector axes{1, 2}; + + bool keep_dims = true; + + PartialShape out_ps{3, 1, 1}; + + const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims}; + auto reduce_op = makeReduceOp(params); + ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps)); +} + +TYPED_TEST_P(ReduceTest, reduce_basic_shape_infer_scalar_axis) +{ + PartialShape data_ps{3, 4, 5}; + element::Type data_et = element::dynamic; + + Shape axes_ps{}; + element::Type axes_et = element::i64; + std::vector axes{1}; + + bool keep_dims = false; + + PartialShape out_ps{3, 5}; + + const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims}; + auto reduce_op = makeReduceOp(params); + ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps)); +} + +TYPED_TEST_P(ReduceTest, reduce_basic_shape_infer_axes_as_param) +{ + PartialShape data_ps{3, 4, 5}; + element::Type data_et = element::dynamic; + + Shape axes_ps{2}; + element::Type axes_et = element::i32; + std::vector axes; + + bool keep_dims = false; + + PartialShape out_ps{PartialShape::dynamic()}; + + const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims}; + bool axes_as_param = true; + auto reduce_op = makeReduceOp(params, axes_as_param); + ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps)); +} + +TYPED_TEST_P(ReduceTest, reduce_dynamic_shape_reduced_axes_static) +{ + PartialShape data_ps{3, 4, 5, Dimension::dynamic()}; + element::Type data_et = element::dynamic; + + Shape axes_ps{2}; + element::Type axes_et = element::i64; + std::vector axes{1, 2}; + + bool keep_dims = false; + + PartialShape out_ps{3, Dimension::dynamic()}; + + const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims}; + auto reduce_op = makeReduceOp(params); + ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps)); +} + +TYPED_TEST_P(ReduceTest, reduce_dynamic_shape_reduced_axes_static_keep_dims) +{ + PartialShape data_ps{3, 4, 5, Dimension::dynamic()}; + element::Type data_et = element::dynamic; + + Shape axes_ps{2}; + element::Type axes_et = element::i64; + std::vector axes{1, 2}; + + bool keep_dims = true; + + PartialShape out_ps{3, 1, 1, Dimension::dynamic()}; + + const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims}; + auto reduce_op = makeReduceOp(params); + ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps)); +} + +TYPED_TEST_P(ReduceTest, reduce_dynamic_shape_reduced_axes_not_static) +{ + PartialShape data_ps{Dimension::dynamic(), 4, 5, Dimension::dynamic()}; + element::Type data_et = element::dynamic; + + Shape axes_ps{2}; + element::Type axes_et = element::i64; + std::vector axes{2, 3}; + + bool keep_dims = false; + + PartialShape out_ps{Dimension::dynamic(), 4}; + + const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims}; + auto reduce_op = makeReduceOp(params); + 
ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps)); +} + +TYPED_TEST_P(ReduceTest, reduce_dynamic_shape_reduced_axes_not_static_keep_dims) +{ + PartialShape data_ps{Dimension::dynamic(), 4, 5, Dimension::dynamic()}; + element::Type data_et = element::dynamic; + + Shape axes_ps{2}; + element::Type axes_et = element::i64; + std::vector axes{2, 3}; + + bool keep_dims = true; + + PartialShape out_ps{Dimension::dynamic(), 4, 1, 1}; + + const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims}; + auto reduce_op = makeReduceOp(params); + ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps)); +} + +TYPED_TEST_P(ReduceTest, reduce_dynamic_shape_data) +{ + PartialShape data_ps{PartialShape::dynamic()}; + element::Type data_et = element::dynamic; + + Shape axes_ps{2}; + element::Type axes_et = element::i64; + std::vector axes{1, 2}; + + bool keep_dims = false; + + PartialShape out_ps{PartialShape::dynamic()}; + + const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims}; + auto reduce_op = makeReduceOp(params); + ASSERT_TRUE(reduce_op->get_output_partial_shape(0).same_scheme(out_ps)); +} + +TYPED_TEST_P(ReduceTest, reduce_invalid_axis_out_of_range) +{ + PartialShape data_ps{1, 2, 3}; + element::Type data_et = element::dynamic; + + Shape axes_ps{2}; + element::Type axes_et = element::i64; + std::vector axes{2, 3}; + + bool keep_dims = false; + + const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims}; + try + { + auto reduce_op = makeReduceOp(params); + FAIL() << "Invalid axes values not detected"; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), "Reduction axis ("); + } + catch (...) + { + FAIL() << "Axes input values validation check failed for unexpected reason"; + } +} + +TYPED_TEST_P(ReduceTest, reduce_invalid_axes_shape) +{ + PartialShape data_ps{1, 2, 3}; + element::Type data_et = element::dynamic; + + Shape axes_ps{2, 1}; + element::Type axes_et = element::i64; + std::vector axes{0, 1}; + + bool keep_dims = true; + + const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims}; + try + { + auto reduce_op = makeReduceOp(params); + FAIL() << "Invalid shape of axes input not detected"; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), "Axes input must be a scalar or 1D input."); + } + catch (...) + { + FAIL() << "Axes input shape validation check failed for unexpected reason"; + } +} + +TYPED_TEST_P(ReduceTest, reduce_invalid_axes_et) +{ + element::Type data_et = element::dynamic; + PartialShape data_ps{1, 2, 3}; + + element::Type axes_et = element::f32; + Shape axes_ps{2}; + std::vector axes{0, 1}; + + bool keep_dims = true; + + const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims}; + try + { + auto reduce_op = makeReduceOp(params); + FAIL() << "Invalid element type of axes input not detected"; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), "Element type of axes input must be integer."); + } + catch (...) 
+ { + FAIL() << "Axes input element type validation check failed for unexpected reason"; + } +} + +REGISTER_TYPED_TEST_CASE_P( + ReduceTest, + reduce_basic_shape_infer, + reduce_basic_shape_infer_keep_dims, + reduce_basic_shape_infer_scalar_axis, + reduce_basic_shape_infer_axes_as_param, + reduce_dynamic_shape_data, + reduce_dynamic_shape_reduced_axes_static, + reduce_dynamic_shape_reduced_axes_static_keep_dims, + reduce_dynamic_shape_reduced_axes_not_static, + reduce_dynamic_shape_reduced_axes_not_static_keep_dims, + reduce_invalid_axis_out_of_range, + reduce_invalid_axes_shape, + reduce_invalid_axes_et); + +template +class ReduceArithmeticTest : public testing::Test +{ +}; + +TYPED_TEST_CASE_P(ReduceArithmeticTest); + +TYPED_TEST_P(ReduceArithmeticTest, reduce_arithmetic_invalid_data_et) +{ + element::Type data_et = element::boolean; + PartialShape data_ps{1, 2, 3}; + + element::Type axes_et = element::i32; + Shape axes_ps{2}; + std::vector axes{0, 1}; + + bool keep_dims = true; + + const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims}; + try + { + auto reduce_op = makeReduceOp(params); + FAIL() << "Invalid element type of data input not detected"; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), "Element type of data input must be numeric."); + } + catch (...) + { + FAIL() << "Data input element type validation check failed for unexpected reason"; + } +} + +REGISTER_TYPED_TEST_CASE_P( + ReduceArithmeticTest, + reduce_arithmetic_invalid_data_et); + +template +class ReduceLogicalTest : public testing::Test +{ +}; + +TYPED_TEST_CASE_P(ReduceLogicalTest); + +TYPED_TEST_P(ReduceLogicalTest, reduce_logical_invalid_data_et) +{ + std::vector element_types{ + element::f32, + element::i32, + element::u32}; + PartialShape data_ps{1, 2, 3}; + + element::Type axes_et = element::i32; + Shape axes_ps{2}; + std::vector axes{0, 1}; + + bool keep_dims = true; + + for (const auto& data_et : element_types) + { + const ReduceParams params{data_ps, data_et, axes_ps, axes, axes_et, keep_dims}; + try + { + auto reduce_op = makeReduceOp(params); + FAIL() << "Invalid element type of data input not detected"; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), "Element type of data input must be boolean."); + } + catch (...) + { + FAIL() << "Data input element type validation check failed for unexpected reason"; + } + } +} + +REGISTER_TYPED_TEST_CASE_P( + ReduceLogicalTest, + reduce_logical_invalid_data_et); diff --git a/ngraph/test/type_prop/reduce_prod.cpp b/ngraph/test/type_prop/reduce_prod.cpp index 243f158d7e2..3a6ea0a9e89 100644 --- a/ngraph/test/type_prop/reduce_prod.cpp +++ b/ngraph/test/type_prop/reduce_prod.cpp @@ -2,48 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "util/type_prop.hpp" +#include "reduce_ops.hpp" -using namespace std; -using namespace ngraph; - -TEST(type_prop, reduce_prod_v1_axis_out_of_range) -{ - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto axes = make_shared(element::i64, Shape{2}, vector{2, 3}); - try - { - auto reduce_prod = make_shared(arg, axes); - - // Should have thrown, so fail if it didn't - FAIL() << "Incorrect axes values exception not thrown"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis (")); - } - catch (...) 
- { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, reduce_prod_v1_shape_if_keep_dims) -{ - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); - auto keep_dims = true; - auto reduce_prod = make_shared(arg, axes, keep_dims); - ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1})); -} - -TEST(type_prop, reduce_prod_v1_shape_if_not_keep_dims) -{ - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); - auto keep_dims = false; - auto reduce_prod = make_shared(arg, axes, keep_dims); - ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3})); -} +using Type = ::testing::Types; +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_prod, ReduceTest, Type); +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_prod_et, ReduceArithmeticTest, Type); diff --git a/ngraph/test/type_prop/reduce_sum.cpp b/ngraph/test/type_prop/reduce_sum.cpp index 0a1f0422a3a..7bfce1dfc70 100644 --- a/ngraph/test/type_prop/reduce_sum.cpp +++ b/ngraph/test/type_prop/reduce_sum.cpp @@ -2,48 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "util/type_prop.hpp" +#include "reduce_ops.hpp" -using namespace std; -using namespace ngraph; - -TEST(type_prop, reduce_sum_v1_axis_out_of_range) -{ - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto axes = make_shared(element::i64, Shape{2}, vector{2, 3}); - try - { - auto reduce_sum = make_shared(arg, axes); - - // Should have thrown, so fail if it didn't - FAIL() << "Incorrect axes values exception not thrown"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis (")); - } - catch (...) 
- { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, reduce_sum_v1_shape_if_keep_dims) -{ - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); - auto keep_dims = true; - auto reduce_prod = make_shared(arg, axes, keep_dims); - ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1})); -} - -TEST(type_prop, reduce_sum_v1_shape_if_not_keep_dims) -{ - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); - auto keep_dims = false; - auto reduce_prod = make_shared(arg, axes, keep_dims); - ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3})); -} +using Type = ::testing::Types; +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_sum, ReduceTest, Type); +INSTANTIATE_TYPED_TEST_CASE_P(type_prop_reduce_sum_et, ReduceArithmeticTest, Type); diff --git a/ngraph/test/type_prop/selu.cpp b/ngraph/test/type_prop/selu.cpp new file mode 100644 index 00000000000..66f89a84f11 --- /dev/null +++ b/ngraph/test/type_prop/selu.cpp @@ -0,0 +1,171 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "util/type_prop.hpp" + +using namespace std; +using namespace ngraph; + +TEST(type_prop, selu_basic_inference_f32_3D) +{ + const auto param = make_shared(element::f32, Shape{1, 32, 32}); + const auto alpha = make_shared(element::f32, Shape{1}); + const auto lambda = make_shared(element::f32, Shape{1}); + const auto selu = make_shared(param, alpha, lambda); + + ASSERT_EQ(selu->get_element_type(), element::f32); + ASSERT_EQ(selu->get_shape(), (Shape{1, 32, 32})); +} + +TEST(type_prop, selu_basic_inference_f16_3D) +{ + const auto param = make_shared(element::f16, Shape{1, 32, 32}); + const auto alpha = make_shared(element::f16, Shape{1}); + const auto lambda = make_shared(element::f16, Shape{1}); + const auto selu = make_shared(param, alpha, lambda); + + ASSERT_EQ(selu->get_element_type(), element::f16); + ASSERT_EQ(selu->get_shape(), (Shape{1, 32, 32})); +} + +TEST(type_prop, selu_basic_inference_f32_5D) +{ + const auto param = make_shared(element::f32, Shape{12, 135, 221, 31, 15}); + const auto alpha = make_shared(element::f32, Shape{1}); + const auto lambda = make_shared(element::f32, Shape{1}); + const auto selu = make_shared(param, alpha, lambda); + + ASSERT_EQ(selu->get_element_type(), element::f32); + ASSERT_EQ(selu->get_shape(), (Shape{12, 135, 221, 31, 15})); +} + +TEST(type_prop, selu_basic_inference_f16_5D) +{ + const auto param = make_shared(element::f16, Shape{12, 135, 221, 31, 15}); + const auto alpha = make_shared(element::f16, Shape{1}); + const auto lambda = make_shared(element::f16, Shape{1}); + const auto selu = make_shared(param, alpha, lambda); + + ASSERT_EQ(selu->get_element_type(), element::f16); + ASSERT_EQ(selu->get_shape(), (Shape{12, 135, 221, 31, 15})); +} + +TEST(type_prop, selu_incompatible_input_type_boolean) +{ + // Invalid data input element type + try + { + auto data = make_shared(element::boolean, Shape{1, 2, 3, 4}); + const auto alpha = make_shared(element::boolean, Shape{1}); + const auto lambda = make_shared(element::boolean, Shape{1}); + auto selu = make_shared(data, alpha, lambda); + // Data input expected to be of numeric type + FAIL() << "Invalid input type not detected"; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), 
std::string("Input element types must be floating-point")); + } + catch (...) + { + FAIL() << "Input type check failed for unexpected reason"; + } +} + +TEST(type_prop, selu_incompatible_input_type_i32) +{ + // Invalid data input element type + try + { + auto data = make_shared(element::i32, Shape{1, 2, 3, 4}); + const auto alpha = make_shared(element::i32, Shape{1}); + const auto lambda = make_shared(element::i32, Shape{1}); + auto selu = make_shared(data, alpha, lambda); + // Data input expected to be of numeric type + FAIL() << "Invalid input type not detected"; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), std::string("Input element types must be floating-point")); + } + catch (...) + { + FAIL() << "Input type check failed for unexpected reason"; + } +} + +TEST(type_prop, selu_incompatible_input_type_u16) +{ + // Invalid data input element type + try + { + auto data = make_shared(element::u16, Shape{1, 2, 3, 4}); + const auto alpha = make_shared(element::u16, Shape{1}); + const auto lambda = make_shared(element::u16, Shape{1}); + auto selu = make_shared(data, alpha, lambda); + // Data input expected to be of numeric type + FAIL() << "Invalid input type not detected"; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), std::string("Input element types must be floating-point")); + } + catch (...) + { + FAIL() << "Input type check failed for unexpected reason"; + } +} + +TEST(type_prop, selu_incompatible_input_types) +{ + // Invalid data input element type + try + { + auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); + const auto alpha = make_shared(element::f32, Shape{1}); + const auto lambda = make_shared(element::u16, Shape{1}); + auto selu = make_shared(data, alpha, lambda); + // Data input expected to be of numeric type + FAIL() << "Inavlid input types not detected"; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), std::string("Input element types do not match")); + } + catch (...) 
+ { + FAIL() << "Input type check failed for unexpected reason"; + } +} + +TEST(type_prop, selu_dynamic_rank_input_shape_2D) +{ + const PartialShape param_shape{Dimension::dynamic(), 10}; + const auto param = std::make_shared(element::f32, param_shape); + const auto alpha = make_shared(element::f32, Shape{2, 1}); + const auto lambda = make_shared(element::f32, Shape{1}); + const auto op = std::make_shared(param, alpha, lambda); + ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(), 10})); +} + +TEST(type_prop, selu_dynamic_rank_input_shape_3D) +{ + const PartialShape param_shape{100, Dimension::dynamic(), 58}; + const auto param = std::make_shared(element::f32, param_shape); + const auto alpha = make_shared(element::f32, Shape{1}); + const auto lambda = make_shared(element::f32, Shape{1}); + const auto op = std::make_shared(param, alpha, lambda); + ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{100, Dimension(), 58})); +} + +TEST(type_prop, selu_dynamic_rank_input_shape_full) +{ + const auto param = std::make_shared(element::f32, PartialShape::dynamic()); + const auto alpha = make_shared(element::f32, Shape{1}); + const auto lambda = make_shared(element::f32, Shape{1}); + const auto op = std::make_shared(param, alpha, lambda); + ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); +} diff --git a/ngraph/test/type_prop/space_to_batch.cpp b/ngraph/test/type_prop/space_to_batch.cpp index cf4b3ffdf7f..18cce1d1648 100644 --- a/ngraph/test/type_prop/space_to_batch.cpp +++ b/ngraph/test/type_prop/space_to_batch.cpp @@ -110,3 +110,104 @@ TEST(type_prop, space_to_batch_dynamic_shape_dynamic_rank) ASSERT_EQ(space_to_batch->get_element_type(), element::f32); ASSERT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape::dynamic()); } + +TEST(type_prop, space_to_batch_invalid_element_type_block_shape) +{ + auto data = make_shared(element::f32, Shape{2, 128}); + auto block_shape = make_shared(element::f32, Shape{2}, vector{1, 5}); + auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); + auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); + + try + { + auto space_to_batch = + make_shared(data, block_shape, pads_begin, pads_end); + // Input element type is float32 + FAIL() << "Invalid f32 element type for block_shape not detected"; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), "block_shape must be an integral number"); + } + catch(...) + { + FAIL() << "Integral element type node validation check failed for unexpected reason"; + } + +} + +TEST(type_prop, space_to_batch_invalid_element_type_pads_begin) +{ + auto data = make_shared(element::f32, Shape{2, 128}); + auto block_shape = make_shared(element::i64, Shape{2}, vector{1, 5}); + auto pads_begin = make_shared(element::f32, Shape{2}, vector{0, 2}); + auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); + + try + { + auto space_to_batch = + make_shared(data, block_shape, pads_begin, pads_end); + // Input element type is float32 + FAIL() << "Invalid f32 element type for pads_begin not detected"; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), "pads_begin must be an integral number but got"); + } + catch(...) 
+ { + FAIL() << "Integral element type node validation check failed for unexpected reason"; + } + +} + +TEST(type_prop, space_to_batch_invalid_element_type_pads_end) +{ + auto data = make_shared(element::f32, Shape{2, 128}); + auto block_shape = make_shared(element::i16, Shape{2}, vector{1, 5}); + auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); + auto pads_end = make_shared(element::f32, Shape{2}, vector{0, 0}); + + try + { + auto space_to_batch = + make_shared(data, block_shape, pads_begin, pads_end); + // Input element type is float32 + FAIL() << "Invalid f32 element type for pads_end not detected"; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), "pads_end must be an integral number but got"); + } + catch(...) + { + FAIL() << "Integral element type node validation check failed for unexpected reason"; + } + +} + +TEST(type_prop, space_to_batch_invalid_value_block_shape) +{ + auto data = make_shared(element::f32, Shape{2, 128}); + auto block_shape = make_shared(element::i64, Shape{2}, vector{-1, -5}); + auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); + auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); + + try + { + auto space_to_batch = + make_shared(data, block_shape, pads_begin, pads_end); + // Input element type is float32 + FAIL() << "Invalid block_shape value not detected"; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), "block_shape values must be greater than 0"); + } + catch(...) + { + FAIL() << "block_shape value node validation check failed for unexpected reason"; + } + +} + diff --git a/ngraph/test/visitors/op/reduce_ops.hpp b/ngraph/test/visitors/op/reduce_ops.hpp index dfbf1b6eba8..4c5a165b2bc 100644 --- a/ngraph/test/visitors/op/reduce_ops.hpp +++ b/ngraph/test/visitors/op/reduce_ops.hpp @@ -24,7 +24,7 @@ TYPED_TEST_CASE_P(ReduceOpsAttrTest); TYPED_TEST_P(ReduceOpsAttrTest, reduce_ops) { Shape in_shape{3, 4, 5}; - element::Type in_et = element::f32; + element::Type in_et = element::dynamic; Shape axes_shape{2}; element::Type axes_et = element::i64; diff --git a/ngraph/test/visitors/op/selu.cpp b/ngraph/test/visitors/op/selu.cpp new file mode 100644 index 00000000000..3a83deaea53 --- /dev/null +++ b/ngraph/test/visitors/op/selu.cpp @@ -0,0 +1,30 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" + +#include "ngraph/ngraph.hpp" +#include "ngraph/op/util/attr_types.hpp" +#include "ngraph/opsets/opset1.hpp" + +#include "util/visitor.hpp" + +using namespace std; +using namespace ngraph; +using ngraph::test::NodeBuilder; + +TEST(attributes, selu_op) +{ + NodeBuilder::get_ops().register_factory(); + const auto data_input = make_shared(element::f32, Shape{1, 2, 3}); + const auto alpha = make_shared(element::f32, Shape{1}); + const auto lambda = make_shared(element::f32, Shape{1}); + + const auto op = make_shared(data_input, alpha, lambda); + + NodeBuilder builder(op); + const auto expected_attr_count = 0; + + EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); +} diff --git a/ngraph/test/visitors/op/space_to_batch.cpp b/ngraph/test/visitors/op/space_to_batch.cpp new file mode 100644 index 00000000000..c105cd2a454 --- /dev/null +++ b/ngraph/test/visitors/op/space_to_batch.cpp @@ -0,0 +1,33 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" + +#include "ngraph/ngraph.hpp" +#include 
"ngraph/op/util/attr_types.hpp" +#include "ngraph/opsets/opset2.hpp" + +#include "util/visitor.hpp" + +using namespace std; +using namespace ngraph; +using ngraph::test::NodeBuilder; + + +TEST(attributes, space_to_batch_op) +{ + using namespace opset2; + + NodeBuilder::get_ops().register_factory(); + auto data = make_shared(element::f32, Shape{2, 128}); + auto block_shape = make_shared(element::i64, Shape{2}, vector{1, 5}); + auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); + auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); + auto op = make_shared(data, block_shape, pads_begin, pads_end); + + NodeBuilder builder(op); + const auto expected_attr_count = 0; + + EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); +} diff --git a/tests/fuzz/README.md b/tests/fuzz/README.md index 5f16f9f9998..19b34e80c17 100644 --- a/tests/fuzz/README.md +++ b/tests/fuzz/README.md @@ -18,14 +18,72 @@ To run fuzzing you will need [LLVM](https://apt.llvm.org/) components: - lld (linker) - libc++ -## Reproducing Failure Found by Fuzzing -1. Build `fuzz` test target: +## Building fuzz tests + +1. Build openvino + +Build openvino with options `ENABLE_FUZZING` and `ENABLE_SANITIZER` enabled. It +is recommended to use clang compiler. + ```bash -cmake -DENABLE_TESTS=ON .. && ninja fuzz +(\ +mkdir -p build && cd build && \ +CC=clang CXX=clang++ cmake .. -DENABLE_FUZZING=ON -DENABLE_SANITIZER=ON -DTREAT_WARNING_AS_ERROR=OFF && \ +cmake --build . \ +) ``` -2. Run fuzzing test passing a failure reproducer as a command-line argument: -``` bash -./read_network-fuzzer crash-reproducer +2. Build fuzz tests + +Build fuzz tests with options `ENABLE_FUZZING` and `ENABLE_SANITIZER` enabled. +You should use the same compiler as was used for the openvino build. + +```bash +(\ +mkdir -p tests/fuzz/build && cd tests/fuzz/build && \ +CC=clang CXX=clang++ cmake .. -DENABLE_FUZZING=ON -DENABLE_SANITIZER=ON -DTREAT_WARNING_AS_ERROR=OFF -DInferenceEngine_DIR=$(pwd)/../../../build && \ +cmake --build . \ +) ``` + +## Running fuzz tests + +1. Prepare fuzzing corpus + +Fuzzing engine needs a set of valid inputs to start fuzzing from. Those files +are called a fuzzing corpus. Place valid inputs for the fuzzing test into +directory. + +Intel employees can get the corpus as described here +https://wiki.ith.intel.com/x/2N42bg. + +2. Run fuzzing + +```bash +./read_network-fuzzer -max_total_time=600 ./read_network-corpus +``` +Consider adding those useful command line options: +- `-jobs=$(nproc)` runs multiple fuzzing jobs in parallel. +- `-rss_limit_mb=0` to ignore out-of-memory issues. + +## Analyzing fuzzing quality + +### Explore code coverage + +To build coverage report after fuzz test execution run: + +``` +llvm-profdata merge -sparse *.profraw -o default.profdata && \ +llvm-cov show ./read_network-fuzzer -instr-profile=default.profdata -format=html -output-dir=read_network-coverage +``` + +## Reproducing findings + +Fuzzing run halts on the first issue identified, prints issue details to stdout and save data to reproduce the issue as a file in the current folder. To debug the issue pass reproducer as command line argument to fuzz test + +```bash +./read_network-fuzzer crash-409b5eeed46a8445b7f7b7a2ce5b60a9ad895e3b +``` + +It is recommended but not required to use binaries built for fuzzing to debug the issues. A binaries built without `ENABLE_FUZZING` options can also be used to reproduce and debug the issues. 
\ No newline at end of file diff --git a/tests/fuzz/fuzz-testhelper/CMakeLists.txt b/tests/fuzz/fuzz-testhelper/CMakeLists.txt index 2c47dbd2d3f..52e92798d0c 100644 --- a/tests/fuzz/fuzz-testhelper/CMakeLists.txt +++ b/tests/fuzz/fuzz-testhelper/CMakeLists.txt @@ -6,7 +6,7 @@ set(TARGET_NAME fuzz-testhelper) file( GLOB SRC_FILES - ${CMAKE_CURRENT_SOURCE_DIR}/*-testhelper.cc) + ${CMAKE_CURRENT_SOURCE_DIR}/*.cc) add_library( ${TARGET_NAME} STATIC diff --git a/tests/fuzz/fuzz-testhelper/fuzz-utils.cc b/tests/fuzz/fuzz-testhelper/fuzz-utils.cc new file mode 100644 index 00000000000..16e63e6576d --- /dev/null +++ b/tests/fuzz/fuzz-testhelper/fuzz-utils.cc @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "fuzz-utils.h" +#include +#include +#include +#include +#ifndef _WIN32 +#include +#endif // _WIN32 + +MemoryFile::MemoryFile(const void *data, size_t size) { +#ifdef _WIN32 + throw std::exception("MemoryFile is not implemented for Windows"); +#else // _WIN32 + m_name = strdup("/dev/shm/fuzz-XXXXXX"); + if (!m_name) + throw std::bad_alloc(); + int fd = mkstemp(m_name); + if (size) { + size_t nbytes = write(fd, data, size); + if (nbytes != size) { + free(m_name); + close(fd); + throw std::runtime_error("Failed to write " + std::to_string(size) + + " bytes to " + m_name); + } + } + close(fd); +#endif // _WIN32 +} + +MemoryFile::~MemoryFile() { +#ifndef _WIN32 + unlink(m_name); + free(m_name); +#endif // _WIN32 +} diff --git a/tests/fuzz/fuzz-testhelper/fuzz-utils.h b/tests/fuzz/fuzz-testhelper/fuzz-utils.h new file mode 100644 index 00000000000..f167587eb65 --- /dev/null +++ b/tests/fuzz/fuzz-testhelper/fuzz-utils.h @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +class MemoryFile { + public: + /// Create a memory backed file + MemoryFile(const void *data, size_t size); + /// Delete memory backed file + ~MemoryFile(); + + /// Get path to a file. + const char *name() { return m_name; } + + private: + char *m_name; +}; diff --git a/tests/fuzz/src/CMakeLists.txt b/tests/fuzz/src/CMakeLists.txt index b58a2d018aa..b9400d9e7e1 100644 --- a/tests/fuzz/src/CMakeLists.txt +++ b/tests/fuzz/src/CMakeLists.txt @@ -9,11 +9,14 @@ add_custom_target(fuzz) # Fuzz test target name is source file name without extension. 
FILE(GLOB tests "*-fuzzer.cc") +add_subdirectory(../../../thirdparty/cnpy ${CMAKE_CURRENT_BINARY_DIR}/cnpy) +add_subdirectory(../../../thirdparty/zlib ${CMAKE_CURRENT_BINARY_DIR}/zlib) + foreach(test_source ${tests}) get_filename_component(test_name ${test_source} NAME_WE) add_fuzzer(${test_name} ${test_source}) - target_link_libraries(${test_name} PRIVATE IE::inference_engine) + target_link_libraries(${test_name} PRIVATE IE::inference_engine cnpy zlib) add_dependencies(fuzz ${test_name}) endforeach() diff --git a/tests/fuzz/src/cnpy_npy_load-fuzzer.cc b/tests/fuzz/src/cnpy_npy_load-fuzzer.cc new file mode 100644 index 00000000000..257dc22908b --- /dev/null +++ b/tests/fuzz/src/cnpy_npy_load-fuzzer.cc @@ -0,0 +1,21 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include "fuzz-utils.h" + + +extern "C" int LLVMFuzzerTestOneInput(const uint8_t * inputData, size_t inputSize) { + MemoryFile file(inputData, inputSize); + + try { + cnpy::NpyArray array = cnpy::npy_load(file.name()); + } + catch (const std::exception&) { + return 0; // fail gracefully on expected exceptions + } + + return 0; +} \ No newline at end of file diff --git a/tests/stress_tests/scripts/memcheck_upload.py b/tests/stress_tests/scripts/memcheck_upload.py index 663a513c856..a3eeb3906b4 100644 --- a/tests/stress_tests/scripts/memcheck_upload.py +++ b/tests/stress_tests/scripts/memcheck_upload.py @@ -67,6 +67,7 @@ def metadata_from_manifest(manifest): 'commit_sha': repo_trigger['revision'], 'commit_date': repo_trigger['commit_time'], 'repo_url': repo_trigger['url'], + 'branch': repo_trigger['branch'], 'target_branch': repo_trigger['target_branch'], 'event_type': manifest['components'][PRODUCT_NAME]['build_event'].lower(), f'{PRODUCT_NAME}_version': manifest['components'][PRODUCT_NAME]['version'], diff --git a/tests/time_tests/test_runner/conftest.py b/tests/time_tests/test_runner/conftest.py index d0cb928ce07..f60be60d963 100644 --- a/tests/time_tests/test_runner/conftest.py +++ b/tests/time_tests/test_runner/conftest.py @@ -325,10 +325,11 @@ def manifest_metadata(request): "repo_url": {"type": "string"}, "commit_sha": {"type": "string"}, "commit_date": {"type": "string"}, + "branch": {"type": "string"}, "target_branch": {"type": "string"}, "version": {"type": "string"} }, - "required": ["product_type", "repo_url", "commit_sha", "commit_date", "target_branch", "version"], + "required": ["product_type", "repo_url", "commit_sha", "commit_date", "branch", "target_branch", "version"], "additionalProperties": false } """ diff --git a/tests/time_tests/test_runner/utils.py b/tests/time_tests/test_runner/utils.py index 990af4c9b6b..c68cc4fd7c2 100644 --- a/tests/time_tests/test_runner/utils.py +++ b/tests/time_tests/test_runner/utils.py @@ -57,7 +57,8 @@ def metadata_from_manifest(manifest: Path): 'commit_sha': repo_trigger['revision'], 'commit_date': repo_trigger['commit_time'], 'repo_url': repo_trigger['url'], - 'target_branch': repo_trigger['branch'], + 'branch': repo_trigger['branch'], + 'target_branch': repo_trigger['target_branch'], 'version': manifest['components'][PRODUCT_NAME]['version'] } diff --git a/thirdparty/cnpy/cnpy.cpp b/thirdparty/cnpy/cnpy.cpp index 26d0614bca1..ed277deb5fe 100644 --- a/thirdparty/cnpy/cnpy.cpp +++ b/thirdparty/cnpy/cnpy.cpp @@ -90,7 +90,9 @@ void cnpy::parse_npy_header(unsigned char* buffer,size_t& word_size, std::vector //byte order code | stands for not applicable. 
//not sure when this applies except for byte array loc1 = header.find("descr")+9; - bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false); + bool littleEndian = false; + if (loc1 < header.size()) + littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false); assert(littleEndian); //char type = header[loc1+1]; @@ -148,7 +150,9 @@ void cnpy::parse_npy_header(FILE* fp, size_t& word_size, std::vector& sh if (loc1 == std::string::npos) throw std::runtime_error("parse_npy_header: failed to find header keyword: 'descr'"); loc1 += 9; - bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false); + bool littleEndian = false; + if (loc1 < header.size()) + littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false); assert(littleEndian); //char type = header[loc1+1]; diff --git a/thirdparty/cnpy/cnpy.h b/thirdparty/cnpy/cnpy.h index 750251f480e..e8935e8937e 100644 --- a/thirdparty/cnpy/cnpy.h +++ b/thirdparty/cnpy/cnpy.h @@ -27,6 +27,11 @@ namespace cnpy { { num_vals = 1; for(size_t i = 0;i < shape.size();i++) num_vals *= shape[i]; + if (word_size && + num_vals > std::vector().max_size() / word_size) + throw std::length_error("NpyArray of " + std::to_string(num_vals) + + "*" + std::to_string(word_size) + + " elements is too big."); data_holder = std::shared_ptr>( new std::vector(num_vals * word_size)); } diff --git a/tools/benchmark/main.py b/tools/benchmark/main.py index 26ef6246f0c..0cc0a511848 100644 --- a/tools/benchmark/main.py +++ b/tools/benchmark/main.py @@ -150,7 +150,7 @@ def run(args): set_throughput_streams() if MULTI_DEVICE_NAME in device_name and CPU_DEVICE_NAME in device_name: - logger.warning("Turn on GPU trottling. Multi-device execution with the CPU + GPU performs best with GPU trottling hint, " + + logger.warning("Turn on GPU throttling. Multi-device execution with the CPU + GPU performs best with GPU throttling hint, " + "which releases another CPU thread (that is otherwise used by the GPU driver for active polling)") config[device]['GPU_PLUGIN_THROTTLE'] = '1' elif device == MYRIAD_DEVICE_NAME: