diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/minimum_maximum.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/minimum_maximum.cpp
new file mode 100644
index 00000000000..407b52a78e6
--- /dev/null
+++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/minimum_maximum.cpp
@@ -0,0 +1,52 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include "shared_test_classes/single_layer/minimum_maximum.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+TEST_P(MaxMinLayerTest, Serialize) { Serialize(); }
+
+// Pairs of input shapes: {shape of input A, shape of input B}; the last
+// entries exercise NumPy-style broadcasting between the two inputs.
+const std::vector<std::vector<std::vector<size_t>>> inShapes = {
+    {{2}, {1}},
+    {{1, 1, 1, 3}, {1}},
+    {{1, 2, 4}, {1}},
+    {{1, 4, 4}, {1}},
+    {{1, 4, 4, 1}, {1}},
+    {{256, 56}, {256, 56}},
+    {{8, 1, 6, 1}, {7, 1, 5}},
+};
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16,
+};
+
+const std::vector<ngraph::helpers::MinMaxOpType> opType = {
+    ngraph::helpers::MinMaxOpType::MINIMUM,
+    ngraph::helpers::MinMaxOpType::MAXIMUM,
+};
+
+// Second input provided either as a Constant node or as a Parameter.
+const std::vector<ngraph::helpers::InputLayerType> inputType = {
+    ngraph::helpers::InputLayerType::CONSTANT,
+    ngraph::helpers::InputLayerType::PARAMETER,
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_maximum, MaxMinLayerTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(inShapes),
+                                ::testing::ValuesIn(opType),
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                ::testing::Values(InferenceEngine::Layout::ANY),
+                                ::testing::Values(InferenceEngine::Layout::ANY),
+                                ::testing::ValuesIn(inputType),
+                                ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                        MaxMinLayerTest::getTestCaseName);
+} // namespace
diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt
index 48651cadf4f..89f2d9d1cec 100644
--- a/ngraph/test/CMakeLists.txt
+++ b/ngraph/test/CMakeLists.txt
@@ -157,6 +157,7 @@ set(SRC
     type_prop/loop.cpp
     type_prop/matmul.cpp
     type_prop/max_pool.cpp
+    type_prop/minimum.cpp
     type_prop/mish.cpp
     type_prop/mvn.cpp
     type_prop/non_max_suppression.cpp
diff --git a/ngraph/test/type_prop/minimum.cpp b/ngraph/test/type_prop/minimum.cpp
new file mode 100644
index 00000000000..ea985e1348f
--- /dev/null
+++ b/ngraph/test/type_prop/minimum.cpp
@@ -0,0 +1,265 @@
+//*****************************************************************************
+// Copyright (C) 2021 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include "gtest/gtest.h"
+#include "ngraph/ngraph.hpp"
+#include "util/type_prop.hpp"
+
+using namespace std;
+using namespace ngraph;
+
+TEST(type_prop, minimum_2D_same)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{2, 2});
+    auto B = make_shared<op::Parameter>(element::f32, Shape{2, 2});
+
+    auto minimum = make_shared<op::v1::Minimum>(A, B);
+
+    ASSERT_EQ(minimum->get_element_type(), element::f32);
+    ASSERT_EQ(minimum->get_shape(), (Shape{2, 2}));
+}
+
+TEST(type_prop, minimum_4D_same)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{2, 2, 3, 3});
+    auto B = make_shared<op::Parameter>(element::f32, Shape{2, 2, 3, 3});
+
+    auto minimum = make_shared<op::v1::Minimum>(A, B);
+
+    ASSERT_EQ(minimum->get_element_type(), element::f32);
+    ASSERT_EQ(minimum->get_shape(), (Shape{2, 2, 3, 3}));
+}
+
+TEST(type_prop, minimum_default_autobroadcast)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{2, 2});
+    auto B = make_shared<op::Parameter>(element::f32, Shape{2, 2});
+
+    auto minimum = make_shared<op::v1::Minimum>(A, B);
+
+    ASSERT_EQ(minimum->get_element_type(), element::f32);
+    ASSERT_EQ(minimum->get_shape(), (Shape{2, 2}));
+    ASSERT_EQ(minimum->get_autob(), op::AutoBroadcastType::NUMPY);
+}
+
+TEST(type_prop, minimum_no_autobroadcast)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{2, 2});
+    auto B = make_shared<op::Parameter>(element::f32, Shape{2, 2});
+
+    auto minimum = make_shared<op::v1::Minimum>(A, B, op::AutoBroadcastSpec::NONE);
+
+    ASSERT_EQ(minimum->get_element_type(), element::f32);
+    ASSERT_EQ(minimum->get_shape(), (Shape{2, 2}));
+    ASSERT_EQ(minimum->get_autob(), op::AutoBroadcastType::NONE);
+}
+
+TEST(type_prop, minimum_4D_x_scalar_numpy_broadcast)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4, 5});
+    auto B = make_shared<op::Parameter>(element::f32, Shape{1});
+
+    auto minimum = make_shared<op::v1::Minimum>(A, B);
+
+    ASSERT_EQ(minimum->get_element_type(), element::f32);
+    ASSERT_EQ(minimum->get_shape(), (Shape{2, 3, 4, 5}));
+}
+
+TEST(type_prop, minimum_4D_x_1D_numpy_broadcast)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4, 5});
+    auto B = make_shared<op::Parameter>(element::f32, Shape{5});
+
+    auto minimum = make_shared<op::v1::Minimum>(A, B);
+
+    ASSERT_EQ(minimum->get_element_type(), element::f32);
+    ASSERT_EQ(minimum->get_shape(), (Shape{2, 3, 4, 5}));
+}
+
+TEST(type_prop, minimum_2D_x_4D_numpy_broadcast)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{4, 5});
+    auto B = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4, 5});
+
+    auto minimum = make_shared<op::v1::Minimum>(A, B);
+
+    ASSERT_EQ(minimum->get_element_type(), element::f32);
+    ASSERT_EQ(minimum->get_shape(), (Shape{2, 3, 4, 5}));
+}
+
+TEST(type_prop, minimum_3D_x_4D_numpy_broadcast)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{1, 4, 5});
+    auto B = make_shared<op::Parameter>(element::f32, Shape{2, 3, 1, 1});
+
+    auto minimum = make_shared<op::v1::Minimum>(A, B);
+
+    ASSERT_EQ(minimum->get_element_type(), element::f32);
+    ASSERT_EQ(minimum->get_shape(), (Shape{2, 3, 4, 5}));
+}
+
+TEST(type_prop, minimum_4D_x_3D_numpy_broadcast)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{8, 1, 6, 1});
+    auto B = make_shared<op::Parameter>(element::f32, Shape{7, 1, 5});
+
+    auto minimum = make_shared<op::v1::Minimum>(A, B);
+
+    ASSERT_EQ(minimum->get_element_type(), element::f32);
+    ASSERT_EQ(minimum->get_shape(), (Shape{8, 7, 6, 5}));
+    ASSERT_EQ(minimum->get_autob(), op::AutoBroadcastType::NUMPY);
+}
+
+TEST(type_prop, minimum_incompatible_element_types)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{2, 2, 3, 3});
+    auto B = make_shared<op::Parameter>(element::i32, Shape{2, 2, 3, 3});
+
+    try
+    {
+        auto minimum = make_shared<op::v1::Minimum>(A, B);
+        // Should have thrown, so fail if it didn't
+        FAIL() << "Incompatible element types not detected";
+    }
+    catch (const NodeValidationFailure& error)
+    {
+        EXPECT_HAS_SUBSTRING(error.what(), std::string("Argument element types are inconsistent"));
+    }
+    catch (...)
+    {
+        FAIL() << "Minimum element type validation failed for unexpected reason";
+    }
+}
+
+TEST(type_prop, minimum_incompatible_boolean_type)
+{
+    auto A = make_shared<op::Parameter>(element::boolean, Shape{2, 2, 3, 3});
+    auto B = make_shared<op::Parameter>(element::boolean, Shape{2, 2, 3, 3});
+
+    try
+    {
+        auto minimum = make_shared<op::v1::Minimum>(A, B);
+        // Should have thrown, so fail if it didn't
+        FAIL() << "Incompatible boolean type not detected";
+    }
+    catch (const NodeValidationFailure& error)
+    {
+        EXPECT_HAS_SUBSTRING(error.what(),
+                             std::string("Arguments cannot have boolean element type"));
+    }
+    catch (...)
+    {
+        FAIL() << "Minimum element type validation failed for unexpected reason";
+    }
+}
+
+TEST(type_prop, minimum_1D_x_1D_incompatible)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{3});
+    auto B = make_shared<op::Parameter>(element::f32, Shape{4});
+
+    try
+    {
+        auto minimum = make_shared<op::v1::Minimum>(A, B);
+        // Should have thrown, so fail if it didn't
+        FAIL() << "Incompatible matrix dimensions not detected. ";
+    }
+    catch (const ngraph_error& error)
+    {
+        EXPECT_HAS_SUBSTRING(error.what(), std::string("Argument shapes are inconsistent"));
+    }
+    catch (...)
+    {
+        FAIL() << "Minimum shape validation failed for unexpected reason";
+    }
+}
+
+TEST(type_prop, minimum_3D_x_3D_incompatible)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{3, 5, 6});
+    auto B = make_shared<op::Parameter>(element::f32, Shape{4, 10, 12});
+
+    try
+    {
+        auto minimum = make_shared<op::v1::Minimum>(A, B);
+        // Should have thrown, so fail if it didn't
+        FAIL() << "Incompatible matrix dimensions not detected. ";
+    }
+    catch (const ngraph_error& error)
+    {
+        EXPECT_HAS_SUBSTRING(error.what(), std::string("Argument shapes are inconsistent"));
+    }
+    catch (...)
+    {
+        FAIL() << "Minimum shape validation failed for unexpected reason";
+    }
+}
+
+TEST(type_prop, minimum_5D_x_5D_incompatible)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{389, 112, 12});
+    auto B = make_shared<op::Parameter>(element::f32, Shape{389, 112, 19});
+
+    try
+    {
+        auto minimum = make_shared<op::v1::Minimum>(A, B);
+        // Should have thrown, so fail if it didn't
+        FAIL() << "Incompatible matrix dimensions not detected. ";
+    }
+    catch (const ngraph_error& error)
+    {
+        EXPECT_HAS_SUBSTRING(error.what(), std::string("Argument shapes are inconsistent"));
+    }
+    catch (...)
+    {
+        FAIL() << "Minimum shape validation failed for unexpected reason";
+    }
+}
+
+TEST(type_prop, minimum_3D_dynamic_shape)
+{
+    Dimension dynamic = Dimension::dynamic();
+    auto A = make_shared<op::Parameter>(element::f32, PartialShape{dynamic, dynamic, 6});
+    auto B = make_shared<op::Parameter>(element::f32, PartialShape{dynamic, dynamic, 6});
+
+    auto minimum = make_shared<op::v1::Minimum>(A, B);
+
+    ASSERT_EQ(minimum->get_element_type(), element::f32);
+    ASSERT_EQ(minimum->get_output_partial_shape(0), (PartialShape{dynamic, dynamic, 6}));
+}
+
+TEST(type_prop, minimum_5D_dynamic_shape)
+{
+    Dimension dynamic = Dimension::dynamic();
+    auto A =
+        make_shared<op::Parameter>(element::f32, PartialShape{dynamic, 4, dynamic, dynamic, 6});
+    auto B =
+        make_shared<op::Parameter>(element::f32, PartialShape{dynamic, 4, dynamic, dynamic, 6});
+
+    auto minimum = make_shared<op::v1::Minimum>(A, B);
+
+    ASSERT_EQ(minimum->get_element_type(), element::f32);
+    ASSERT_EQ(minimum->get_output_partial_shape(0),
+              (PartialShape{dynamic, 4, dynamic, dynamic, 6}));
+}
+
+TEST(type_prop, minimum_full_dynamic_shape)
+{
+    auto param = std::make_shared<op::Parameter>(element::f64, PartialShape::dynamic());
+    const auto op = std::make_shared<op::v1::Minimum>(param, param);
+    ASSERT_EQ(op->get_element_type(), element::f64);
+    ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
+}