diff --git a/.gitattributes b/.gitattributes index de9c0b51d76..78133600185 100644 --- a/.gitattributes +++ b/.gitattributes @@ -2,7 +2,6 @@ # Set default behavior to automatically normalize line endings. ############################################################################### * text=auto - ############################################################################### # Set default behavior for command prompt diff. # @@ -11,9 +10,7 @@ # Note: This is only used by command line ############################################################################### #*.cs diff=csharp - *.py text eol=lf - ############################################################################### # Set the merge driver for project and solution files # @@ -36,7 +33,6 @@ #*.modelproj merge=binary #*.sqlproj merge=binary #*.wwaproj merge=binary - ############################################################################### # behavior for image files # @@ -45,7 +41,6 @@ #*.jpg binary #*.png binary #*.gif binary - ############################################################################### # diff behavior for common document formats # @@ -63,9 +58,9 @@ #*.PDF diff=astextplain #*.rtf diff=astextplain #*.RTF diff=astextplain - *.PNG filter=lfs diff=lfs merge=lfs -text *.png filter=lfs diff=lfs merge=lfs -text *.jpg filter=lfs diff=lfs merge=lfs -text *.gif filter=lfs diff=lfs merge=lfs -text *.vsdx filter=lfs diff=lfs merge=lfs -text +*.bmp filter=lfs diff=lfs merge=lfs -text diff --git a/cmake/developer_package/packaging.cmake b/cmake/developer_package/packaging.cmake index fd7d39a35ec..4cb21210d4a 100644 --- a/cmake/developer_package/packaging.cmake +++ b/cmake/developer_package/packaging.cmake @@ -13,9 +13,9 @@ include(CPackComponent) set(IE_CPACK_IE_DIR deployment_tools/inference_engine) function(ie_cpack_set_library_dir) if(WIN32) - set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER}/${CMAKE_BUILD_TYPE} PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/bin/${ARCH_FOLDER}/${CMAKE_BUILD_TYPE} PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER}/${CMAKE_BUILD_TYPE} PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/bin/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE) else() set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE) set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE) diff --git a/cmake/developer_package/version.cmake b/cmake/developer_package/version.cmake index e807a3ea31f..054bc10c78e 100644 --- a/cmake/developer_package/version.cmake +++ b/cmake/developer_package/version.cmake @@ -31,19 +31,18 @@ macro(ie_parse_ci_build_number) set(IE_VERSION_MAJOR ${CMAKE_MATCH_1}) set(IE_VERSION_MINOR ${CMAKE_MATCH_2}) set(IE_VERSION_PATCH ${CMAKE_MATCH_3}) - set(has_ci_version ON) - else() - set(IE_VERSION_MAJOR 0) - set(IE_VERSION_MINOR 0) - set(IE_VERSION_PATCH 0) endif() if(NOT DEFINED repo_root) message(FATAL_ERROR "repo_root is not defined") endif() - if(DEFINED IEDevScripts_DIR AND DEFINED OpeenVINO_SOURCE_DIR AND NOT DEFINED custom_build) - set(ie_version_hpp "${IE_MAIN_SOURCE_DIR}/include/ie_version.hpp") + macro(ie_get_hpp_version) + if(NOT DEFINED OpenVINO_SOURCE_DIR) + return() + endif() + + set(ie_version_hpp "${OpenVINO_SOURCE_DIR}/inference-engine/include/ie_version.hpp") if(NOT EXISTS ${ie_version_hpp}) message(FATAL_ERROR "File
ie_version.hpp with IE_VERSION definitions is not found") endif() @@ -57,6 +56,13 @@ macro(ie_parse_ci_build_number) string(REGEX REPLACE ".+IE_VERSION_PATCH[ ]+([0-9]+).*" "\\1" IE_VERSION_PATCH_HPP "${IE_VERSION_PARTS}") + set(ie_hpp_version_is_found ON) + endmacro() + + # detect OpenVINO version via ie_version.hpp + ie_get_hpp_version() + + if(ie_hpp_version_is_found) foreach(var IE_VERSION_MAJOR IE_VERSION_MINOR IE_VERSION_PATCH) if(DEFINED ${var} AND NOT ${var} EQUAL ${var}_HPP) message(FATAL_ERROR "${var} parsed from CI_BUILD_NUMBER (${${var}}) \ @@ -66,13 +72,10 @@ macro(ie_parse_ci_build_number) set(${var} ${${var}_HPP}) endif() endforeach() - elseif(has_ci_version) - message(WARNING "OpeenVINO_SOURCE_DIR is not defined. No way to compare versions") - else() - message(WARNING "No way to detect OpenVINO version. Supposing 0.0.0.0") endif() set(IE_VERSION "${IE_VERSION_MAJOR}.${IE_VERSION_MINOR}.${IE_VERSION_PATCH}") + message(STATUS "OpenVINO version is ${IE_VERSION}") endmacro() if (DEFINED ENV{CI_BUILD_NUMBER}) diff --git a/docs/IE_DG/img/applying_low_latency.png b/docs/IE_DG/img/applying_low_latency.png old mode 100755 new mode 100644 diff --git a/docs/IE_DG/img/applying_low_latency_2.png b/docs/IE_DG/img/applying_low_latency_2.png old mode 100755 new mode 100644 diff --git a/docs/IE_DG/img/llt2_use_const_initializer.png b/docs/IE_DG/img/llt2_use_const_initializer.png old mode 100755 new mode 100644 diff --git a/docs/IE_DG/img/low_latency_limitation_1.png b/docs/IE_DG/img/low_latency_limitation_1.png old mode 100755 new mode 100644 diff --git a/docs/IE_DG/img/low_latency_limitation_2.png b/docs/IE_DG/img/low_latency_limitation_2.png old mode 100755 new mode 100644 diff --git a/docs/img/OV-diagram-step1.png b/docs/img/OV-diagram-step1.png index d1ff39f1aaa..da3212d0713 100644 Binary files a/docs/img/OV-diagram-step1.png and b/docs/img/OV-diagram-step1.png differ diff --git a/docs/img/OV-diagram-step4.png b/docs/img/OV-diagram-step4.png index 75fe645a313..7df9835e8e8 100644 Binary files a/docs/img/OV-diagram-step4.png and b/docs/img/OV-diagram-step4.png differ diff --git a/docs/ops/arithmetic/Asinh_3.md b/docs/ops/arithmetic/Asinh_3.md index 9db15defa47..6fae01555d0 100644 --- a/docs/ops/arithmetic/Asinh_3.md +++ b/docs/ops/arithmetic/Asinh_3.md @@ -4,33 +4,29 @@ **Category**: Arithmetic unary operation -**Short description**: *Asinh* performs element-wise hyperbolic inverse sine (arcsinh) operation with given tensor. +**Short description**: *Asinh* performs element-wise inverse hyperbolic sine operation (arcsinh) on a given input tensor. -**Attributes**: - - No attributes available. - -**Inputs** - -* **1**: A tensor of type *T*. **Required.** - -**Outputs** - -* **1**: The result of element-wise asinh operation. A tensor of type *T*. - -**Types** - -* *T*: any floating point type. - -*Asinh* does the following with the input tensor *a*: +**Detailed description**: *Asinh* performs element-wise inverse hyperbolic sine operation on a given input tensor, based on the following mathematical formula: \f[ a_{i} = asinh(a_{i}) \f] -**Examples** +**Attributes**: *Asinh* operation has no attributes. -*Example 1* +**Inputs** + +* **1**: A tensor of type *T* and arbitrary shape. **Required.** + +**Outputs** + +* **1**: The result of element-wise *Asinh* operation. A tensor of type *T* and the same shape as input tensor. + +**Types** + +* *T*: any numeric type. 
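For intuition, the element-wise behavior defined by the formula above can be sketched in plain C++ with `std::asinh` (an illustrative reference only, not the Inference Engine kernel; the helper name is made up for this sketch):

```cpp
#include <cmath>
#include <vector>

// Reference semantics of Asinh: apply the inverse hyperbolic sine to every
// element; the output has the same shape and element type as the input.
std::vector<float> asinh_reference(const std::vector<float>& input) {
    std::vector<float> output(input.size());
    for (size_t i = 0; i < input.size(); ++i)
        output[i] = std::asinh(input[i]);
    return output;
}
```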
+ +**Example** ```xml diff --git a/docs/ops/arithmetic/Cosh_1.md b/docs/ops/arithmetic/Cosh_1.md index 08b3be9f421..7f1e3055dd3 100644 --- a/docs/ops/arithmetic/Cosh_1.md +++ b/docs/ops/arithmetic/Cosh_1.md @@ -4,33 +4,29 @@ **Category**: Arithmetic unary operation -**Short description**: *Cosh* performs element-wise hyperbolic cosine operation with given tensor. +**Short description**: *Cosh* performs element-wise hyperbolic cosine operation on a given input tensor. -**Attributes**: - - No attributes available. - -**Inputs** - -* **1**: An tensor of type *T*. **Required.** - -**Outputs** - -* **1**: The result of element-wise cosh operation. A tensor of type *T*. - -**Types** - -* *T*: any numeric type. - -*Cosh* does the following with the input tensor *a*: +**Detailed description**: *Cosh* performs element-wise hyperbolic cosine (cosh) operation on a given input tensor, based on the following mathematical formula: \f[ a_{i} = cosh(a_{i}) \f] -**Examples** +**Attributes**: *Cosh* operation has no attributes. -*Example 1* +**Inputs** + +* **1**: A tensor of type *T* and arbitrary shape. **Required.** + +**Outputs** + +* **1**: The result of element-wise *Cosh* operation. A tensor of type *T* and the same shape as the input tensor. + +**Types** + +* *T*: any numeric type. + +**Example** ```xml diff --git a/docs/ops/movement/BatchToSpace_2.md b/docs/ops/movement/BatchToSpace_2.md index 936d597792e..85fdd781f68 100644 --- a/docs/ops/movement/BatchToSpace_2.md +++ b/docs/ops/movement/BatchToSpace_2.md @@ -4,46 +4,87 @@ **Category**: *Data movement* -**Short description**: The *BatchToSpace* operation reshapes the "batch" dimension 0 into N - 1 dimensions of shape `block_shape` + [batch] and interleaves these blocks back into the grid defined by the spatial dimensions `[1, ..., N - 1]` to obtain a result with the same rank as `data` input. The spatial dimensions of this intermediate result are then optionally cropped according to `crops_begin` and `crops_end` to produce the output. This is the reverse of the *SpaceToBatch* operation. +**Short description**: *BatchToSpace* operation permutes the batch dimension on a given input `data` into blocks in the spatial dimensions specified by `block_shape` input. The spatial dimensions are then optionally cropped according to `crops_begin` and `crops_end` inputs to produce the output. -**Detailed description**: +**Detailed description** -The *BatchToSpace* operation is similar to the TensorFlow* operation [BatchToSpaceND](https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd) +*BatchToSpace* operation is equivalent to the following operation steps on the input `data` with shape `[batch, D_1, D_2, ..., D_{N-1}]` and `block_shape`, `crops_begin`, `crops_end` inputs with shape `[N]` to produce the output tensor \f$y\f$. -The operation is equivalent to the following transformation of the input tensors `data` with shape `[batch, D_1, D_2 ... D_{N-1}]` and `block_shape`, `crops_begin`, `crops_end` of shape `[N]` to *Y* output tensor. +1. Reshape `data` input to produce a tensor of shape \f$[B_1, \dots, B_{N - 1}, \frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1, D_2, \dots, D_{N - 1}]\f$ +\f[x^{\prime} = reshape(data, [B_1, \dots, B_{N - 1}, \frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1, D_2, \dots, D_{N - 1}])\f] - note: B_0 is expected to be 1. - x' = reshape(`data`, [B_1, ..., B_{N - 1}, batch / (B_1 * ... B_{N - 1}), D_1, D_2, ..., D_{N - 1}]), where B_i = block_shape[i] +2. 
Permute dimensions of \f$x^{\prime}\f$ to produce a tensor of shape \f$[\frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1, B_1, D_2, B_2, \dots, D_{N-1}, B_{N - 1}]\f$ +\f[x^{\prime\prime} = transpose(x', [N, N + 1, 0, N + 2, 1, \dots, N + N - 1, N - 1])\f] - x'' = transpose(x', [N, N + 1, 0, N + 2, 1, ..., N + N - 1, N - 1]) +3. Reshape \f$x^{\prime\prime}\f$ to produce a tensor of shape \f$[\frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1 \times B_1, D_2 \times B_2, \dots, D_{N - 1} \times B_{N - 1}]\f$ +\f[x^{\prime\prime\prime} = reshape(x^{\prime\prime}, [\frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1 \times B_1, D_2 \times B_2, \dots, D_{N - 1} \times B_{N - 1}])\f] - x''' = reshape(x'', [batch / (B_1 * ... * B_{N - 1}), D_1 * B_1, D_2 * B_2, ... , D_{N - 1} * B_{N - 1}]) +4. Crop the start and end of spatial dimensions of \f$x^{\prime\prime\prime}\f$ according to `crops_begin` and `crops_end` inputs to produce the output \f$y\f$ of shape: +\f[\left[\frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, crop(D_1 \times B_1, CB_1, CE_1), crop(D_2 \times B_2, CB_2, CE_2), \dots , crop(D_{N - 1} \times B_{N - 1}, CB_{N - 1}, CE_{N - 1})\right]\f] - Crop the start and end of dimensions according to `crops_begin`, `crops_end` to produce the output of shape: - note: `crops_begin[0], crops_end[0]` are expected to be 0. - `y = [batch / (B_1 * ... * B_{N - 1}), crop(D_1 * B_1, crops_begin[1], crops_end[1]), crop(D_2 * B_2, crops_begin[2], crops_end[2]), ... , crop(D_{N - 1} * B_{N - 1}, crops_begin[N - 1], crops_end[N - 1])]` +Where -**Attributes** +- \f$B_i\f$ = block_shape[i] +- \f$B_0\f$ is expected to be 1 +- \f$CB_i\f$ = crops_begin[i] +- \f$CE_i\f$ = crops_end[i] +- \f$CB_0\f$ and \f$CE_0\f$ are expected to be 0 +- \f$CB_i + CE_i \leq D_i \times B_i \f$ - No attributes available. +*BatchToSpace* operation is the reverse of *SpaceToBatch* operation. + +**Attributes**: *BatchToSpace* operation has no attributes. **Inputs** -* **1**: `data` - input N-D tensor `[batch, D_1, D_2 ... D_{N-1}]` of *T1* type with rank >= 2. **Required.** -* **2**: `block_shape` - input 1-D tensor of *T2* type with shape `[N]` that is equal to the size of `data` input shape. All values must be >= 1.`block_shape[0]` is expected to be 1. **Required.** -* **3**: `crops_begin` - input 1-D tensor of *T2* type with shape `[N]` that is equal to the size of `data` input shape. All values must be non-negative. crops_begin specifies the amount to crop from the beginning along each axis of `data` input . It is required that `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i]`. `crops_begin[0]` is expected to be 0. **Required.** -* **4**: `crops_end` - input 1-D tensor of *T2* type with shape `[N]` that is equal to the size of `data` input shape. All values must be non-negative. crops_end specifies the amount to crop from the ending along each axis of `data` input. It is required that `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i]`. `crops_end[0]` is expected to be 0. **Required.** +* **1**: `data` - A tensor of type *T* and rank greater than or equal to 2. Layout is `[batch, D_1, D_2 ... D_{N-1}]` (number of batches, spatial axes). **Required.** +* **2**: `block_shape` - Specifies the block sizes of `batch` axis of `data` input which are moved to the corresponding spatial axes. A 1D tensor of type *T_INT* and shape `[N]`. All element values must be greater than or equal to 1.`block_shape[0]` is expected to be 1. 
**Required.** +* **3**: `crops_begin` - Specifies the amount to crop from the beginning along each axis of `data` input. A 1D tensor of type *T_INT* and shape `[N]`. All element values must be greater than or equal to 0. `crops_begin[0]` is expected to be 0. **Required.** +* **4**: `crops_end` - Specifies the amount to crop from the ending along each axis of `data` input. A 1D tensor of type *T_INT* and shape `[N]`. All element values must be greater than or equal to 0. `crops_end[0]` is expected to be 0. **Required.** +* **Note**: `N` corresponds to the rank of `data` input. +* **Note**: `batch` axis of `data` input must be evenly divisible by the cumulative product of `block_shape` elements. +* **Note**: It is required that `crops_begin[i] + crops_end[i] <= block_shape[i] * input_shape[i]`. **Outputs** -* **1**: N-D tensor with shape `[batch / (block_shape[0] * block_shape[1] * ... * block_shape[N - 1]), D_1 * block_shape[1] - crops_begin[1] - crops_end[1], D_2 * block_shape[2] - crops_begin[2] - crops_end[2], ..., D_{N - 1} * block_shape[N - 1] - crops_begin[N - 1] - crops_end[N - 1]` of the same type as `data` input. +* **1**: Permuted tensor of type *T* with the same rank as `data` input tensor, and shape `[batch / (block_shape[0] * block_shape[1] * ... * block_shape[N - 1]), D_1 * block_shape[1] - crops_begin[1] - crops_end[1], D_2 * block_shape[2] - crops_begin[2] - crops_end[2], ..., D_{N - 1} * block_shape[N - 1] - crops_begin[N - 1] - crops_end[N - 1]`. **Types** -* *T1*: any supported type. -* *T2*: any supported integer type. +* *T*: any supported type. +* *T_INT*: any supported integer type. -**Example** +**Examples** + +*Example: 2D input tensor `data`* + +```xml +<layer type="BatchToSpace" ...> + <input> + <port id="0"> <!-- data --> + <dim>10</dim> <!-- batch --> + <dim>2</dim> <!-- spatial dimension 1 --> + </port> + <port id="1"> <!-- block_shape value: [1, 5] --> + <dim>2</dim> + </port> + <port id="2"> <!-- crops_begin value: [0, 2] --> + <dim>2</dim> + </port> + <port id="3"> <!-- crops_end value: [0, 0] --> + <dim>2</dim> + </port> + </input> + <output> + <port id="4"> + <dim>2</dim> <!-- batch / (block_shape[0] * block_shape[1]) --> + <dim>8</dim> <!-- D_1 * block_shape[1] - crops_begin[1] - crops_end[1] --> + </port> + </output> +</layer> +``` + +*Example: 5D input tensor `data`* ```xml diff --git a/inference-engine/src/gna_plugin/transformations/convert_padded2valid_conv.cpp b/inference-engine/src/gna_plugin/transformations/convert_padded2valid_conv.cpp index 019c7747a5a..b07add2183d 100644 --- a/inference-engine/src/gna_plugin/transformations/convert_padded2valid_conv.cpp +++ b/inference-engine/src/gna_plugin/transformations/convert_padded2valid_conv.cpp @@ -29,19 +29,12 @@ struct ConvData { size_t input_height; size_t input_width; size_t input_channel_count; - size_t filter_height; - size_t filter_width; size_t filter_count; - size_t filter_dilation_width; - size_t filter_dilation_height; - size_t filter_stride_width; - size_t filter_stride_height; size_t pads_begin_width; size_t pads_begin_height; size_t pads_end_width; size_t pads_end_height; ngraph::op::PadType padding_type; - ngraph::Shape output_shape; ngraph::element::Type element_type; }; @@ -55,27 +48,18 @@ static bool VerifyAndGetConvParams(std::shared_ptr<ngraph::opset7::Convolution> return false; } - conv_data.output_shape = conv->get_output_shape(0); conv_data.padding_type = conv->get_auto_pad(); conv_data.input_channel_count = conv->input_value(0).get_shape()[1]; conv_data.input_height = conv->input_value(0).get_shape()[2]; conv_data.input_width = conv->input_value(0).get_shape()[3]; conv_data.filter_count = conv->input_value(1).get_shape()[0]; - conv_data.filter_height = conv->input_value(1).get_shape()[2]; - conv_data.filter_width = conv->input_value(1).get_shape()[3]; - conv_data.filter_dilation_height = conv->get_dilations()[0]; - conv_data.filter_dilation_width = conv->get_dilations()[1]; - conv_data.filter_stride_height = conv->get_strides()[0]; - conv_data.filter_stride_width = conv->get_strides()[1]; conv_data.pads_begin_height =
conv->get_pads_begin()[0]; conv_data.pads_begin_width = conv->get_pads_begin()[1]; conv_data.pads_end_height = conv->get_pads_end()[0]; conv_data.pads_end_width = conv->get_pads_end()[1]; conv_data.element_type = conv->get_element_type(); - IE_ASSERT(conv_data.filter_count == conv_data.output_shape[1]); - - return true; + return conv_data.pads_begin_height || conv_data.pads_end_height || conv_data.pads_begin_width || conv_data.pads_end_width; } static bool TransposeOrderMatches(std::shared_ptr transpose, std::vector order) { @@ -117,75 +101,9 @@ static bool VerifyMaxPool(std::shared_ptr max_pool) { auto pool_kernel = max_pool->get_kernel(); // Check if MaxPool vertical stride == pool size - // (TODO: remove when 50386 and 50379 are fixed and also verify pool_kernel[0] > 8 limitation below, gna_limitations can be used then) // Check if padding is VALID return (max_pool->get_auto_pad() == ngraph::op::PadType::VALID && - pool_kernel.size() == 2 && pool_strides.size() == 2 && - pool_kernel[0] == pool_strides[0] && pool_kernel[0] <= 8); -} - -static size_t GetRequiredInputPadding(size_t input_size, size_t filter_size, size_t stride_size, size_t dilation_size, size_t output_size) { - size_t partial_padding_size = (output_size - 1) * stride_size + (filter_size - 1) * dilation_size + 1; - - // This way of padding size calculation avoids problem with fractional numbers - return (partial_padding_size > input_size) ? (partial_padding_size - input_size) : 0; -} - -static size_t CalculateOutputSize(size_t input_size, size_t filter_size, size_t stride_size, size_t dilation_size, size_t padding_size) { - return (input_size + padding_size - ((filter_size - 1) * dilation_size + 1)) / stride_size + 1; -} - -static bool CalculatePadding(ConvData& conv_data) { - size_t output_height{ 0 }; - size_t output_width{ 0 }; - - switch (conv_data.padding_type) { - case ngraph::op::PadType::EXPLICIT: - // all paddings already set - break; - case ngraph::op::PadType::VALID: - conv_data.pads_begin_height = 0; - conv_data.pads_begin_width = 0; - conv_data.pads_end_height = 0; - conv_data.pads_end_width = 0; - break; - case ngraph::op::PadType::SAME_LOWER: - case ngraph::op::PadType::SAME_UPPER: - { - output_height = conv_data.output_shape[2]; - output_width = conv_data.output_shape[3]; - - size_t pads_width = GetRequiredInputPadding(conv_data.input_width, conv_data.filter_width, - conv_data.filter_stride_width, conv_data.filter_dilation_width, output_width); - size_t pads_height = GetRequiredInputPadding(conv_data.input_height, conv_data.filter_height, - conv_data.filter_stride_height, conv_data.filter_dilation_height, output_height); - - conv_data.pads_begin_width = conv_data.pads_end_width = pads_width / 2; - conv_data.pads_begin_height = conv_data.pads_end_height = pads_height / 2; - - if (conv_data.padding_type == ngraph::op::PadType::SAME_LOWER) { - conv_data.pads_begin_width += (pads_width % 2); - conv_data.pads_begin_height += (pads_height % 2); - } else { - conv_data.pads_end_width += (pads_width % 2); - conv_data.pads_end_height += (pads_height % 2); - } - break; - } - default: - break; - } - - output_width = CalculateOutputSize(conv_data.input_width, conv_data.filter_width, conv_data.filter_stride_width, - conv_data.filter_dilation_width, conv_data.pads_begin_width + conv_data.pads_end_width); - output_height = CalculateOutputSize(conv_data.input_height, conv_data.filter_height, conv_data.filter_stride_height, - conv_data.filter_dilation_height, conv_data.pads_begin_height + conv_data.pads_end_height); - - 
IE_ASSERT(output_width == conv_data.output_shape[3]); - IE_ASSERT(output_height == conv_data.output_shape[2]); - - // Check if any calculated padding is non-zero, otherwise there is no need to decompose such convolution - return conv_data.pads_begin_height || conv_data.pads_end_height || conv_data.pads_begin_width || conv_data.pads_end_width; + pool_kernel.size() == 2 && pool_strides.size() == 2); } static std::shared_ptr FlatCrop(ngraph::Output input, size_t offset, size_t size) { @@ -227,7 +145,7 @@ static std::shared_ptr CreatePaddedNet(std::shared_ptrinput_value(0).get_shape()) }), false); - // zero padding + // Constant with zero padding auto const_holding_padding = std::make_shared(conv_data.element_type, ngraph::Shape{ 1, biggest_padding }, 0); copy_runtime_info(conv, const_holding_padding); @@ -342,9 +260,6 @@ static bool Convert(std::shared_ptr leading_transpose, if (max_pool && !VerifyMaxPool(std::dynamic_pointer_cast(max_pool))) return false; - if (!CalculatePadding(conv_data)) - return false; - GeneratePadding(std::dynamic_pointer_cast(leading_transpose), std::dynamic_pointer_cast(conv), conv_data); diff --git a/inference-engine/src/offline_transformations/src/moc_transformations.cpp b/inference-engine/src/offline_transformations/src/moc_transformations.cpp index 745e173b4d7..50ae6bcaa7e 100644 --- a/inference-engine/src/offline_transformations/src/moc_transformations.cpp +++ b/inference-engine/src/offline_transformations/src/moc_transformations.cpp @@ -5,12 +5,54 @@ #include #include "moc_transformations.hpp" -#include "pruning.hpp" #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include NGRAPH_RTTI_DEFINITION(ngraph::pass::MOCTransformations, "MOCTransformations", 0); bool ngraph::pass::MOCTransformations::run_on_function(std::shared_ptr f) { + // To avoid issues with dynamism we make nGraph Function dynamic and after we apply all + // transformations we restore original shapes to the nGraph Function back + std::unordered_map input_shapes; + for (auto && param : f->get_parameters()) { + input_shapes[param.get()] = param->get_partial_shape(); + param->set_partial_shape(PartialShape::dynamic(param->get_partial_shape().rank())); + } + f->validate_nodes_and_infer_types(); + + ngraph::pass::Manager manager(get_pass_config()); + + manager.register_pass(); + manager.register_pass(); + manager.register_pass(); + + auto common_fusions = manager.register_pass(); + common_fusions->add_matcher(); + common_fusions->add_matcher(); + common_fusions->add_matcher(); + common_fusions->add_matcher(); + common_fusions->add_matcher(); + common_fusions->add_matcher(); + common_fusions->add_matcher(); + common_fusions->set_name("ngraph::pass::CommonFusions"); + + manager.run_passes(f); + + // Restore original shapes to the nGraph Function + for (auto && param : f->get_parameters()) { + param->set_partial_shape(input_shapes.at(param.get())); + } + f->validate_nodes_and_infer_types(); + return false; } \ No newline at end of file diff --git a/inference-engine/src/transformations/include/ngraph_ops/framework_node.hpp b/inference-engine/src/transformations/include/ngraph_ops/framework_node.hpp index c19a8500ecc..1a5729d8ecd 100644 --- a/inference-engine/src/transformations/include/ngraph_ops/framework_node.hpp +++ b/inference-engine/src/transformations/include/ngraph_ops/framework_node.hpp @@ -71,8 +71,11 @@ public: std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + void cache_output_descriptor(); + private: 
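// The descriptors below cache (PartialShape, element type) pairs per port:
// validate_and_infer_types() in framework_node.cpp compares fresh inputs
// against m_inputs_desc and either resets the outputs to dynamic shapes or
// restores the originals cached in m_output_desc by cache_output_descriptor().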
std::vector<std::tuple<ngraph::PartialShape, ngraph::element::Type>> m_inputs_desc; + std::vector<std::tuple<ngraph::PartialShape, ngraph::element::Type>> m_output_desc; FrameworkNodeAttrs m_attrs; }; diff --git a/inference-engine/src/transformations/include/transformations/common_optimizations/pad_fusion.hpp b/inference-engine/src/transformations/include/transformations/common_optimizations/pad_fusion.hpp index 7c11f1ee02a..37c903de952 100644 --- a/inference-engine/src/transformations/include/transformations/common_optimizations/pad_fusion.hpp +++ b/inference-engine/src/transformations/include/transformations/common_optimizations/pad_fusion.hpp @@ -12,6 +12,7 @@ namespace ngraph { namespace pass { class TRANSFORMATIONS_API PadFusion; +class TRANSFORMATIONS_API PadElimination; class TRANSFORMATIONS_API PadFusionAvgPool; class TRANSFORMATIONS_API PadFusionMaxPool; class TRANSFORMATIONS_API PadFusionConvolution; @@ -22,6 +23,16 @@ class TRANSFORMATIONS_API PadFusionGroupConvolutionBackpropData; } // namespace pass } // namespace ngraph +/** + * @ingroup ie_transformation_common_api + * @brief PadElimination eliminates Pad operations whose pads_begin and pads_end values are all zeros and therefore do nothing + */ +class ngraph::pass::PadElimination: public ngraph::pass::MatcherPass { +public: + NGRAPH_RTTI_DECLARATION; + PadElimination(); +}; + /** * @ingroup ie_transformation_common_api * @brief PadFusion transformation replaces following graph: @@ -113,5 +124,6 @@ public: add_matcher<ngraph::pass::PadFusionConvolutionBackpropData>(); add_matcher<ngraph::pass::PadFusionGroupConvolution>(); add_matcher<ngraph::pass::PadFusionGroupConvolutionBackpropData>(); + add_matcher<ngraph::pass::PadElimination>(); } }; diff --git a/inference-engine/src/transformations/src/ngraph_ops/framework_node.cpp b/inference-engine/src/transformations/src/ngraph_ops/framework_node.cpp index 567c54bf04d..b25143c20f5 100644 --- a/inference-engine/src/transformations/src/ngraph_ops/framework_node.cpp +++ b/inference-engine/src/transformations/src/ngraph_ops/framework_node.cpp @@ -25,31 +25,71 @@ shared_ptr<Node> op::FrameworkNode::clone_with_new_inputs(const OutputVector& ne return node; } +void op::FrameworkNode::cache_output_descriptor() { + for (size_t i = 0; i < get_output_size(); ++i) { + m_output_desc.emplace_back(get_output_partial_shape(i), get_output_element_type(i)); + } +} + void op::FrameworkNode::validate_and_infer_types() { INTERNAL_OP_SCOPE(FrameworkNode_validate_and_infer_types); // Save initial inputs descriptors bool initialize_input_desc = m_inputs_desc.empty(); + bool reset_output_shape_to_dynamic = false; + bool reset_output_shape_to_original = false; for (uint64_t i = 0; i < get_input_size(); i++) { // TODO: store constant values - const auto& new_input_desc = - std::make_tuple(get_input_partial_shape(i), get_input_element_type(i)); + const auto& input_pshape = get_input_partial_shape(i); + const auto& input_type = get_input_element_type(i); + const auto& rank = input_pshape.rank(); + + const auto & get_error_message = [&]() { + std::stringstream out; + out << "Input descriptor for " << get_friendly_name() + << " node has been changed:" << std::endl; + out << "Before: " << std::get<0>(m_inputs_desc[i]) << ", " + << std::get<1>(m_inputs_desc[i]) << std::endl; + out << "After: " << input_pshape << ", " + << input_type << std::endl; + out << "Please specify InferenceEngine Extensions to support this case."; + return out.str(); + }; if (initialize_input_desc) { - m_inputs_desc.push_back(new_input_desc); + m_inputs_desc.emplace_back(input_pshape, input_type); } else { - auto get_message = [&]() { - std::stringstream out; - out << "Input descriptor for " << get_friendly_name() - << " node has been changed:" << std::endl; - out << "Before: " << std::get<0>(m_inputs_desc[i]) << ", " - << std::get<1>(m_inputs_desc[i]) << std::endl; - out << "After: " <<
std::get<0>(new_input_desc) << ", " - << std::get<1>(new_input_desc) << std::endl; - out << "Please specify InferenceEngine Extensions to support this case."; - return out.str(); - }; + const auto& orig_input_pshape = std::get<0>(m_inputs_desc[i]); + if (orig_input_pshape == input_pshape) { + reset_output_shape_to_original = true; + } else if (input_pshape.rank().is_dynamic()) { + reset_output_shape_to_dynamic = true; + } else if (rank.is_static() && orig_input_pshape.rank().is_static() && + rank.get_length() == orig_input_pshape.rank().get_length()) { + for (int64_t dim = 0; dim < rank.get_length(); ++dim) { + NODE_VALIDATION_CHECK(this, input_pshape[dim].is_dynamic() || + (orig_input_pshape[dim].is_static() && + orig_input_pshape[dim].get_length() == input_pshape[dim].get_length()), + get_error_message()); + } + reset_output_shape_to_dynamic = true; + } else { + NODE_VALIDATION_CHECK(this, m_inputs_desc[i] == std::make_tuple(input_pshape, input_type), get_error_message()); + } + } + } - NODE_VALIDATION_CHECK(this, m_inputs_desc[i] == new_input_desc, get_message()); + if (reset_output_shape_to_dynamic) { + cache_output_descriptor(); + for (size_t i = 0; i < get_output_size(); ++i) { + if (get_output_partial_shape(i).rank().is_static()) { + set_output_type(i, get_output_element_type(i), PartialShape::dynamic()); + } + } + } + + if (reset_output_shape_to_original && !m_output_desc.empty()) { + for (size_t i = 0; i < get_output_size(); ++i) { + set_output_type(i, std::get<1>(m_output_desc[i]), std::get<0>(m_output_desc[i])); } } } diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/pad_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/pad_fusion.cpp index c8e53e74c1f..b2655f8797c 100644 --- a/inference-engine/src/transformations/src/transformations/common_optimizations/pad_fusion.cpp +++ b/inference-engine/src/transformations/src/transformations/common_optimizations/pad_fusion.cpp @@ -12,6 +12,7 @@ #include #include #include +#include using namespace ngraph; @@ -385,3 +386,34 @@ pass::PadFusionGroupConvolutionBackpropData::PadFusionGroupConvolutionBackpropDa auto m = std::make_shared<pattern::Matcher>(conv_pattern, matcher_name); this->register_matcher(m, callback); } + +NGRAPH_RTTI_DEFINITION(pass::PadElimination, "PadElimination", 0); + +pass::PadElimination::PadElimination() { + MATCHER_SCOPE(PadElimination); + auto pad_node_pattern = pattern::wrap_type<opset5::Pad>(); + + matcher_pass_callback callback = [=](pattern::Matcher& m) { + auto pad = m.get_match_root(); + + auto pad_begin_const = ngraph::get_constant_from_source(pad->input_value(1)); + auto pad_end_const = ngraph::get_constant_from_source(pad->input_value(2)); + + if (!pad_begin_const || !pad_end_const) { + return false; + } + + const auto pad_begin_value = pad_begin_const->cast_vector<int64_t>(); + const auto pad_end_value = pad_end_const->cast_vector<int64_t>(); + + if (std::any_of(pad_begin_value.begin(), pad_begin_value.end(), [](int64_t value) { return value != 0; }) || + std::any_of(pad_end_value.begin(), pad_end_value.end(), [](int64_t value) { return value != 0; })) { + return false; + } + + return replace_output_update_name(pad->output(0), pad->input_value(0)); + }; + + auto m = std::make_shared<pattern::Matcher>(pad_node_pattern, matcher_name); + this->register_matcher(m, callback); +} \ No newline at end of file diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/softplus_fusion.cpp
b/inference-engine/src/transformations/src/transformations/common_optimizations/softplus_fusion.cpp index 0c768800eea..c9a19623352 100644 --- a/inference-engine/src/transformations/src/transformations/common_optimizations/softplus_fusion.cpp +++ b/inference-engine/src/transformations/src/transformations/common_optimizations/softplus_fusion.cpp @@ -19,26 +19,20 @@ ngraph::pass::SoftPlusFusion::SoftPlusFusion() { // fuses ln(exp(x) + 1.0) operations into SoftPlus(x) auto input = ngraph::pattern::any_input(); auto exp = std::make_shared<ngraph::opset4::Exp>(input); - auto add_constant = ngraph::pattern::wrap_type<ngraph::opset4::Constant>(); + auto add_constant = ngraph::pattern::wrap_type<ngraph::opset4::Constant>( + pattern::type_matches_any({element::f32, element::f16})); auto add = std::make_shared<ngraph::opset4::Add>(exp, add_constant); auto log = std::make_shared<ngraph::opset4::Log>(add); ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher &m) { - auto &pattern_to_output = m.get_pattern_value_map(); + const auto &pattern_to_output = m.get_pattern_value_map(); auto exp_input = pattern_to_output.at(input); auto constant = std::dynamic_pointer_cast<ngraph::opset4::Constant>(pattern_to_output.at(add_constant).get_node_shared_ptr()); + if (!constant) return false; - if (constant == nullptr) { - return false; - } - - if (constant->get_element_type() == ngraph::element::f32 || constant->get_element_type() == ngraph::element::f16) { - auto data = constant->cast_vector<float>(); - if (data.size() != 1 || data[0] != 1.0) { - return false; - } - } else { + auto data = constant->cast_vector<float>(); + if (data.size() != 1 || data[0] != 1.0) { return false; } diff --git a/inference-engine/src/transformations/src/transformations/op_conversions/convert_topk3.cpp b/inference-engine/src/transformations/src/transformations/op_conversions/convert_topk3.cpp index 97d9031bd99..45eba021464 100644 --- a/inference-engine/src/transformations/src/transformations/op_conversions/convert_topk3.cpp +++ b/inference-engine/src/transformations/src/transformations/op_conversions/convert_topk3.cpp @@ -40,6 +40,7 @@ ngraph::pass::ConvertTopK3::ConvertTopK3() { last1 = new_topk->output(1); new_topk->set_friendly_name(topk->get_friendly_name()); } else if (topk->get_output_target_inputs(0).size() == 0) { + last0 = topk->output(0); last1 = std::make_shared(new_topk->output(1), topk->get_index_element_type()); new_ops.push_back(last1.get_node_shared_ptr()); diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/activation.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/activation.cpp index 371808cbbc5..355afbaf687 100644 --- a/inference-engine/tests/functional/inference_engine/serialization/single_layer/activation.cpp +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/activation.cpp @@ -37,6 +37,7 @@ const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes {Negative, {}}, {Acos, {}}, {Asin, {}}, + {Asinh, {}}, {Atan, {}}, {Cos, {}}, {Cosh, {}}, diff --git a/inference-engine/tests/functional/inference_engine/transformations/framework_node_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/framework_node_test.cpp new file mode 100644 index 00000000000..a87bb245856 --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/transformations/framework_node_test.cpp @@ -0,0 +1,59 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include <gtest/gtest.h> + +#include +#include +#include + +#include +#include + +#include "common_test_utils/ngraph_test_utils.hpp" + + +using namespace testing; +using namespace ngraph; + +
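// A short road map for the test below (summarizing the scenarios handled in
// framework_node.cpp above): reshaping an input to a compatible dynamic or
// same-rank shape must reset the FrameworkNode output to a dynamic shape,
// restoring the original static shape must bring back the cached output
// descriptor, and an incompatible static shape must make validation throw.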
+TEST(TransformationTests, FrameworkNode) { + auto param = std::make_shared(element::i64, Shape{1, 64}); + auto f_node = std::make_shared(OutputVector{param}); + f_node->set_output_type(0, element::i64, Shape{1, 64}); + + // Set partially dynamic shape + param->set_partial_shape(PartialShape{Dimension::dynamic(), 64}); + param->validate_and_infer_types(); + + ASSERT_NO_THROW(f_node->validate_and_infer_types()); + ASSERT_EQ(f_node->get_output_partial_shape(0), PartialShape::dynamic()); + + // Set dynamic shape + param->set_partial_shape(PartialShape::dynamic(2)); + param->validate_and_infer_types(); + + ASSERT_NO_THROW(f_node->validate_and_infer_types()); + ASSERT_EQ(f_node->get_output_partial_shape(0), PartialShape::dynamic()); + + // Set fully dynamic shape + param->set_partial_shape(PartialShape::dynamic()); + param->validate_and_infer_types(); + + ASSERT_NO_THROW(f_node->validate_and_infer_types()); + ASSERT_EQ(f_node->get_output_partial_shape(0), PartialShape::dynamic()); + + // Set original static shape + param->set_partial_shape(Shape{1, 64}); + param->validate_and_infer_types(); + + ASSERT_NO_THROW(f_node->validate_and_infer_types()); + ASSERT_EQ(f_node->get_output_partial_shape(0), PartialShape({1, 64})); + + // Set different static shape + param->set_partial_shape(Shape{2, 64}); + param->validate_and_infer_types(); + + ASSERT_THROW(f_node->validate_and_infer_types(), ngraph_error::exception); +} \ No newline at end of file diff --git a/inference-engine/tests/functional/inference_engine/transformations/pad_fusion.cpp b/inference-engine/tests/functional/inference_engine/transformations/pad_fusion.cpp index ea96555ea01..e3e6bc40081 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/pad_fusion.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/pad_fusion.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include "common_test_utils/ngraph_test_utils.hpp" @@ -21,6 +22,42 @@ using namespace testing; using namespace ngraph; +TEST(TransformationTests, PadElimination) { + std::shared_ptr f(nullptr), f_ref(nullptr); + + Shape data_shape{1, 3, 14, 14}; + { + auto data = std::make_shared(element::i32, data_shape); + set_tensor_name(data, "param"); + auto pads_begin = opset5::Constant::create(element::i32, Shape{4}, {0, 0, 0, 0}); + auto pads_end = opset5::Constant::create(element::i32, Shape{4}, {0, 0, 0, 0}); + auto pad = std::make_shared(data, pads_begin, pads_end, op::PadMode::CONSTANT); + set_tensor_name(pad, "pad"); + auto filters = std::make_shared(element::i32, Shape{1, 3, 4, 4}); + auto conv = std::make_shared(pad, filters, Strides{1, 1}, + CoordinateDiff{0, 0}, CoordinateDiff{1, 1}, Shape{1, 1}); + set_tensor_name(conv, "conv"); + f = std::make_shared(NodeVector{conv}, ParameterVector{data, filters}); + pass::Manager m; + m.register_pass(); + m.register_pass(); + m.run_passes(f); + ASSERT_NO_THROW(check_rt_info(f)); + } + { + auto data = std::make_shared(element::i32, data_shape); + set_tensor_names(data, {"param", "pad"}); + auto filters = std::make_shared(element::i32, Shape{1, 3, 4, 4}); + auto conv = std::make_shared(data, filters, Strides{1, 1}, + CoordinateDiff{0, 0}, CoordinateDiff{1, 1}, Shape{1, 1}, + op::PadType::EXPLICIT); + set_tensor_name(conv, "conv"); + f_ref = std::make_shared(NodeVector{conv}, ParameterVector{data, filters}); + } + + auto res = compare_functions(f, f_ref); + ASSERT_TRUE(res.first) << res.second; +} TEST(TransformationTests, PadFusionAvgPoolExcludePad) { std::shared_ptr f(nullptr), 
f_ref(nullptr); @@ -155,9 +192,11 @@ TEST(TransformationTests, PadFusionConvolutionBackpropData) { auto pads_begin = opset5::Constant::create(element::i32, Shape{4}, {0, 0, 1, 1}); auto pads_end = opset5::Constant::create(element::i32, Shape{4}, {0, 0, 2, 2}); auto pad = std::make_shared(data, pads_begin, pads_end, op::PadMode::CONSTANT); + set_tensor_name(pad, "pad"); auto filters = std::make_shared(element::f32, Shape{3, 2, 5, 5}); auto conv = std::make_shared(pad, filters, Strides{1, 1}, CoordinateDiff{4, 4}, CoordinateDiff{3, 3}, Shape{1, 1}); + set_tensor_name(conv, "conv"); f = std::make_shared(NodeVector{conv}, ParameterVector{data, filters}); pass::Manager m; @@ -171,6 +210,7 @@ TEST(TransformationTests, PadFusionConvolutionBackpropData) { auto filters = std::make_shared(element::f32, Shape{3, 2, 5, 5}); auto conv = std::make_shared(data, filters, Strides{1, 1}, CoordinateDiff{3, 3}, CoordinateDiff{1, 1}, Shape{1, 1}); + set_tensor_name(conv, "conv"); f_ref = std::make_shared(NodeVector{conv}, ParameterVector{data, filters}); } @@ -389,12 +429,15 @@ TEST(TransformationTests, NegativePadFusionConvolutionBackpropDataTooSmallPad) { Shape data_shape{1, 3, 14, 14}; { auto data = std::make_shared(element::f32, data_shape); + set_tensor_name(data, "data"); auto pads_begin = opset5::Constant::create(element::i32, Shape{4}, {0, 0, 2, 2}); auto pads_end = opset5::Constant::create(element::i32, Shape{4}, {0, 0, 2, 2}); auto pad = std::make_shared(data, pads_begin, pads_end, op::PadMode::CONSTANT); + set_tensor_name(pad, "pad"); auto filters = std::make_shared(element::f32, Shape{3, 2, 5, 5}); auto conv = std::make_shared(pad, filters, Strides{1, 1}, CoordinateDiff{1, 1}, CoordinateDiff{1, 1}, Shape{1, 1}); + set_tensor_name(conv, "conv"); f = std::make_shared(NodeVector{conv}, ParameterVector{data, filters}); pass::Manager m; @@ -405,12 +448,15 @@ TEST(TransformationTests, NegativePadFusionConvolutionBackpropDataTooSmallPad) { } { auto data = std::make_shared(element::f32, data_shape); + set_tensor_name(data, "data"); auto pads_begin = opset5::Constant::create(element::i32, Shape{4}, {0, 0, 2, 2}); auto pads_end = opset5::Constant::create(element::i32, Shape{4}, {0, 0, 2, 2}); auto pad = std::make_shared(data, pads_begin, pads_end, op::PadMode::CONSTANT); + set_tensor_name(pad, "pad"); auto filters = std::make_shared(element::f32, Shape{3, 2, 5, 5}); auto conv = std::make_shared(pad, filters, Strides{1, 1}, CoordinateDiff{1, 1}, CoordinateDiff{1, 1}, Shape{1, 1}); + set_tensor_name(conv, "conv"); f_ref = std::make_shared(NodeVector{conv}, ParameterVector{data, filters}); } diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/activation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/activation.cpp index 0c6a8763844..a1a62dcd2dc 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/activation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/activation.cpp @@ -38,6 +38,7 @@ const std::map>> activationTypes {Negative, {}}, {Acos, {}}, {Asin, {}}, + {Asinh, {}}, {Atan, {}}, {Cos, {}}, {Cosh, {}}, @@ -65,6 +66,7 @@ const std::map>> activationTypes // List of operations that should be tested also with integer precision const std::map>> intActivationTypes = { + {Asinh, {}}, {Atan, {}}, {Negative, {}}, {Ceiling, {}}, diff --git 
a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/batch_to_space.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/batch_to_space.cpp index cd81911f707..0ebf53d7bed 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/batch_to_space.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/batch_to_space.cpp @@ -16,6 +16,39 @@ const std::vector net_precisions = { InferenceEngine::Precision::I32 }; +const std::vector> data_shapes_2D = { + {12, 4}, + {48, 3} +}; + +const std::vector> block_shapes_2D = { + {1, 2}, + {1, 6} +}; + +const std::vector> crops_2D = { + {0, 0}, + {0, 1} +}; + +const auto batch_to_space_2d_tests = ::testing::Combine( + ::testing::ValuesIn(block_shapes_2D), + ::testing::ValuesIn(crops_2D), + ::testing::ValuesIn(crops_2D), + ::testing::ValuesIn(data_shapes_2D), + ::testing::ValuesIn(net_precisions), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(CommonTestUtils::DEVICE_CPU)); + +INSTANTIATE_TEST_CASE_P( + smoke_BatchToSpace_2D, + BatchToSpaceLayerTest, + batch_to_space_2d_tests, + BatchToSpaceLayerTest::getTestCaseName); + const std::vector> data_shapes_4D = { {4, 1, 2, 2}, {4, 3, 2, 2}, @@ -39,7 +72,7 @@ const std::vector> crops_end_4D = { {0, 0, 0, 2} }; -const auto space_to_batch_4d_spatial_dims_tests = ::testing::Combine( +const auto batch_to_space_4d_spatial_dims_tests = ::testing::Combine( ::testing::Values(block_shapes_4D[0]), ::testing::ValuesIn(crops_begin_4D), ::testing::ValuesIn(crops_end_4D), @@ -51,7 +84,7 @@ const auto space_to_batch_4d_spatial_dims_tests = ::testing::Combine( ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(CommonTestUtils::DEVICE_CPU)); -const auto space_to_batch_4d_channel_dim_tests = ::testing::Combine( +const auto batch_to_space_4d_channel_dim_tests = ::testing::Combine( ::testing::Values(block_shapes_4D[1]), ::testing::Values(crops_begin_4D[0]), ::testing::Values(crops_end_4D[0]), @@ -66,13 +99,13 @@ const auto space_to_batch_4d_channel_dim_tests = ::testing::Combine( INSTANTIATE_TEST_CASE_P( smoke_BatchToSpace_4D_spatial_dims, BatchToSpaceLayerTest, - space_to_batch_4d_spatial_dims_tests, + batch_to_space_4d_spatial_dims_tests, BatchToSpaceLayerTest::getTestCaseName); INSTANTIATE_TEST_CASE_P( smoke_BatchToSpace_4D_channel_dim, BatchToSpaceLayerTest, - space_to_batch_4d_channel_dim_tests, + batch_to_space_4d_channel_dim_tests, BatchToSpaceLayerTest::getTestCaseName); const std::vector> data_shapes_5D = { @@ -96,7 +129,7 @@ const std::vector> crops_end_5D = { {0, 0, 0, 0, 1} }; -const auto space_to_batch_5d_spatial_dims_tests = ::testing::Combine( +const auto batch_to_space_5d_spatial_dims_tests = ::testing::Combine( ::testing::Values(block_shapes_5D[0]), ::testing::ValuesIn(crops_begin_5D), ::testing::ValuesIn(crops_end_5D), @@ -108,7 +141,7 @@ const auto space_to_batch_5d_spatial_dims_tests = ::testing::Combine( ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(CommonTestUtils::DEVICE_CPU)); -const auto space_to_batch_5d_channel_dim_tests = ::testing::Combine( +const auto batch_to_space_5d_channel_dim_tests = ::testing::Combine( ::testing::Values(block_shapes_5D[1]), ::testing::Values(crops_begin_5D[0]), 
::testing::Values(crops_end_5D[0]), @@ -123,13 +156,13 @@ const auto space_to_batch_5d_channel_dim_tests = ::testing::Combine( INSTANTIATE_TEST_CASE_P( smoke_BatchToSpace_5D_spatial_dims, BatchToSpaceLayerTest, - space_to_batch_5d_spatial_dims_tests, + batch_to_space_5d_spatial_dims_tests, BatchToSpaceLayerTest::getTestCaseName); INSTANTIATE_TEST_CASE_P( smoke_BatchToSpace_5D_channel_dim, BatchToSpaceLayerTest, - space_to_batch_5d_channel_dim_tests, + batch_to_space_5d_channel_dim_tests, BatchToSpaceLayerTest::getTestCaseName); -} // namespace \ No newline at end of file +} // namespace diff --git a/inference-engine/tests/functional/plugin/gna/pass_tests/padded2valid_conv.cpp b/inference-engine/tests/functional/plugin/gna/pass_tests/convert_padded2valid_conv.cpp similarity index 98% rename from inference-engine/tests/functional/plugin/gna/pass_tests/padded2valid_conv.cpp rename to inference-engine/tests/functional/plugin/gna/pass_tests/convert_padded2valid_conv.cpp index 010bbd7a5c0..0823567e578 100644 --- a/inference-engine/tests/functional/plugin/gna/pass_tests/padded2valid_conv.cpp +++ b/inference-engine/tests/functional/plugin/gna/pass_tests/convert_padded2valid_conv.cpp @@ -247,10 +247,10 @@ const std::vector> configs2D = { }; const std::vector padTypes = { + op::PadType::VALID, op::PadType::EXPLICIT, op::PadType::SAME_LOWER, - op::PadType::SAME_UPPER, - op::PadType::VALID + op::PadType::SAME_UPPER }; const std::vector models = { @@ -277,14 +277,14 @@ const std::vector> maxpool1DPools = { {1, 2} }; const std::vector> maxpool1DStrides = { {1, 1} }; const std::vector> input2DNHWC = { {1, 16, 16, 32} }; -const std::vector> kernels2D = { {2, 2}, {4, 1}, {1, 3}}; +const std::vector> kernels2D = { {2, 2}, {4, 1}, {1, 3} }; const std::vector> strides2D = { {1, 1}, {1, 2}, {2, 1}, {2, 2} }; const std::vector> padBegins2D = { {1, 2} }; const std::vector> padEnds2D = { {3, 1} }; const std::vector> dilations2D = { {1, 1} }; -const std::vector numOutChannels2D = { 32 }; -const std::vector> biases2D = { {1, 32, 1, 1} }; -const std::vector> transpBiases2D = { {1, 1, 1, 32} }; +const std::vector numOutChannels2D = { 8 }; +const std::vector> biases2D = { {1, 8, 1, 1} }; +const std::vector> transpBiases2D = { {1, 1, 1, 8} }; const std::vector> maxpool2DPools = { {2, 2} }; const std::vector> maxpool2DStrides = { {2, 1} }; diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/activation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/activation.cpp index c62641c0b4c..6bbf98451fe 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/activation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/activation.cpp @@ -35,6 +35,7 @@ const std::map>> activationTypes {Negative, {}}, {Acos, {}}, {Asin, {}}, + {Asinh, {}}, {Atan, {}}, {Cos, {}}, {Cosh, {}}, diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/activation.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/activation.hpp index 684942ee184..5a9c5226f6f 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/activation.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/activation.hpp @@ -40,6 +40,7 @@ static std::map activationNames = 
{ngraph::helpers::ActivationTypes::Negative, "Negative"}, {ngraph::helpers::ActivationTypes::Acos, "Acos"}, {ngraph::helpers::ActivationTypes::Asin, "Asin"}, + {ngraph::helpers::ActivationTypes::Asinh, "Asinh"}, {ngraph::helpers::ActivationTypes::Atan, "Atan"}, {ngraph::helpers::ActivationTypes::Cos, "Cos"}, {ngraph::helpers::ActivationTypes::Cosh, "Cosh"}, diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp b/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp index 3d0ec47531d..1af6642ba87 100644 --- a/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp +++ b/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp @@ -779,6 +779,14 @@ void check_rt_info(const std::shared_ptr& f) { } } +void set_tensor_name(ngraph::Output output, const std::string & name) { + output.get_tensor_ptr()->set_names({name}); +} + +void set_tensor_names(ngraph::Output output, const std::unordered_set & names) { + output.get_tensor_ptr()->set_names(names); +} + NGRAPH_RTTI_DEFINITION(TestOpMultiOut, "TestOp", 0); namespace attributes { diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.hpp b/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.hpp index ab636060af1..116226da962 100644 --- a/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.hpp +++ b/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.hpp @@ -101,6 +101,10 @@ inline std::pair compare_functions( void check_rt_info(const std::shared_ptr& f); +void set_tensor_name(ngraph::Output output, const std::string & name); + +void set_tensor_names(ngraph::Output output, const std::unordered_set & names); + namespace ngraph { namespace pass { class InjectionPass; diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py index 4c3cf62280b..3241dfd7013 100644 --- a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py @@ -6,6 +6,7 @@ VERIFIED_OP_REFERENCES = [ 'Acos-1', 'Add-1', 'Asin-1', + 'Asinh-3', 'Assign-6', 'AvgPool-1', 'BatchNormInference-5', @@ -23,6 +24,7 @@ VERIFIED_OP_REFERENCES = [ 'Convolution-1', 'Constant-1', 'Cos-1', + 'Cosh-1', 'DeformableConvolution-1', 'DeformablePSROIPooling-1', 'DetectionOutput-1', diff --git a/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp index de2dbab0612..47405a8e51c 100644 --- a/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp @@ -100,6 +100,7 @@ enum ActivationTypes { Negative, Acos, Asin, + Asinh, Atan, Cos, Cosh, diff --git a/inference-engine/tests/ngraph_helpers/ngraph_functions/src/activation.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/activation.cpp index 97dea94963a..d8dce877b0f 100644 --- a/inference-engine/tests/ngraph_helpers/ngraph_functions/src/activation.cpp +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/activation.cpp @@ -48,6 +48,8 @@ 
std::shared_ptr makeActivation(const ngraph::Output &in, return std::make_shared(in); case ngraph::helpers::ActivationTypes::Asin: return std::make_shared(in); + case ngraph::helpers::ActivationTypes::Asinh: + return std::make_shared(in); case ngraph::helpers::ActivationTypes::Atan: return std::make_shared(in); case ngraph::helpers::ActivationTypes::Cos: diff --git a/inference-engine/tests/unit/gna/ngraph/transformations/gna_convert_padded2valid_conv.cpp b/inference-engine/tests/unit/gna/ngraph/transformations/gna_convert_padded2valid_conv.cpp new file mode 100644 index 00000000000..b8259bf3d84 --- /dev/null +++ b/inference-engine/tests/unit/gna/ngraph/transformations/gna_convert_padded2valid_conv.cpp @@ -0,0 +1,453 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include + +#include "transformations/convert_padded2valid_conv.hpp" +#include "common_test_utils/ngraph_test_utils.hpp" +#include +#include +#include +#include + +namespace testing { + +namespace { + +enum class modelType { + TranspConvTransp = 0, /* Transpose(NHWC->NCHW) => Conv => Transpose(NCHW->NHWC) */ + TranspConvBcastAddTransp, /* Transpose(NHWC->NCHW) => Conv => Broadcasted Add (Bias) => Transpose(NCHW->NHWC) */ + TranspConvBcastAddMaxPoolTransp, /* Transpose(NHWC->NCHW) => Conv => Broadcasted Add (Bias) => MaxPooling => Transpose(NCHW->NHWC) (2D Max Pool case) */ + TranspConvBcastAddActTransp, /* Transpose(NHWC->NCHW) => Conv => Broadcasted Add (Bias) => Activation Function => Transpose(NCHW->NHWC) */ + TranspConvBcastAddMaxPoolActTransp, /* Transpose(NHWC->NCHW) => Conv => Broadcasted Add (Bias) => MaxPool => Activation Function => Transpose(NCHW->NHWC) */ + TranspConvTranspBcastAdd, /* Transpose(NHWC->NCHW) => conv => Transpose(NCHW->NHWC) => Bias */ + TranspConvTranspBcastAddAct /* Transpose(NHWC->NCHW) => Conv => Transpose(NCHW->NHWC) => Bias => Activation Function */ +}; + +struct ConvData { + size_t input_height; + size_t input_width; + size_t input_channel_count; + size_t pads_begin_width; + size_t pads_begin_height; + size_t pads_end_width; + size_t pads_end_height; +}; + +void GetConvParams(std::shared_ptr conv, ConvData& conv_data) { + conv_data.input_channel_count = conv->input_value(0).get_shape()[1]; + conv_data.input_height = conv->input_value(0).get_shape()[2]; + conv_data.input_width = conv->input_value(0).get_shape()[3]; + conv_data.pads_begin_height = conv->get_pads_begin()[0]; + conv_data.pads_begin_width = conv->get_pads_begin()[1]; + conv_data.pads_end_height = conv->get_pads_end()[0]; + conv_data.pads_end_width = conv->get_pads_end()[1]; +} + +std::shared_ptr createFunction(const modelType& model, + const ngraph::Output& input_node, + const ngraph::Shape& filters_shape, + const ngraph::Strides& conv_stride, + const ngraph::CoordinateDiff& pads_begin, + const ngraph::CoordinateDiff& pads_end, + const ngraph::Strides& conv_dilation, + const ngraph::Shape& bias_shape, + const ngraph::Strides& maxpool_stride, + const ngraph::Shape& maxpool_shape, + const ngraph::op::PadType& pad_type, + ConvData* conv_data) { + auto transpose_in_order = std::make_shared(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 3, 1, 2}); + auto transpose_in = std::make_shared(input_node, transpose_in_order); + auto filters = std::make_shared(ngraph::element::i64, + ngraph::Shape{4, input_node.get_shape()[3], filters_shape[0], filters_shape[1]}); + auto conv = std::make_shared(transpose_in, filters, conv_stride, pads_begin, pads_end, conv_dilation, pad_type); + if 
(conv_data) + GetConvParams(conv, *conv_data); + auto transpose_out_order = std::make_shared(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 2, 3, 1}); + auto bias_const = std::make_shared(ngraph::element::i64, bias_shape); + ngraph::Output last_op = std::make_shared(conv, transpose_out_order); + + switch (model) { + case modelType::TranspConvBcastAddTransp: + { + auto bcast_add = std::make_shared(conv, bias_const); + last_op = std::make_shared(bcast_add, transpose_out_order); + } + break; + + case modelType::TranspConvBcastAddMaxPoolTransp: + { + auto bcast_add = std::make_shared(conv, bias_const); + auto maxpool = std::make_shared(bcast_add, maxpool_stride, ngraph::Shape{0, 0}, ngraph::Shape{0, 0}, maxpool_shape, + ngraph::op::RoundingType::FLOOR, ngraph::op::PadType::VALID); + auto transpose = std::make_shared(maxpool, transpose_out_order); + last_op = std::make_shared(transpose); + } + break; + + case modelType::TranspConvBcastAddActTransp: + { + auto bcast_add = std::make_shared(conv, bias_const); + auto activation = std::make_shared(bcast_add); + last_op = std::make_shared(activation, transpose_out_order); + } + break; + + case modelType::TranspConvBcastAddMaxPoolActTransp: + { + auto bcast_add = std::make_shared(conv, bias_const); + auto maxpool = std::make_shared(bcast_add, maxpool_stride, ngraph::Shape{0, 0}, ngraph::Shape{0, 0}, maxpool_shape, + ngraph::op::RoundingType::FLOOR, ngraph::op::PadType::VALID); + auto activation = std::make_shared(maxpool); + last_op = std::make_shared(activation, transpose_out_order); + } + break; + + case modelType::TranspConvTranspBcastAdd: + { + last_op = std::make_shared(last_op, bias_const); + } + break; + + case modelType::TranspConvTranspBcastAddAct: + { + auto bcast_add = std::make_shared(last_op, bias_const); + last_op = std::make_shared(bcast_add); + } + break; + + case modelType::TranspConvTransp: + default: + break; + } + + return std::make_shared(last_op); +} + +std::shared_ptr get_initial_function(const modelType& model, + const ngraph::PartialShape& input_shape, + const ngraph::Shape& filters_shape, + const ngraph::Strides& conv_stride, + const ngraph::CoordinateDiff& pads_begin, + const ngraph::CoordinateDiff& pads_end, + const ngraph::Strides& conv_dilation, + const ngraph::Shape& bias_shape, + const ngraph::Strides& maxpool_stride, + const ngraph::Shape& maxpool_shape, + const ngraph::op::PadType& pad_type, + ConvData& conv_data) { + auto inputParams = std::make_shared(ngraph::element::i64, input_shape); + auto result = createFunction(model, inputParams, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation, bias_shape, + maxpool_stride, maxpool_shape, pad_type, &conv_data); + return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{inputParams}); +} + +// --------------------------------------------------------------------------------------------------------------------- + +class ConvertPadded2ValidConvTestInvalidFixture : public CommonTestUtils::TestsCommon, + public ::testing::WithParamInterface> { +public: + void SetUp() override; +public: + std::shared_ptr function, reference_function; + modelType model; +}; + +void ConvertPadded2ValidConvTestInvalidFixture::SetUp() { + ngraph::PartialShape input_shape; + ngraph::Shape filters_shape, bias_shape, maxpool_shape; + ngraph::Strides conv_stride, conv_dilation, maxpool_stride; + ngraph::CoordinateDiff pads_begin, pads_end; + ngraph::op::PadType pad_type; + ConvData conv_data; + std::tie(model, input_shape, filters_shape, conv_stride, 
pads_begin, pads_end, conv_dilation,
+        bias_shape, maxpool_stride, maxpool_shape, pad_type) = this->GetParam();
+
+    function = get_initial_function(model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
+        bias_shape, maxpool_stride, maxpool_shape, pad_type, conv_data);
+    reference_function = get_initial_function(model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
+        bias_shape, maxpool_stride, maxpool_shape, pad_type, conv_data);
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+class ConvertPadded2ValidConvTestFixture: public CommonTestUtils::TestsCommon,
+    public ::testing::WithParamInterface> {
+public:
+    void SetUp() override;
+    std::shared_ptr get_reference(const modelType& model,
+        const ngraph::PartialShape& input_shape,
+        const ngraph::Shape& filters_shape,
+        const ngraph::Strides& conv_stride,
+        const ngraph::CoordinateDiff& pads_begin,
+        const ngraph::CoordinateDiff& pads_end,
+        const ngraph::Strides& conv_dilation,
+        const ngraph::Shape& bias_shape,
+        const ngraph::Strides& maxpool_stride,
+        const ngraph::Shape& maxpool_shape,
+        const ngraph::op::PadType& pad_type,
+        const ConvData& conv_data);
+public:
+    std::shared_ptr function, reference_function;
+    modelType model;
+};
+
+void ConvertPadded2ValidConvTestFixture::SetUp() {
+    ngraph::PartialShape input_shape;
+    ngraph::Shape filters_shape, bias_shape, maxpool_shape;
+    ngraph::Strides conv_stride, conv_dilation, maxpool_stride;
+    ngraph::CoordinateDiff pads_begin, pads_end;
+    ngraph::op::PadType pad_type;
+    ConvData conv_data;
+    std::tie(model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
+        bias_shape, maxpool_stride, maxpool_shape, pad_type) = this->GetParam();
+
+    function = get_initial_function(model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
+        bias_shape, maxpool_stride, maxpool_shape, pad_type, conv_data);
+    reference_function = get_reference(model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
+        bias_shape, maxpool_stride, maxpool_shape, pad_type, conv_data);
+}
+
+std::shared_ptr FlatCrop(ngraph::Output input, size_t offset, size_t size) {
+    return std::make_shared(
+        input, // data
+        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset}), // begin slice index
+        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset + size}), // end slice index
+        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}), // strides
+        std::vector{1, 0}, // begin mask
+        std::vector{1, 0}); // end mask
+}
+
+void InsertPadding(ngraph::OutputVector& input_rows_to_concat, size_t size,
+    const std::shared_ptr padding_const, size_t biggest_padding) {
+
+    if (size == biggest_padding) {
+        input_rows_to_concat.push_back(padding_const);
+    } else {
+        auto slice = FlatCrop(padding_const, 0, size);
+        input_rows_to_concat.push_back(slice);
+    }
+}
+
+std::shared_ptr CreatePaddedNet(const ngraph::Output& input_node,
+    const ConvData& conv_data) {
+    size_t flat_left_padding = conv_data.input_channel_count * conv_data.pads_begin_width;
+    size_t flat_right_padding = conv_data.input_channel_count * conv_data.pads_end_width;
+    size_t padded_row_size = flat_left_padding + conv_data.input_channel_count * conv_data.input_width + flat_right_padding;
+    size_t flat_top_padding = padded_row_size * conv_data.pads_begin_height;
+    size_t flat_bottom_padding = padded_row_size * conv_data.pads_end_height;
+    size_t biggest_padding = std::max(std::max(flat_left_padding, flat_right_padding), std::max(flat_top_padding, flat_bottom_padding));
+
+    if (conv_data.input_height > 1 && (flat_top_padding > 1 || flat_bottom_padding > 1)) {
+        biggest_padding = biggest_padding > padded_row_size ? biggest_padding : padded_row_size;
+    }
+
+    if (!biggest_padding)
+        return nullptr;
+
+    auto flat_input = std::make_shared(input_node,
+        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2},
+            ngraph::Shape{1ull, shape_size(input_node.get_shape())}), false);
+
+    // Constant with zero padding
+    auto const_holding_padding = std::make_shared(ngraph::element::i64, ngraph::Shape{1, biggest_padding}, 0);
+
+    std::shared_ptr original_row = flat_input;
+    ngraph::OutputVector input_rows_to_concat;
+
+    // Add top padding
+    for (size_t p = 0; p < conv_data.pads_begin_height; p++) {
+        InsertPadding(input_rows_to_concat, padded_row_size, const_holding_padding, biggest_padding);
+    }
+
+    if (flat_left_padding || flat_right_padding) {
+        // Pad every row of the input plane if necessary
+        for (size_t h = 0; h < conv_data.input_height; h++) {
+            // left padding     input     right padding
+            //     |              |           |
+            //     +--------------+-----------+
+            //                    |
+            //                 concat
+
+            if (conv_data.input_height > 1)
+                original_row = FlatCrop(flat_input, h * conv_data.input_width * conv_data.input_channel_count,
+                    conv_data.input_width * conv_data.input_channel_count);
+
+            ngraph::OutputVector single_row_concat_inputs;
+            if (flat_left_padding) {
+                InsertPadding(single_row_concat_inputs, flat_left_padding, const_holding_padding, biggest_padding);
+            }
+            single_row_concat_inputs.push_back(original_row);
+            if (flat_right_padding) {
+                InsertPadding(single_row_concat_inputs, flat_right_padding, const_holding_padding, biggest_padding);
+            }
+            auto padded_row_concat = std::make_shared(single_row_concat_inputs, 1);
+
+            input_rows_to_concat.push_back(padded_row_concat);
+        }
+    } else {
+        input_rows_to_concat.push_back(original_row);
+    }
+
+    // Bottom padding
+    for (size_t p = 0; p < conv_data.pads_end_height; p++) {
+        InsertPadding(input_rows_to_concat, padded_row_size, const_holding_padding, biggest_padding);
+    }
+
+    auto padded_input_plane = std::make_shared(input_rows_to_concat, 1);
+    return padded_input_plane;
+}
+
+std::shared_ptr ConvertPadded2ValidConvTestFixture::get_reference(const modelType& model,
+    const ngraph::PartialShape& input_shape,
+    const ngraph::Shape& filters_shape,
+    const ngraph::Strides& conv_stride,
+    const ngraph::CoordinateDiff& pads_begin,
+    const ngraph::CoordinateDiff& pads_end,
+    const ngraph::Strides& conv_dilation,
+    const ngraph::Shape& bias_shape,
+    const ngraph::Strides& maxpool_stride,
+    const ngraph::Shape& maxpool_shape,
+    const ngraph::op::PadType& pad_type,
+    const ConvData& conv_data) {
+    auto inputParams = std::make_shared(ngraph::element::i64, input_shape);
+
+    // Add padding where necessary
+
+    // padding
+    // padding
+    // ... row ...
+    // ... row ...
+    // ...........
+    // ... row ...
+ // padding + // padding + auto padded_input_plane = CreatePaddedNet(inputParams, conv_data); + std::shared_ptr result; + + if (padded_input_plane) { + auto shape_const = std::make_shared(ngraph::element::i64, ngraph::Shape{4}, + ngraph::Shape{static_cast(1), + conv_data.pads_begin_height + conv_data.input_height + conv_data.pads_end_height, + conv_data.pads_begin_width + conv_data.input_width + conv_data.pads_end_width, + conv_data.input_channel_count}); + auto padded_input_plane_reshaped = std::make_shared(padded_input_plane, shape_const, false); + result = createFunction(model, padded_input_plane_reshaped, filters_shape, conv_stride, + ngraph::CoordinateDiff{0, 0}, ngraph::CoordinateDiff{0, 0}, conv_dilation, bias_shape, + maxpool_stride, maxpool_shape, ngraph::op::PadType::EXPLICIT, nullptr); + } else { + // Valid padding + result = createFunction(model, inputParams, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation, bias_shape, + maxpool_stride, maxpool_shape, pad_type, nullptr); + } + + return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{inputParams}); +} + +// --------------------------------------------------------------------------------------------------------------------- + +void execute_test(const modelType& model, std::shared_ptr function, std::shared_ptr reference_function) { + ngraph::pass::Manager manager; + manager.register_pass(); + + switch (model) { + default: + case modelType::TranspConvTransp: + manager.register_pass(); + break; + case modelType::TranspConvBcastAddTransp: + manager.register_pass(); + break; + case modelType::TranspConvBcastAddMaxPoolTransp: + manager.register_pass(); + break; + case modelType::TranspConvBcastAddActTransp: + manager.register_pass(); + break; + case modelType::TranspConvBcastAddMaxPoolActTransp: + manager.register_pass(); + break; + case modelType::TranspConvTranspBcastAdd: + manager.register_pass(); + break; + case modelType::TranspConvTranspBcastAddAct: + manager.register_pass(); + break; + } + + manager.run_passes(function); + const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::ATTRIBUTES); + const FunctionsComparator::Result result = func_comparator(function, reference_function); + ASSERT_TRUE(result.valid); +} + +TEST_P(ConvertPadded2ValidConvTestFixture, CompareFunctions) { + execute_test(model, function, reference_function); +} + +INSTANTIATE_TEST_SUITE_P(ConvertPadded2ValidConvTestSuite, ConvertPadded2ValidConvTestFixture, + ::testing::Values( + std::make_tuple(modelType::TranspConvTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT), + std::make_tuple(modelType::TranspConvBcastAddTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT), + std::make_tuple(modelType::TranspConvBcastAddMaxPoolTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT), + 
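+        // Note on tuple layout: the fields below follow the std::tie in SetUp():
+        // model, input shape (NHWC), filter shape (HxW), conv stride, pads_begin,
+        // pads_end, conv dilation, bias shape, maxpool stride, maxpool kernel, auto-pad type.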
std::make_tuple(modelType::TranspConvBcastAddActTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::SAME_LOWER), + std::make_tuple(modelType::TranspConvBcastAddMaxPoolActTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::SAME_UPPER), + std::make_tuple(modelType::TranspConvTranspBcastAdd, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 1, 1, 4}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT), + std::make_tuple(modelType::TranspConvTranspBcastAddAct, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 1, 1, 4}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT))); + +TEST_P(ConvertPadded2ValidConvTestInvalidFixture, CompareFunctions) { + execute_test(model, function, reference_function); +} + +INSTANTIATE_TEST_SUITE_P(ConvertPadded2ValidConvInvalidTestSuite, ConvertPadded2ValidConvTestInvalidFixture, + ::testing::Values( + std::make_tuple(modelType::TranspConvTransp, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::SAME_UPPER), + std::make_tuple(modelType::TranspConvBcastAddTransp, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT), + std::make_tuple(modelType::TranspConvBcastAddMaxPoolTransp, ngraph::PartialShape{2, 16, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{5, 1}, ngraph::op::PadType::EXPLICIT), + std::make_tuple(modelType::TranspConvBcastAddActTransp, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::SAME_LOWER), + std::make_tuple(modelType::TranspConvBcastAddMaxPoolActTransp, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 5}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 4}, ngraph::op::PadType::SAME_UPPER), + std::make_tuple(modelType::TranspConvTranspBcastAdd, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 1, 1, 4}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT), + 
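+        // Negative cases: ConvertPadded2ValidConvTestInvalidFixture builds reference_function
+        // identically to function, so each test below passes only if the pass leaves the graph
+        // untouched (here, presumably, because the batch-size-2 inputs fall outside the
+        // transformation's supported pattern).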
std::make_tuple(modelType::TranspConvTranspBcastAddAct, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 1, 1, 4}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT))); + +} // namespace + +} // namespace testing diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.cpp index a80696c0b60..bef3794a657 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.cpp @@ -9,7 +9,6 @@ namespace kernel_selector { // Sub-group size used by "kernel_name_bfyx_os_iyx_osv16" kernel. -constexpr size_t sub_group_size = 16; ConvolutionKernel_bfyx_os_iyx_osv16::ConvolutionKernel_bfyx_os_iyx_osv16() : ConvolutionKernelBase("convolution_gpu_bfyx_os_iyx_osv16") { @@ -94,6 +93,9 @@ static void shrink_blocks_to_output_size(size_t output_x, size_t output_y, size_ block_x -= unused_x / simds_x; block_y -= unused_y / simds_y; + + block_x = Align(block_x, 2); + block_y = Align(block_y, 2); } ConvolutionKernel_bfyx_os_iyx_osv16::AutoTuneOption ConvolutionKernel_bfyx_os_iyx_osv16::GetAutoTuneOptions( @@ -107,9 +109,11 @@ ConvolutionKernel_bfyx_os_iyx_osv16::AutoTuneOption ConvolutionKernel_bfyx_os_iy const convolution_params& cp = static_cast(p); + const auto& sub_group_size = GetSubGroupSize(cp); + if (cp.stride.x == 1 && cp.stride.y == 1) { if (cp.filterSize.x == 1 && cp.filterSize.y == 1) { - option.blockWidth = 16; + option.blockWidth = sub_group_size; option.blockHeight = 1; option.prefetch = 4; // if less than 16 values is required to compute one single row of output @@ -143,13 +147,13 @@ ConvolutionKernel_bfyx_os_iyx_osv16::AutoTuneOption ConvolutionKernel_bfyx_os_iy if (cp.filterSize.x != 1 || cp.filterSize.y != 1 || cp.output.Batch().v != 1) { shrink_blocks_to_output_size(cp.output.X().v, cp.output.Y().v, option.blockWidth, option.blockHeight); } - return option; } ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_os_iyx_osv16::SetDefault(const convolution_params& cp, int autoTuneIndex) const { DispatchData dispatchData = ConvolutionKernelBase::SetDefault(cp); + const auto& sub_group_size = GetSubGroupSize(cp); const auto of_maps = cp.output.Feature().v; const auto of_maps_per_group = of_maps / cp.groups; @@ -196,6 +200,9 @@ bool ConvolutionKernel_bfyx_os_iyx_osv16::Validate(const Params& p, const option JitConstants ConvolutionKernel_bfyx_os_iyx_osv16::GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const { + const convolution_params& cp = static_cast(params); + const auto& sub_group_size = GetSubGroupSize(cp); + const auto of_maps = params.output.Feature().v; const auto of_maps_per_group = of_maps / params.groups; const size_t of_threads_per_batch = RoundUp(of_maps_per_group, sub_group_size); @@ -209,7 +216,7 @@ JitConstants ConvolutionKernel_bfyx_os_iyx_osv16::GetJitConstants(const convolut jit.Merge(MakeFusedOpsJitConstants(params, {conf_scalar})); } - + jit.AddConstant(MakeJitConstant("OSV_SIZE", 16)); jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", dispatchData.lws[2])); 
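+    // Note: OSV_SIZE stays fixed at 16 because the weights keep the os_iyx_osv16 layout,
+    // while SUB_GROUP_SIZE (dispatchData.lws[2]) may drop to 8 on devices with few EUs
+    // (see GetSubGroupSize in the header); this is also why the kernel now addresses
+    // weights by OSV_SIZE instead of SUB_GROUP_SIZE.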
jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", dispatchData.cldnnStyle.blockWidth));
    jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_HEIGHT", dispatchData.cldnnStyle.blockHeight));
diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.h
index 6bc617bb3f3..9da52609636 100644
--- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.h
+++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.h
@@ -4,6 +4,7 @@
 #pragma once
+#include "api/cldnn/runtime/device_info.hpp"
 #include "convolution_kernel_base.h"
 #include
 #include
@@ -34,6 +35,16 @@ protected:
     bool Validate(const Params& p, const optional_params& o) const override;
     bool NeedPaddedInput() const override { return true; }
     DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override;
+    size_t GetSubGroupSize(const convolution_params& params) const {
+        if (params.engineInfo.computeUnitsCount <= 24) {
+            // Devices with a small EU count tend to be computation bound.
+            // In that case, using a larger work size results in greater computational
+            // inefficiency w.r.t. the unaligned output features.
+            return (params.output.Feature().v > 8) ? 16 : 8;
+        } else {
+            return 16;
+        }
+    }
 private:
     struct AutoTuneOption {
diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_bfyx_os_iyx_osv16.cl b/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_bfyx_os_iyx_osv16.cl
index 679fa45de52..b622f1c1698 100644
--- a/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_bfyx_os_iyx_osv16.cl
+++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_bfyx_os_iyx_osv16.cl
@@ -83,12 +83,11 @@ KERNEL(convolution_gpu_bfyx_os_iyx_osv16)(
     uint fmg = feature_idx / SUB_GROUP_SIZE;
     const uint g = split_idx;
 #endif
-
    UNIT_TYPE in[IN_BLOCK_ARRAY_SIZE];
    UNIT_TYPE out[OUTPUT_BLOCK_WIDTH * OUTPUT_BLOCK_HEIGHT];
    UNIT_TYPE w[PREFETCH];
    uint in_addr;
-   uint weight_addr = fmg * FILTER_IFM_NUM * FILTER_SIZE_X * FILTER_SIZE_Y * SUB_GROUP_SIZE + lid;
+   uint weight_addr = fmg * FILTER_IFM_NUM * FILTER_SIZE_X * FILTER_SIZE_Y * OSV_SIZE + lid;
 #if GROUPED
    weight_addr += g * FILTER_GROUPS_PITCH;
@@ -156,7 +155,7 @@ KERNEL(convolution_gpu_bfyx_os_iyx_osv16)(
            in_addr += INPUT0_FEATURE_PITCH;
            for(int pf=0; pfNUL if errorlevel 1 ( echo Error^: Python is not installed.
Please install Python 3.5 ^(64-bit^) or higher from https://www.python.org/downloads/ @@ -35,10 +37,16 @@ if not "%python_ver%"=="okay" ( :: install Python modules +set USE_VENV="false" +set VENV_DIR=%USERPROFILE%\Documents\Intel\OpenVINO\venv_openvino IF /I "%1%" EQU "" ( set postfix= ) ELSE ( + IF /I "%1%" EQU "venv" ( + set postfix= + set USE_VENV="true" + ) ELSE ( IF /I "%1%" EQU "caffe" ( set postfix=_caffe ) ELSE ( @@ -59,6 +67,7 @@ IF /I "%1%" EQU "" ( ) ELSE ( echo Unsupported framework goto error + ) ) ) ) @@ -67,10 +76,20 @@ IF /I "%1%" EQU "" ( ) ) -pip3 install --user -r ..\requirements%postfix%.txt +IF /I "%2%" EQU "venv" ( + set USE_VENV="true" +) + +IF %USE_VENV% == "true" ( + python -m venv "%VENV_DIR%" + call "%VENV_DIR%\Scripts\activate.bat" +) + +python -m pip install -U pip +python -m pip install -r "%ROOT_DIR%..\requirements%postfix%.txt" :: Chek MO version -set python_command='python "%~dp0..\mo\utils\extract_release_version.py"' +set python_command='python "%ROOT_DIR%..\mo\utils\extract_release_version.py"' FOR /F "delims=" %%i IN (%python_command%) DO set mo_release_version=%%i IF "%mo_release_version%" == "None.None" ( set mo_is_custom="true" @@ -80,12 +99,12 @@ IF "%mo_release_version%" == "None.None" ( :: Check if existing IE Python bindings satisfy requirements set errorlevel= -python "%~dp0..\mo\utils\find_ie_version.py" +python "%ROOT_DIR%..\mo\utils\find_ie_version.py" IF %errorlevel% EQU 0 goto ie_search_end :: Check if OV already installed via pip set errorlevel= -pip3 show openvino +python -m pip show openvino IF %errorlevel% EQU 0 ( IF %mo_is_custom% == "true" ( echo [ WARNING ] OpenVINO ^(TM^) Toolkit version installed in pip is incompatible with the Model Optimizer @@ -110,7 +129,7 @@ IF %mo_is_custom% == "true" ( ) set errorlevel= -pip3 install openvino==%mo_release_version% +python -m pip install openvino==%mo_release_version% IF %errorlevel% NEQ 0 ( echo [ WARNING ] Could not find the OpenVINO ^(TM^) toolkit version %mo_release_version% in pip echo [ WARNING ] The highest OpenVINO ^(TM^) toolkit version will be installed ^(may be incompatible with current Model Optimizer version^) @@ -119,17 +138,17 @@ IF %errorlevel% NEQ 0 ( ) set errorlevel= -python "%~dp0..\mo\utils\find_ie_version.py" +python "%ROOT_DIR%..\mo\utils\find_ie_version.py" IF %errorlevel% EQU 0 goto ie_search_end echo [ WARNING ] The installed OpenVINO ^(TM^) toolkit version %mo_release_version% does not work as expected. Uninstalling... -pip3 uninstall -y openvino +python -m pip uninstall -y openvino echo [ WARNING ] Consider building the Inference Engine Python API from sources goto ie_search_end :install_last_ov set errorlevel= -pip3 install openvino +python -m pip install openvino IF %errorlevel% NEQ 0 ( echo [ WARNING ] Could not find OpenVINO ^(TM^) toolkit version available in pip for installation echo [ WARNING ] Consider building the Inference Engine Python API from sources @@ -137,16 +156,26 @@ IF %errorlevel% NEQ 0 ( ) set errorlevel= -python "%~dp0..\mo\utils\find_ie_version.py" +python "%ROOT_DIR%..\mo\utils\find_ie_version.py" IF %errorlevel% EQU 0 goto ie_search_end echo [ WARNING ] The installed highest OpenVINO ^(TM^) toolkit version doesn't work as expected. Uninstalling... -pip3 uninstall -y openvino +python -m pip uninstall -y openvino echo [ WARNING ] Consider building the Inference Engine Python API from sources goto ie_search_end :ie_search_end +IF %USE_VENV% == "true" ( + echo. 
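+    REM VENV_DIR is set near the top of this script to %USERPROFILE%\Documents\Intel\OpenVINO\venv_openvino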
+    echo Before running the Model Optimizer, please activate the virtual environment by running "%VENV_DIR%\Scripts\activate.bat"
+) ELSE (
+    echo.
+    echo [ WARNING ] All Model Optimizer dependencies are installed globally.
+    echo [ WARNING ] If you want to keep the Model Optimizer in a separate sandbox
+    echo [ WARNING ] run install_prerequisites.bat "{caffe|tf|tf2|mxnet|kaldi|onnx}" venv
+)
+
 echo *****************************************************************************************
 echo Optional: To speed up model conversion process, install protobuf-*.egg located in the
 echo "model-optimizer\install_prerequisites" folder or building protobuf library from sources.
diff --git a/model-optimizer/install_prerequisites/install_prerequisites.sh b/model-optimizer/install_prerequisites/install_prerequisites.sh
index bd2deb23ae5..17c26201cf1 100755
--- a/model-optimizer/install_prerequisites/install_prerequisites.sh
+++ b/model-optimizer/install_prerequisites/install_prerequisites.sh
@@ -35,6 +35,7 @@ for ((i=1;i <= $#;i++)) {
     esac
 }
+VENV_DIR="$HOME/venv_openvino"
 SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]-$0}" )" && pwd )"
 if [[ -f /etc/centos-release ]]; then
@@ -53,23 +54,8 @@ if [[ $DISTRO == "centos" ]]; then
     elif command -v python3.5 >/dev/null 2>&1; then
         python_binary=python3.5
     fi
-
-    if [ -z "$python_binary" ]; then
-        sudo -E yum install -y https://centos7.iuscommunity.org/ius-release.rpm
-        sudo -E yum install -y python36u python36u-pip
-        sudo -E pip3.6 install virtualenv
-        python_binary=python3.6
-    fi
-    # latest pip is needed to install tensorflow
-    sudo -E "$python_binary" -m pip install --upgrade pip
-elif [[ $DISTRO == "ubuntu" ]]; then
-    sudo -E apt update
-    sudo -E apt -y --no-install-recommends install python3-pip python3-venv
+else
     python_binary=python3
-    sudo -E "$python_binary" -m pip install --upgrade pip
-elif [[ "$OSTYPE" == "darwin"* ]]; then
-    python_binary=python3
-    python3 -m pip install --upgrade pip
 fi
 install_latest_ov() {
@@ -181,21 +167,21 @@ find_ie_bindings() {
 }
 if [[ $V_ENV -eq 1 ]]; then
-    "$python_binary" -m venv "$SCRIPTDIR/../venv${postfix}"
-    source "$SCRIPTDIR/../venv${postfix}/bin/activate"
-    venv_python_binary="$SCRIPTDIR/../venv${postfix}/bin/$python_binary"
-    $venv_python_binary -m pip install -r "$SCRIPTDIR/../requirements${postfix}.txt"
+    "$python_binary" -m venv "$VENV_DIR"
+    source "$VENV_DIR/bin/activate"
+    venv_python_binary="$VENV_DIR/bin/$python_binary"
+    # latest pip is needed to install tensorflow
+    "$venv_python_binary" -m pip install --upgrade pip
+    "$venv_python_binary" -m pip install -r "$SCRIPTDIR/../requirements${postfix}.txt"
     find_ie_bindings "$venv_python_binary" false
     echo
-    echo "Before running the Model Optimizer, please activate virtualenv environment by running \"source ${SCRIPTDIR}/../venv${postfix}/bin/activate\""
+    echo "Before running the Model Optimizer, please activate the virtual environment by running \"source $VENV_DIR/bin/activate\""
 else
-    if [[ "$OSTYPE" == "darwin"* ]]; then
-        python3 -m pip install -r "$SCRIPTDIR/../requirements${postfix}.txt"
-        find_ie_bindings python3 false
-    else
-        sudo -E $python_binary -m pip install -r "$SCRIPTDIR/../requirements${postfix}.txt"
-        find_ie_bindings $python_binary true
-    fi
+    # latest pip is needed to install tensorflow
+    "$python_binary" -m pip install --upgrade pip
+    "$python_binary" -m pip install -r "$SCRIPTDIR/../requirements${postfix}.txt"
+    find_ie_bindings "$python_binary" false
+
 echo
 echo "[WARNING] All Model Optimizer dependencies are installed globally."
echo "[WARNING] If you want to keep Model Optimizer in separate sandbox" echo "[WARNING] run install_prerequisites.sh \"{caffe|tf|tf2|mxnet|kaldi|onnx}\" venv" diff --git a/model-optimizer/mo/front/tf/extractors/utils.py b/model-optimizer/mo/front/tf/extractors/utils.py index c8fac1de697..c9d07083b38 100644 --- a/model-optimizer/mo/front/tf/extractors/utils.py +++ b/model-optimizer/mo/front/tf/extractors/utils.py @@ -70,8 +70,18 @@ def tf_tensor_content(tf_dtype, shape, pb_tensor): log.error(decode_err_msg, extra={'is_warning': True}) value = np.array(type_helper[1](pb_tensor)) - if len(shape) == 0 or shape.prod() == 0: - if len(value) == 1: + # Ignore an empty value, if len(shape) > 1 + # For example, value = [] and shape = [1, 1, 0] + # This is needed to reshape this value later and to return reshaped value = [[[]]] + # Otherwise there can be failures during partial inference, because we are storing an empty value with incorrect + # shape + if len(shape) == 0 or (len(shape) == 1 and shape.prod() == 0): + try: + value_length = len(value) + except TypeError: + # case, when value is a scalar + value_length = 0 + if value_length == 1: # return scalar if shape is [] otherwise broadcast according to shape try: return np.array(value[0], dtype=type_helper[0]) diff --git a/model-optimizer/unit_tests/extensions/front/Mish_fusion_test.py b/model-optimizer/unit_tests/extensions/front/Mish_fusion_test.py deleted file mode 100644 index 10e48daefad..00000000000 --- a/model-optimizer/unit_tests/extensions/front/Mish_fusion_test.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from extensions.front.Mish_fusion import MishFusion -from mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op, result, build_graph_with_edge_attrs - -ref_nodes = {**regular_op('input', {'type': 'Parameter'}), - **regular_op('mish', {'type': 'Mish', 'name': 'final_mul'}), - **result('result') - } -ref_edges = [('input', 'mish'), ('mish', 'result')] - - -class MishFusionTest(unittest.TestCase): - nodes = { - **regular_op('input', {'type': 'Parameter'}), - **regular_op('softplus', {'op': 'SoftPlus'}), - **regular_op('tanh', {'op': 'Tanh'}), - **regular_op('mul', {'op': 'Mul', 'name': 'final_mul'}), - **result('result'), - } - - edges = [('input', 'softplus', {'in': 0, 'out': 0}), - ('input', 'mul', {'in': 0, 'out': 0}), - ('softplus', 'tanh', {'in': 0, 'out': 0}), - ('tanh', 'mul', {'in': 1, 'out': 0}), - ('mul', 'result', {'in': 0, 'out': 0})] - - def test_mish_fusion(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {}) - - graph_ref = build_graph(ref_nodes, ref_edges) - graph.stage = 'front' - - MishFusion().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and - graph.get_op_nodes(name='final_mul')[0].op == 'Mish') - - def test_mish_fusion_different_source(self): - # check case when different tensors goes to Mul and SoftPlus - graph = build_graph_with_edge_attrs({ - **regular_op('input', {'type': 'Parameter'}), - **regular_op('input_2', {'type': 'Parameter'}), - **regular_op('softplus', {'op': 'SoftPlus'}), - **regular_op('tanh', {'op': 'Tanh'}), - **regular_op('mul', {'op': 'Mul', 'name': 'final_mul'}), - **result('result'), - }, [('input', 'softplus', {'in': 0, 'out': 0}), - ('input_2', 'mul', {'in': 0, 'out': 0}), - 
('softplus', 'tanh', {'in': 0, 'out': 0}), - ('tanh', 'mul', {'in': 1, 'out': 0}), - ('mul', 'result', {'in': 0, 'out': 0})], {}) - - graph_ref = graph.copy() - graph.stage = 'front' - - MishFusion().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) diff --git a/model-optimizer/unit_tests/extensions/front/Softplus_fusion_test.py b/model-optimizer/unit_tests/extensions/front/Softplus_fusion_test.py deleted file mode 100644 index 0dd93f8d95c..00000000000 --- a/model-optimizer/unit_tests/extensions/front/Softplus_fusion_test.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from extensions.front.Softplus_fusion import SoftplusFusion -from mo.front.common.partial_infer.utils import float_array -from mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const, regular_op, result, build_graph_with_edge_attrs - -ref_nodes = {**regular_op('input', {'type': 'Parameter'}), - **regular_op('softplus', {'type': 'SoftPlus', 'name': 'final_log'}), - **result('result') - } -ref_edges = [('input', 'softplus'), ('softplus', 'result')] - - -class SoftplusFusionTest(unittest.TestCase): - nodes = { - **regular_op('input', {'type': 'Parameter'}), - **regular_op('exp', {'op': 'Exp'}), - **const('const_1', float_array([1.0])), - **regular_op('add', {'op': 'Add'}), - **regular_op('ln', {'op': 'Log', 'name': 'final_log'}), - **result('result'), - } - - edges = [('input', 'exp', {'in': 0, 'out': 0}), - ('const_1', 'add', {'in': 0, 'out': 0}), - ('exp', 'add', {'in': 1, 'out': 0}), - ('add', 'ln', {'in': 0, 'out': 0}), - ('ln', 'result', {'in': 0, 'out': 0})] - - def test_softplus_fusion_test(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {}) - - graph_ref = build_graph(ref_nodes, ref_edges) - graph.stage = 'front' - - SoftplusFusion().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - self.assertTrue(len(graph.get_op_nodes(name='final_log')) == 1 and - graph.get_op_nodes(name='final_log')[0].op == 'SoftPlus') - - def test_softplus_fusion_test_wrong_const(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {'const_1': {'value': float_array([0.9999])}}) - - graph_ref = graph.copy() - graph.stage = 'front' - - SoftplusFusion().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - diff --git a/model-optimizer/unit_tests/extensions/front/Swish_fusion_test.py b/model-optimizer/unit_tests/extensions/front/Swish_fusion_test.py deleted file mode 100644 index ccf9985536b..00000000000 --- a/model-optimizer/unit_tests/extensions/front/Swish_fusion_test.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from extensions.front.Swish_fusion import SwishWithSigmoidWithoutBeta, SwishWithSigmoidWithBeta -from mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op, result, build_graph_with_edge_attrs - -ref_nodes = {**regular_op('input', {'type': 'Parameter'}), - **regular_op('swish', {'type': 'Swish', 'name': 'final_mul'}), - **result('result') - } -ref_edges = [('input', 'swish'), ('swish', 'result')] - - -class SwishWithSigmoidWithoutBetaTest(unittest.TestCase): - nodes = { - 
**regular_op('input', {'type': 'Parameter'}), - **regular_op('sigmoid', {'op': 'Sigmoid'}), - **regular_op('mul', {'op': 'Mul', 'name': 'final_mul'}), - **result('result'), - } - - edges = [('input', 'mul', {'in': 0, 'out': 0}), - ('input', 'sigmoid', {'in': 0, 'out': 0}), - ('sigmoid', 'mul', {'in': 1, 'out': 0}), - ('mul', 'result', {'in': 0, 'out': 0})] - - def test_swish_with_sigmoid_without_beta_test(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {}) - - graph_ref = build_graph(ref_nodes, ref_edges) - graph.stage = 'front' - - SwishWithSigmoidWithoutBeta().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and - graph.get_op_nodes(name='final_mul')[0].op == 'Swish') - - def test_swish_with_sigmoid_without_beta_different_tensors(self): - graph = build_graph_with_edge_attrs({ - **regular_op('input', {'type': 'Parameter'}), - **regular_op('input_2', {'type': 'Parameter'}), - **regular_op('sigmoid', {'op': 'Sigmoid'}), - **regular_op('mul', {'op': 'Mul', 'name': 'final_mul'}), - **result('result'), - }, [('input_2', 'mul', {'in': 0, 'out': 0}), - ('input', 'sigmoid', {'in': 0, 'out': 0}), - ('sigmoid', 'mul', {'in': 1, 'out': 0}), - ('mul', 'result', {'in': 0, 'out': 0})], {}) - - graph_ref = graph.copy() - graph.stage = 'front' - - SwishWithSigmoidWithoutBeta().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - -class SwishWithSigmoidWithBetaTest(unittest.TestCase): - nodes = { - **regular_op('input', {'type': 'Parameter'}), - **regular_op('beta', {'type': 'Parameter'}), - **regular_op('mul_beta', {'op': 'Mul'}), - **regular_op('sigmoid', {'op': 'Sigmoid'}), - **regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}), - **result('result'), - } - - edges = [('input', 'mul_beta', {'in': 0, 'out': 0}), - ('input', 'mul_2', {'in': 0, 'out': 0}), - ('beta', 'mul_beta', {'in': 1, 'out': 0}), - ('mul_beta', 'sigmoid', {'in': 0, 'out': 0}), - ('sigmoid', 'mul_2', {'in': 1, 'out': 0}), - ('mul_2', 'result', {'in': 0, 'out': 0})] - - def test_swish_with_sigmoid_with_beta_test(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {}) - - new_ref_nodes = ref_nodes.copy() - new_ref_nodes.update(**regular_op('beta', {'type': 'Parameter'})) - - graph_ref = build_graph(new_ref_nodes, ref_edges + [('beta', 'swish')]) - graph.stage = 'front' - - SwishWithSigmoidWithBeta().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and - graph.get_op_nodes(name='final_mul')[0].op == 'Swish') - - def test_swish_with_sigmoid_with_beta_different_tensors(self): - graph = build_graph_with_edge_attrs({ - **regular_op('input', {'type': 'Parameter'}), - **regular_op('input_2', {'type': 'Parameter'}), - **regular_op('beta', {'type': 'Parameter'}), - **regular_op('mul_beta', {'op': 'Mul'}), - **regular_op('sigmoid', {'op': 'Sigmoid'}), - **regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}), - **result('result'), - }, [('input', 'mul_beta', {'in': 0, 'out': 0}), - ('input_2', 'mul_2', {'in': 0, 'out': 0}), - ('beta', 'mul_beta', {'in': 1, 'out': 0}), - ('mul_beta', 'sigmoid', {'in': 0, 'out': 0}), - ('sigmoid', 'mul_2', {'in': 1, 'out': 0}), - ('mul_2', 'result', {'in': 0, 'out': 0})], {}) - - graph_ref = graph.copy() - graph.stage = 
'front' - - SwishWithSigmoidWithBeta().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) diff --git a/model-optimizer/unit_tests/extensions/front/onnx/quantize_dequantize_linear_test.py b/model-optimizer/unit_tests/extensions/front/onnx/quantize_dequantize_linear_test.py deleted file mode 100644 index 3d07eb06236..00000000000 --- a/model-optimizer/unit_tests/extensions/front/onnx/quantize_dequantize_linear_test.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from extensions.front.onnx.quantize_dequantize_linear import QuantizeDequantizeLinear -from mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -# quantize and dequantize share tensors with scale/zp -nodes0_attributes = { - 'input': {'kind': 'op', 'op': 'AnyOp'}, - 'quantize': {'kind': 'op', 'op': 'QuantizeLinear'}, - 'dequantize': {'kind': 'op', 'op': 'DequantizeLinear'}, - 'scale_param': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'zerop_param': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'out': {'kind': 'op', 'op': 'AnyOp'}, -} - -# quantize and dequantize do not share tensors with scale/zp -nodes1_attributes = { - 'input': {'kind': 'op', 'op': 'AnyOp'}, - 'quantize': {'kind': 'op', 'op': 'QuantizeLinear'}, - 'dequantize': {'kind': 'op', 'op': 'DequantizeLinear'}, - 'scale_param_q': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'zerop_param_q': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'scale_param_dq': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'zerop_param_dq': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'out': {'kind': 'op', 'op': 'AnyOp'}, -} - -nodes_ref_attributes = { - 'input': {'kind': 'op', 'op': 'AnyOp'}, - 'fq': {'kind': 'op', 'op': 'FakeQuantize'}, - 'min_param': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'max_param': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'out': {'kind': 'op', 'op': 'AnyOp'}, -} - - -class TestQuantizeDeQuantize2FakeQuantize(unittest.TestCase): - - def test_quantizedequantize2fakequantize_0(self): - # testing the code path with uint8 zero-point - graph = build_graph(nodes1_attributes, - [('input', 'quantize'), - ('quantize', 'dequantize'), - ('scale_param_q', 'quantize'), - ('zerop_param_q', 'quantize'), - ('scale_param_dq', 'dequantize'), - ('zerop_param_dq', 'dequantize'), - ('dequantize', 'out'), - ], - {'scale_param_q': {'shape': np.array([1]), 'value': np.float32(1.0 / 255)}, - 'zerop_param_q': {'shape': np.array([1]), 'value': np.uint8(0)}, - 'scale_param_dq': {'shape': np.array([1]), 'value': np.float32(1.0 / 255)}, - 'zerop_param_dq': {'shape': np.array([1]), 'value': np.uint8(0)}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_ref_attributes, - [('input', 'fq', {'in': 0}), - ('min_param', 'fq', {'out': 0, 'in': 1}), - ('min_param', 'fq', {'out': 0, 'in': 3}), - ('max_param', 'fq', {'out': 0, 'in': 2}), - ('max_param', 'fq', {'out': 0, 'in': 4}), - ('fq', 'out'), - ], - {'fq': {'levels': 256}, - 'min_param': {'value': np.float32(0.0)}, - 'max_param': {'value': np.float32(1.0)}, - }, nodes_with_edges_only=True) - - graph.stage = 'front' - tested_class = QuantizeDequantizeLinear() - tested_class.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'out', check_op_attrs=True) - self.assertTrue(flag, resp) - - def 
test_quantizedequantize2fakequantize_1(self): - # testing the code path with int8 zero-point - graph = build_graph(nodes0_attributes, - [('input', 'quantize'), - ('quantize', 'dequantize'), - ('scale_param', 'quantize'), - ('zerop_param', 'quantize'), - ('scale_param', 'dequantize'), - ('zerop_param', 'dequantize'), - ('dequantize', 'out'), - ], - {'scale_param': {'shape': np.array([1]), 'value': np.float32(1.0 / 255)}, - 'zerop_param': {'shape': np.array([1]), 'value': np.int8(0)}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_ref_attributes, - [('input', 'fq', {'in': 0}), - ('min_param', 'fq', {'out': 0, 'in': 1}), - ('min_param', 'fq', {'out': 0, 'in': 3}), - ('max_param', 'fq', {'out': 0, 'in': 2}), - ('max_param', 'fq', {'out': 0, 'in': 4}), - ('fq', 'out'), - ], - {'fq': {'levels': 256}, - 'min_param': {'value': np.float32(-128.0 / 255)}, - 'max_param': {'value': np.float32(127.0 / 255)}, - }, nodes_with_edges_only=True) - - graph.stage = 'front' - tested_class = QuantizeDequantizeLinear() - tested_class.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'out', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/model-optimizer/unit_tests/mo/front/tf/extractors/utils_test.py b/model-optimizer/unit_tests/mo/front/tf/extractors/utils_test.py index b2ba651fba2..d9d3fc329c8 100644 --- a/model-optimizer/unit_tests/mo/front/tf/extractors/utils_test.py +++ b/model-optimizer/unit_tests/mo/front/tf/extractors/utils_test.py @@ -211,3 +211,30 @@ class TensorContentParsing(unittest.TestCase): with self.assertLogs(log.getLogger(), level="ERROR") as cm: result = tf_tensor_content(pb_tensor.dtype, shape, pb_tensor) self.assertEqual([warning_message, warning_message], cm.output) + + def test_empty_value(self): + pb_tensor = PB({ + 'dtype': 1, + 'float_val': [] + }) + + shape = int64_array([1, 1, 0]) + tf_dtype = pb_tensor.dtype + ref = np.array([[[]]], dtype=np.float32) + res = tf_tensor_content(tf_dtype, shape, pb_tensor) + + self.assertEqual(res.shape, ref.shape) + self.assertTrue(np.all(res == ref)) + + def test_scalar_value(self): + pb_tensor = PB({ + 'dtype': 3, + 'int_val': 4 + }) + + shape = int64_array([]) + tf_dtype = pb_tensor.dtype + ref = np.array(4, dtype=np.int32) + res = tf_tensor_content(tf_dtype, shape, pb_tensor) + + self.assertEqual(ref, res) diff --git a/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt b/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt index 711f85ecc2f..1a94414055b 100644 --- a/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt +++ b/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt @@ -8,7 +8,7 @@ set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY_OLD ${CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY} set(CMAKE_PDB_OUTPUT_DIRECTORY_OLD ${CMAKE_PDB_OUTPUT_DIRECTORY}) set(PYTHON_VERSION python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}) -message("Python version=${PYTHON_VERSION}") +message(STATUS "Python version=${PYTHON_VERSION}") if(WIN32) set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python_api/${PYTHON_VERSION}/) diff --git a/ngraph/core/include/ngraph/descriptor/tensor.hpp b/ngraph/core/include/ngraph/descriptor/tensor.hpp index 676bd9eddd0..381e528e531 100644 --- a/ngraph/core/include/ngraph/descriptor/tensor.hpp +++ b/ngraph/core/include/ngraph/descriptor/tensor.hpp @@ -45,6 +45,7 @@ namespace ngraph const std::unordered_set& get_names() const; void set_names(const 
std::unordered_set& names); + void add_names(const std::unordered_set& names); void set_tensor_type(const element::Type& element_type, const PartialShape& pshape); void set_element_type(const element::Type& elemenet_type); void set_partial_shape(const PartialShape& partial_shape); diff --git a/ngraph/core/include/ngraph/op/asinh.hpp b/ngraph/core/include/ngraph/op/asinh.hpp index 1f781862b9d..62a4eba36a4 100644 --- a/ngraph/core/include/ngraph/op/asinh.hpp +++ b/ngraph/core/include/ngraph/op/asinh.hpp @@ -19,8 +19,8 @@ namespace ngraph class NGRAPH_API Asinh : public util::UnaryElementwiseArithmetic { public: - static constexpr NodeTypeInfo type_info{"Asinh", 3}; - const NodeTypeInfo& get_type_info() const override { return type_info; } + NGRAPH_RTTI_DECLARATION; + /// \brief Constructs an Asinh operation. Asinh() = default; /// \brief Constructs an Asinh operation. diff --git a/ngraph/core/include/ngraph/op/cosh.hpp b/ngraph/core/include/ngraph/op/cosh.hpp index 7e52bf1679f..6fcf30e8a08 100644 --- a/ngraph/core/include/ngraph/op/cosh.hpp +++ b/ngraph/core/include/ngraph/op/cosh.hpp @@ -16,8 +16,8 @@ namespace ngraph class NGRAPH_API Cosh : public util::UnaryElementwiseArithmetic { public: - static constexpr NodeTypeInfo type_info{"Cosh", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } + NGRAPH_RTTI_DECLARATION; + /// \brief Constructs a hyperbolic cosine operation. Cosh() = default; /// \brief Constructs a hyperbolic cosine operation. diff --git a/ngraph/core/include/ngraph/slice_plan.hpp b/ngraph/core/include/ngraph/slice_plan.hpp index cc26285fd32..6a1ac2dde03 100644 --- a/ngraph/core/include/ngraph/slice_plan.hpp +++ b/ngraph/core/include/ngraph/slice_plan.hpp @@ -12,7 +12,7 @@ namespace ngraph { // - // In various places, like ConstantFolding and DynElimination, it is + // In various places, like ConstantFolding, it is // useful to transform DynSlice by converting it to a sequence of ops: // // Slice (to do the basic slicing) diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/asinh.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/asinh.hpp index 02db44c13b4..9fc58a3dff4 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/asinh.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/asinh.hpp @@ -13,7 +13,8 @@ namespace ngraph { namespace reference { - template + template ::value, bool>::type = true> void asinh(const T* arg, T* out, size_t count) { for (size_t i = 0; i < count; i++) @@ -21,6 +22,16 @@ namespace ngraph out[i] = std::asinh(arg[i]); } } + + template ::value, bool>::type = true> + void asinh(const T* arg, T* out, size_t count) + { + for (size_t i = 0; i < count; i++) + { + out[i] = std::roundl(std::asinh(arg[i])); + } + } } // namespace reference } // namespace runtime } // namespace ngraph diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/cosh.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/cosh.hpp index 2529f9a8893..a9a7e17982f 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/cosh.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/cosh.hpp @@ -13,7 +13,8 @@ namespace ngraph { namespace reference { - template + template ::value, bool>::type = true> void cosh(const T* arg, T* out, size_t count) { for (size_t i = 0; i < count; i++) @@ -21,6 +22,16 @@ namespace ngraph out[i] = std::cosh(arg[i]); } } + + template ::value, bool>::type = true> + void cosh(const T* arg, T* out, size_t count) + { + for (size_t i = 0; i < count; i++) 
+            {
+                out[i] = std::roundl(std::cosh(arg[i]));
+            }
+        }
        } // namespace reference
    } // namespace runtime
} // namespace ngraph
diff --git a/ngraph/core/src/descriptor/tensor.cpp b/ngraph/core/src/descriptor/tensor.cpp
index 58b06df90ad..1d8335fee08 100644
--- a/ngraph/core/src/descriptor/tensor.cpp
+++ b/ngraph/core/src/descriptor/tensor.cpp
@@ -123,6 +123,14 @@ void descriptor::Tensor::set_names(const std::unordered_set& names)
     m_names = names;
 }
+void descriptor::Tensor::add_names(const std::unordered_set& names)
+{
+    for (const auto& name : names)
+    {
+        m_names.insert(name);
+    }
+}
+
 ostream& operator<<(ostream& out, const descriptor::Tensor& tensor)
 {
     std::string names;
diff --git a/ngraph/core/src/graph_util.cpp b/ngraph/core/src/graph_util.cpp
index 12a439f17e3..901672723a1 100644
--- a/ngraph/core/src/graph_util.cpp
+++ b/ngraph/core/src/graph_util.cpp
@@ -167,11 +167,9 @@ void ngraph::replace_node(std::shared_ptr target,
     // Change I's connected upstream output to O_rep
     for (size_t i = 0; i < target->get_output_size(); i++)
     {
-        for (auto& input : target->output(i).get_target_inputs())
-        {
-            input.replace_source_output(replacement->output(output_order[i]));
-        }
+        target->output(i).replace(replacement->output(output_order[i]));
     }
+    replacement->add_node_control_dependents(target);
     replacement->add_node_control_dependencies(target);
     target->clear_control_dependents();
@@ -912,7 +910,15 @@ bool ngraph::replace_output_update_name(Output output, const Output&
         replacement.get_tensor().set_name(output.get_node()->get_friendly_name());
         NGRAPH_SUPPRESS_DEPRECATED_END
     }
+
+    // Save the replacement tensor names before the replacement, as they will be
+    // overridden by the output tensor names
+    auto output_names = replacement.get_tensor_ptr()->get_names();
     output.replace(replacement);
+
+    // Restore the original replacement tensor names
+    replacement.get_tensor().add_names(output_names);
+
     copy_runtime_info({replacement.get_node_shared_ptr(), output.get_node_shared_ptr()},
                       replacement.get_node_shared_ptr());
     return true;
diff --git a/ngraph/core/src/node_output.cpp b/ngraph/core/src/node_output.cpp
index 9799c49b8fa..e64dd472f35 100644
--- a/ngraph/core/src/node_output.cpp
+++ b/ngraph/core/src/node_output.cpp
@@ -76,6 +76,7 @@ namespace ngraph
         {
             input.replace_source_output(replacement);
         }
+        replacement.get_tensor_ptr()->set_names(get_tensor_ptr()->get_names());
     }
     using RTMap = std::map>;
diff --git a/ngraph/core/src/op/asinh.cpp b/ngraph/core/src/op/asinh.cpp
index ed7b2191bd6..16ea8b046d1 100644
--- a/ngraph/core/src/op/asinh.cpp
+++ b/ngraph/core/src/op/asinh.cpp
@@ -7,6 +7,7 @@
 #include "itt.hpp"
 #include "ngraph/op/asinh.hpp"
+#include "ngraph/op/util/elementwise_args.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/runtime/reference/asinh.hpp"
 #include "ngraph/type/element_type.hpp"
@@ -14,7 +15,7 @@
 using namespace std;
 using namespace ngraph;
-constexpr NodeTypeInfo op::v3::Asinh::type_info;
+NGRAPH_RTTI_DEFINITION(op::v3::Asinh, "Asinh", 3, util::UnaryElementwiseArithmetic);
 op::v3::Asinh::Asinh(const Output& arg)
     : UnaryElementwiseArithmetic(arg)
@@ -32,25 +33,26 @@ shared_ptr op::v3::Asinh::clone_with_new_inputs(const OutputVector& new_ar
 namespace asinhop
 {
     template
-    bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out)
+    inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
     {
-        runtime::reference::asinh(
-            arg0->get_data_ptr(), out->get_data_ptr(), shape_size(arg0->get_shape()));
+        runtime::reference::asinh(arg0->get_data_ptr(), out->get_data_ptr(), count);
         return true;
     }
     bool evaluate_asinh(const HostTensorPtr& arg0, const HostTensorPtr& out)
     {
         bool rc = true;
+        size_t count = shape_size(arg0->get_shape());
         out->set_unary(arg0);
+
         switch (arg0->get_element_type())
         {
-            NGRAPH_TYPE_CASE(evaluate_asinh, i32, arg0, out);
-            NGRAPH_TYPE_CASE(evaluate_asinh, i64, arg0, out);
-            NGRAPH_TYPE_CASE(evaluate_asinh, u32, arg0, out);
-            NGRAPH_TYPE_CASE(evaluate_asinh, u64, arg0, out);
-            NGRAPH_TYPE_CASE(evaluate_asinh, f16, arg0, out);
-            NGRAPH_TYPE_CASE(evaluate_asinh, f32, arg0, out);
+            NGRAPH_TYPE_CASE(evaluate_asinh, i32, arg0, out, count);
+            NGRAPH_TYPE_CASE(evaluate_asinh, i64, arg0, out, count);
+            NGRAPH_TYPE_CASE(evaluate_asinh, u32, arg0, out, count);
+            NGRAPH_TYPE_CASE(evaluate_asinh, u64, arg0, out, count);
+            NGRAPH_TYPE_CASE(evaluate_asinh, f16, arg0, out, count);
+            NGRAPH_TYPE_CASE(evaluate_asinh, f32, arg0, out, count);
         default: rc = false; break;
         }
         return rc;
@@ -65,7 +67,7 @@ bool op::v3::Asinh::evaluate(const HostTensorVector& outputs, const HostTensorVe
 bool op::v3::Asinh::has_evaluate() const
 {
-    NGRAPH_OP_SCOPE(v1_Asinh_has_evaluate);
+    NGRAPH_OP_SCOPE(v3_Asinh_has_evaluate);
     switch (get_input_element_type(0))
     {
     case ngraph::element::i32:
diff --git a/ngraph/core/src/op/batch_to_space.cpp b/ngraph/core/src/op/batch_to_space.cpp
index 8e2e98553cf..9ec3c0f7bd1 100644
--- a/ngraph/core/src/op/batch_to_space.cpp
+++ b/ngraph/core/src/op/batch_to_space.cpp
@@ -88,8 +88,8 @@ void op::v1::BatchToSpace::validate_and_infer_types()
     if (data_rank.is_static())
     {
         NODE_VALIDATION_CHECK(this,
-                              (data_rank.get_length() >= 4),
-                              "data input must have rank greater than or equal to 4. Got: ",
+                              (data_rank.get_length() >= 2),
+                              "data input must have rank greater than or equal to 2. 
Got: ", data_rank.get_length()); if (inputs_same_ps.is_static()) @@ -197,7 +197,7 @@ namespace } auto data_shape = data->get_shape(); auto data_rank = data_shape.size(); - if (!(data_rank == 4 || data_rank == 5)) + if (data_rank < 2) { return false; } @@ -346,7 +346,6 @@ bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs, bool ngraph::op::v1::BatchToSpace::has_evaluate() const { NGRAPH_OP_SCOPE(v1_BatchToSpace_has_evaluate); - return !get_input_partial_shape(0).is_dynamic() && - (get_input_shape(0).size() == 4 || get_input_shape(0).size() == 5) && + return !get_input_partial_shape(0).is_dynamic() && get_input_shape(0).size() >= 2 && get_input_shape(0).size() <= shape_size(get_input_shape(1)); } diff --git a/ngraph/core/src/op/cosh.cpp b/ngraph/core/src/op/cosh.cpp index d4f22bc6f46..3a8d169a67f 100644 --- a/ngraph/core/src/op/cosh.cpp +++ b/ngraph/core/src/op/cosh.cpp @@ -5,16 +5,16 @@ #include "itt.hpp" #include "ngraph/op/cosh.hpp" -#include "ngraph/op/multiply.hpp" -#include "ngraph/op/sinh.hpp" #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/cosh.hpp" +#include "ngraph/validation_util.hpp" + using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::Cosh::type_info; +NGRAPH_RTTI_DEFINITION(op::v0::Cosh, "Cosh", 0, util::UnaryElementwiseArithmetic); op::Cosh::Cosh(const Output& arg) : UnaryElementwiseArithmetic(arg) @@ -68,6 +68,7 @@ namespace coshop bool op::Cosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v0_Cosh_evaluate); + NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/util/scatter_nd_base.cpp b/ngraph/core/src/op/util/scatter_nd_base.cpp index c8a1d13931c..9040645cdd2 100644 --- a/ngraph/core/src/op/util/scatter_nd_base.cpp +++ b/ngraph/core/src/op/util/scatter_nd_base.cpp @@ -57,11 +57,13 @@ void op::util::ScatterNDBase::validate_and_infer_types() NODE_VALIDATION_CHECK(this, inputs_rank.is_dynamic() || indices_rank.is_dynamic() || + indices_shape[indices_rank.get_length() - 1].is_dynamic() || indices_shape[indices_rank.get_length() - 1].get_length() <= inputs_rank.get_length(), "Last dimension of indices can be at most the rank of inputs"); - if (inputs_rank.is_static() && indices_rank.is_static() && updates_rank.is_static()) + if (inputs_rank.is_static() && indices_rank.is_static() && updates_rank.is_static() && + indices_shape[indices_rank.get_length() - 1].is_static()) { auto expected_updates_rank = indices_rank.get_length() + inputs_rank.get_length() - indices_shape[indices_rank.get_length() - 1].get_length() - 1; diff --git a/ngraph/frontend/CMakeLists.txt b/ngraph/frontend/CMakeLists.txt index 8a25cc9b650..b4afff783db 100644 --- a/ngraph/frontend/CMakeLists.txt +++ b/ngraph/frontend/CMakeLists.txt @@ -3,9 +3,6 @@ # if(NOT WIN32) - message(${CMAKE_SOURCE_DIR}/thirdparty/cmake_static_protobuf) - message(BINARY ${CMAKE_CURRENT_BINARY_DIR}) - # There seems no suitable other way to identify exact output binary name for libprotobuf if(CMAKE_BUILD_TYPE STREQUAL "Debug") # Use 'protobufd' directly as it is done in the same way in protobuf cmake files @@ -15,8 +12,6 @@ if(NOT WIN32) set(PROTOBUF_STATIC_LIB_OUTPUT ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/${CMAKE_STATIC_LIBRARY_PREFIX}protobuf${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() - message("Static protobuf lib: ${PROTOBUF_STATIC_LIB_OUTPUT}") - 
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/libprotobuf_static) add_custom_command( OUTPUT diff --git a/ngraph/frontend/paddlepaddle/CMakeLists.txt b/ngraph/frontend/paddlepaddle/CMakeLists.txt index bf5c396ac6c..cc6f300f8da 100644 --- a/ngraph/frontend/paddlepaddle/CMakeLists.txt +++ b/ngraph/frontend/paddlepaddle/CMakeLists.txt @@ -29,8 +29,6 @@ else() set(PDPD_PROTOC_EXECUTABLE $) endif() -message("PDPD_PROTOC_EXECUTABLE is [${PDPD_PROTOC_EXECUTABLE}]") - foreach(INFILE ${proto_files}) get_filename_component(FILE_DIR ${INFILE} DIRECTORY) get_filename_component(FILE_WE ${INFILE} NAME_WE) diff --git a/ngraph/python/CMakeLists.txt b/ngraph/python/CMakeLists.txt index 82b414d0499..2eb4b03a2e5 100644 --- a/ngraph/python/CMakeLists.txt +++ b/ngraph/python/CMakeLists.txt @@ -19,7 +19,7 @@ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/pybind11 EXCLUDE_FROM_ALL) # PYTHON_VERSION_MAJOR and PYTHON_VERSION_MINOR are defined inside pybind11 set(PYTHON_VERSION python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}) -message("Python version=${PYTHON_VERSION}") +message(STATUS "Python version=${PYTHON_VERSION}") set(LIBRARY_OUTPUT_DIRECTORY_BIN ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}) if(OpenVINO_SOURCE_DIR) diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 8147e9008bd..8dbf0f888bc 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -75,13 +75,10 @@ set(SRC op_eval/strided_slice.cpp op_eval/transpose.cpp op_eval/variadic_split.cpp - op_is.cpp opset1.cpp partial_shape.cpp pass_config.cpp - pass_liveness.cpp pass_manager.cpp - pass_shape_relevance.cpp pattern.cpp provenance.cpp replace_node.cpp @@ -95,6 +92,7 @@ set(SRC type_prop/adaptive_avg_pool.cpp type_prop/adaptive_max_pool.cpp type_prop/asin.cpp + type_prop/asinh.cpp type_prop/assign.cpp type_prop/atan.cpp type_prop/avg_pool.cpp @@ -112,6 +110,7 @@ set(SRC type_prop/convolution.cpp type_prop/convolution_backprop_data.cpp type_prop/cos.cpp + type_prop/cosh.cpp type_prop/ctc_greedy_decoder.cpp type_prop/ctc_greedy_decoder_seq_len.cpp type_prop/ctc_loss.cpp @@ -231,6 +230,7 @@ set(SRC visitors/value_map.cpp visitors/op/adaptive_avg_pool.cpp visitors/op/adaptive_max_pool.cpp + visitors/op/asinh.cpp visitors/op/atan.cpp visitors/op/batch_norm.cpp visitors/op/batch_to_space.cpp @@ -241,6 +241,7 @@ set(SRC visitors/op/convert.cpp visitors/op/convolution_backprop.cpp visitors/op/cos.cpp + visitors/op/cosh.cpp visitors/op/cum_sum.cpp visitors/op/deformable_convolution.cpp visitors/op/deformable_psroi_pooling.cpp diff --git a/ngraph/test/backend/asinh.in.cpp b/ngraph/test/backend/asinh.in.cpp index 5cde50b53bb..d3e8fe44c88 100644 --- a/ngraph/test/backend/asinh.in.cpp +++ b/ngraph/test/backend/asinh.in.cpp @@ -49,3 +49,18 @@ NGRAPH_TEST(${BACKEND_NAME}, asinh) test_case.add_expected_output(shape, expected); test_case.run(); } + +NGRAPH_TEST(${BACKEND_NAME}, asinh_i32) +{ + Shape shape{11}; + auto A = make_shared(element::i32, shape); + auto f = make_shared(make_shared(A), ParameterVector{A}); + + vector input{-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5}; + vector expected{-2, -2, -2, -1, -1, 0, 1, 1, 2, 2, 2}; + + auto test_case = test::TestCase(f); + test_case.add_input(input); + test_case.add_expected_output(shape, expected); + test_case.run(); +} diff --git a/ngraph/test/backend/batch_to_space.in.cpp b/ngraph/test/backend/batch_to_space.in.cpp index ac6f07e3002..91fbbafed53 100644 --- a/ngraph/test/backend/batch_to_space.in.cpp +++ b/ngraph/test/backend/batch_to_space.in.cpp @@ -79,6 +79,27 @@ 
NGRAPH_TEST_P(${BACKEND_NAME}, BatchToSpaceTestFloat, BatchToSpaceTestFloatCases BatchToSpaceTestExecute(GetParam()); } +const test::NDArray input_with_shape_4x3( + {{1.0f, 2.0f, 3.0f}, + {4.0f, 5.0f, 6.0f}, + {7.0f, 8.0f, 9.0f}, + {10.0f, 11.0f, 12.0f}}); + +const test::NDArray zero_crops_2d({0, 0}); + +NGRAPH_INSTANTIATE_TEST_SUITE_P( + ${BACKEND_NAME}, + batch_to_space_2d_without_crops, + BatchToSpaceTestFloat, + testing::Values( + BatchToSpaceParams{input_with_shape_4x3, + test::NDArray({1, 2}), + zero_crops_2d, + zero_crops_2d, + test::NDArray( + {{1.0f, 7.0f, 2.0f, 8.0f, 3.0f, 9.0f}, + {4.0f, 10.0f, 5.0f, 11.0f, 6.0f, 12.0f}})})); + const test::NDArray input_with_shape_4x1x1x3( {{{{1.0f, 2.0f, 3.0f}}}, {{{4.0f, 5.0f, 6.0f}}}, diff --git a/ngraph/test/backend/cosh.in.cpp b/ngraph/test/backend/cosh.in.cpp index 1877b50e6be..f0711c5f48f 100644 --- a/ngraph/test/backend/cosh.in.cpp +++ b/ngraph/test/backend/cosh.in.cpp @@ -2,23 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include -#include - -// clang-format off -#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS -#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS -#endif - -#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS -#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS -#endif -// clang-format on - #include "gtest/gtest.h" #include "ngraph/ngraph.hpp" #include "util/engine/test_engines.hpp" @@ -31,7 +14,7 @@ using namespace ngraph; static string s_manifest = "${MANIFEST}"; using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); -NGRAPH_TEST(${BACKEND_NAME}, cosh) +NGRAPH_TEST(${BACKEND_NAME}, cosh_float) { Shape shape{6}; auto A = make_shared(element::f32, shape); @@ -49,3 +32,16 @@ NGRAPH_TEST(${BACKEND_NAME}, cosh) test_case.add_expected_output(shape, expected); test_case.run(); } + +NGRAPH_TEST(${BACKEND_NAME}, cosh_int) +{ + Shape shape{5}; + auto A = make_shared(element::i32, shape); + auto f = make_shared(make_shared(A), ParameterVector{A}); + + auto test_case = test::TestCase(f); + test_case.add_input({1, 5, 2, 3, 3}); + test_case.add_expected_output(shape, + {2, 74, 4, 10, 10}); + test_case.run(); +} diff --git a/ngraph/test/dyn_elimination.cpp b/ngraph/test/dyn_elimination.cpp deleted file mode 100644 index 14a64c1d80b..00000000000 --- a/ngraph/test/dyn_elimination.cpp +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "gtest/gtest.h" - -#include "ngraph/ngraph.hpp" -#include "ngraph/pass/constant_folding.hpp" -#include "ngraph/pass/dyn_elimination.hpp" -#include "ngraph/pass/manager.hpp" -#include "pass/opset0_downgrade.hpp" -#include "util/all_close_f.hpp" -#include "util/test_tools.hpp" - -using namespace ngraph; -using namespace std; - -TEST(dyn_elimination, transpose) -{ - Shape shape_in{2, 4, 6, 8}; - auto param = make_shared(element::boolean, shape_in); - - auto constant_perm = - make_shared(element::i64, Shape{4}, vector{2, 3, 1, 0}); - - auto transpose = make_shared(param, constant_perm); - - auto f = make_shared(transpose, ParameterVector{param}); - - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); - - ASSERT_EQ(count_ops_of_type(f), 0); - ASSERT_EQ(count_ops_of_type(f), 1); - - auto new_reshape = - as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - ASSERT_TRUE(new_reshape); - - ASSERT_EQ(new_reshape->get_input_order(), (AxisVector{2, 3, 1, 0})); - 
ASSERT_EQ(new_reshape->get_output_shape(0), (Shape{6, 8, 4, 2})); - ASSERT_EQ(new_reshape->get_output_element_type(0), element::boolean); -} - -// For now, we can't handle the case where the input has dynamic shapes, -// because the classic Reshape op demands a Shape. Probably won't be able to -// deal with this until/unless we make a "StaticTranspose". Just make sure -// we don't crash or mangle the graph. -TEST(dyn_elimination, transpose_dyn_shape) -{ - PartialShape shape_in{2, 4, Dimension::dynamic(), 8}; - - auto param = make_shared(element::boolean, shape_in); - - auto constant_perm = - make_shared(element::i64, Shape{4}, vector{2, 3, 1, 0}); - - auto transpose = make_shared(param, constant_perm); - - auto f = make_shared(transpose, ParameterVector{param}); - - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); - - ASSERT_EQ(count_ops_of_type(f), 1); - ASSERT_EQ(count_ops_of_type(f), 1); - - auto new_transpose = - as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - ASSERT_TRUE(new_transpose); - - ASSERT_EQ(new_transpose->get_output_element_type(0), element::boolean); - ASSERT_TRUE(new_transpose->get_output_partial_shape(0).relaxes( - PartialShape{Dimension::dynamic(), 8, 4, 2})); -} - -TEST(dyn_elimination, range) -{ - auto constant_start = make_shared(element::i64, Shape{}, vector{0}); - auto constant_stop = make_shared(element::i64, Shape{}, vector{5}); - auto constant_step = make_shared(element::i64, Shape{}, vector{2}); - - auto range = make_shared(constant_start, constant_stop, constant_step); - - ASSERT_EQ(range->get_element_type(), element::i64); - ASSERT_EQ(range->get_shape(), (Shape{3})); - - auto f = make_shared(range, ParameterVector{}); - - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); - - ASSERT_EQ(count_ops_of_type(f), 0); - ASSERT_EQ(count_ops_of_type(f), 1); - - auto replacement = - as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - - ASSERT_NE(replacement, nullptr); - ASSERT_EQ(replacement->get_element_type(), element::i64); - ASSERT_EQ(replacement->get_shape(), (Shape{3})); - - auto vals = replacement->get_vector(); - - ASSERT_EQ(vals, (vector{0, 2, 4})); -} - -TEST(dyn_elimination, range_f64) -{ - auto constant_start = make_shared(element::f64, Shape{}, vector{-0.5}); - auto constant_stop = make_shared(element::f64, Shape{}, vector{2}); - auto constant_step = make_shared(element::f64, Shape{}, vector{0.25}); - - auto range = make_shared(constant_start, constant_stop, constant_step); - - ASSERT_EQ(range->get_element_type(), element::f64); - ASSERT_EQ(range->get_shape(), (Shape{10})); - - auto f = make_shared(range, ParameterVector{}); - - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); - - ASSERT_EQ(count_ops_of_type(f), 0); - ASSERT_EQ(count_ops_of_type(f), 1); - - auto replacement = - as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - - ASSERT_NE(replacement, nullptr); - ASSERT_EQ(replacement->get_element_type(), element::f64); - ASSERT_EQ(replacement->get_shape(), (Shape{10})); - - auto vals = replacement->get_vector(); - - ASSERT_TRUE(test::all_close_f( - vals, vector{-0.5, -0.25, 0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75})); -} diff --git a/ngraph/test/op_is.cpp b/ngraph/test/op_is.cpp deleted file mode 100644 index a3f81b3d800..00000000000 --- a/ngraph/test/op_is.cpp +++ /dev/null @@ -1,577 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// 
SPDX-License-Identifier: Apache-2.0 -// - -#include "gtest/gtest.h" - -#include "ngraph/ngraph.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/validation_util.hpp" -#include "op/convolution.hpp" -#include "op/group_conv.hpp" -#include "util/test_tools.hpp" - -using namespace ngraph; - -NGRAPH_SUPPRESS_DEPRECATED_START - -namespace -{ - void op_is_Abs() - { - op::Abs node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Acos() - { - op::Acos node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Asin() - { - op::Asin node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Atan() - { - op::Atan node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_AvgPool() - { - op::AvgPool node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_BatchNormInference() - { - op::v0::BatchNormInference node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Broadcast() - { - op::v1::Broadcast node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Ceiling() - { - op::Ceiling node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Clamp() - { - op::Clamp node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Concat() - { - op::Concat node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Constant() - { - op::Constant node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - 
EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Convert() - { - op::Convert node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Convolution() - { - op::v0::Convolution node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_ConvolutionBackpropData() - { - op::v0::ConvolutionBackpropData node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Cos() - { - op::Cos node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Cosh() - { - op::Cosh node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_CumSum() - { - op::CumSum node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_DepthToSpace() - { - op::DepthToSpace node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Elu() - { - op::Elu node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Erf() - { - op::Erf node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Exp() - { - op::Exp node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_FakeQuantize() - { - op::FakeQuantize node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Floor() - { - op::Floor node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - 
EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_GRN() - { - op::GRN node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Gather() - { - op::v1::Gather node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Gelu() - { - op::Gelu node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_GroupConvolution() - { - op::v0::GroupConvolution node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_GroupConvolutionBackpropData() - { - op::v0::GroupConvolutionBackpropData node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_HardSigmoid() - { - op::HardSigmoid node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Interpolate() - { - op::v0::Interpolate node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Log() - { - op::Log node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_LRN() - { - op::LRN node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_LSTMSequence() - { - op::v0::LSTMSequence node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_MatMul() - { - op::MatMul node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_NormalizeL2() - { - op::v0::NormalizeL2 node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - 
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_MVN() - { - op::MVN node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Negative() - { - op::Negative node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Parameter() - { - op::Parameter node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_PRelu() - { - op::PRelu node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_PriorBox() - { - op::PriorBox node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Range() - { - op::Range node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Relu() - { - op::Relu node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Result() - { - op::Result node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_ReverseSequence() - { - op::ReverseSequence node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Selu() - { - op::Selu node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_ShapeOf() - { - op::ShapeOf node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_ShuffleChannels() - { - op::ShuffleChannels node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - 
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Sigmoid() - { - op::Sigmoid node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Sign() - { - op::Sign node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Sin() - { - op::Sin node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Sinh() - { - op::Sinh node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_SpaceToDepth() - { - op::SpaceToDepth node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Sqrt() - { - op::Sqrt node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_SquaredDifference() - { - op::SquaredDifference node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_TRUE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Squeeze() - { - op::Squeeze node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Tan() - { - op::Tan node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Tanh() - { - op::Tanh node; - EXPECT_TRUE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_TensorIterator() - { - op::TensorIterator node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Tile() - { - op::v0::Tile node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - 
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Unsqueeze() - { - op::v0::Unsqueeze node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - - void op_is_Xor() - { - op::Xor node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_TRUE(op::is_binary_elementwise_logical(&node)); - } -} // namespace - -TEST(op_is, check) -{ - NGRAPH_SUPPRESS_DEPRECATED_START -#define NGRAPH_OP(a, b) op_is_##a(); -#include "opset0_tbl.hpp" -#undef NGRAPH_OP - NGRAPH_SUPPRESS_DEPRECATED_END -} diff --git a/ngraph/test/pass_liveness.cpp b/ngraph/test/pass_liveness.cpp deleted file mode 100644 index 5c3553de381..00000000000 --- a/ngraph/test/pass_liveness.cpp +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "ngraph/log.hpp" -#include "ngraph/ngraph.hpp" -#include "ngraph/pass/manager.hpp" -#include "ngraph/pass/visualize_tree.hpp" -#include "pass/liveness.hpp" - -#include "util/test_tools.hpp" - -using namespace std; -using namespace ngraph; -namespace ng = ngraph; - -TEST(liveness, constant) -{ - Shape shape{1}; - auto c = op::Constant::create(element::i32, shape, {5}); - auto f = make_shared(make_shared(c), ParameterVector{}); - - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); - - auto tmp = f->get_ordered_ops(); - vector> sorted{tmp.begin(), tmp.end()}; - ASSERT_EQ(3, sorted.size()); - EXPECT_EQ(0, sorted[0]->liveness_new_list.size()); - EXPECT_EQ(0, sorted[0]->liveness_free_list.size()); - - // op::Negative is live on output to op::Result - // op::Negative is new - EXPECT_EQ(1, sorted[1]->liveness_new_list.size()); - EXPECT_EQ(0, sorted[1]->liveness_free_list.size()); - - // op::Negative is live on input to op::Result - EXPECT_EQ(0, sorted[2]->liveness_new_list.size()); - // op::Negative is freed - EXPECT_EQ(1, sorted[2]->liveness_free_list.size()); -} diff --git a/ngraph/test/pass_shape_relevance.cpp b/ngraph/test/pass_shape_relevance.cpp deleted file mode 100644 index d288f8d0b52..00000000000 --- a/ngraph/test/pass_shape_relevance.cpp +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "ngraph/ngraph.hpp" -#include "ngraph/pass/manager.hpp" -#include "pass/shape_relevance.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -using namespace ngraph; -using namespace std; - -TEST(shape_relevance, simple) -{ - auto param0 = make_shared(element::f32, Shape{4, 6}); - auto param1 = make_shared(element::f32, Shape{4, 6}); - auto x = make_shared(param0, param1); - - auto f = make_shared(x, ParameterVector{param0, param1}); - - pass::Manager manager; - manager.register_pass(); - manager.run_passes(f); - - ASSERT_FALSE(param0->is_relevant_to_shapes()); - ASSERT_FALSE(param1->is_relevant_to_shapes()); -} - -TEST(shape_relevance, param_direct) -{ - auto param0 = make_shared(element::f32, Shape{4, 6}); - auto param1 = make_shared(element::i64, Shape{4}); - auto x = 
make_shared(param0, param1, true); - - auto f = make_shared(x, ParameterVector{param0, param1}); - - pass::Manager manager; - manager.register_pass(); - manager.run_passes(f); - - ASSERT_FALSE(param0->is_relevant_to_shapes()); - ASSERT_TRUE(param1->is_relevant_to_shapes()); -} - -TEST(shape_relevance, param_indirect) -{ - auto param0 = make_shared(element::f32, Shape{4, 6}); - auto param1 = make_shared(element::i64, Shape{4}); - auto param2 = make_shared(element::i64, Shape{2}); - - auto c = make_shared(NodeVector{param1, param2}, 0); - auto x = make_shared(param0, c, true); - - auto f = make_shared(x, ParameterVector{param0, param1, param2}); - - pass::Manager manager; - manager.register_pass(); - manager.run_passes(f); - - ASSERT_FALSE(param0->is_relevant_to_shapes()); - ASSERT_TRUE(param1->is_relevant_to_shapes()); - ASSERT_TRUE(param2->is_relevant_to_shapes()); -} - -TEST(shape_relevance, param_shape_of_direct_v0) -{ - auto param0 = make_shared(element::f32, Shape{4, 6}); - - auto x = make_shared(param0, make_shared(param0), true); - - auto f = make_shared(x, ParameterVector{param0}); - - pass::Manager manager; - manager.register_pass(); - manager.run_passes(f); - - ASSERT_FALSE(param0->is_relevant_to_shapes()); -} - -TEST(shape_relevance, param_shape_of_direct_v3) -{ - auto param0 = make_shared(element::f32, Shape{4, 6}); - - auto x = make_shared(param0, make_shared(param0), true); - - auto f = make_shared(x, ParameterVector{param0}); - - pass::Manager manager; - manager.register_pass(); - manager.run_passes(f); - - ASSERT_FALSE(param0->is_relevant_to_shapes()); -} - -TEST(shape_relevance, param_shape_of_direct_i32_v3) -{ - auto param0 = make_shared(element::f32, Shape{4, 6}); - - auto x = make_shared( - param0, make_shared(param0, element::i32), true); - - auto f = make_shared(x, ParameterVector{param0}); - - pass::Manager manager; - manager.register_pass(); - manager.run_passes(f); - - ASSERT_FALSE(param0->is_relevant_to_shapes()); -} - -TEST(shape_relevance, param_shape_of_indirect_v0) -{ - auto param0 = make_shared(element::f32, Shape{4, 6}); - - auto s = make_shared(param0); - auto r = make_shared( - s, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); - auto x = make_shared(param0, r, true); - - auto f = make_shared(x, ParameterVector{param0}); - - pass::Manager manager; - manager.register_pass(); - manager.run_passes(f); - - ASSERT_FALSE(param0->is_relevant_to_shapes()); -} - -TEST(shape_relevance, param_shape_of_indirect_v3) -{ - auto param0 = make_shared(element::f32, Shape{4, 6}); - - auto s = make_shared(param0); - auto r = make_shared( - s, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); - auto x = make_shared(param0, r, true); - - auto f = make_shared(x, ParameterVector{param0}); - - pass::Manager manager; - manager.register_pass(); - manager.run_passes(f); - - ASSERT_FALSE(param0->is_relevant_to_shapes()); -} - -TEST(shape_relevance, param_shape_of_indirect_i32_v3) -{ - auto param0 = make_shared(element::f32, Shape{4, 6}); - - auto s = make_shared(param0, element::i32); - auto r = make_shared( - s, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); - auto x = make_shared(param0, r, true); - - auto f = make_shared(x, ParameterVector{param0}); - - pass::Manager manager; - manager.register_pass(); - manager.run_passes(f); - - ASSERT_FALSE(param0->is_relevant_to_shapes()); -} diff --git a/ngraph/test/provenance.cpp b/ngraph/test/provenance.cpp index c814eb55c1e..f4878341ab7 100644 --- 
a/ngraph/test/provenance.cpp +++ b/ngraph/test/provenance.cpp @@ -15,7 +15,6 @@ #include "ngraph/ngraph.hpp" #include "ngraph/pass/manager.hpp" #include "ngraph/provenance.hpp" -#include "pass/fused_op_decomposition.hpp" #include "util/provenance_enabler.hpp" using namespace std; @@ -380,61 +379,6 @@ TEST(provenance, builder) } } -TEST(provenance, fused_copy_origin_tags) -{ - test::ProvenanceEnabler provenance_enabler; - - auto p1 = make_shared(element::f32, PartialShape{2, 3, 4}); - p1->add_provenance_tag("P1"); - auto g = make_shared(p1); - g->add_provenance_tag("G"); - auto r = make_shared(g); - auto f = make_shared(ResultVector{r}, ParameterVector{p1}); - - pass::Manager manager; - manager.register_pass(); - manager.run_passes(f); - - traverse_nodes(f, [&](const std::shared_ptr& node) { - auto tags = node->get_provenance_tags(); - if (node == p1) - { - EXPECT_EQ(tags.size(), 1); - EXPECT_TRUE(tags.find("P1") != tags.end()); - } - else if (node == r) - { - } - else - { - EXPECT_TRUE(tags.find("G") != tags.end()); - EXPECT_TRUE(tags.find("") != tags.end()); - } - }); -} - -TEST(provenance, fused_decomposition_tag) -{ - test::ProvenanceEnabler provenance_enabler; - - auto p1 = make_shared(element::f32, PartialShape{2, 3, 4}); - auto fused_op = make_shared(p1); - auto result = make_shared(fused_op); - auto f = make_shared(ResultVector{result}, ParameterVector{p1}); - - pass::Manager manager; - manager.register_pass(); - manager.run_passes(f); - - const auto tag = ""; - auto tag_check = [&tag](std::shared_ptr node) { - auto tags = node->get_provenance_tags(); - EXPECT_TRUE(tags.find(tag) != tags.end()); - }; - const auto decomposed_op = f->get_result()->get_input_node_shared_ptr(0); - traverse_nodes(as_node_vector(decomposed_op->outputs()), tag_check, {p1}); -} - TEST(provenance, empty_group) { auto p1 = make_shared(element::i32, PartialShape{2, 3, 4}); diff --git a/ngraph/test/replace_node.cpp b/ngraph/test/replace_node.cpp index 498fb8ad1d7..c69b73d1623 100644 --- a/ngraph/test/replace_node.cpp +++ b/ngraph/test/replace_node.cpp @@ -108,3 +108,63 @@ TEST(replace_node, replace_nodes) ASSERT_EQ(z_replacement->get_input_node_shared_ptr(0), x_replacement); ASSERT_EQ(z_replacement->get_input_node_shared_ptr(1), mul); } + +TEST(replace_node, simple_node_replacement) +{ + auto param = std::make_shared(element::i64, Shape{1, 64}); + param->output(0).get_tensor().set_names({"a", "b"}); + auto relu = std::make_shared(param); + relu->output(0).get_tensor().set_names({"c", "d"}); + + auto new_relu = std::make_shared(param); + new_relu->output(0).get_tensor().set_names({"f"}); + replace_node(relu, new_relu); + + ASSERT_EQ(new_relu->output(0).get_tensor().get_names(), std::unordered_set({"c", "d"})); +} + +TEST(replace_node, node_elimination) +{ + auto param = std::make_shared(element::i64, Shape{1, 64}); + param->output(0).get_tensor().set_names({"a", "b"}); + auto relu1 = std::make_shared(param); + relu1->output(0).get_tensor().set_names({"c", "d"}); + auto relu2 = std::make_shared(relu1); + relu2->output(0).get_tensor().set_names({"e", "f"}); + + ASSERT_TRUE(replace_output_update_name(relu2->output(0), relu2->input_value(0))); + ASSERT_EQ(relu1->output(0).get_tensor().get_names(), std::unordered_set({"c", "d", "e", "f"})); + ASSERT_EQ(param->output(0).get_tensor().get_names(), std::unordered_set({"a", "b"})); +} + +TEST(replace_node, output_replacement) +{ + auto param = std::make_shared(element::i64, Shape{1, 64}); + param->output(0).get_tensor().set_names({"a", "b"}); + auto relu = 
std::make_shared(param); + relu->output(0).get_tensor().set_names({"c", "d"}); + + auto new_relu = std::make_shared(param); + new_relu->output(0).get_tensor().set_names({"f"}); + + relu->output(0).replace(new_relu->output(0)); + + ASSERT_EQ(new_relu->output(0).get_tensor().get_names(), std::unordered_set({"c", "d"})); +} + +TEST(replace_node, source_replacement) +{ + auto param = std::make_shared(element::i64, Shape{1, 64}); + param->output(0).get_tensor().set_names({"a", "b"}); + + auto param1 = std::make_shared(element::i64, Shape{1, 64}); + param1->output(0).get_tensor().set_names({"c", "d"}); + + auto relu = std::make_shared(param); + relu->input(0).replace_source_output(param1->output(0)); + + ASSERT_EQ(param->output(0).get_tensor().get_names(), std::unordered_set({"a", "b"})); + ASSERT_EQ(param1->output(0).get_tensor().get_names(), std::unordered_set({"c", "d"})); +} + + diff --git a/ngraph/test/runtime/CMakeLists.txt b/ngraph/test/runtime/CMakeLists.txt index aa0ddfd6962..d17a37a8b70 100644 --- a/ngraph/test/runtime/CMakeLists.txt +++ b/ngraph/test/runtime/CMakeLists.txt @@ -14,26 +14,8 @@ set (SRC performance_counter.hpp dynamic/dynamic_backend.cpp dynamic/dynamic_backend.hpp - op/avg_pool.cpp - op/avg_pool.hpp - op/convolution.cpp - op/convolution.hpp - op/group_conv.cpp - op/group_conv.hpp pass/dyn_elimination.cpp pass/dyn_elimination.hpp - pass/fused_op_decomposition.cpp - pass/fused_op_decomposition.hpp - pass/implicit_broadcast_elimination.cpp - pass/implicit_broadcast_elimination.hpp - pass/liveness.cpp - pass/liveness.hpp - pass/opset0_downgrade.cpp - pass/opset0_downgrade.hpp - pass/opset1_downgrade.cpp - pass/opset1_downgrade.hpp - pass/opset1_upgrade.cpp - pass/opset1_upgrade.hpp pass/shape_relevance.cpp pass/shape_relevance.hpp ) diff --git a/ngraph/test/runtime/dynamic/dynamic_backend.cpp b/ngraph/test/runtime/dynamic/dynamic_backend.cpp index c7efd915b9e..0ef1f656837 100644 --- a/ngraph/test/runtime/dynamic/dynamic_backend.cpp +++ b/ngraph/test/runtime/dynamic/dynamic_backend.cpp @@ -15,8 +15,6 @@ #include "ngraph/specialize_function.hpp" #include "ngraph/util.hpp" #include "pass/dyn_elimination.hpp" -#include "pass/opset0_downgrade.hpp" -#include "pass/opset1_downgrade.hpp" #include "pass/shape_relevance.hpp" using namespace std; @@ -239,10 +237,8 @@ bool runtime::dynamic::DynamicExecutable::call( pass::Manager passes; // Opset1Downgrade should be moved below DynElimination // when ConstantFolding for v3 ops will be ready - passes.register_pass(); passes.register_pass(); passes.register_pass(); - passes.register_pass(); // Converts dynamic v1 variants to v0 ops passes.set_per_pass_validation(false); // FIXME(amprocte): Vile, temporary hack: we need to do repeated rounds of diff --git a/ngraph/test/runtime/ie/ie_executable.cpp b/ngraph/test/runtime/ie/ie_executable.cpp index 419cd435380..cd31f1e0a7e 100644 --- a/ngraph/test/runtime/ie/ie_executable.cpp +++ b/ngraph/test/runtime/ie/ie_executable.cpp @@ -8,7 +8,6 @@ #include "ngraph/pass/manager.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" -#include "pass/opset1_upgrade.hpp" using namespace std; using namespace ngraph; @@ -63,7 +62,7 @@ namespace memcpy(blob_ptr, data, data_size * elem_type.size()); return blob; } -} +} // namespace namespace { @@ -78,21 +77,18 @@ namespace ie_ops.insert(opset4.begin(), opset4.end()); auto& opset5 = get_opset5().get_type_info_set(); ie_ops.insert(opset5.begin(), opset5.end()); - auto& opset6= get_opset6().get_type_info_set(); + auto& opset6 = 
get_opset6().get_type_info_set(); ie_ops.insert(opset6.begin(), opset6.end()); - auto& opset7= get_opset7().get_type_info_set(); + auto& opset7 = get_opset7().get_type_info_set(); ie_ops.insert(opset7.begin(), opset7.end()); return ie_ops; } -} +} // namespace runtime::ie::IE_Executable::IE_Executable(shared_ptr func, string device) : m_device{device} { static std::set ie_ops = get_ie_ops(); - pass::Manager passes; - passes.register_pass(); - passes.run_passes(func); for (const auto& node : func->get_ops()) { diff --git a/ngraph/test/runtime/interpreter/int_executable.hpp b/ngraph/test/runtime/interpreter/int_executable.hpp index d7edc790aab..8c901cd93c5 100644 --- a/ngraph/test/runtime/interpreter/int_executable.hpp +++ b/ngraph/test/runtime/interpreter/int_executable.hpp @@ -21,7 +21,6 @@ #include "ngraph/runtime/reference/reorg_yolo.hpp" #include "ngraph/runtime/reference/tensor_iterator.hpp" #include "ngraph/runtime/tensor.hpp" -#include "op/avg_pool.hpp" namespace ngraph { diff --git a/ngraph/test/runtime/op/atan2.cpp b/ngraph/test/runtime/op/atan2.cpp deleted file mode 100644 index 4d0af74cba3..00000000000 --- a/ngraph/test/runtime/op/atan2.cpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "atan2.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/divide.hpp" -#include "ngraph/op/multiply.hpp" -#include "ngraph/op/negative.hpp" -#include "ngraph/op/sqrt.hpp" -#include "ngraph/op/subtract.hpp" - -using namespace std; -using namespace ngraph; - -constexpr NodeTypeInfo op::v0::Atan2::type_info; - -op::v0::Atan2::Atan2(const Output& y, const Output& x, const AutoBroadcastSpec& autob) - : BinaryElementwiseArithmetic(y, x, autob) -{ - constructor_validate_and_infer_types(); -} - -shared_ptr op::v0::Atan2::clone_with_new_inputs(const OutputVector& new_args) const -{ - check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); -} - -bool op::v0::Atan2::visit_attributes(AttributeVisitor& visitor) -{ - BinaryElementwiseArithmetic::visit_attributes(visitor); - return true; -} diff --git a/ngraph/test/runtime/op/atan2.hpp b/ngraph/test/runtime/op/atan2.hpp deleted file mode 100644 index 683edc85162..00000000000 --- a/ngraph/test/runtime/op/atan2.hpp +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include "backend_visibility.hpp" -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" - -namespace ngraph -{ - namespace op - { - namespace v0 - { - /// \brief Elementwise full arctan operation - class BACKEND_API Atan2 : public util::BinaryElementwiseArithmetic - { - public: - static constexpr NodeTypeInfo type_info{"Atan2", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - Atan2() - : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE) - { - } - - /// \brief atan2(y,x) is the angle from the origin to the point (x,y) (note reversed - /// order). 
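// (Illustrative values only, assuming the standard-library atan2 semantics that this
// removed helper mirrored; the examples below are not taken from the removed file.)
// atan2 keeps the quadrant information that a plain atan(y/x) loses:
//   std::atan2(1.0, 1.0)   // pi/4, point in the first quadrant
//   std::atan2(1.0, -1.0)  // 3*pi/4, whereas std::atan(1.0 / -1.0) gives -pi/4
//   std::atan2(-1.0, -1.0) // -3*pi/4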
- /// - /// \param y - /// \param x - Atan2(const Output& y, - const Output& x, - const AutoBroadcastSpec& autob = AutoBroadcastSpec()); - std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor& visitor) override; - }; - } - } -} diff --git a/ngraph/test/runtime/op/avg_pool.cpp b/ngraph/test/runtime/op/avg_pool.cpp deleted file mode 100644 index d8f8d8bb091..00000000000 --- a/ngraph/test/runtime/op/avg_pool.cpp +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "avg_pool.hpp" -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/validation_util.hpp" - -using namespace std; -using namespace ngraph; - -// *** AvgPool OP SET 0 *** -constexpr NodeTypeInfo op::v0::AvgPool::type_info; - -op::v0::AvgPool::AvgPool(const Output& arg, - const Shape& window_shape, - const Strides& window_movement_strides, - const Shape& padding_below, - const Shape& padding_above, - bool include_padding_in_avg_computation, - const PadType& pad_type, - bool ceil_mode) - : Op({arg}) - , m_window_shape(window_shape) - , m_window_movement_strides(window_movement_strides) - , m_padding_below(padding_below) - , m_padding_above(padding_above) - , m_include_padding_in_avg_computation(include_padding_in_avg_computation) - , m_pad_type(pad_type) - , m_ceil_mode(ceil_mode) -{ - constructor_validate_and_infer_types(); -} - -op::v0::AvgPool::AvgPool(const Output& arg, - const Shape& window_shape, - const Strides& window_movement_strides, - const Shape& padding_below, - const Shape& padding_above, - bool include_padding_in_avg_computation, - const PadType& pad_type) - : AvgPool(arg, - window_shape, - window_movement_strides, - padding_below, - padding_above, - include_padding_in_avg_computation, - pad_type, - false) -{ -} - -op::v0::AvgPool::AvgPool(const Output& arg, - const Shape& window_shape, - const Strides& window_movement_strides, - const Shape& padding_below, - const Shape& padding_above, - bool include_padding_in_avg_computation) - : AvgPool(arg, - window_shape, - window_movement_strides, - padding_below, - padding_above, - include_padding_in_avg_computation, - PadType::EXPLICIT) -{ -} - -bool op::v0::AvgPool::visit_attributes(AttributeVisitor& visitor) -{ - visitor.on_attribute("window_shape", m_window_shape); - visitor.on_attribute("window_movement_strides", m_window_movement_strides); - visitor.on_attribute("padding_below", m_padding_below); - visitor.on_attribute("padding_above", m_padding_above); - visitor.on_attribute("include_padding_in_avg_computation", - m_include_padding_in_avg_computation); - visitor.on_attribute("pad_type", m_pad_type); - visitor.on_attribute("ceil_mode", m_ceil_mode); - return true; -} - -void op::v0::AvgPool::validate_and_infer_types() -{ - if (0 == m_window_movement_strides.size()) - { - m_window_movement_strides = Strides(m_window_shape.size(), 1); - } - - if (0 == m_padding_below.size()) - { - m_padding_below = Shape(m_window_shape.size(), 0); - } - - if (0 == m_padding_above.size()) - { - m_padding_above = Shape(m_window_shape.size(), 0); - } - - const PartialShape& arg_shape = get_input_partial_shape(0); - - if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER) - { - if (arg_shape.is_static()) - { - CoordinateDiff padding_above, padding_below; - infer_auto_padding(arg_shape.to_shape(), - m_window_shape, - m_window_movement_strides, - Strides(m_window_shape.size(), 1), // No 
dilation - m_pad_type, - padding_above, - padding_below); - m_padding_above = Shape(padding_above.begin(), padding_above.end()); - m_padding_below = Shape(padding_below.begin(), padding_below.end()); - } - } - - // infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for - // now still take Shape (no negative padding). - CoordinateDiff padding_below(m_padding_below.begin(), m_padding_below.end()); - CoordinateDiff padding_above(m_padding_above.begin(), m_padding_above.end()); - - set_output_type(0, - get_input_element_type(0), - infer_batched_pooling_forward(this, - arg_shape, - padding_below, - padding_above, - m_window_shape, - m_window_movement_strides, - m_include_padding_in_avg_computation, - m_ceil_mode)); -} - -op::v0::AvgPool::AvgPool(const Output& arg, - const Shape& window_shape, - const Strides& window_movement_strides) - : AvgPool(arg, window_shape, window_movement_strides, Shape(), Shape(), false) -{ -} - -op::v0::AvgPool::AvgPool(const Output& arg, const Shape& window_shape) - : AvgPool(arg, window_shape, Strides(), Shape(), Shape(), false) -{ -} - -const Shape& op::v0::AvgPool::get_window_shape() const -{ - return m_window_shape; -} - -void op::v0::AvgPool::set_window_shape(const Shape& window_shape) -{ - m_window_shape = window_shape; -} - -const Strides& op::v0::AvgPool::get_window_movement_strides() const -{ - return m_window_movement_strides; -} - -void op::v0::AvgPool::set_window_movement_strides(const Strides& window_movement_strides) -{ - m_window_movement_strides = window_movement_strides; -} - -const Shape& op::v0::AvgPool::get_padding_below() const -{ - return m_padding_below; -} - -void op::v0::AvgPool::set_padding_below(const Shape& padding_below) -{ - m_padding_below = padding_below; -} - -const Shape& op::v0::AvgPool::get_padding_above() const -{ - return m_padding_above; -} - -void op::v0::AvgPool::set_padding_above(const Shape& padding_above) -{ - m_padding_above = padding_above; -} - -bool op::v0::AvgPool::get_include_padding_in_avg_computation() const -{ - return m_include_padding_in_avg_computation; -} - -void op::v0::AvgPool::set_include_padding_in_avg_computation( - bool include_padding_in_avg_computation) -{ - m_include_padding_in_avg_computation = include_padding_in_avg_computation; -} - -const op::PadType& op::v0::AvgPool::get_pad_type() const -{ - return m_pad_type; -} - -void op::v0::AvgPool::set_pad_type(const op::PadType& pad_type) -{ - m_pad_type = pad_type; -} - -bool op::v0::AvgPool::get_ceil_mode() const -{ - return m_ceil_mode; -} - -void op::v0::AvgPool::set_ceil_mode(bool ceil_mode) -{ - m_ceil_mode = ceil_mode; -} - -shared_ptr op::v0::AvgPool::clone_with_new_inputs(const OutputVector& new_args) const -{ - check_new_args_count(this, new_args); - return make_shared(new_args.at(0), - m_window_shape, - m_window_movement_strides, - m_padding_below, - m_padding_above, - m_include_padding_in_avg_computation, - m_pad_type, - m_ceil_mode); -} - -shared_ptr op::v0::AvgPool::get_default_value() const -{ - return Constant::create(get_element_type(), get_shape(), {0}); -} diff --git a/ngraph/test/runtime/op/avg_pool.hpp b/ngraph/test/runtime/op/avg_pool.hpp deleted file mode 100644 index 701b3e174f3..00000000000 --- a/ngraph/test/runtime/op/avg_pool.hpp +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "backend_visibility.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" - -namespace ngraph -{ - 
namespace op - { - namespace v0 - { - /// \brief Batched average pooling operation, with optional padding and window stride. - /// - class BACKEND_API AvgPool : public Op - { - public: - static constexpr NodeTypeInfo type_info{"AvgPool", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - /// \brief Constructs a batched average pooling operation. - AvgPool() = default; - - /// \brief Constructs a batched average pooling operation. - /// - /// \param arg The output producing the input data batch tensor.
- /// `[d1, dn]` - /// \param window_shape The window shape.
- /// `[n]` - /// \param window_movement_strides The window movement strides.
- /// `[n]` - /// \param padding_below The below-padding shape.
- /// `[n]` - /// \param padding_above The above-padding shape.
- /// `[n]` - /// \param include_padding_in_avg_computation If true then averages include padding - /// elements, each treated as the number zero. If false, padding elements are - /// entirely ignored when computing averages. \param pad_type Padding type to use - /// for additional padded dimensions \param ceil_mode Whether to use ceiling while - /// computing output shape. - AvgPool(const Output& arg, - const Shape& window_shape, - const Strides& window_movement_strides, - const Shape& padding_below, - const Shape& padding_above, - bool include_padding_in_avg_computation, - const PadType& pad_type, - bool ceil_mode); - - /// \brief Constructs a batched average pooling operation. - /// - /// \param arg The output producing the input data batch tensor.
- /// `[d1, dn]` - /// \param window_shape The window shape.
- /// `[n]` - /// \param window_movement_strides The window movement strides.
- /// `[n]` - /// \param padding_below The below-padding shape.
- /// `[n]` - /// \param padding_above The above-padding shape.
- /// `[n]` - /// \param include_padding_in_avg_computation If true then averages include padding - /// elements, each treated as the number zero. If false, padding elements are - /// entirely ignored when computing averages. \param pad_type Padding type to use - /// for additional padded dimensions - AvgPool(const Output& arg, - const Shape& window_shape, - const Strides& window_movement_strides, - const Shape& padding_below, - const Shape& padding_above, - bool include_padding_in_avg_computation, - const PadType& pad_type); - - /// \brief Constructs a batched average pooling operation. - /// - /// \param arg The output producing the input data batch tensor.
- /// `[d1, dn]` - /// \param window_shape The window shape.
- /// `[n]` - /// \param window_movement_strides The window movement strides.
- /// `[n]` - /// \param padding_below The below-padding shape.
- /// `[n]` - /// \param padding_above The above-padding shape.
- /// `[n]` - /// \param include_padding_in_avg_computation If true then averages include padding - /// elements, each treated as the number zero. If false, padding elements are - /// entirely ignored when computing averages. - AvgPool(const Output& arg, - const Shape& window_shape, - const Strides& window_movement_strides, - const Shape& padding_below, - const Shape& padding_above, - bool include_padding_in_avg_computation = false); - - /// \brief Constructs a batched, unpadded average pooling operation (i.e., all - /// padding shapes are set to 0). - /// - /// \param arg The output producing the input data batch tensor.
- /// `[d1, ..., dn]` - /// \param window_shape The window shape.
- /// `[n]` - /// \param window_movement_strides The window movement strides.
- /// `[n]` - AvgPool(const Output& arg, - const Shape& window_shape, - const Strides& window_movement_strides); - - /// \brief Constructs an unstrided batched convolution operation (i.e., all window - /// movement strides are 1 and all padding shapes are set to 0). - /// - /// \param arg The output producing the input data batch tensor.
- /// `[d1, ..., dn]` - /// \param window_shape The window shape.
- /// `[n]` - AvgPool(const Output& arg, const Shape& window_shape); - - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The window shape. - const Shape& get_window_shape() const; - void set_window_shape(const Shape& window_shape); - /// \return The window movement strides. - const Strides& get_window_movement_strides() const; - void set_window_movement_strides(const Strides& window_movement_strides); - /// \return The below-padding shape. - const Shape& get_padding_below() const; - void set_padding_below(const Shape& padding_below); - /// \return The above-padding shape. - const Shape& get_padding_above() const; - void set_padding_above(const Shape& padding_above); - bool get_include_padding_in_avg_computation() const; - void - set_include_padding_in_avg_computation(bool include_padding_in_avg_computation); - /// \return The pad type for pooling. - const PadType& get_pad_type() const; - void set_pad_type(const PadType& pad_type); - bool get_ceil_mode() const; - void set_ceil_mode(bool ceil_mode); - /// \return The default value for AvgPool. - NGRAPH_SUPPRESS_DEPRECATED_START - virtual std::shared_ptr get_default_value() const override; - NGRAPH_SUPPRESS_DEPRECATED_END - - protected: - Shape m_window_shape; - Strides m_window_movement_strides; - Shape m_padding_below; - Shape m_padding_above; - bool m_include_padding_in_avg_computation{false}; - PadType m_pad_type{PadType::EXPLICIT}; - bool m_ceil_mode{false}; - }; - } // namespace v0 - } // namespace op -} // namespace ngraph diff --git a/ngraph/test/runtime/op/convolution.cpp b/ngraph/test/runtime/op/convolution.cpp deleted file mode 100644 index 1d4d8bea6c6..00000000000 --- a/ngraph/test/runtime/op/convolution.cpp +++ /dev/null @@ -1,343 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "convolution.hpp" -#include "ngraph/axis_vector.hpp" -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/reshape.hpp" -#include "ngraph/util.hpp" -#include "ngraph/validation_util.hpp" - -using namespace std; -using namespace ngraph; - -// *** Convolution OP SET 0 *** -constexpr NodeTypeInfo op::v0::Convolution::type_info; - -op::v0::Convolution::Convolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const Strides& data_dilation_strides, - const PadType& pad_type) - : Op({data_batch, filters}) - , m_window_movement_strides(window_movement_strides) - , m_window_dilation_strides(window_dilation_strides) - , m_padding_below(padding_below) - , m_padding_above(padding_above) - , m_data_dilation_strides(data_dilation_strides) - , m_pad_type(pad_type) -{ - constructor_validate_and_infer_types(); -} - -bool op::v0::Convolution::visit_attributes(AttributeVisitor& visitor) -{ - visitor.on_attribute("window_movement_strides", m_window_movement_strides); - visitor.on_attribute("window_dilation_strides", m_window_dilation_strides); - visitor.on_attribute("data_dilation_strides", m_data_dilation_strides); - visitor.on_attribute("padding_below", m_padding_below); - visitor.on_attribute("padding_above", m_padding_above); - visitor.on_attribute("pad_type", m_pad_type); - return true; -} - -void op::v0::Convolution::validate_and_infer_types() -{ - const PartialShape& 
data_batch_shape = get_input_partial_shape(0); - element::Type data_batch_et = get_input_element_type(0); - const PartialShape& filters_shape = get_input_partial_shape(1); - element::Type filters_et = get_input_element_type(1); - - if (m_data_dilation_strides.size() == 0) - { - m_data_dilation_strides = conv_default_strides(this, data_batch_shape, filters_shape); - } - - if (m_window_movement_strides.size() == 0) - { - m_window_movement_strides = conv_default_strides(this, data_batch_shape, filters_shape); - } - - if (m_window_dilation_strides.size() == 0) - { - m_window_dilation_strides = conv_default_strides(this, data_batch_shape, filters_shape); - } - - if (m_padding_below.size() == 0) - { - m_padding_below = conv_default_padding(this, data_batch_shape, filters_shape); - } - - if (m_padding_above.size() == 0) - { - m_padding_above = conv_default_padding(this, data_batch_shape, filters_shape); - } - - if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER) - { - if (data_batch_shape.is_static() && filters_shape.is_static()) - { - // TODO: data dilation - m_padding_below.clear(); - m_padding_above.clear(); - auto filter_shape = filters_shape.to_shape(); - filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I} - infer_auto_padding(data_batch_shape.to_shape(), - filter_shape, - m_window_movement_strides, - m_window_dilation_strides, - m_pad_type, - m_padding_above, - m_padding_below); - } - } - - element::Type result_et; - PartialShape result_shape; - - NODE_VALIDATION_CHECK( - this, - element::Type::merge(result_et, data_batch_et, filters_et), - "Element types for data batch and filters do not match (data batch element type: ", - data_batch_et, - ", filters element type: ", - filters_et, - ")."); - - result_shape = infer_convolution_forward(this, - data_batch_shape, - m_data_dilation_strides, - m_padding_below, - m_padding_above, - filters_shape, - m_window_movement_strides, - m_window_dilation_strides); - - set_output_type(0, result_et, result_shape); -} - -op::v0::Convolution::Convolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above) - : Convolution(data_batch, - filters, - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above, - Strides()) -{ -} - -op::v0::Convolution::Convolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides) - : Convolution(data_batch, - filters, - window_movement_strides, - window_dilation_strides, - CoordinateDiff(), - CoordinateDiff()) -{ -} - -op::v0::Convolution::Convolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides) - : Convolution(data_batch, - filters, - window_movement_strides, - Strides(), - CoordinateDiff(), - CoordinateDiff()) -{ -} - -op::v0::Convolution::Convolution(const Output& data_batch, const Output& filters) - : Convolution(data_batch, filters, Strides(), Strides(), CoordinateDiff(), CoordinateDiff()) -{ -} - -shared_ptr op::v0::Convolution::clone_with_new_inputs(const OutputVector& new_args) const -{ - check_new_args_count(this, new_args); - return make_shared(new_args.at(0), - new_args.at(1), - m_window_movement_strides, - m_window_dilation_strides, - m_padding_below, - m_padding_above, - m_data_dilation_strides, - m_pad_type); -} - -constexpr NodeTypeInfo 
op::v0::ConvolutionBackpropData::type_info; -shared_ptr op::v0::Convolution::get_default_value() const -{ - return ngraph::make_constant_from_string("0", get_element_type(), get_shape()); -} - -op::v0::ConvolutionBackpropData::ConvolutionBackpropData( - const Shape& data_batch_shape, - const Output& filters, - const Output& output_delta, - const Strides& window_movement_strides_forward, - const Strides& window_dilation_strides_forward, - const CoordinateDiff& padding_below_forward, - const CoordinateDiff& padding_above_forward, - const Strides& data_dilation_strides_forward) - : Op({filters, output_delta}) - , m_data_batch_shape(data_batch_shape) - , m_window_movement_strides_forward(window_movement_strides_forward) - , m_window_dilation_strides_forward(window_dilation_strides_forward) - , m_padding_below_forward(padding_below_forward) - , m_padding_above_forward(padding_above_forward) - , m_data_dilation_strides_forward(data_dilation_strides_forward) -{ - constructor_validate_and_infer_types(); -} - -bool op::v0::ConvolutionBackpropData::visit_attributes(AttributeVisitor& visitor) -{ - visitor.on_attribute("data_batch_shape", m_data_batch_shape); - visitor.on_attribute("window_movement_strides_forward", m_window_movement_strides_forward); - visitor.on_attribute("window_dilation_strides_forward", m_window_dilation_strides_forward); - visitor.on_attribute("padding_below_forward", m_padding_below_forward); - visitor.on_attribute("padding_above_forward", m_padding_above_forward); - visitor.on_attribute("data_dilation_strides_forward", m_data_dilation_strides_forward); - return true; -} - -void op::v0::ConvolutionBackpropData::validate_and_infer_types() -{ - // Backprop to data is itself convolution, with inputs/outputs/attributes transmogrified as - // follows. - // - // Forward Backward - // "N" axis for data batch 0 0 - // "C" axis for data batch 1 1 - // "Co" axis for filters 0 0 - // "Ci" axis for filters 1 1 - // "N" axis for output 0 0 - // "C" axis for output 1 1 - // Data batch x delta - // Data batch shape S_x S_o - // Filters f reverse(f) [on spatial axes] - // Filters shape S_f S_f - // Window movement strides q_x p_x - // Window dilation strides p_f p_f - // Padding below a_x (S_f - 1)p_f - a_x - // Padding above b_x (S_f - 1)p_f + - // + ((a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) - // % q_x) - // - b_x - // Data dilation strides p_x q_x - // Output shape S_o S_x - // - // To _validate_, we simply need to check/infer the output shape of the forward convolution, - // then check to make sure that the incoming delta has the same shape as the forward output. 
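// A minimal standalone sketch (plain C++, not the ngraph API) of the backward
// padding rows in the table above; S_f, p_f, a_x, b_x, S_x, p_x and q_x are
// the per-axis forward quantities exactly as named there.
#include <cstdint>
struct BackwardPads
{
    std::int64_t below;
    std::int64_t above;
};
inline BackwardPads backward_delta_pads(std::int64_t S_f, std::int64_t p_f,
                                        std::int64_t a_x, std::int64_t b_x,
                                        std::int64_t S_x, std::int64_t p_x,
                                        std::int64_t q_x)
{
    const std::int64_t extent = (S_f - 1) * p_f; // dilated filter extent minus one
    BackwardPads pads;
    pads.below = extent - a_x;
    pads.above = extent + ((a_x + (S_x - 1) * p_x + b_x - extent) % q_x) - b_x;
    return pads;
}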
- const PartialShape& filters_shape = get_input_partial_shape(0); - element::Type filters_et = get_input_element_type(0); - const PartialShape& delta_shape = get_input_partial_shape(1); - element::Type delta_et = get_input_element_type(1); - - element::Type forward_result_et; - PartialShape forward_result_shape; - - NODE_VALIDATION_CHECK( - this, - element::Type::merge(forward_result_et, delta_et, filters_et), - "Element types for data batch and filters do not match (data batch element type: ", - delta_et, - ", filters element type: ", - filters_et, - ")."); - - forward_result_shape = infer_convolution_forward(this, - m_data_batch_shape, - m_data_dilation_strides_forward, - m_padding_below_forward, - m_padding_above_forward, - filters_shape, - m_window_movement_strides_forward, - m_window_dilation_strides_forward); - - NODE_VALIDATION_CHECK(this, - forward_result_shape.compatible(delta_shape), - "Inferred forward output shape (", - forward_result_shape, - ") does not match shape of ", - "delta (", - delta_shape, - ")."); - - set_output_type(0, forward_result_et, m_data_batch_shape); -} - -shared_ptr - op::v0::ConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const -{ - check_new_args_count(this, new_args); - return make_shared(m_data_batch_shape, - new_args.at(0), - new_args.at(1), - m_window_movement_strides_forward, - m_window_dilation_strides_forward, - m_padding_below_forward, - m_padding_above_forward, - m_data_dilation_strides_forward); -} - -CoordinateDiff op::v0::ConvolutionBackpropData::compute_backward_delta_out_pad_below() const -{ - auto& in_shape = get_data_batch_shape(); - auto& filter_dilation = get_window_dilation_strides_forward(); - auto& filter_shape = get_input_shape(0); - auto& in_pad_below = get_padding_below_forward(); - size_t spatial_dim_count = static_cast(in_shape.size()) - 2; - - CoordinateDiff backward_delta_out_pad_below; - backward_delta_out_pad_below.resize(spatial_dim_count); - - for (size_t i = 0; i < spatial_dim_count; i++) - { - backward_delta_out_pad_below[i] = - (static_cast(filter_shape[i + 2]) - 1) * filter_dilation[i] - - in_pad_below[i]; - } - return backward_delta_out_pad_below; -} - -CoordinateDiff op::v0::ConvolutionBackpropData::compute_backward_delta_out_pad_above() const -{ - auto& in_shape = get_data_batch_shape(); - auto& filter_dilation = get_window_dilation_strides_forward(); - auto& filter_shape = get_input_shape(0); - auto& in_pad_below = get_padding_below_forward(); - auto& in_pad_above = get_padding_above_forward(); - auto& in_dilation = get_data_dilation_strides_forward(); - auto& stride = get_window_movement_strides_forward(); - size_t spatial_dim_count = static_cast(in_shape.size()) - 2; - - CoordinateDiff backward_delta_out_pad_above; - backward_delta_out_pad_above.resize(spatial_dim_count); - - for (size_t i = 0; i < spatial_dim_count; i++) - { - backward_delta_out_pad_above[i] = - (static_cast(filter_shape[i + 2]) - 1) * filter_dilation[i] + - ((in_pad_below[i] + ((in_shape[i + 2]) - 1) * in_dilation[i] + in_pad_above[i] - - (static_cast(filter_shape[i + 2]) - 1) * filter_dilation[i]) % - stride[i]) - - in_pad_above[i]; - } - return backward_delta_out_pad_above; -} diff --git a/ngraph/test/runtime/op/convolution.hpp b/ngraph/test/runtime/op/convolution.hpp deleted file mode 100644 index f5d45d26918..00000000000 --- a/ngraph/test/runtime/op/convolution.hpp +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include 
"backend_visibility.hpp" -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" - -namespace ngraph -{ - namespace op - { - namespace v0 - { - /// \brief Batched convolution operation, with optional window dilation and stride. - /// - class BACKEND_API Convolution : public Op - { - public: - static constexpr NodeTypeInfo type_info{"Convolution", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - /// \brief Constructs a batched convolution operation. - Convolution() = default; - /// \brief Constructs a batched convolution operation. - /// - /// \param data_batch The node producing the input data batch tensor.
- /// `[N, C_IN, D1, ... Df]` - /// \param filters The node producing the filters tensor.
- /// `[C_OUT, C_IN, F1, ... Ff]` - /// \param window_movement_strides The window movement strides.
- /// `[f]` - /// \param window_dilation_strides The window dilation strides.
- /// `[f]` - /// \param padding_below The padding-below sizes.
- /// `[f]` - /// \param padding_above The padding-above sizes.
- /// `[f]` - /// \param data_dilation_strides The data dilation strides.
- /// `[f]` - /// \param pad_type The pad type for automatically computing padding sizes.
- /// `[f]` - /// - /// Output `[N, C_OUT, R1, ... Rf]` - /// - Convolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const Strides& data_dilation_strides, - const PadType& pad_type = PadType::EXPLICIT); - - /// \brief Constructs a batched convolution operation with no data dilation (i.e., - /// all - /// data dilation strides are 1). - /// \param data_batch The node producing the input data batch tensor.
- /// `[N, C_IN, D1, ... Df]` - /// \param filters The node producing the filters tensor.
- /// `[C_OUT, C_IN, F1, ... Ff]` - /// \param window_movement_strides The window movement strides.
- /// `[f]` - /// \param window_dilation_strides The window dilation strides.
- /// `[f]` - /// \param padding_below The padding-below sizes.
- /// `[f]` - /// \param padding_above The padding-above sizes.
- /// `[f]` - /// - /// Output `[N, C_OUT, R1, ... Rf]` - /// - Convolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above); - - /// \brief Constructs a batched convolution operation with no padding or data - /// dilation - /// (i.e., padding above and below are 0 everywhere, and all data dilation - /// strides are 1). - /// - /// \param data_batch The node producing the input data batch tensor.
- /// `[N, C_IN, D1, ... Df]` - /// \param filters The node producing the filters tensor.
- /// `[C_OUT, C_IN, F1, ... Ff]` - /// \param window_movement_strides The window movement strides.
- /// `[f]` - /// \param window_dilation_strides The window dilation strides.
- /// `[f]` - /// - /// Output `[N, C_OUT, R1, ... Rf]` - /// - Convolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides); - - /// \brief Constructs a batched convolution operation with no window dilation, - /// padding, - /// or data dilation (i.e., padding above and below are 0 everywhere, and all - /// window/data dilation strides are 1). - /// - /// \param data_batch The node producing the input data batch tensor.
- /// `[N, C_IN, D1, ... Df]` - /// \param filters The node producing the filters tensor.
- /// `[C_OUT, C_IN, F1, ... Ff]` - /// \param window_movement_strides The window movement strides.
- /// `[f]` - /// - /// Output `[N, C_OUT, R1, ... Rf]` - /// - Convolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides); - - /// \brief Constructs a batched convolution operation with no window dilation or - /// movement stride (i.e., padding above and below are 0 everywhere, and all - /// window/data dilation strides and window movement strides are 1). - /// - /// \param data_batch The node producing the input data batch tensor.
- /// `[N, C_IN, D1, ... Df]` - /// \param filters The node producing the filters tensor.
- /// `[C_OUT, C_IN, F1, ... Ff]` - /// - /// Output `[N, C_OUT, R1, ... Rf]` - /// - Convolution(const Output& data_batch, const Output& filters); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The window movement strides. - const Strides& get_window_movement_strides() const - { - return m_window_movement_strides; - } - void set_window_movement_strides(const Strides& window_movement_strides) - { - m_window_movement_strides = window_movement_strides; - } - /// \return The window dilation strides. - const Strides& get_window_dilation_strides() const - { - return m_window_dilation_strides; - } - void set_window_dilation_strides(const Strides& window_dilation_strides) - { - m_window_dilation_strides = window_dilation_strides; - } - /// \return The padding-below sizes (possibly negative). - const CoordinateDiff& get_padding_below() const { return m_padding_below; } - void set_padding_below(const CoordinateDiff& padding_below) - { - m_padding_below = padding_below; - } - /// \return The padding-above sizes (possibly negative). - const CoordinateDiff& get_padding_above() const { return m_padding_above; } - void set_adding_above(const CoordinateDiff& padding_above) - { - m_padding_above = padding_above; - } - /// \return The input data dilation strides. - const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; } - void set_data_dilation_strides(const Strides& data_dilation_strides) - { - m_data_dilation_strides = data_dilation_strides; - } - /// \return The pad type for convolution. - const PadType& get_pad_type() const { return m_pad_type; } - void set_pad_type(const PadType& pad_type) { m_pad_type = pad_type; } - /// \return The default value for Convolution. - NGRAPH_SUPPRESS_DEPRECATED_START - virtual std::shared_ptr get_default_value() const override; - NGRAPH_SUPPRESS_DEPRECATED_END - - protected: - Strides m_window_movement_strides; - Strides m_window_dilation_strides; - CoordinateDiff m_padding_below; - CoordinateDiff m_padding_above; - Strides m_data_dilation_strides; - PadType m_pad_type; - }; - - /// \brief Data batch backprop for batched convolution operation. - class BACKEND_API ConvolutionBackpropData : public Op - { - public: - static constexpr NodeTypeInfo type_info{"ConvolutionBackpropData", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - /// \brief Constructs a batched-convolution data batch-backprop operation. - ConvolutionBackpropData() = default; - /// - /// \brief Constructs a batched-convolution data batch-backprop operation. - /// - /// \param data_batch_shape The shape of the data batch from - /// forward-prop. - /// \param filters The node producing the filters from - /// forward-prop. - /// \param data The node producing output delta. - /// \param window_movement_strides_forward The window movement strides from - /// forward-prop. - /// \param window_dilation_strides_forward The window dilation strides from - /// forward-prop. - /// \param padding_below_forward The padding-below sizes from - /// forward-prop. - /// \param padding_above_forward The padding-above sizes from - /// forward-prop. - /// \param data_dilation_strides_forward The data dilation strides from - /// forward-prop. 
- /// - ConvolutionBackpropData(const Shape& data_batch_shape, - const Output& filters, - const Output& data, - const Strides& window_movement_strides_forward, - const Strides& window_dilation_strides_forward, - const CoordinateDiff& padding_below_forward, - const CoordinateDiff& padding_above_forward, - const Strides& data_dilation_strides_forward); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The data batch shape. - const Shape& get_data_batch_shape() const { return m_data_batch_shape; } - void set_data_batch_shape(const Shape& data_batch_shape) - { - m_data_batch_shape = data_batch_shape; - } - /// \return The window movement strides from the forward prop. - const Strides& get_window_movement_strides_forward() const - { - return m_window_movement_strides_forward; - } - void set_window_movement_strides_forward( - const Strides& window_movement_strides_forward) - { - m_window_movement_strides_forward = window_movement_strides_forward; - } - /// \return The window dilation strides from the forward prop. - const Strides& get_window_dilation_strides_forward() const - { - return m_window_dilation_strides_forward; - } - void set_window_dilation_strides_forward( - const Strides& window_dilation_strides_forward) - { - m_window_dilation_strides_forward = window_dilation_strides_forward; - } - /// \return The padding-below sizes (possibly negative) from the forward prop. - const CoordinateDiff& get_padding_below_forward() const - { - return m_padding_below_forward; - } - void set_padding_below_forward(const CoordinateDiff& padding_below_forward) - { - m_padding_below_forward = padding_below_forward; - } - /// \return The padding-above sizes (possibly negative) from the forward prop. - const CoordinateDiff& get_padding_above_forward() const - { - return m_padding_above_forward; - } - void set_padding_above_forward(const CoordinateDiff& padding_above_forward) - { - m_padding_above_forward = padding_above_forward; - } - /// \return The input data dilation strides from the forward prop. 
- const Strides& get_data_dilation_strides_forward() const - { - return m_data_dilation_strides_forward; - } - void set_data_dilation_strides_forward(const Strides& data_dilation_strides_forward) - { - m_data_dilation_strides_forward = data_dilation_strides_forward; - } - - // Compute the pad_above values to be used if in a convolution - CoordinateDiff compute_backward_delta_out_pad_above() const; - CoordinateDiff compute_backward_delta_out_pad_below() const; - - protected: - Shape m_data_batch_shape; - Strides m_window_movement_strides_forward; - Strides m_window_dilation_strides_forward; - CoordinateDiff m_padding_below_forward; - CoordinateDiff m_padding_above_forward; - Strides m_data_dilation_strides_forward; - }; - } // namespace v0 - } // namespace op -} // namespace ngraph diff --git a/ngraph/test/runtime/op/group_conv.cpp b/ngraph/test/runtime/op/group_conv.cpp deleted file mode 100644 index 1a9ce2b464a..00000000000 --- a/ngraph/test/runtime/op/group_conv.cpp +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "convolution.hpp" -#include "group_conv.hpp" -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/builder/reshape.hpp" -#include "ngraph/builder/split.hpp" -#include "ngraph/op/concat.hpp" -#include "ngraph/op/convolution.hpp" -#include "ngraph/validation_util.hpp" - -using namespace std; -using namespace ngraph; - -NGRAPH_SUPPRESS_DEPRECATED_START - -//------------------------------------------------------------------------------ -// v0::GroupConvolution -//------------------------------------------------------------------------------ - -constexpr NodeTypeInfo op::v0::GroupConvolution::type_info; - -op::v0::GroupConvolution::GroupConvolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const Strides& data_dilation_strides, - const size_t groups, - const PadType& pad_type) - : FusedOp({data_batch, filters}) - , m_window_movement_strides(window_movement_strides) - , m_window_dilation_strides(window_dilation_strides) - , m_padding_below(padding_below) - , m_padding_above(padding_above) - , m_data_dilation_strides(data_dilation_strides) - , m_groups(groups) - , m_pad_type(pad_type) - , m_groups_in_filters(false) -{ - constructor_validate_and_infer_types(); -} - -op::v0::GroupConvolution::GroupConvolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const Strides& data_dilation_strides, - const PadType& pad_type) - : FusedOp({data_batch, filters}) - , m_window_movement_strides(window_movement_strides) - , m_window_dilation_strides(window_dilation_strides) - , m_padding_below(padding_below) - , m_padding_above(padding_above) - , m_data_dilation_strides(data_dilation_strides) - , m_groups(0) - , m_pad_type(pad_type) - , m_groups_in_filters(true) -{ - constructor_validate_and_infer_types(); -} - -void op::v0::GroupConvolution::pre_validate_and_infer_types() -{ - auto data_shape = get_input_partial_shape(0); - auto filters_shape = get_input_partial_shape(1); - - if (data_shape.is_static() && filters_shape.is_static()) - { - // Update groups - if (m_groups_in_filters) - { - m_groups = get_input_partial_shape(1)[0].get_length(); - } - - // Data channels 
- NODE_VALIDATION_CHECK(this, - data_shape.to_shape()[1] % get_groups() == 0, - "Data channels not a multiple of group size"); - // Output channels - NODE_VALIDATION_CHECK(this, - filters_shape.to_shape()[0] % get_groups() == 0, - "# Filters not a multiple of group size"); - - // Input Filters - NODE_VALIDATION_CHECK(this, - (filters_shape.to_shape()[m_groups_in_filters ? 2 : 1] * - get_groups()) == data_shape.to_shape()[1], - "Incorrect number of channels per filter"); - } - else - { - set_output_type(0, get_input_element_type(0), PartialShape::dynamic()); - } -} - -void op::v0::GroupConvolution::post_validate_and_infer_types() -{ - auto data_shape = get_input_partial_shape(0); - auto filters_shape = get_input_partial_shape(1); - if (data_shape.is_static() && filters_shape.is_static()) - { - if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER) - { - m_padding_below.clear(); - m_padding_above.clear(); - auto filter_shape = filters_shape.to_shape(); - filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I} - infer_auto_padding(data_shape.to_shape(), - filter_shape, - m_window_movement_strides, - m_window_dilation_strides, - m_pad_type, - m_padding_above, - m_padding_below); - } - } -} - -Shape op::v0::GroupConvolution::get_weights_dimensions() const -{ - auto data_shape = get_input_shape(0); - auto weights_shape = get_input_shape(1); - // check if weights already includes groups - if (m_groups_in_filters) - { - return weights_shape; - } - // reshape weights into 5d tensors that includes groups - const size_t OC = 0; - const size_t OC_IN_OUTPUT = 1; - const size_t IC = 1; - Shape weights_shape_groups{weights_shape}; - // adjust output and channel given a number of groups - - weights_shape_groups.at(OC) = get_shape().at(OC_IN_OUTPUT) / get_groups(); - weights_shape_groups.at(IC) = data_shape.at(IC) / get_groups(); - // push_front the number of groups - weights_shape_groups.insert(weights_shape_groups.begin(), get_groups()); - return weights_shape_groups; -} - -shared_ptr op::v0::GroupConvolution::clone_with_new_inputs(const OutputVector& new_args) const -{ - check_new_args_count(this, new_args); - - if (m_groups_in_filters) - { - return make_shared(new_args.at(0), - new_args.at(1), - get_window_movement_strides(), - get_window_dilation_strides(), - get_padding_below(), - get_padding_above(), - get_data_dilation_strides(), - get_pad_type()); - } - else - { - return make_shared(new_args.at(0), - new_args.at(1), - get_window_movement_strides(), - get_window_dilation_strides(), - get_padding_below(), - get_padding_above(), - get_data_dilation_strides(), - get_groups(), - get_pad_type()); - } -} - -OutputVector op::v0::GroupConvolution::decompose_op() const -{ - auto data = input_value(0); - auto filters = input_value(1); - auto filters_shape = get_input_shape(1); - // Split one convolution op to N ops where N is the number of groups - // and concat results after computation. 
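// The shape bookkeeping behind this split-and-concat, as a standalone sketch
// (assumptions: plain C++, NCHW-style layouts, data [N, C, d...], flat filters
// [O, C/G, f...]); each of the G groups convolves C/G input channels, produces
// O/G output channels, and the G results are concatenated on axis 1.
#include <cstddef>
#include <vector>
inline std::vector<std::size_t> per_group_data_shape(std::vector<std::size_t> data_shape,
                                                     std::size_t groups)
{
    data_shape[1] /= groups; // channel axis is 1
    return data_shape;
}
inline std::vector<std::size_t> per_group_filter_shape(std::vector<std::size_t> filters_shape,
                                                       std::size_t groups)
{
    filters_shape[0] /= groups; // output-channel axis is 0
    return filters_shape;
}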
- NodeVector convolution_nodes; - - // slice data - auto sliced_data = builder::opset1::split(data, get_groups(), 1); - // slice filters - auto sliced_filters = builder::opset1::split(filters, get_groups(), 0); - auto shape = Shape(std::next(std::begin(filters_shape), 1), std::end(filters_shape)); - for (std::size_t group{0}; group < get_groups(); ++group) - { - auto sliced_filter = sliced_filters[group]; - if (m_groups_in_filters) - { - // Remove group dimension after slicing - sliced_filter = builder::opset1::reshape(sliced_filters[group], shape); - } - convolution_nodes.push_back( - std::make_shared(sliced_data[group], - sliced_filter, - m_window_movement_strides, - m_window_dilation_strides, - m_padding_below, - m_padding_above, - m_data_dilation_strides, - m_pad_type)); - } - std::size_t concatenation_axis = 1; - return {std::make_shared(convolution_nodes, concatenation_axis)}; -} - -//------------------------------------------------------------------------------ -// v0::GroupConvolutionBackpropData -//------------------------------------------------------------------------------ - -constexpr NodeTypeInfo op::v0::GroupConvolutionBackpropData::type_info; - -op::v0::GroupConvolutionBackpropData::GroupConvolutionBackpropData( - const Output& data_batch, - const Output& filters, - const Output& output_delta, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const size_t groups) - : FusedOp({data_batch, filters, output_delta}) - , m_window_movement_strides(window_movement_strides) - , m_window_dilation_strides(window_dilation_strides) - , m_padding_below(padding_below) - , m_padding_above(padding_above) - , m_groups(groups) -{ - constructor_validate_and_infer_types(); -} - -void op::v0::GroupConvolutionBackpropData::pre_validate_and_infer_types() -{ - element::Type data_element_type = get_input_element_type(2); - element::Type filters_elem_type = get_input_element_type(1); - - NODE_VALIDATION_CHECK(this, - data_element_type.is_dynamic() || data_element_type.is_real(), - "Output delta element type must be f16, bf16, f32, f64 or dynamic (got ", - data_element_type, - ")."); - NODE_VALIDATION_CHECK(this, - filters_elem_type.is_dynamic() || filters_elem_type.is_real(), - "Filters element type must be f16, bf16, f32, f64 or dynamic (got ", - filters_elem_type, - ")."); - - PartialShape data_pshape = get_input_partial_shape(0); - PartialShape filters_pshape = get_input_partial_shape(1); - PartialShape delta_pshape = get_input_partial_shape(2); - - if (data_pshape.is_dynamic() || filters_pshape.is_dynamic() || delta_pshape.is_dynamic()) - { - set_output_type(0, data_element_type, PartialShape::dynamic()); - } -} - -shared_ptr - op::v0::GroupConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const -{ - if (new_args.size() != 3) - { - throw ngraph_error("Incorrect number of new arguments"); - } - - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - get_window_movement_strides(), - get_window_dilation_strides(), - get_padding_below(), - get_padding_above(), - get_groups()); -} - -OutputVector op::v0::GroupConvolutionBackpropData::decompose_op() const -{ - auto filters = input_value(1); - auto output_delta = input_value(2); - auto data_shape = get_input_shape(0); - - NodeVector sliced_inputs; - - auto groups = get_groups(); - // slice data shape - data_shape[1] /= groups; - // slice delta - auto sliced_delta = 
builder::opset1::split(output_delta, groups, 1); - // slice filters - auto sliced_filters = builder::opset1::split(filters, groups, 0); - - auto num_spatials = get_window_movement_strides().size(); - - for (size_t i = 0; i < groups; ++i) - { - auto sliced_conv = std::make_shared( - data_shape, - sliced_filters[i], - sliced_delta[i], - get_window_movement_strides(), - get_window_dilation_strides(), - get_padding_below(), - get_padding_above(), - Strides(num_spatials, 1)); // default data dilation strides - - sliced_inputs.push_back(sliced_conv); - } - - size_t concatenation_axis = 1; - return {std::make_shared(sliced_inputs, concatenation_axis)}; -} diff --git a/ngraph/test/runtime/op/group_conv.hpp b/ngraph/test/runtime/op/group_conv.hpp deleted file mode 100644 index 088d9ca4be6..00000000000 --- a/ngraph/test/runtime/op/group_conv.hpp +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "backend_visibility.hpp" -#include "ngraph/op/convolution.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/op/util/fused_op.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -namespace ngraph -{ - namespace op - { - namespace v0 - { - /// \brief Group Convolution - class BACKEND_API GroupConvolution : public ngraph::op::util::FusedOp - { - public: - static constexpr NodeTypeInfo type_info{"GroupConvolution", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - GroupConvolution() = default; - GroupConvolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const Strides& data_dilation_strides, - const size_t groups, - const PadType& pad_type = PadType::EXPLICIT); - - // constructor which accept groups included in filters shape. 
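// The two filter layouts the constructors below accept, with assumed shapes:
//   groups as an attribute: [O, C/G, f1, ..., ff]
//   groups in the filters:  [G, O/G, C/G, f1, ..., ff]
// Folding a flat layout into the grouped one is a single reshape; a sketch:
#include <cstddef>
#include <vector>
inline std::vector<std::size_t> fold_groups_into_filters(std::vector<std::size_t> flat_shape,
                                                         std::size_t groups)
{
    flat_shape[0] /= groups;                       // O -> O/G
    flat_shape.insert(flat_shape.begin(), groups); // prepend G
    return flat_shape;
}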
- GroupConvolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const Strides& data_dilation_strides, - const PadType& pad_type = PadType::EXPLICIT); - Shape get_weights_dimensions() const; - const Strides& get_window_movement_strides() const - { - return m_window_movement_strides; - } - const Strides& get_window_dilation_strides() const - { - return m_window_dilation_strides; - } - const CoordinateDiff& get_padding_below() const { return m_padding_below; } - const CoordinateDiff& get_padding_above() const { return m_padding_above; } - const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; } - Output get_filters() { return input_value(1); } - Output get_data_batch() { return input_value(0); } - size_t get_groups() const { return m_groups; }; - const PadType& get_pad_type() const { return m_pad_type; } - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - virtual OutputVector decompose_op() const override; - - virtual void pre_validate_and_infer_types() override; - virtual void post_validate_and_infer_types() override; - - bool has_groups_in_filters() const { return m_groups_in_filters; } - - protected: - Strides m_window_movement_strides; - Strides m_window_dilation_strides; - CoordinateDiff m_padding_below; - CoordinateDiff m_padding_above; - Strides m_data_dilation_strides; - size_t m_groups; - PadType m_pad_type{PadType::NOTSET}; - - private: - bool m_groups_in_filters; - }; - - /// \brief Group Convolution data batch backprop - class BACKEND_API GroupConvolutionBackpropData : public ngraph::op::util::FusedOp - { - public: - static constexpr NodeTypeInfo type_info{"GroupConvolutionBackpropData", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - GroupConvolutionBackpropData() = default; - GroupConvolutionBackpropData(const Output& data_batch, - const Output& filters, - const Output& output_delta, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const size_t groups); - - const Strides& get_window_movement_strides() const - { - return m_window_movement_strides; - } - const Strides& get_window_dilation_strides() const - { - return m_window_dilation_strides; - } - const CoordinateDiff& get_padding_below() const { return m_padding_below; } - const CoordinateDiff& get_padding_above() const { return m_padding_above; } - size_t get_groups() const { return m_groups; }; - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - virtual OutputVector decompose_op() const override; - - virtual void pre_validate_and_infer_types() override; - - protected: - Strides m_window_movement_strides; - Strides m_window_dilation_strides; - CoordinateDiff m_padding_below; - CoordinateDiff m_padding_above; - size_t m_groups; - }; - } - } // namespace op -} // namespace ngraph - -NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/ngraph/test/runtime/opset0_tbl.hpp b/ngraph/test/runtime/opset0_tbl.hpp deleted file mode 100644 index d5eb01e1bfc..00000000000 --- a/ngraph/test/runtime/opset0_tbl.hpp +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// This collection contains one entry for each op. 
If an op is added it must be -// added to this list. -// -// In order to use this list you want to define a macro named exactly NGRAPH_OP -// When you are done you should undef the macro -// As an example if you wanted to make a list of all op names as strings you could do this: -// -// #define NGRAPH_OP(a,b) #a, -// std::vector op_names{ -// #include "this include file name" -// }; -// #undef NGRAPH_OP -// -// This sample expands to a list like this: -// "Abs", -// "Acos", -// ... -// -// #define NGRAPH_OP(a,b) b::a, -// std::vector op_names{ -// #include "this include file name" -// }; -// #undef NGRAPH_OP -// -// This sample expands to a list like this: -// ngraph::op::Abs, -// ngraph::op::Acos, -// ... -// -// It's that easy. You can use this for fun and profit. - -#ifndef NGRAPH_OP -#warning "NGRAPH_OP not defined" -#define NGRAPH_OP(x, y) -#endif - -NGRAPH_OP(Abs, ngraph::op) -NGRAPH_OP(Acos, ngraph::op) -NGRAPH_OP(Asin, ngraph::op) -NGRAPH_OP(Atan, ngraph::op) -NGRAPH_OP(AvgPool, ngraph::op::v0) -NGRAPH_OP(BatchNormInference, ngraph::op::v0) -NGRAPH_OP(Broadcast, ngraph::op::v1) -NGRAPH_OP(Ceiling, ngraph::op) -NGRAPH_OP(Clamp, ngraph::op) -NGRAPH_OP(Concat, ngraph::op) -NGRAPH_OP(Constant, ngraph::op) -NGRAPH_OP(Convert, ngraph::op) -NGRAPH_OP(Convolution, ngraph::op::v0) -NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v0) -NGRAPH_OP(Cos, ngraph::op) -NGRAPH_OP(Cosh, ngraph::op) -NGRAPH_OP(CumSum, ngraph::op::v0) -NGRAPH_OP(DepthToSpace, ngraph::op) -NGRAPH_OP(Elu, ngraph::op) -NGRAPH_OP(Erf, ngraph::op) -NGRAPH_OP(Exp, ngraph::op) -NGRAPH_OP(FakeQuantize, ngraph::op) -NGRAPH_OP(Floor, ngraph::op) -NGRAPH_OP(GRN, ngraph::op) -NGRAPH_OP(Gather, ngraph::op::v1) -NGRAPH_OP(Gelu, ngraph::op) -NGRAPH_OP(GroupConvolution, ngraph::op::v0) -NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v0) -NGRAPH_OP(HardSigmoid, ngraph::op) -NGRAPH_OP(Interpolate, ngraph::op::v0) -NGRAPH_OP(Log, ngraph::op) -NGRAPH_OP(LRN, ngraph::op) -NGRAPH_OP(LSTMSequence, ngraph::op::v0) -NGRAPH_OP(MatMul, ngraph::op) -NGRAPH_OP(MVN, ngraph::op) -NGRAPH_OP(Negative, ngraph::op) -NGRAPH_OP(NormalizeL2, ngraph::op::v0) -NGRAPH_OP(Parameter, ngraph::op) -NGRAPH_OP(PRelu, ngraph::op) -NGRAPH_OP(PriorBox, ngraph::op) -NGRAPH_OP(Range, ngraph::op) -NGRAPH_OP(Relu, ngraph::op) -NGRAPH_OP(Result, ngraph::op) -NGRAPH_OP(ReverseSequence, ngraph::op) -NGRAPH_OP(Selu, ngraph::op) -NGRAPH_OP(ShapeOf, ngraph::op) -NGRAPH_OP(ShuffleChannels, ngraph::op) -NGRAPH_OP(Sigmoid, ngraph::op) -NGRAPH_OP(Sign, ngraph::op) -NGRAPH_OP(Sin, ngraph::op) -NGRAPH_OP(Sinh, ngraph::op) -NGRAPH_OP(SpaceToDepth, ngraph::op) -NGRAPH_OP(Sqrt, ngraph::op) -NGRAPH_OP(SquaredDifference, ngraph::op) -NGRAPH_OP(Squeeze, ngraph::op) -NGRAPH_OP(Tan, ngraph::op) -NGRAPH_OP(Tanh, ngraph::op) -NGRAPH_OP(TensorIterator, ngraph::op) -NGRAPH_OP(Tile, ngraph::op::v0) -NGRAPH_OP(Unsqueeze, ngraph::op::v0) -NGRAPH_OP(Xor, ngraph::op) diff --git a/ngraph/test/runtime/pass/fused_op_decomposition.cpp b/ngraph/test/runtime/pass/fused_op_decomposition.cpp deleted file mode 100644 index 89cbba09536..00000000000 --- a/ngraph/test/runtime/pass/fused_op_decomposition.cpp +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "fused_op_decomposition.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/provenance.hpp" - -using namespace std; -using namespace ngraph; - -NGRAPH_SUPPRESS_DEPRECATED_START - 
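// A self-contained sketch of the NGRAPH_OP X-macro pattern documented in
// opset0_tbl.hpp above; MINI_OP_TBL is a stand-in for including the real table.
#include <string>
#include <vector>
#define MINI_OP_TBL            \
    NGRAPH_OP(Abs, ngraph::op) \
    NGRAPH_OP(Acos, ngraph::op)
#define NGRAPH_OP(a, b) #a,
static const std::vector<std::string> mini_op_names{MINI_OP_TBL}; // {"Abs", "Acos"}
#undef NGRAPH_OP
#undef MINI_OP_TBL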
-pass::FusedOpDecomposition::FusedOpDecomposition(op_query_t callback) - : m_has_direct_support{callback} -{ -} - -bool pass::FusedOpDecomposition::run_on_node(shared_ptr node) -{ - bool modified = false; - - if (op::supports_decompose(node)) - { - if (m_has_direct_support && m_has_direct_support(*node)) - { - // Op supported by backend. Do not decompose - return modified; - } - - OutputVector output_vector = node->decompose_op(); - NodeVector subgraph_outputs = as_node_vector(output_vector); - - if (ngraph::get_provenance_enabled()) - { - // Capture the input values as an edge for provenance - auto base_input_values = node->input_values(); - auto provenance_tags = node->get_provenance_tags(); - const std::string tag = "get_type_name()) + ">"; - provenance_tags.insert(tag); - - // Transfer the new provenance tags to the newly created ops - for (auto output_node : subgraph_outputs) - { - output_node->add_provenance_tags_above(base_input_values, provenance_tags); - } - } - - // Run recursively until no more fused ops - auto subgraph = extract_subgraph(subgraph_outputs, as_node_vector(node->input_values())); - for (auto subgraph_node : subgraph) - { - run_on_node(subgraph_node); - } - - size_t i = 0; - for (auto output_node : subgraph_outputs) - { - for (size_t j = 0; j < output_node->outputs().size(); j++, i++) - { - std::set> fop_users = node->outputs().at(i).get_target_inputs(); - for (auto fop_user : fop_users) - { - fop_user.replace_source_output(output_node->output(j)); - } - } - } - if (i != node->get_output_size()) - { - throw ngraph_error("While replacing " + node->get_name() + - ", mismatch between op output count and outputs of the decomposed " - "subgraph. Expected: " + - to_string(node->get_output_size()) + " Got: " + to_string(i)); - } - modified = true; - } - - return modified; -} diff --git a/ngraph/test/runtime/pass/fused_op_decomposition.hpp b/ngraph/test/runtime/pass/fused_op_decomposition.hpp deleted file mode 100644 index e1b7fdcfc35..00000000000 --- a/ngraph/test/runtime/pass/fused_op_decomposition.hpp +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include "backend_visibility.hpp" -#include "ngraph/pass/pass.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -namespace ngraph -{ - namespace pass - { - /// \brief The FusedOpDecomposition pass is used to decompose a fused op - /// into a sub-graph of supported ops if the fused op is not supported by - /// the backend. - /// - /// \details By default, the pass decomposes a fused op if it is not - /// supported by the backend and runs recursively until no more fused ops - /// can be found or the new ops are supported by the backend. - /// If the backend supports a fused op, then it can provide a callback - /// function while registering the pass. The callback function can then - /// provide logic to prevent decomposing the supported op. - /// It also adds provenance tags along the way to each op for easy reference - /// and debugging. - /// - /// In the example shown below, the original graph has a fused GeLU op. - /// After applying this pass, the GeLU op is decomposed into group of ops which - /// together perform the same operation as GeLU. - /// - /// - /// - /// - /// - /// - /// - /// - ///
<table> <tr><th>Before the pass</th><th>After the pass</th></tr>
<tr><td>\image html decompose_gelu_pre.svg</td><td>\image html decompose_gelu_post.svg</td></tr> </table>
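// A usage sketch (assumed wiring, not taken from this diff): a backend that
// implements Gelu natively passes a callback so Gelu is left un-decomposed.
#include <memory>
#include "fused_op_decomposition.hpp"
#include "ngraph/op/gelu.hpp"
#include "ngraph/pass/manager.hpp"
inline void decompose_unsupported_fused_ops(std::shared_ptr<ngraph::Function> f)
{
    ngraph::pass::Manager manager;
    manager.register_pass<ngraph::pass::FusedOpDecomposition>(
        [](const ngraph::Node& node) { return ngraph::is_type<ngraph::op::Gelu>(&node); });
    manager.run_passes(f);
}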
- class BACKEND_API FusedOpDecomposition : public NodePass - { - public: /// \brief Function signature type for callback used to check whether provided node - /// is supported by backend. - using op_query_t = std::function; - - /// - /// \brief Constructor for the Fused operation decomposition pass. - /// - /// \param[in] callback The function object used to determine whether current backend - /// provide direct support for passed node. Should have signature: - /// bool fn(const Node&) - /// - FusedOpDecomposition(op_query_t callback = nullptr); - bool run_on_node(std::shared_ptr node) override; - - private: - /// \brief A function returning whether provided Node is supported by current backend. - /// The returned bool value is used to control whether decompose operator or not. - op_query_t m_has_direct_support = nullptr; - }; - } -} - -NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/ngraph/test/runtime/pass/implicit_broadcast_elimination.cpp b/ngraph/test/runtime/pass/implicit_broadcast_elimination.cpp deleted file mode 100644 index ce380781e79..00000000000 --- a/ngraph/test/runtime/pass/implicit_broadcast_elimination.cpp +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "implicit_broadcast_elimination.hpp" - -#include "ngraph/builder/autobroadcast.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "ngraph/op/util/binary_elementwise_comparison.hpp" -#include "ngraph/op/util/binary_elementwise_logical.hpp" -#include "ngraph/op/util/op_types.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -using namespace std; -using namespace ngraph; - -bool ngraph::pass::ImplicitBroadcastElimination::run_on_node(std::shared_ptr node) -{ - if (ngraph::op::supports_auto_broadcast(node)) - { - if (node->get_autob().m_type != op::AutoBroadcastType::NONE) - { - auto new_args = pass::explicit_broadcast(node); - for (size_t i = 0; i < new_args.size(); i++) - { - node->input(i).replace_source_output(new_args[i]->output(0)); - } - return true; - } - } - return false; -} - -NodeVector ngraph::pass::explicit_broadcast(std::shared_ptr& node) -{ - NodeVector rc; - if (ngraph::op::supports_auto_broadcast(node)) - { - auto autob = node->get_autob(); - if (autob.m_type == op::AutoBroadcastType::NONE) - { - for (auto& val : node->input_values()) - rc.emplace_back(val.get_node_shared_ptr()); - } - else if (autob.m_type == op::AutoBroadcastType::NUMPY) - { - rc = as_node_vector(builder::numpy_broadcast_outputs(node->input_values())); - } - else if (autob.m_type == op::AutoBroadcastType::PDPD) - { - rc = as_node_vector(builder::pdpd_broadcast(node->input_values(), autob.m_axis)); - } - else - { - throw ngraph_error("Unsupported implicit broadcast type"); - } - } - return rc; -} diff --git a/ngraph/test/runtime/pass/implicit_broadcast_elimination.hpp b/ngraph/test/runtime/pass/implicit_broadcast_elimination.hpp deleted file mode 100644 index 939c2b2073c..00000000000 --- a/ngraph/test/runtime/pass/implicit_broadcast_elimination.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "backend_visibility.hpp" -#include "ngraph/node.hpp" -#include "ngraph/pass/pass.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -namespace ngraph -{ - namespace pass - { - NodeVector explicit_broadcast(std::shared_ptr& node); - class ImplicitBroadcastElimination; - } -} - -class BACKEND_API 
ngraph::pass::ImplicitBroadcastElimination : public ngraph::pass::NodePass -{ -public: - bool run_on_node(std::shared_ptr node) override; -}; - -NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/ngraph/test/runtime/pass/liveness.cpp b/ngraph/test/runtime/pass/liveness.cpp deleted file mode 100644 index bb0b9487a6e..00000000000 --- a/ngraph/test/runtime/pass/liveness.cpp +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include "liveness.hpp" -#include "ngraph/descriptor/input.hpp" -#include "ngraph/descriptor/output.hpp" -#include "ngraph/function.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/log.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/parameter.hpp" -#include "ngraph/op/result.hpp" -#include "ngraph/util.hpp" - -using namespace std; -using namespace ngraph; - -bool pass::Liveness::run_on_function(shared_ptr function) -{ - auto ops = function->get_ordered_ops(); - - unordered_set persistent_tensors; - unordered_set output_tensors; - for (const shared_ptr& node : function->get_parameters()) - { - for (auto& output : node->outputs()) - { - descriptor::Tensor& tensor = output.get_tensor(); - persistent_tensors.insert(&tensor); - } - } - for (const shared_ptr& node : function->get_results()) - { - for (auto& output : node->outputs()) - { - descriptor::Tensor& tensor = output.get_tensor(); - persistent_tensors.insert(&tensor); - output_tensors.insert(&tensor); - } - } - for (const shared_ptr& node : ops) - { - if (auto constant_node = as_type_ptr(node)) - { - for (auto& output : constant_node->outputs()) - { - descriptor::Tensor& tensor = output.get_tensor(); - persistent_tensors.insert(&tensor); - } - } - } - - unordered_set currently_live; - for (auto it = ops.rbegin(); it != ops.rend(); it++) - { - const shared_ptr& node = *it; - node->liveness_new_list.clear(); - node->liveness_free_list.clear(); - unordered_set input_tensor_decls; - for (auto& input : node->inputs()) - { - descriptor::Tensor& tensor = input.get_tensor(); - if (persistent_tensors.find(&tensor) == persistent_tensors.end()) - { - input_tensor_decls.insert(&tensor); - } - } - - unordered_set output_tensor_decls; - for (auto& output : node->outputs()) - { - descriptor::Tensor& tensor = output.get_tensor(); - if (persistent_tensors.find(&tensor) == persistent_tensors.end()) - { - output_tensor_decls.insert(&tensor); - } - } - - unordered_set free_tensor_decls; - unordered_set new_tensor_decls; - unordered_set all_tensor_decls = input_tensor_decls; - all_tensor_decls.insert(output_tensor_decls.begin(), output_tensor_decls.end()); - - for (descriptor::Tensor* tensor_decl : all_tensor_decls) - { - if (currently_live.find(tensor_decl) == currently_live.end()) - { - // this is the last node that value is seen in - // delete it at the end of the op - currently_live.insert(tensor_decl); - if (output_tensors.find(tensor_decl) == output_tensors.end()) - { - // Don't free output tensors - free_tensor_decls.insert(tensor_decl); - } - } - } - - for (descriptor::Tensor* output_decl : output_tensor_decls) - { - auto currently_live_it = currently_live.find(output_decl); - if (currently_live_it != currently_live.end()) - { - new_tensor_decls.insert(output_decl); - currently_live.erase(currently_live_it); - } - } - node->liveness_free_list = free_tensor_decls; - node->liveness_new_list = new_tensor_decls; - } - - return false; -} diff --git a/ngraph/test/runtime/pass/liveness.hpp 
b/ngraph/test/runtime/pass/liveness.hpp deleted file mode 100644 index 64aff0bc8e0..00000000000 --- a/ngraph/test/runtime/pass/liveness.hpp +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "backend_visibility.hpp" -#include "ngraph/descriptor/tensor.hpp" -#include "ngraph/pass/pass.hpp" - -namespace ngraph -{ - namespace pass - { - class Liveness; - } -} - -class BACKEND_API ngraph::pass::Liveness : public FunctionPass -{ -public: - bool run_on_function(std::shared_ptr) override; -}; diff --git a/ngraph/test/runtime/pass/opset0_downgrade.cpp b/ngraph/test/runtime/pass/opset0_downgrade.cpp deleted file mode 100644 index 4d48854bf6b..00000000000 --- a/ngraph/test/runtime/pass/opset0_downgrade.cpp +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include - -#include "ngraph/builder/autobroadcast.hpp" -#include "ngraph/builder/reshape.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/ops.hpp" -#include "ngraph/provenance.hpp" -#include "ngraph/slice_plan.hpp" -#include "ngraph/type.hpp" -#include "ngraph/validation_util.hpp" -#include "op/avg_pool.hpp" -#include "pass/implicit_broadcast_elimination.hpp" -#include "pass/opset0_downgrade.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -using namespace std; -using namespace ngraph; - -namespace opset0_downgrade -{ - template - shared_ptr op_cast_binary_elementwise_node(const shared_ptr& node) - { - const auto input_arg0 = node->input_value(0); - const auto input_arg1 = node->input_value(1); - const auto autob = node->get_autob(); - auto replacement_node = make_shared(input_arg0, input_arg1, autob); - replace_node(node, replacement_node); - return replacement_node; - } - - template - shared_ptr op_cast_reduction_node(const shared_ptr& node) - { - auto replacement_node = make_shared(node->input_value(0), node->input_value(1)); - if (node->get_keep_dims()) - { - string v1_op_name = string{node->get_type_name()} + ":v1"; - string v0_op_name = string{OpV0{}.get_type_name()} + ":v0"; - - NGRAPH_CHECK(node->reduction_axes_constant(), - "Unable to convert ", - v1_op_name, - "to ", - v0_op_name, - " if reduction axes are not constant (for keep_dims=true). Node: ", - *node); - auto output_pshape = replacement_node->get_output_partial_shape(0); - NGRAPH_CHECK(output_pshape.is_static(), - "Unable to convert ", - v1_op_name, - "to ", - v0_op_name, - " if output shape is dynamic (for keep_dims=true). 
Node: ", - *node); - const auto output_shape = output_pshape.to_shape(); - auto reshaped_output_shape = output_shape; - for (const auto& axis : node->get_reduction_axes()) - { - reshaped_output_shape.insert(reshaped_output_shape.begin() + axis, 1); - } - auto shape_pattern = op::Constant::create( - element::u64, {reshaped_output_shape.size()}, reshaped_output_shape); - auto reshaped_product = - make_shared(replacement_node->output(0), shape_pattern, false); - return reshaped_product; - } - else - { - return replacement_node; - } - } - - // Default is that we did nothing - shared_ptr op_cast(shared_ptr node) { return nullptr; } - shared_ptr op_cast(shared_ptr node) - { - return op_cast_binary_elementwise_node(node); - } - - using DispatchMap = map node)>>; - - template - bool op_cast_thunk(shared_ptr node) - { - auto downgraded_node = op_cast(as_type_ptr(node)); - if (downgraded_node) - { - if (ngraph::get_provenance_enabled()) - { - const std::string provenance_tag = - "get_type_name()) + ")>"; - downgraded_node->add_provenance_tags_above(node->input_values(), {provenance_tag}); - } - return true; - } - return false; - } - - DispatchMap& get_dispatch_map() - { - static DispatchMap dispatch_map{ -#define NGRAPH_OP(NAME, NAMESPACE) {NAMESPACE::NAME::type_info, op_cast_thunk}, -#include "ngraph/opsets/opset1_tbl.hpp" -#undef NGRAPH_OP - }; - return dispatch_map; - } -} // namespace opset0_downgrade - -bool pass::Opset0Downgrade::run_on_node(shared_ptr node) -{ - bool modified = false; - auto& dispatch_map = opset0_downgrade::get_dispatch_map(); - auto it = dispatch_map.find(node->get_type_info()); - if (it != dispatch_map.end()) - { - modified = it->second(node); - } - return modified; -} diff --git a/ngraph/test/runtime/pass/opset0_downgrade.hpp b/ngraph/test/runtime/pass/opset0_downgrade.hpp deleted file mode 100644 index 96a831d9c9f..00000000000 --- a/ngraph/test/runtime/pass/opset0_downgrade.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "backend_visibility.hpp" -#include "ngraph/pass/pass.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -namespace ngraph -{ - namespace pass - { - class BACKEND_API Opset0Downgrade : public NodePass - { - public: - /// - /// \brief Constructor for the Opv1 downgrade transformation pass. - /// - /// \details This transformation pass iterates over all nodes in a graph - /// and updates version 1 ops to their version 0 equivalents. - /// All ops in the final graph have op version 0. 
- Opset0Downgrade() = default; - bool run_on_node(std::shared_ptr node) override; - }; - } -} - -NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/ngraph/test/runtime/pass/opset1_downgrade.cpp b/ngraph/test/runtime/pass/opset1_downgrade.cpp deleted file mode 100644 index 1d77d39b8aa..00000000000 --- a/ngraph/test/runtime/pass/opset1_downgrade.cpp +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "ngraph/node.hpp" -#include "ngraph/ops.hpp" -#include "ngraph/provenance.hpp" -#include "ngraph/validation_util.hpp" -#include "opset1_downgrade.hpp" - -using namespace std; -using namespace ngraph; - -namespace opset1_downgrade -{ - shared_ptr op_cast(shared_ptr node) - { - const auto data = node->input_value(0).get_node_shared_ptr(); - const auto target_shape = node->input_value(1).get_node_shared_ptr(); - - shared_ptr replacement_node; - switch (node->get_broadcast_spec().m_type) - { - case op::BroadcastType::BIDIRECTIONAL: - { - const auto const_filled_with_ones = make_shared( - op::Constant::create(data->get_element_type(), {}, {1}), target_shape); - if (const_filled_with_ones->get_element_type() == element::boolean) - { - replacement_node = make_shared(data, const_filled_with_ones); - } - else - { - replacement_node = make_shared(data, const_filled_with_ones); - } - break; - } - case op::BroadcastType::EXPLICIT: - { - const auto axes_mapping = node->input_value(2).get_node_shared_ptr(); - replacement_node = make_shared( - data, target_shape, axes_mapping, op::AutoBroadcastType::EXPLICIT); - break; - } - case op::BroadcastType::NUMPY: - { - replacement_node = - make_shared(data, target_shape, op::AutoBroadcastType::NUMPY); - break; - } - case op::BroadcastType::PDPD: - { - op::AutoBroadcastSpec broadcast_spec; - broadcast_spec.m_type = op::AutoBroadcastType::PDPD; - broadcast_spec.m_axis = node->get_broadcast_spec().m_axis; - replacement_node = make_shared(data, target_shape, broadcast_spec); - break; - } - default: - { - NGRAPH_CHECK( - true, - "Not supported broadcast type during Broadcast:v3 to Broadcast:v1 conversion. 
", - "Node: ", - *node); - } - } - replace_node(node, replacement_node); - return replacement_node; - } - - shared_ptr op_cast(shared_ptr node) - { - const auto data = node->input_value(0); - const auto k = node->input_value(1); - const auto replacement_node = make_shared(data, - k, - node->get_axis(), - node->get_mode(), - node->get_sort_type(), - node->get_index_element_type()); - replace_node(node, replacement_node); - return replacement_node; - } - - using DispatchMap = map node)>>; - - template - bool op_cast_thunk(shared_ptr node) - { - auto downgraded_node = op_cast(as_type_ptr(node)); - if (downgraded_node) - { - if (ngraph::get_provenance_enabled()) - { - const std::string provenance_tag = - "get_type_name()) + ")>"; - downgraded_node->add_provenance_tags_above(node->input_values(), {provenance_tag}); - } - return true; - } - return false; - } - - DispatchMap& get_dispatch_map() - { - static DispatchMap dispatch_map{ -#define NGRAPH_OP(NAME, NAMESPACE) {NAMESPACE::NAME::type_info, op_cast_thunk}, - NGRAPH_OP(Broadcast, op::v3) NGRAPH_OP(TopK, op::v3) -#undef NGRAPH_OP - }; - return dispatch_map; - } -} // namespace opset1_downgrade - -bool pass::Opset1Downgrade::run_on_node(shared_ptr node) -{ - bool modified = false; - auto& dispatch_map = opset1_downgrade::get_dispatch_map(); - auto it = dispatch_map.find(node->get_type_info()); - if (it != dispatch_map.end()) - { - modified = it->second(node); - } - return modified; -} diff --git a/ngraph/test/runtime/pass/opset1_downgrade.hpp b/ngraph/test/runtime/pass/opset1_downgrade.hpp deleted file mode 100644 index 52e9d8397bc..00000000000 --- a/ngraph/test/runtime/pass/opset1_downgrade.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "backend_visibility.hpp" -#include "ngraph/pass/pass.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -namespace ngraph -{ - namespace pass - { - class BACKEND_API Opset1Downgrade : public NodePass - { - public: - /// - /// \brief Constructor for the Opv1 downgrade transformation pass. - /// - /// \details This transformation pass iterates over all nodes in a graph - /// and updates version 3 ops to their version 1 equivalents. - /// All ops in the final graph have op version 1. 
- Opset1Downgrade() = default; - bool run_on_node(std::shared_ptr node) override; - }; - } -} - -NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/ngraph/test/runtime/pass/opset1_upgrade.cpp b/ngraph/test/runtime/pass/opset1_upgrade.cpp deleted file mode 100644 index 9c8f2d4c8e7..00000000000 --- a/ngraph/test/runtime/pass/opset1_upgrade.cpp +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "opset1_upgrade.hpp" - -#include -#include -#include -#include - -#include "ngraph/builder/autobroadcast.hpp" -#include "ngraph/builder/reshape.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/ops.hpp" -#include "ngraph/provenance.hpp" -#include "op/avg_pool.hpp" -#include "op/convolution.hpp" -#include "op/group_conv.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -using namespace std; -using namespace ngraph; - -namespace opset1_upgrade -{ - template - shared_ptr op_cast_binary_elementwise_node(const shared_ptr& node) - { - const auto autob = node->get_autob(); - auto replacement_node = - make_shared(node->input_value(0), node->input_value(1), autob); - replace_node(node, replacement_node); - return replacement_node; - } - - // Default is that we didn nothing - shared_ptr op_cast(shared_ptr node) { return nullptr; } - shared_ptr op_cast(shared_ptr node) - { - auto data_batch_shape = node->get_data_batch_shape(); - auto strides = node->get_window_movement_strides_forward(); - auto dilations = node->get_window_dilation_strides_forward(); - auto pads_begin = node->get_padding_below_forward(); - auto pads_end = node->get_padding_above_forward(); - auto data_dilation_strides = node->get_data_dilation_strides_forward(); - - bool is_dds_valid = all_of(data_dilation_strides.begin(), - data_dilation_strides.end(), - [](size_t value) { return value == 1; }); - - NGRAPH_CHECK(is_dds_valid, - "Unable to convert ConvolutionBackpropData:0 to ConvolutionBackpropData:1 " - "with data dilation strides " - "other than `1`. Node: ", - *node); - - auto replacement_node = make_shared( - node->input_value(1), // data - node->input_value(0), // filters - op::Constant::create( - element::i64, - Shape{data_batch_shape.size() - 2}, - vector(data_batch_shape.begin() + 2, data_batch_shape.end())), - strides, - pads_begin, - pads_end, - dilations); - replace_node(node, replacement_node); - return replacement_node; - } - - shared_ptr op_cast(shared_ptr node) - { - auto strides = node->get_window_movement_strides(); - auto dilations = node->get_window_dilation_strides(); - auto pads_begin = node->get_padding_below(); - auto pads_end = node->get_padding_above(); - auto data_dilation_strides = node->get_data_dilation_strides(); - auto auto_pad = node->get_pad_type(); - - bool is_dds_valid = all_of(data_dilation_strides.begin(), - data_dilation_strides.end(), - [](size_t value) { return value == 1; }); - - NGRAPH_CHECK(is_dds_valid, - "Unable to convert GroupConvolution:0 to GroupConvolution:1" - "with data dilation strides other than `1`. Node: ", - *node); - - shared_ptr replacement_node; - if (node->has_groups_in_filters()) - { - replacement_node = make_shared(node->input_value(0), - node->input_value(1), - strides, - pads_begin, - pads_end, - dilations, - auto_pad); - } - else - { - NGRAPH_CHECK(node->get_input_partial_shape(1).is_static(), - "Unable to convert GroupConvolution:0 to GroupConvolution:1" - "with dynamic filters shape. 
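The BIDIRECTIONAL case in the deleted opset1_downgrade.cpp above is worth calling out: v1::Broadcast has no bidirectional mode, so the pass synthesizes one from an arithmetic identity. Multiplying the input by a ones-constant broadcast to the target shape (logical AND for boolean tensors) yields exactly the bidirectionally broadcast result under NUMPY auto-broadcast. A standalone sketch of the same trick, assuming the ngraph opset1 API of this era (the function name is illustrative, not from the file):

    #include "ngraph/ngraph.hpp"

    // Emulate Broadcast:v3 BIDIRECTIONAL semantics with opset1 ops:
    // data * ones(target_shape) under NUMPY auto-broadcast merges both shapes,
    // e.g. {2,1,3} against {1,4,1} gives {2,4,3}. For boolean element types the
    // deleted pass used op::v1::LogicalAnd instead of Multiply.
    std::shared_ptr<ngraph::Node>
        bidirectional_broadcast(const ngraph::Output<ngraph::Node>& data,
                                const ngraph::Output<ngraph::Node>& target_shape)
    {
        using namespace ngraph;
        auto one = op::Constant::create(data.get_element_type(), Shape{}, {1});
        auto ones = std::make_shared<op::v1::Broadcast>(one, target_shape);
        return std::make_shared<op::v1::Multiply>(data, ones);
    }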
Node: ", - *node); - - auto filters_shape = node->get_input_shape(1); - auto groups = node->get_groups(); - filters_shape[0] /= groups; - filters_shape.insert(filters_shape.begin(), groups); - - auto reshaped_filters = builder::opset1::reshape(node->input_value(1), filters_shape); - - replacement_node = make_shared(node->input_value(0), - reshaped_filters, - strides, - pads_begin, - pads_end, - dilations, - auto_pad); - } - replace_node(node, replacement_node); - return replacement_node; - } - - shared_ptr op_cast(shared_ptr node) - { - const auto strides = node->get_window_movement_strides(); - const auto dilations = node->get_window_dilation_strides(); - const auto pads_begin = node->get_padding_below(); - const auto pads_end = node->get_padding_above(); - - const auto data_batch_pshape = node->get_input_partial_shape(0); - const auto filters_pshape = node->get_input_partial_shape(1); - - NGRAPH_CHECK(data_batch_pshape.is_static(), - "Unable to convert GroupConvolutionBackpropData:0 to " - "GroupConvolutionBackpropData:1 with dynamic data_batch shape. Node: ", - *node); - NGRAPH_CHECK(filters_pshape.is_static(), - "Unable to convert GroupConvolutionBackpropData:0 to " - "GroupConvolutionBackpropData:1 with dynamic filters shape. Node: ", - *node); - - auto data_batch_shape = data_batch_pshape.to_shape(); - // Remove N, C from output shape to preserve only spatial dimentions. - data_batch_shape.erase(std::begin(data_batch_shape), - std::next(std::begin(data_batch_shape), 2)); - auto filters_shape = filters_pshape.to_shape(); - auto groups = node->get_groups(); - - filters_shape[0] /= groups; - filters_shape.insert(filters_shape.begin(), groups); - auto reshaped_filters = builder::opset1::reshape(node->input_value(1), filters_shape); - - auto replacement_node = make_shared( - node->input_value(2), - reshaped_filters, - op::Constant::create(element::i64, Shape{data_batch_shape.size()}, data_batch_shape), - strides, - pads_begin, - pads_end, - dilations); - replace_node(node, replacement_node); - return replacement_node; - } - - shared_ptr op_cast(shared_ptr node) - { - auto replacement_node = make_shared( - node->input_value(0), node->input_value(1), node->get_autob()); - replace_node(node, replacement_node); - return replacement_node; - } - - using DispatchMap = map node)>>; - - template - bool op_cast_thunk(shared_ptr node) - { - auto upgraded_node = op_cast(as_type_ptr(node)); - if (upgraded_node) - { - if (ngraph::get_provenance_enabled()) - { - const std::string provenance_tag = - "get_type_name()) + ")>"; - upgraded_node->add_provenance_tags_above(node->input_values(), {provenance_tag}); - } - return true; - } - return false; - } - - DispatchMap& get_dispatch_map() - { - NGRAPH_SUPPRESS_DEPRECATED_START - static DispatchMap dispatch_map{ -#define NGRAPH_OP(NAME, NAMESPACE) {NAMESPACE::NAME::type_info, op_cast_thunk}, -#include "opset0_tbl.hpp" -#undef NGRAPH_OP - }; - return dispatch_map; - NGRAPH_SUPPRESS_DEPRECATED_END - } -} // namespace opset1_upgrade - -bool pass::Opset1Upgrade::run_on_node(shared_ptr node) -{ - bool modified = false; - auto& dispatch_map = opset1_upgrade::get_dispatch_map(); - auto it = dispatch_map.find(node->get_type_info()); - if (it != dispatch_map.end()) - { - modified = it->second(node); - } - return modified; -} diff --git a/ngraph/test/runtime/pass/opset1_upgrade.hpp b/ngraph/test/runtime/pass/opset1_upgrade.hpp deleted file mode 100644 index c1942626fe3..00000000000 --- a/ngraph/test/runtime/pass/opset1_upgrade.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// 
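Both deleted passes follow the same NodePass contract: the framework hands run_on_node() every node in the graph, and the pass returns whether it replaced that node. In the test harness such passes were driven through pass::Manager, the same API the tensor.cpp tests below use. A minimal usage sketch, assuming the ngraph pass::Manager API of this code base (the free function is illustrative):

    #include <memory>
    #include "ngraph/pass/manager.hpp"
    #include "opset1_upgrade.hpp"

    // Run the (now removed) upgrade pass over a Function: every v0 op with a
    // registered op_cast() overload is replaced in place by its opset1
    // equivalent; ops hitting the default op_cast() are left untouched.
    void upgrade_to_opset1(const std::shared_ptr<ngraph::Function>& f)
    {
        ngraph::pass::Manager manager;
        manager.register_pass<ngraph::pass::Opset1Upgrade>();
        manager.run_passes(f);
    }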
diff --git a/ngraph/test/runtime/pass/opset1_upgrade.hpp b/ngraph/test/runtime/pass/opset1_upgrade.hpp
deleted file mode 100644
index c1942626fe3..00000000000
--- a/ngraph/test/runtime/pass/opset1_upgrade.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "backend_visibility.hpp"
-#include "ngraph/pass/pass.hpp"
-
-NGRAPH_SUPPRESS_DEPRECATED_START
-
-namespace ngraph
-{
-    namespace pass
-    {
-        class BACKEND_API Opset1Upgrade : public NodePass
-        {
-        public:
-            ///
-            /// \brief Constructor for the Opset1Upgrade transformation pass.
-            ///
-            /// \details This transformation pass iterates over all nodes in a graph
-            /// and updates version 0 ops to their version 1 equivalents.
-            /// All ops in the final graph have op version 1.
-            Opset1Upgrade() = default;
-            bool run_on_node(std::shared_ptr<ngraph::Node> node) override;
-        };
-    }
-}
-
-NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/test/tensor.cpp b/ngraph/test/tensor.cpp
index 6b4c3597eeb..4deaadb3b75 100644
--- a/ngraph/test/tensor.cpp
+++ b/ngraph/test/tensor.cpp
@@ -13,7 +13,6 @@
 #include "ngraph/ngraph.hpp"
 #include "ngraph/opsets/opset6.hpp"
 #include "ngraph/pass/manager.hpp"
-#include "pass/liveness.hpp"
 #include "util/test_tools.hpp"
 
 NGRAPH_SUPPRESS_DEPRECATED_START
@@ -21,66 +20,6 @@ NGRAPH_SUPPRESS_DEPRECATED_START
 using namespace std;
 using namespace ngraph;
 
-TEST(tensor, size)
-{
-    pass::Manager pass_manager;
-
-    pass_manager.register_pass<pass::Liveness>();
-
-    {
-        auto arg0 = make_shared<op::Parameter>(element::f32, Shape{2, 3});
-        auto add = make_shared<op::v1::Add>(arg0, arg0);
-        auto f0 = make_shared<Function>(add, ParameterVector{arg0});
-
-        pass_manager.run_passes(f0);
-
-        ASSERT_EQ(1, arg0->get_output_size());
-        descriptor::Tensor& output = arg0->get_output_tensor(0);
-        EXPECT_EQ(2 * 3 * 4, output.size());
-    }
-
-    {
-        auto arg0 = make_shared<op::Parameter>(element::f32, Shape{});
-        auto add = make_shared<op::v1::Add>(arg0, arg0);
-        auto f0 = make_shared<Function>(add, ParameterVector{arg0});
-
-        pass_manager.run_passes(f0);
-
-        ASSERT_EQ(1, arg0->get_output_size());
-        descriptor::Tensor& output = arg0->get_output_tensor(0);
-        EXPECT_EQ(1 * 4, output.size());
-    }
-
-    {
-        auto arg0 = make_shared<op::Parameter>(element::f32, Shape{1});
-        auto add = make_shared<op::v1::Add>(arg0, arg0);
-        auto f0 = make_shared<Function>(add, ParameterVector{arg0});
-
-        pass_manager.run_passes(f0);
-
-        ASSERT_EQ(1, arg0->get_output_size());
-        descriptor::Tensor& output = arg0->get_output_tensor(0);
-        EXPECT_EQ(1 * 4, output.size());
-    }
-}
-
-TEST(tensor, output_flag)
-{
-    pass::Manager pass_manager;
-    pass_manager.register_pass<pass::Liveness>();
-
-    auto arg0 = make_shared<op::Parameter>(element::f32, Shape{1});
-    auto add = make_shared<op::v1::Add>(arg0, arg0);
-    auto f0 = make_shared<Function>(add, ParameterVector{arg0});
-
-    pass_manager.run_passes(f0);
-
-    for (size_t i = 0; i < f0->get_output_size(); ++i)
-    {
-        EXPECT_TRUE(op::is_output(f0->get_output_op(i)));
-    }
-}
-
 TEST(tensor, tensor_names)
 {
     auto arg0 = make_shared<op::Parameter>(element::f32, Shape{1});
diff --git a/ngraph/test/type_prop/asinh.cpp b/ngraph/test/type_prop/asinh.cpp
new file mode 100644
index 00000000000..e6592a35b81
--- /dev/null
+++ b/ngraph/test/type_prop/asinh.cpp
@@ -0,0 +1,9 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "unary_ops.hpp"
+
+using Type = ::testing::Types<ngraph::op::Asinh>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_asinh, UnaryOperator, Type);
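The batch_to_space changes below relax the data rank requirement from 4 to 2, and the new batch_to_space_output_shape_2D test pins down the shape rule being exercised. For input shape \f$[N, D_1, ..., D_{n-1}]\f$, block shape \f$B\f$, and crops \f$CB\f$/\f$CE\f$, BatchToSpace produces

\f[ out_0 = N / \prod_i B_i, \qquad out_i = D_i \cdot B_i - CB_i - CE_i \f]

so data {10, 26} with block {1, 5}, crops_begin {0, 2}, and crops_end {0, 0} yields {10 / 5, 26 * 5 - 2} = {2, 128}, which is exactly what the new test asserts.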
diff --git a/ngraph/test/type_prop/batch_to_space.cpp b/ngraph/test/type_prop/batch_to_space.cpp
index c324e72e270..54dfd3bfd98 100644
--- a/ngraph/test/type_prop/batch_to_space.cpp
+++ b/ngraph/test/type_prop/batch_to_space.cpp
@@ -49,8 +49,8 @@ TEST(type_prop, batch_to_space_incompatible_input_element_types)
     element::Type integer64_et = element::i64;
     element::Type integer32_et = element::i32;
 
-    Shape data_sshape{10, 26, 4, 4};
-    Shape inputs_sshape{4};
+    Shape data_sshape{10, 26};
+    Shape inputs_sshape{2};
 
     vector<BatchToSpaceInputParams> test_cases;
     test_cases.push_back(
@@ -97,8 +97,8 @@ TEST(type_prop, batch_to_space_invalid_input_element_types)
 {
     element::Type float_et = element::f32;
 
-    Shape data_sshape{10, 26, 4, 4};
-    Shape inputs_sshape{4};
+    Shape data_sshape{10, 26};
+    Shape inputs_sshape{2};
 
     const BatchToSpaceInputParams params{
         InputInfo{float_et, data_sshape},
@@ -124,7 +124,7 @@
 TEST(type_prop, batch_to_space_invalid_data_input_rank)
 {
-    Shape data_sshape{4, 2};
+    Shape data_sshape{4};
     element::Type data_et = element::f32;
 
     Shape inputs_sshape{2};
@@ -143,7 +143,7 @@ TEST(type_prop, batch_to_space_invalid_data_input_rank)
     }
     catch(const NodeValidationFailure& error)
     {
-        EXPECT_HAS_SUBSTRING(error.what(), "data input must have rank greater than or equal to 4");
+        EXPECT_HAS_SUBSTRING(error.what(), "data input must have rank greater or equal than 2.");
     }
     catch (...)
     {
@@ -153,11 +153,11 @@
 TEST(type_prop, batch_to_space_incompatible_secondary_inputs_shapes)
 {
-    Shape data_sshape{10, 26, 4, 4};
+    Shape data_sshape{10, 26};
     element::Type data_et = element::f32;
 
-    Shape inputs_sshape_1D{4};
-    Shape inputs_sshape_2D{4, 1};
+    Shape inputs_sshape_1D{2};
+    Shape inputs_sshape_2D{2, 1};
     element::Type inputs_et = element::i64;
 
     vector<BatchToSpaceInputParams> test_cases;
@@ -203,10 +203,10 @@
 TEST(type_prop, batch_to_space_invalid_secondary_inputs_rank)
 {
-    Shape data_sshape{10, 26, 4, 4};
+    Shape data_sshape{10, 26};
     element::Type data_et = element::f32;
 
-    Shape inputs_sshape_2D{4, 1};
+    Shape inputs_sshape_2D{2, 1};
     element::Type inputs_et = element::i64;
 
     const BatchToSpaceInputParams params{
@@ -233,7 +233,7 @@
 TEST(type_prop, batch_to_space_incompatible_data_and_secondary_inputs_shapes)
 {
-    Shape data_sshape{10, 26, 4, 4};
+    Shape data_sshape{10, 26};
     element::Type data_et = element::f32;
 
     Shape inputs_sshape{5};
@@ -414,6 +414,22 @@ TEST(type_prop, batch_to_space_invalid_crops_out_of_bounds)
     }
 }
 
+TEST(type_prop, batch_to_space_output_shape_2D)
+{
+    auto data = make_shared<op::Parameter>(element::f32, Shape{10, 26});
+    auto block_shape =
+        make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 5});
+    auto crops_begin =
+        make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 2});
+    auto crops_end =
+        make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 0});
+    auto batch_to_space =
+        make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
+
+    ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
+    ASSERT_EQ(batch_to_space->get_shape(), (Shape{10 / 5, 26 * 5 - 2}));
+}
+
 TEST(type_prop, batch_to_space_output_shape_4D)
 {
     auto data = make_shared<op::Parameter>(element::f32, Shape{100, 7, 13, 3});
diff --git a/ngraph/test/type_prop/convolution.cpp b/ngraph/test/type_prop/convolution.cpp
index fdd757da1a6..1e13c890a1e 100644
--- a/ngraph/test/type_prop/convolution.cpp
+++ b/ngraph/test/type_prop/convolution.cpp
@@ -2,7 +2,6 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "op/convolution.hpp"
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
 #include "util/type_prop.hpp"
@@ -10,2234 +9,6 @@
 using namespace std;
 using namespace ngraph;
 
-TEST(type_prop, conv_1d_deduce)
-{
-    // Deduce type
-    auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100});
-    auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});
-    auto conv = make_shared<op::v0::Convolution>(param0,
param1); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91})); - - EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); - EXPECT_EQ(conv->get_window_dilation_strides(), Strides{1}); - EXPECT_EQ(conv->get_data_dilation_strides(), Strides{1}); - - EXPECT_EQ(conv->get_padding_below(), CoordinateDiff{0}); - EXPECT_EQ(conv->get_padding_above(), CoordinateDiff{0}); -} - -TEST(type_prop, conv_1d_deduce_padded) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); - auto move_strides = Strides{1}; - auto dilation_strides = Strides{1}; - auto padding_below = CoordinateDiff{2}; - auto padding_above = CoordinateDiff{3}; - auto conv = make_shared( - param0, param1, move_strides, dilation_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96})); - - EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); - EXPECT_EQ(conv->get_window_dilation_strides(), Strides{1}); - EXPECT_EQ(conv->get_data_dilation_strides(), Strides{1}); - - EXPECT_EQ(conv->get_padding_below(), CoordinateDiff{2}); - EXPECT_EQ(conv->get_padding_above(), CoordinateDiff{3}); -} - -TEST(type_prop, conv_1d_deduce_strided) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); - auto move_strides = Strides{2}; - auto conv = make_shared(param0, param1, move_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 46})); - - EXPECT_EQ(conv->get_window_movement_strides(), Strides{2}); - EXPECT_EQ(conv->get_window_dilation_strides(), Strides{1}); - EXPECT_EQ(conv->get_data_dilation_strides(), Strides{1}); - - EXPECT_EQ(conv->get_padding_below(), CoordinateDiff{0}); - EXPECT_EQ(conv->get_padding_above(), CoordinateDiff{0}); -} - -TEST(type_prop, conv_1d_deduce_strided_padded) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); - auto move_strides = Strides{2}; - auto dilation_strides = Strides{1}; - auto padding_below = CoordinateDiff{2}; - auto padding_above = CoordinateDiff{3}; - auto conv = make_shared( - param0, param1, move_strides, dilation_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 48})); - - EXPECT_EQ(conv->get_window_movement_strides(), Strides{2}); - EXPECT_EQ(conv->get_window_dilation_strides(), Strides{1}); - EXPECT_EQ(conv->get_data_dilation_strides(), Strides{1}); - - EXPECT_EQ(conv->get_padding_below(), CoordinateDiff{2}); - EXPECT_EQ(conv->get_padding_above(), CoordinateDiff{3}); -} - -TEST(type_prop, conv_1d_deduce_strided_small_uneven) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 5}); - auto param1 = make_shared(element::f32, Shape{128, 3, 2}); - auto move_strides = Strides{2}; - auto conv = make_shared(param0, param1, move_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2})); - - EXPECT_EQ(conv->get_window_movement_strides(), Strides{2}); - EXPECT_EQ(conv->get_window_dilation_strides(), Strides{1}); - EXPECT_EQ(conv->get_data_dilation_strides(), Strides{1}); - - EXPECT_EQ(conv->get_padding_below(), CoordinateDiff{0}); - EXPECT_EQ(conv->get_padding_above(), 
CoordinateDiff{0}); -} - -TEST(type_prop, conv_1d_deduce_strided_small_even) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 6}); - auto param1 = make_shared(element::f32, Shape{128, 3, 2}); - auto move_strides = Strides{2}; - auto conv = make_shared(param0, param1, move_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 3})); - - EXPECT_EQ(conv->get_window_movement_strides(), Strides{2}); - EXPECT_EQ(conv->get_window_dilation_strides(), Strides{1}); - EXPECT_EQ(conv->get_data_dilation_strides(), Strides{1}); - - EXPECT_EQ(conv->get_padding_below(), CoordinateDiff{0}); - EXPECT_EQ(conv->get_padding_above(), CoordinateDiff{0}); -} - -TEST(type_prop, conv_1d_deduce_window_dilated) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); - auto move_strides = Strides{1}; - auto dilate_strides = Strides{2}; - auto conv = make_shared(param0, param1, move_strides, dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 82})); - - EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); - EXPECT_EQ(conv->get_window_dilation_strides(), Strides{2}); - EXPECT_EQ(conv->get_data_dilation_strides(), Strides{1}); - - EXPECT_EQ(conv->get_padding_below(), CoordinateDiff{0}); - EXPECT_EQ(conv->get_padding_above(), CoordinateDiff{0}); -} - -TEST(type_prop, conv_1d_deduce_window_dilated_padded) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); - auto move_strides = Strides{1}; - auto dilate_strides = Strides{2}; - auto padding_below = CoordinateDiff{2}; - auto padding_above = CoordinateDiff{3}; - auto conv = make_shared( - param0, param1, move_strides, dilate_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 87})); - - EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); - EXPECT_EQ(conv->get_window_dilation_strides(), Strides{2}); - EXPECT_EQ(conv->get_data_dilation_strides(), Strides{1}); - - EXPECT_EQ(conv->get_padding_below(), CoordinateDiff{2}); - EXPECT_EQ(conv->get_padding_above(), CoordinateDiff{3}); -} - -TEST(type_prop, conv_1d_deduce_window_dilated_data_dilated_padded) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); - auto move_strides = Strides{1}; - auto dilate_strides = Strides{2}; - auto padding_below = CoordinateDiff{2}; - auto padding_above = CoordinateDiff{3}; - auto data_dilate_strides = Strides{3}; - auto conv = make_shared(param0, - param1, - move_strides, - dilate_strides, - padding_below, - padding_above, - data_dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 285})); - - EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); - EXPECT_EQ(conv->get_window_dilation_strides(), Strides{2}); - EXPECT_EQ(conv->get_data_dilation_strides(), Strides{3}); - - EXPECT_EQ(conv->get_padding_below(), CoordinateDiff{2}); - EXPECT_EQ(conv->get_padding_above(), CoordinateDiff{3}); -} - -TEST(type_prop, conv_2d_deduce) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); - auto conv = make_shared(param0, param1); - 
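Every expected spatial size in these deleted deduce tests follows the one convolution output-length rule, stated here once as my summary (not text from the file):

\f[ out = \left\lfloor \frac{(in - 1) \cdot d_{data} + 1 + p_b + p_a - ((k - 1) \cdot d_{win} + 1)}{s} \right\rfloor + 1 \f]

with input length \f$in\f$, kernel size \f$k\f$, stride \f$s\f$, window and data dilations \f$d_{win}\f$/\f$d_{data}\f$, and padding \f$p_b\f$/\f$p_a\f$. For example, in = 100, k = 10 with unit stride, dilation, and no padding gives 91; adding padding 2/3 gives 96; stride 2 gives 46; window dilation 2 gives 82; and data dilation 3 with window dilation 2 plus padding 2/3 gives 285, matching the shapes asserted above.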
EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91, 131})); - - EXPECT_EQ(conv->get_window_movement_strides(), (Strides{1, 1})); - EXPECT_EQ(conv->get_window_dilation_strides(), (Strides{1, 1})); - EXPECT_EQ(conv->get_data_dilation_strides(), (Strides{1, 1})); - - EXPECT_EQ(conv->get_padding_below(), (CoordinateDiff{0, 0})); - EXPECT_EQ(conv->get_padding_above(), (CoordinateDiff{0, 0})); -} - -TEST(type_prop, conv_2d_deduce_padded) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); - auto move_strides = Strides{1, 1}; - auto dilate_strides = Strides{1, 1}; - auto padding_below = CoordinateDiff{2, 3}; - auto padding_above = CoordinateDiff{3, 4}; - auto conv = make_shared( - param0, param1, move_strides, dilate_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96, 138})); - - EXPECT_EQ(conv->get_window_movement_strides(), (Strides{1, 1})); - EXPECT_EQ(conv->get_window_dilation_strides(), (Strides{1, 1})); - EXPECT_EQ(conv->get_data_dilation_strides(), (Strides{1, 1})); - - EXPECT_EQ(conv->get_padding_below(), (CoordinateDiff{2, 3})); - EXPECT_EQ(conv->get_padding_above(), (CoordinateDiff{3, 4})); -} - -TEST(type_prop, conv_2d_deduce_padded_neg) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); - auto move_strides = Strides{1, 1}; - auto dilate_strides = Strides{1, 1}; - auto padding_below = CoordinateDiff{2, -3}; - auto padding_above = CoordinateDiff{3, -4}; - auto conv = make_shared( - param0, param1, move_strides, dilate_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96, 124})); - - EXPECT_EQ(conv->get_window_movement_strides(), (Strides{1, 1})); - EXPECT_EQ(conv->get_window_dilation_strides(), (Strides{1, 1})); - EXPECT_EQ(conv->get_data_dilation_strides(), (Strides{1, 1})); - - EXPECT_EQ(conv->get_padding_below(), (CoordinateDiff{2, -3})); - EXPECT_EQ(conv->get_padding_above(), (CoordinateDiff{3, -4})); -} - -struct DeduceAutoPadTest - : ::testing::TestWithParam< - std::tuple> -{ -}; - -TEST_P(DeduceAutoPadTest, same_lower) -{ - auto image_shape = std::get<0>(GetParam()); - image_shape.insert(image_shape.begin(), {1, 1}); // Add {N, C} - auto filter_shape = std::get<1>(GetParam()); - filter_shape.insert(filter_shape.begin(), {1, 1}); // Add {O, I} - auto param0 = make_shared(element::f32, image_shape); - auto param1 = make_shared(element::f32, filter_shape); - - auto conv = make_shared(param0, - param1, - std::get<2>(GetParam()), - std::get<3>(GetParam()), - CoordinateDiff(), - CoordinateDiff(), - Strides(), - op::PadType::SAME_LOWER); - EXPECT_EQ(conv->get_padding_above(), std::get<4>(GetParam())); - EXPECT_EQ(conv->get_padding_below(), std::get<5>(GetParam())); -} - -INSTANTIATE_TEST_SUITE_P(type_prop, - DeduceAutoPadTest, - ::testing::Values(std::make_tuple(Shape{5, 6}, - Shape{3, 4}, - Strides{2, 1}, - Strides{1, 1}, - CoordinateDiff{1, 1}, - CoordinateDiff{1, 2}), - std::make_tuple(Shape{3, 3}, - Shape{2, 2}, - Strides{1, 1}, - Strides{1, 1}, - CoordinateDiff{0, 0}, - CoordinateDiff{1, 1}), - std::make_tuple(Shape{28, 28}, - Shape{3, 3}, - Strides{2, 2}, - Strides{1, 1}, - CoordinateDiff{0, 0}, - CoordinateDiff{1, 1}), - std::make_tuple(Shape{100, 150}, 
- Shape{10, 20}, - Strides{1, 1}, - Strides{1, 1}, - CoordinateDiff{4, 9}, - CoordinateDiff{5, 10}), - std::make_tuple(Shape{2}, - Shape{1}, - Strides{3}, - Strides{1}, - CoordinateDiff{0}, - CoordinateDiff{0}), - std::make_tuple(Shape{10, 1}, - Shape{4, 1}, - Strides{1, 1}, - Strides{2, 1}, - CoordinateDiff{3, 0}, - CoordinateDiff{3, 0}), - std::make_tuple(Shape{10, 5, 6}, - Shape{3, 3, 4}, - Strides{1, 2, 1}, - Strides{2, 1, 1}, - CoordinateDiff{2, 1, 1}, - CoordinateDiff{2, 1, 2}))); - -TEST(type_prop, conv_2d_deduce_strided) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); - auto move_strides = Strides{2, 3}; - auto conv = make_shared(param0, param1, move_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 46, 44})); - - EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3})); - EXPECT_EQ(conv->get_window_dilation_strides(), (Strides{1, 1})); - EXPECT_EQ(conv->get_data_dilation_strides(), (Strides{1, 1})); - - EXPECT_EQ(conv->get_padding_below(), (CoordinateDiff{0, 0})); - EXPECT_EQ(conv->get_padding_above(), (CoordinateDiff{0, 0})); -} - -TEST(type_prop, conv_2d_deduce_strided_window_dilated) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); - auto move_strides = Strides{2, 3}; - auto dilate_strides = Strides{3, 2}; - auto conv = make_shared(param0, param1, move_strides, dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 37, 38})); - - EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3})); - EXPECT_EQ(conv->get_window_dilation_strides(), (Strides{3, 2})); - EXPECT_EQ(conv->get_data_dilation_strides(), (Strides{1, 1})); - - EXPECT_EQ(conv->get_padding_below(), (CoordinateDiff{0, 0})); - EXPECT_EQ(conv->get_padding_above(), (CoordinateDiff{0, 0})); -} - -TEST(type_prop, conv_2d_deduce_strided_window_dilated_data_dilated) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); - auto move_strides = Strides{2, 3}; - auto dilate_strides = Strides{3, 2}; - auto padding_below = CoordinateDiff{0, 0}; - auto padding_above = CoordinateDiff{0, 0}; - auto data_dilate_strides = Strides{2, 3}; - auto conv = make_shared(param0, - param1, - move_strides, - dilate_strides, - padding_below, - padding_above, - data_dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 86, 137})); - - EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3})); - EXPECT_EQ(conv->get_window_dilation_strides(), (Strides{3, 2})); - EXPECT_EQ(conv->get_data_dilation_strides(), (Strides{2, 3})); - - EXPECT_EQ(conv->get_padding_below(), (CoordinateDiff{0, 0})); - EXPECT_EQ(conv->get_padding_above(), (CoordinateDiff{0, 0})); -} - -TEST(type_prop, conv_2d_deduce_strided_window_dilated_small) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 7, 8}); - auto param1 = make_shared(element::f32, Shape{128, 3, 2, 3}); - auto move_strides = Strides{2, 3}; - auto dilate_strides = Strides{3, 2}; - auto conv = make_shared(param0, param1, move_strides, dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2, 2})); - - 
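The DeduceAutoPadTest cases above all reduce to the standard SAME padding rule. Per spatial axis, with input length \f$in\f$, effective kernel \f$k_{eff} = (k - 1) \cdot d + 1\f$, and stride \f$s\f$, the total padding is

\f[ p_{total} = \max((\lceil in / s \rceil - 1) \cdot s + k_{eff} - in,\; 0) \f]

split as evenly as possible; SAME_LOWER gives the odd leftover to padding_below, while SAME_UPPER would give it to padding_above. For the Shape{5, 6} tuple above: axis 0 has in = 5, k = 3, s = 2, so p_total = 2, split 1/1; axis 1 has in = 6, k = 4, s = 1, so p_total = 3, split below = 2, above = 1, which is why the expected values are above {1, 1} and below {1, 2}.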
EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3})); - EXPECT_EQ(conv->get_window_dilation_strides(), (Strides{3, 2})); - EXPECT_EQ(conv->get_data_dilation_strides(), (Strides{1, 1})); - - EXPECT_EQ(conv->get_padding_below(), (CoordinateDiff{0, 0})); - EXPECT_EQ(conv->get_padding_above(), (CoordinateDiff{0, 0})); -} - -TEST(type_prop, conv_3d_deduce_strided_window_dilated_small) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 7, 8, 10}); - auto param1 = make_shared(element::f32, Shape{128, 3, 2, 3, 2}); - auto move_strides = Strides{2, 3, 4}; - auto dilate_strides = Strides{3, 2, 2}; - auto conv = make_shared(param0, param1, move_strides, dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2, 2, 2})); - - EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3, 4})); - EXPECT_EQ(conv->get_window_dilation_strides(), (Strides{3, 2, 2})); - EXPECT_EQ(conv->get_data_dilation_strides(), (Strides{1, 1, 1})); - - EXPECT_EQ(conv->get_padding_below(), (CoordinateDiff{0, 0, 0})); - EXPECT_EQ(conv->get_padding_above(), (CoordinateDiff{0, 0, 0})); -} - -TEST(type_prop, conv_3d_deduce_strided_window_dilated_data_dilated_small) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 7, 8, 10}); - auto param1 = make_shared(element::f32, Shape{128, 3, 2, 3, 2}); - auto move_strides = Strides{2, 3, 4}; - auto dilate_strides = Strides{3, 2, 2}; - auto padding_below = CoordinateDiff{0, 0, 0}; - auto padding_above = CoordinateDiff{0, 0, 0}; - auto data_dilate_strides = Strides{2, 3, 2}; - auto conv = make_shared(param0, - param1, - move_strides, - dilate_strides, - padding_below, - padding_above, - data_dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 5, 6, 5})); - - EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3, 4})); - EXPECT_EQ(conv->get_window_dilation_strides(), (Strides{3, 2, 2})); - EXPECT_EQ(conv->get_data_dilation_strides(), (Strides{2, 3, 2})); - - EXPECT_EQ(conv->get_padding_below(), (CoordinateDiff{0, 0, 0})); - EXPECT_EQ(conv->get_padding_above(), (CoordinateDiff{0, 0, 0})); -} - -TEST(type_prop, conv_invalid_element_type_mismatch) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{3, 3, 3, 3}); - auto param1 = make_shared(element::i32, Shape{3, 3, 2, 2}); - try - { - auto conv = make_shared(param0, param1); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with element type mismatch not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), - std::string("Element types for data batch and filters do not match")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_0d_input) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{}); - auto param1 = make_shared(element::f32, Shape{}); - try - { - auto conv = make_shared(param0, param1); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid 0D input not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), - std::string("Data batch and filters must have rank of at least 3 " - "(one batch axis, one input-channel axis, " - "and at least one spatial dimension)")); - } - catch (...) 
- { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_1d_input) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{2}); - auto param1 = make_shared(element::f32, Shape{2}); - try - { - auto conv = make_shared(param0, param1); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid 1D input not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), - std::string("Data batch and filters must have rank of at least 3 " - "(one batch axis, one input-channel axis, " - "and at least one spatial dimension)")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_2d_input) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{2, 6}); - auto param1 = make_shared(element::f32, Shape{2, 6}); - try - { - auto conv = make_shared(param0, param1); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid 2D input not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), - std::string("Data batch and filters must have rank of at least 3 " - "(one batch axis, one input-channel axis, " - "and at least one spatial dimension)")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_0_batch_size) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{0, 6, 1}); - auto param1 = make_shared(element::f32, Shape{0, 6, 1}); - try - { - auto conv = make_shared(param0, param1); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with 0 batch size not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), std::string("Batch size is zero")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_0_input_channels) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 0, 1}); - auto param1 = make_shared(element::f32, Shape{5, 0, 1}); - try - { - auto conv = make_shared(param0, param1); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with 0 input channels not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Data batch channel count and/or filter input channel count is zero")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_many) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{5, 2, 3, 3, 3}); - try - { - auto conv = make_shared(param0, param1); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with too many filter dimensions not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), std::string("Data batch and filters rank do not match")); - } - catch (...) 
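The validation tests in this deleted file all use the same try/FAIL/catch scaffold so that the NodeValidationFailure message can be probed with EXPECT_HAS_SUBSTRING. Where only the exception type matters, plain gtest assertions express the same intent more compactly; a sketch of that pattern only (not how this file was written, and using the v0 test-runtime op this file exercised):

    #include "gtest/gtest.h"
    #include "ngraph/ngraph.hpp"

    // Same intent as conv_invalid_element_type_mismatch above: constructing the
    // node must throw NodeValidationFailure when input element types differ.
    TEST(type_prop_sketch, conv_element_type_mismatch_throws)
    {
        using namespace ngraph;
        auto param0 = std::make_shared<op::Parameter>(element::f32, Shape{3, 3, 3, 3});
        auto param1 = std::make_shared<op::Parameter>(element::i32, Shape{3, 3, 2, 2});
        EXPECT_THROW(std::make_shared<op::v0::Convolution>(param0, param1),
                     NodeValidationFailure);
    }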
- { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_few) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{5, 2, 3}); - try - { - auto conv = make_shared(param0, param1); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with too few filter dimensions not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), std::string("Data batch and filters rank do not match")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_0_output_channels) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{0, 2, 3, 3}); - try - { - auto conv = make_shared(param0, param1); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with 0 output channels not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), std::string("Filter output channel count is zero")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_input_channel_mismatch) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 3, 3, 3}); - try - { - auto conv = make_shared(param0, param1); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with channel count mismatch not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string( - "Data batch channel count (2) does not match filter input channel count (3)")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_movement_stride_rank) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); - try - { - auto conv = make_shared(param0, param1, Strides{2, 3, 8}); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with wrong movement stride rank not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Ranks for data item shape/filters shape (data batch has shape " - "{6,2,10,10}, so data item rank is 2 and filters have shape {6,2,3,3}, so " - "filters spatial rank is 2), data dilation (Strides{1, 1}), padding below " - "(CoordinateDiff{0, 0}), padding above (CoordinateDiff{0, 0}), filter " - "strides (Strides{2, 3, 8}), and filter dilation (Strides{1, 1}) do not " - "match")); - } - catch (...) 
- { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_window_dilation_stride_rank) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); - try - { - auto conv = - make_shared(param0, param1, Strides{2, 3}, Strides{2, 3, 8}); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with wrong window dilation stride rank not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Ranks for data item shape/filters shape (data batch has shape " - "{6,2,10,10}, so data item rank is 2 and filters have shape {6,2,3,3}, so " - "filters spatial rank is 2), data dilation (Strides{1, 1}), padding below " - "(CoordinateDiff{0, 0}), padding above (CoordinateDiff{0, 0}), filter " - "strides (Strides{2, 3}), and filter dilation (Strides{2, 3, 8}) do not " - "match")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_data_dilation_stride_rank) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); - try - { - auto conv = make_shared(param0, - param1, - Strides{2, 3}, - Strides{2, 3}, - CoordinateDiff{0, 0}, - CoordinateDiff{0, 0}, - Strides{2, 3, 8}); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with wrong data dilation stride rank not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Ranks for data item shape/filters shape (data batch has shape " - "{6,2,10,10}, so data item rank is 2 and filters have shape {6,2,3,3}, so " - "filters spatial rank is 2), data dilation (Strides{2, 3, 8}), padding " - "below (CoordinateDiff{0, 0}), padding above (CoordinateDiff{0, 0}), " - "filter strides (Strides{2, 3}), and filter dilation (Strides{2, 3}) do " - "not match")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_padding_below_rank) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); - try - { - auto conv = make_shared(param0, - param1, - Strides{2, 3}, - Strides{1, 1}, - CoordinateDiff{0, 0, 0}, - CoordinateDiff{0, 0}); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with wrong padding-below rank not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string( - "Ranks for data item shape/filters shape (data batch has shape " - "{6,2,10,10}, so data item rank is 2 and filters have shape {6,2,3,3}, so " - "filters spatial rank is 2), data dilation (Strides{1, 1}), padding below " - "(CoordinateDiff{0, 0, 0}), padding above (CoordinateDiff{0, 0}), filter " - "strides (Strides{2, 3}), and filter dilation (Strides{1, 1}) do not match")); - } - catch (...) 
- { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_padding_above_rank) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); - try - { - auto conv = make_shared(param0, - param1, - Strides{2, 3}, - Strides{2, 3}, - CoordinateDiff{0, 0}, - CoordinateDiff{0, 0, 0}); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with wrong padding-above rank not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string( - "Ranks for data item shape/filters shape (data batch has shape " - "{6,2,10,10}, so data item rank is 2 and filters have shape {6,2,3,3}, so " - "filters spatial rank is 2), data dilation (Strides{1, 1}), padding below " - "(CoordinateDiff{0, 0}), padding above (CoordinateDiff{0, 0, 0}), filter " - "strides (Strides{2, 3}), and filter dilation (Strides{2, 3}) do not match")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_input_spatial_size_negative_after_padding) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); - try - { - auto conv = make_shared(param0, - param1, - Strides{1, 1}, - Strides{1, 1}, - CoordinateDiff{-4, 0}, - CoordinateDiff{-7, 0}); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with negative-length post-padding spatial axis not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), - std::string("Data shape after padding and dilation has dimension less " - "than 1 (dim: -1) at axis 0")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_input_spatial_size_zero_after_padding) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); - try - { - auto conv = make_shared(param0, - param1, - Strides{1, 1}, - Strides{1, 1}, - CoordinateDiff{-4, 0}, - CoordinateDiff{-6, 0}); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with zero-length post-padding spatial axis not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), - std::string("Data shape after padding and dilation has dimension less " - "than 1 (dim: 0) at axis 0")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_input_spatial_size_0) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 0, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); - try - { - auto conv = make_shared(param0, param1); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with zero-length spatial axis not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), - std::string("Data shape after padding and dilation has " - "dimension less than 1 (dim: 0) at axis 0")); - } - catch (...) 
- { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_window_size_0) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 0}); - try - { - auto conv = make_shared(param0, param1); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with zero-length window axis not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Window after dilation has dimension less than 1 (dim: 0) at axis 1")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_window_dilation_stride_0) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); - try - { - auto conv = make_shared(param0, param1, Strides{2, 3}, Strides{2, 0}); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with wrong 0-length window dilation stride axis not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Window dilation (Strides{2, 0}) has zero dimension at axis 1")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_data_dilation_stride_0) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); - try - { - auto conv = make_shared(param0, - param1, - Strides{2, 3}, - Strides{2, 3}, - CoordinateDiff{0, 0}, - CoordinateDiff{0, 0}, - Strides{2, 0}); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with wrong 0-length data dilation stride axis not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Data dilation (Strides{2, 0}) has zero dimension at axis 1")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_dilated_window_too_large) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 8, 8}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); - try - { - auto conv = make_shared(param0, param1, Strides{1, 1}, Strides{4, 4}); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with oversized dilated window not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), - std::string("Window after dilation has dimension (dim: 9) larger than " - "the data shape after padding (dim: 8) at axis 0")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_invalid_movement_stride_0) -{ - // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); - try - { - auto conv = make_shared(param0, param1, Strides{0, 1}); - - // Should have thrown, so fail if it didn't - FAIL() << "Invalid input with wrong 0-length movement stride axis not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Window strides (Strides{0, 1}) has zero dimension at axis 0")); - } - catch (...) 
- { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_ok) -{ - PartialShape data_batch_shape{PartialShape::dynamic()}; - PartialShape filters_shape{PartialShape::dynamic()}; - Strides window_movement_strides{1, 1}; - Strides window_dilation_strides{1, 1}; - CoordinateDiff padding_below{0, 0}; - CoordinateDiff padding_above{0, 0}; - Strides data_dilation_strides{1, 1}; - - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); - - auto conv = make_shared(param0, - param1, - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above, - data_dilation_strides); - - ASSERT_EQ(conv->get_output_element_type(0), element::f32); - ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); -} - -TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_strides_rank_wrong) -{ - PartialShape data_batch_shape{PartialShape::dynamic()}; - PartialShape filters_shape{PartialShape::dynamic()}; - Strides window_movement_strides{1, 1, 1}; - Strides window_dilation_strides{1, 1}; - CoordinateDiff padding_below{0, 0}; - CoordinateDiff padding_above{0, 0}; - Strides data_dilation_strides{1, 1}; - - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); - - try - { - auto conv = make_shared(param0, - param1, - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above, - data_dilation_strides); - - FAIL() << "Window stride rank mismatch not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Ranks for data item shape/filters shape (data batch has shape ?, so data " - "item rank is ? and filters have shape ?, so filters spatial rank is ?), " - "data dilation (Strides{1, 1}), padding below (CoordinateDiff{0, 0}), " - "padding above (CoordinateDiff{0, 0}), filter strides (Strides{1, 1, 1}), " - "and filter dilation (Strides{1, 1}) do not match")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_strides_dim_zero) -{ - PartialShape data_batch_shape{PartialShape::dynamic()}; - PartialShape filters_shape{PartialShape::dynamic()}; - Strides window_movement_strides{1, 0}; - Strides window_dilation_strides{1, 1}; - CoordinateDiff padding_below{0, 0}; - CoordinateDiff padding_above{0, 0}; - Strides data_dilation_strides{1, 1}; - - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); - - try - { - auto conv = make_shared(param0, - param1, - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above, - data_dilation_strides); - - FAIL() << "Window stride with dimension zero not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Window strides (Strides{1, 0}) has zero dimension at axis 1")); - } - catch (...) 
- { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_dilation_rank_wrong) -{ - PartialShape data_batch_shape{PartialShape::dynamic()}; - PartialShape filters_shape{PartialShape::dynamic()}; - Strides window_movement_strides{1, 1}; - Strides window_dilation_strides{1, 1, 1}; - CoordinateDiff padding_below{0, 0}; - CoordinateDiff padding_above{0, 0}; - Strides data_dilation_strides{1, 1}; - - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); - - try - { - auto conv = make_shared(param0, - param1, - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above, - data_dilation_strides); - - FAIL() << "Window dilation rank mismatch not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Ranks for data item shape/filters shape (data batch has shape ?, so data " - "item rank is ? and filters have shape ?, so filters spatial rank is ?), " - "data dilation (Strides{1, 1}), padding below (CoordinateDiff{0, 0}), " - "padding above (CoordinateDiff{0, 0}), filter strides (Strides{1, 1}), and " - "filter dilation (Strides{1, 1, 1}) do not match")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_dilation_dim_zero) -{ - PartialShape data_batch_shape{PartialShape::dynamic()}; - PartialShape filters_shape{PartialShape::dynamic()}; - Strides window_movement_strides{1, 1}; - Strides window_dilation_strides{1, 0}; - CoordinateDiff padding_below{0, 0}; - CoordinateDiff padding_above{0, 0}; - Strides data_dilation_strides{1, 1}; - - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); - - try - { - auto conv = make_shared(param0, - param1, - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above, - data_dilation_strides); - - FAIL() << "Window dilation with dimension zero not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Window dilation (Strides{1, 0}) has zero dimension at axis 1")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_padding_below_rank_wrong) -{ - PartialShape data_batch_shape{PartialShape::dynamic()}; - PartialShape filters_shape{PartialShape::dynamic()}; - Strides window_movement_strides{1, 1}; - Strides window_dilation_strides{1, 1}; - CoordinateDiff padding_below{0, 0, 0}; - CoordinateDiff padding_above{0, 0}; - Strides data_dilation_strides{1, 1}; - - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); - - try - { - auto conv = make_shared(param0, - param1, - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above, - data_dilation_strides); - - FAIL() << "Padding below rank mismatch not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Ranks for data item shape/filters shape (data batch has shape ?, so data " - "item rank is ? 
and filters have shape ?, so filters spatial rank is ?), " - "data dilation (Strides{1, 1}), padding below (CoordinateDiff{0, 0, 0}), " - "padding above (CoordinateDiff{0, 0}), filter strides (Strides{1, 1}), and " - "filter dilation (Strides{1, 1}) do not match")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_padding_above_rank_wrong) -{ - PartialShape data_batch_shape{PartialShape::dynamic()}; - PartialShape filters_shape{PartialShape::dynamic()}; - Strides window_movement_strides{1, 1}; - Strides window_dilation_strides{1, 1}; - CoordinateDiff padding_below{0, 0}; - CoordinateDiff padding_above{0, 0, 0}; - Strides data_dilation_strides{1, 1}; - - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); - - try - { - auto conv = make_shared(param0, - param1, - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above, - data_dilation_strides); - - FAIL() << "Padding above rank mismatch not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Ranks for data item shape/filters shape (data batch has shape ?, so data " - "item rank is ? and filters have shape ?, so filters spatial rank is ?), " - "data dilation (Strides{1, 1}), padding below (CoordinateDiff{0, 0}), " - "padding above (CoordinateDiff{0, 0, 0}), filter strides (Strides{1, 1}), " - "and filter dilation (Strides{1, 1}) do not match")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_data_dilation_rank_wrong) -{ - PartialShape data_batch_shape{PartialShape::dynamic()}; - PartialShape filters_shape{PartialShape::dynamic()}; - Strides window_movement_strides{1, 1}; - Strides window_dilation_strides{1, 1}; - CoordinateDiff padding_below{0, 0}; - CoordinateDiff padding_above{0, 0}; - Strides data_dilation_strides{1, 1, 1}; - - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); - - try - { - auto conv = make_shared(param0, - param1, - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above, - data_dilation_strides); - - FAIL() << "Data dilation rank mismatch not detected"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING( - error.what(), - std::string("Ranks for data item shape/filters shape (data batch has shape ?, so data " - "item rank is ? and filters have shape ?, so filters spatial rank is ?), " - "data dilation (Strides{1, 1, 1}), padding below (CoordinateDiff{0, 0}), " - "padding above (CoordinateDiff{0, 0}), filter strides (Strides{1, 1}), and " - "filter dilation (Strides{1, 1}) do not match")); - } - catch (...) 
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_data_dilation_dim_zero)
-{
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    PartialShape filters_shape{PartialShape::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 0};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    try
-    {
-        auto conv = make_shared<op::v0::Convolution>(param0,
-                                                     param1,
-                                                     window_movement_strides,
-                                                     window_dilation_strides,
-                                                     padding_below,
-                                                     padding_above,
-                                                     data_dilation_strides);
-
-        FAIL() << "Data dilation with dimension zero not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            std::string("Data dilation (Strides{1, 0}) has zero dimension at axis 1"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_ok)
-{
-    PartialShape data_batch_shape{PartialShape::dynamic(4)};
-    PartialShape filters_shape{PartialShape::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    auto conv = make_shared<op::v0::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
-
-    ASSERT_EQ(conv->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
-}
-
-TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_data_batch_rank_wrong)
-{
-    PartialShape data_batch_shape{PartialShape::dynamic(5)};
-    PartialShape filters_shape{PartialShape::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    try
-    {
-        auto conv = make_shared<op::v0::Convolution>(param0,
-                                                     param1,
-                                                     window_movement_strides,
-                                                     window_dilation_strides,
-                                                     padding_below,
-                                                     padding_above,
-                                                     data_dilation_strides);
-
-        FAIL() << "Data batch rank mismatch not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            std::string("Ranks for data item shape/filters shape (data batch has shape "
-                        "{?,?,?,?,?}, so data item rank is 3 and filters have shape ?, so filters "
-                        "spatial rank is ?), data dilation (Strides{1, 1}), padding below "
-                        "(CoordinateDiff{0, 0}), padding above (CoordinateDiff{0, 0}), filter "
-                        "strides (Strides{1, 1}), and filter dilation (Strides{1, 1}) do not "
-                        "match"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_batch_size_known_ok)
-{
-    PartialShape data_batch_shape{
-        64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()};
-    PartialShape filters_shape{PartialShape::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    auto conv = make_shared<op::v0::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
-
-    ASSERT_EQ(conv->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
-        PartialShape{64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}));
-}
-
-TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_batch_size_known_zero)
-{
-    PartialShape data_batch_shape{
-        0, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()};
-    PartialShape filters_shape{PartialShape::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    try
-    {
-        auto conv = make_shared<op::v0::Convolution>(param0,
-                                                     param1,
-                                                     window_movement_strides,
-                                                     window_dilation_strides,
-                                                     padding_below,
-                                                     padding_above,
-                                                     data_dilation_strides);
-
-        FAIL() << "Zero batch size not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), std::string("Batch size is zero"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_input_channel_count_known_ok)
-{
-    PartialShape data_batch_shape{
-        Dimension::dynamic(), 3, Dimension::dynamic(), Dimension::dynamic()};
-    PartialShape filters_shape{PartialShape::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    auto conv = make_shared<op::v0::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
-
-    ASSERT_EQ(conv->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
-}
-
-TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_input_channel_count_known_zero)
-{
-    PartialShape data_batch_shape{
-        Dimension::dynamic(), 0, Dimension::dynamic(), Dimension::dynamic()};
-    PartialShape filters_shape{PartialShape::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    try
-    {
-        auto conv = make_shared<op::v0::Convolution>(param0,
-                                                     param1,
-                                                     window_movement_strides,
-                                                     window_dilation_strides,
-                                                     padding_below,
-                                                     padding_above,
-                                                     data_dilation_strides);
-
-        FAIL() << "Zero input channel count not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            std::string("Data batch channel count and/or filter input channel count is zero"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_output_channel_count_known_ok)
-{
-    PartialShape data_batch_shape{PartialShape::dynamic(4)};
-    PartialShape filters_shape{
-        32, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    auto conv = make_shared<op::v0::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
-
-    ASSERT_EQ(conv->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
-        PartialShape{Dimension::dynamic(), 32, Dimension::dynamic(), Dimension::dynamic()}));
-}
-
-TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_output_channel_count_known_zero)
-{
-    PartialShape data_batch_shape{PartialShape::dynamic(4)};
-    PartialShape filters_shape{0, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    try
-    {
-        auto conv = make_shared<op::v0::Convolution>(param0,
-                                                     param1,
-                                                     window_movement_strides,
-                                                     window_dilation_strides,
-                                                     padding_below,
-                                                     padding_above,
-                                                     data_dilation_strides);
-
-        FAIL() << "Zero output channel count not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), std::string("Filter output channel count is zero"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_input_channel_count_known_ok)
-{
-    PartialShape data_batch_shape{PartialShape::dynamic(4)};
-    PartialShape filters_shape{Dimension::dynamic(), 4, Dimension::dynamic(), Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    auto conv = make_shared<op::v0::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
-
-    ASSERT_EQ(conv->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
-}
-
-TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_input_channel_count_known_zero)
-{
-    PartialShape data_batch_shape{PartialShape::dynamic(4)};
-    PartialShape filters_shape{Dimension::dynamic(), 0, Dimension::dynamic(), Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    try
-    {
-        auto conv = make_shared<op::v0::Convolution>(param0,
-                                                     param1,
-                                                     window_movement_strides,
-                                                     window_dilation_strides,
-                                                     padding_below,
-                                                     padding_above,
-                                                     data_dilation_strides);
-
-        FAIL() << "Zero input channel count not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            std::string("Data batch channel count and/or filter input channel count is zero"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_ok)
-{
-    PartialShape data_batch_shape{PartialShape::dynamic(4)};
-    PartialShape filters_shape{PartialShape::dynamic(4)};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    auto conv = make_shared<op::v0::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
-
-    ASSERT_EQ(conv->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
-}
-
-TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_arg_ranks_mismatch)
-{
-    PartialShape data_batch_shape{PartialShape::dynamic(5)};
-    PartialShape filters_shape{PartialShape::dynamic(4)};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    try
-    {
-        auto conv = make_shared<op::v0::Convolution>(param0,
-                                                     param1,
-                                                     window_movement_strides,
-                                                     window_dilation_strides,
-                                                     padding_below,
-                                                     padding_above,
-                                                     data_dilation_strides);
-
-        FAIL() << "Argument rank mismatch not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Data batch and filters rank do not match (data batch "
-                                         "shape: {?,?,?,?,?}, filters shape: {?,?,?,?})"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_input_channel_counts_known_ok)
-{
-    PartialShape data_batch_shape{
-        Dimension::dynamic(), 3, Dimension::dynamic(), Dimension::dynamic()};
-    PartialShape filters_shape{Dimension::dynamic(), 3, Dimension::dynamic(), Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    auto conv = make_shared<op::v0::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
-
-    ASSERT_EQ(conv->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
-}
-
-TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_input_channel_counts_mismatch)
-{
-    PartialShape data_batch_shape{
-        Dimension::dynamic(), 3, Dimension::dynamic(), Dimension::dynamic()};
-    PartialShape filters_shape{
-        Dimension::dynamic(), 22, Dimension::dynamic(), Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    try
-    {
-        auto conv = make_shared<op::v0::Convolution>(param0,
-                                                     param1,
-                                                     window_movement_strides,
-                                                     window_dilation_strides,
-                                                     padding_below,
-                                                     padding_above,
-                                                     data_dilation_strides);
-
-        FAIL() << "Input channel count mismatch not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            std::string(
-                "Data batch channel count (3) does not match filter input channel count (22)"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspatial_known_ok)
-{
-    PartialShape data_batch_shape{64, 3, Dimension::dynamic(), Dimension::dynamic()};
-    PartialShape filters_shape{100, 3, Dimension::dynamic(), Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    auto conv = make_shared<op::v0::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
-
-    ASSERT_EQ(conv->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
-        PartialShape{64, 100, Dimension::dynamic(), Dimension::dynamic()}));
-}
-
-TEST(type_prop,
-     conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspatial_some_spatial_known_ok)
-{
-    PartialShape data_batch_shape{64, 3, 200, Dimension::dynamic()};
-    PartialShape filters_shape{100, 3, 5, Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    auto conv = make_shared<op::v0::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
-
-    ASSERT_EQ(conv->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
-        PartialShape{64, 100, 196, Dimension::dynamic()}));
-}
-
-TEST(
-    type_prop,
-    conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspatial_some_spatial_known_filters_too_big)
-{
-    PartialShape data_batch_shape{64, 3, 200, Dimension::dynamic()};
-    PartialShape filters_shape{100, 3, 201, Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    try
-    {
-        auto conv = make_shared<op::v0::Convolution>(param0,
-                                                     param1,
-                                                     window_movement_strides,
-                                                     window_dilation_strides,
-                                                     padding_below,
-                                                     padding_above,
-                                                     data_dilation_strides);
-
-        FAIL() << "Oversize filter not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Window after dilation has dimension (dim: 201) larger "
-                                         "than the data shape after padding (dim: 200) at axis 0"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(
-    type_prop,
-    conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspatial_some_spatial_known_filters_not_too_big_after_padding)
-{
-    PartialShape data_batch_shape{64, 3, 200, Dimension::dynamic()};
-    PartialShape filters_shape{100, 3, 201, Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{2, 0};
-    CoordinateDiff padding_above{-1, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    auto conv = make_shared<op::v0::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
-
-    ASSERT_EQ(conv->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
-        PartialShape{64, 100, 1, Dimension::dynamic()}));
-}
-
-TEST(
-    type_prop,
-    conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspatial_some_spatial_known_filters_not_too_big_after_data_dilation)
-{
-    PartialShape data_batch_shape{64, 3, 200, Dimension::dynamic()};
-    PartialShape filters_shape{100, 3, 201, Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{2, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    auto conv = make_shared<op::v0::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
-
-    ASSERT_EQ(conv->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
-        PartialShape{64, 100, 199, Dimension::dynamic()}));
-}
-
-TEST(
-    type_prop,
-    conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspatial_some_spatial_known_filters_not_too_big_after_data_dilation_strided)
-{
-    PartialShape data_batch_shape{64, 3, 200, Dimension::dynamic()};
-    PartialShape filters_shape{100, 3, 201, Dimension::dynamic()};
-    Strides window_movement_strides{3, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{2, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    auto conv = make_shared<op::v0::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
-
-    ASSERT_EQ(conv->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
-        PartialShape{64, 100, 67, Dimension::dynamic()}));
-}
-
-TEST(
-    type_prop,
-    conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspatial_some_spatial_known_filters_too_big_after_filter_dilation)
-{
-    PartialShape data_batch_shape{64, 3, 200, Dimension::dynamic()};
-    PartialShape filters_shape{100, 3, 101, Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{2, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    try
-    {
-        auto conv =
-            make_shared<op::v0::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
-
-        FAIL() << "Oversize filter after window dilation not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Window after dilation has dimension (dim: 201) larger "
-                                         "than the data shape after padding (dim: 200) at axis 0"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(
-    type_prop,
-    conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspatial_some_spatial_zero_data_batch_dim)
-{
-    PartialShape data_batch_shape{64, 3, 200, 0};
-    PartialShape filters_shape{100, 3, 5, Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    try
-    {
-        auto conv = make_shared<op::v0::Convolution>(param0,
-                                                     param1,
-                                                     window_movement_strides,
-                                                     window_dilation_strides,
-                                                     padding_below,
-                                                     padding_above,
-                                                     data_dilation_strides);
-
-        FAIL() << "Zero dimension in data batch not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Data shape after padding and dilation has "
-                                         "dimension less than 1 (dim: 0) at axis 1"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(
-    type_prop,
-    conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspatial_some_spatial_positive_data_batch_dim_after_padding)
-{
-    PartialShape data_batch_shape{64, 3, 200, 0};
-    PartialShape filters_shape{100, 3, 5, Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 2};
-    CoordinateDiff padding_above{0, -1};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    auto conv = make_shared<op::v0::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
-
-    ASSERT_EQ(conv->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
-        PartialShape{64, 100, 196, Dimension::dynamic()}));
-}
-
-TEST(
-    type_prop,
-    conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspatial_some_spatial_zero_data_batch_dim_after_padding)
-{
-    PartialShape data_batch_shape{64, 3, 200, 20};
-    PartialShape filters_shape{100, 3, 5, Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, 0};
-    CoordinateDiff padding_above{0, -20};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    try
-    {
-        auto conv = make_shared<op::v0::Convolution>(param0,
-                                                     param1,
-                                                     window_movement_strides,
-                                                     window_dilation_strides,
-                                                     padding_below,
-                                                     padding_above,
-                                                     data_dilation_strides);
-
-        FAIL() << "Zero padded dimension in data batch not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Data shape after padding and dilation has "
-                                         "dimension less than 1 (dim: 0) at axis 1"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(
-    type_prop,
-    conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspatial_some_spatial_negative_data_batch_dim_after_padding)
-{
-    PartialShape data_batch_shape{64, 3, 200, 20};
-    PartialShape filters_shape{100, 3, 5, Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{0, -1};
-    CoordinateDiff padding_above{0, -20};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
-
-    try
-    {
-        auto conv = make_shared<op::v0::Convolution>(param0,
-                                                     param1,
-                                                     window_movement_strides,
-                                                     window_dilation_strides,
-                                                     padding_below,
-                                                     padding_above,
-                                                     data_dilation_strides);
-
-        FAIL() << "Negative padded dimension in data batch not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Data shape after padding and dilation has dimension less "
-                                         "than 1 (dim: -1) at axis 1"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, conv_partial_dynamic_et)
-{
-    // For this test the exact shape parameters are kind of arbitrary---just copied and pasted
-    // from some known-"OK" test above. We're only concerned about the element types.
-    PartialShape data_batch_shape{64, 3, 200, Dimension::dynamic()};
-    PartialShape filters_shape{100, 3, 201, Dimension::dynamic()};
-    Strides window_movement_strides{1, 1};
-    Strides window_dilation_strides{1, 1};
-    CoordinateDiff padding_below{2, 0};
-    CoordinateDiff padding_above{-1, 0};
-    Strides data_dilation_strides{1, 1};
-
-    auto param0 = make_shared<op::Parameter>(element::dynamic, data_batch_shape);
-    auto param1 = make_shared<op::Parameter>(element::dynamic, filters_shape);
-
-    auto conv = make_shared<op::v0::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
-
-    ASSERT_TRUE(conv->get_output_element_type(0).is_dynamic());
-    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
-        PartialShape{64, 100, 1, Dimension::dynamic()}));
-}
-
 TEST(type_prop, conv_v1_partial_rank)
 {
     PartialShape data_batch_shape{PartialShape::dynamic()};
diff --git a/ngraph/test/type_prop/convolution_backprop_data.cpp b/ngraph/test/type_prop/convolution_backprop_data.cpp
index 3dfac03a1cb..338044bcc98 100644
--- a/ngraph/test/type_prop/convolution_backprop_data.cpp
+++ b/ngraph/test/type_prop/convolution_backprop_data.cpp
@@ -16,260 +16,11 @@
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
-#include "op/convolution.hpp"
 #include "util/type_prop.hpp"
 
 using namespace std;
 using namespace ngraph;
 
-// ---------------------------- v0 ----------------------------
-TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce)
-{
-    // Deduce type
-    Shape data_batch_shape{64, 3, 100};
-    auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});  // filters
-    auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 91}); // output delta
-    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
-                                                             param0,
-                                                             param1,
-                                                             Strides{1},
-                                                             Strides{1},
-                                                             CoordinateDiff{0},
-                                                             CoordinateDiff{0},
-                                                             Strides{1});
-    EXPECT_EQ(conv->get_element_type(), element::f32);
-    EXPECT_EQ(conv->get_shape(), data_batch_shape);
-
-    EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
-    EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
-    EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
-
-    EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
-    EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
-}
-
-TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_padded)
-{
-    // Deduce type
-    Shape data_batch_shape{64, 3, 100};
-    auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});  // filters
-    auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 96}); // output delta
-    auto move_strides = Strides{1};
-    auto dilation_strides = Strides{1};
-    auto padding_below = CoordinateDiff{2};
-    auto padding_above = CoordinateDiff{3};
-    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
-                                                             param0,
-                                                             param1,
-                                                             move_strides,
-                                                             dilation_strides,
-                                                             padding_below,
-                                                             padding_above,
-                                                             Strides{1});
-    EXPECT_EQ(conv->get_element_type(), element::f32);
-    EXPECT_EQ(conv->get_shape(), data_batch_shape);
-
-    EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
-    EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
-    EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
-
-    EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{2});
-    EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
-}
-
-TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_strided)
-{
-    // Deduce type
-    Shape data_batch_shape{64, 3, 100};
-    auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});  // filters
-    auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 46}); // output delta
-    auto move_strides = Strides{2};
-    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
-                                                             param0,
-                                                             param1,
-                                                             move_strides,
-                                                             Strides{1},
-                                                             CoordinateDiff{0},
-                                                             CoordinateDiff{0},
-                                                             Strides{1});
-    EXPECT_EQ(conv->get_element_type(), element::f32);
-    EXPECT_EQ(conv->get_shape(), data_batch_shape);
-
-    EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2});
-    EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
-    EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
-
-    EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
-    EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
-}
-
-TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_strided_padded)
-{
-    // Deduce type
-    Shape data_batch_shape{64, 3, 100};
-    auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});  // filters
-    auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 48}); // output delta
-    auto move_strides = Strides{2};
-    auto dilation_strides = Strides{1};
-    auto padding_below = CoordinateDiff{2};
-    auto padding_above = CoordinateDiff{3};
-    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
-                                                             param0,
-                                                             param1,
-                                                             move_strides,
-                                                             dilation_strides,
-                                                             padding_below,
-                                                             padding_above,
-                                                             Strides{1});
-    EXPECT_EQ(conv->get_element_type(), element::f32);
-    EXPECT_EQ(conv->get_shape(), data_batch_shape);
-
-    EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2});
-    EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
-    EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
-
-    EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{2});
-    EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
-}
-
-TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_strided_small_uneven)
-{
-    // Deduce type
-    Shape data_batch_shape{64, 3, 5};
-    auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2});  // filters
-    auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 2}); // output delta
-    auto move_strides = Strides{2};
-    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
-                                                             param0,
-                                                             param1,
-                                                             move_strides,
-                                                             Strides{1},
-                                                             CoordinateDiff{0},
-                                                             CoordinateDiff{0},
-                                                             Strides{1});
-    EXPECT_EQ(conv->get_element_type(), element::f32);
-    EXPECT_EQ(conv->get_shape(), data_batch_shape);
-
-    EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2});
-    EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
-    EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
-
-    EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
-    EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
-}
-
-TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_strided_small_even)
-{
-    // Deduce type
-    Shape data_batch_shape{64, 3, 6};
-    auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2});  // filters
-    auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 3}); // output delta
-    auto move_strides = Strides{2};
-    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
-                                                             param0,
-                                                             param1,
-                                                             move_strides,
-                                                             Strides{1},
-                                                             CoordinateDiff{0},
-                                                             CoordinateDiff{0},
-                                                             Strides{1});
-    EXPECT_EQ(conv->get_element_type(), element::f32);
-    EXPECT_EQ(conv->get_shape(), data_batch_shape);
-
-    EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2});
-    EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
-    EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
-
-    EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
-    EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
-}
-
-TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_window_dilated)
-{
-    // Deduce type
-    Shape data_batch_shape{64, 3, 100};
-    auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});  // filters
-    auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 82}); // output delta
-    auto move_strides = Strides{1};
-    auto dilate_strides = Strides{2};
-    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
-                                                             param0,
-                                                             param1,
-                                                             move_strides,
-                                                             dilate_strides,
-                                                             CoordinateDiff{0},
-                                                             CoordinateDiff{0},
-                                                             Strides{1});
-    EXPECT_EQ(conv->get_element_type(), element::f32);
-    EXPECT_EQ(conv->get_shape(), data_batch_shape);
-
-    EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
-    EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{2});
-    EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
-
-    EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
-    EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
-}
-
-TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_window_dilated_padded)
-{
-    // Deduce type
-    Shape data_batch_shape{64, 3, 100};
-    auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});  // filters
-    auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 87}); // output delta
-    auto move_strides = Strides{1};
-    auto dilate_strides = Strides{2};
-    auto padding_below = CoordinateDiff{2};
-    auto padding_above = CoordinateDiff{3};
-    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
-                                                             param0,
-                                                             param1,
-                                                             move_strides,
-                                                             dilate_strides,
-                                                             padding_below,
-                                                             padding_above,
-                                                             Strides{1});
-    EXPECT_EQ(conv->get_element_type(), element::f32);
-    EXPECT_EQ(conv->get_shape(), data_batch_shape);
-
-    EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
-    EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{2});
-    EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
-
-    EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{2});
-    EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
-}
-
-TEST(type_prop, conv_backprop_data_v0_1d_batch_deduce_window_dilated_data_dilated_padded)
-{
-    // Deduce type
-    Shape data_batch_shape{64, 3, 100};
-    auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});   // filters
-    auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 285}); // output delta
-    auto move_strides = Strides{1};
-    auto dilate_strides = Strides{2};
-    auto padding_below = CoordinateDiff{2};
-    auto padding_above = CoordinateDiff{3};
-    auto data_dilate_strides = Strides{3};
-    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
-                                                             param0,
-                                                             param1,
-                                                             move_strides,
-                                                             dilate_strides,
-                                                             padding_below,
-                                                             padding_above,
-                                                             data_dilate_strides);
-    EXPECT_EQ(conv->get_element_type(), element::f32);
-    EXPECT_EQ(conv->get_shape(), data_batch_shape);
-
-    EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
-    EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{2});
-    EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{3});
-
-    EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{2});
-    EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
-}
-
 // ---------------------------- v1 ----------------------------
 TEST(type_prop, convolution_backprop_data_partial_auto_padding_upper)
 {
@@ -338,7 +89,7 @@ TEST(type_prop, convolution_backprop_data_auto_pad_explicit_with_output_padding)
     auto filters = make_shared<op::Parameter>(inputs_et, filters_pshape);
     auto conv_backprop = make_shared<op::v1::ConvolutionBackpropData>(
         data, filters, strides, padding_begin, padding_end, dilations, auto_pad, output_padding);
-
+
     ASSERT_TRUE(conv_backprop->get_output_partial_shape(0).same_scheme(PartialShape{1, 6, 4, 4}));
     ASSERT_EQ(conv_backprop->get_pads_begin(), (CoordinateDiff{1, 1}));
     ASSERT_EQ(conv_backprop->get_pads_end(), (CoordinateDiff{1, 1}));
@@ -361,9 +112,16 @@ TEST(type_prop, convolution_backprop_data_auto_pad_same_with_output_padding_and_
     auto data = make_shared<op::Parameter>(inputs_et, data_pshape);
     auto filters = make_shared<op::Parameter>(inputs_et, filters_pshape);
     auto output_shape = op::Constant::create(element::i64, Shape{2}, {3, 3});
-    auto conv_backprop = make_shared<op::v1::ConvolutionBackpropData>(
-        data, filters, output_shape, strides, padding_begin, padding_end, dilations, auto_pad, output_padding);
-
+    auto conv_backprop = make_shared<op::v1::ConvolutionBackpropData>(data,
+                                                                      filters,
+                                                                      output_shape,
+                                                                      strides,
+                                                                      padding_begin,
+                                                                      padding_end,
+                                                                      dilations,
+                                                                      auto_pad,
+                                                                      output_padding);
+
     ASSERT_TRUE(conv_backprop->get_output_partial_shape(0).same_scheme(PartialShape{1, 6, 3, 3}));
     ASSERT_EQ(conv_backprop->get_pads_begin(), (CoordinateDiff{1, 1}));
     ASSERT_EQ(conv_backprop->get_pads_end(), (CoordinateDiff{2, 2}));
@@ -807,13 +565,15 @@ TEST(type_prop, convolution_backprop_data_invalid_et_inputs)
         // output shape input element type must be of integer type
         FAIL() << "Invalid element type of output_shape input not detected";
     }
-    catch(const NodeValidationFailure& error)
+    catch (const NodeValidationFailure& error)
     {
-        EXPECT_HAS_SUBSTRING(error.what(), "Element type for output shape should be of integer type");
+        EXPECT_HAS_SUBSTRING(error.what(),
+                             "Element type for output shape should be of integer type");
     }
     catch (...)
     {
-        FAIL() << "Element type of output_shape input validation check failed for unexpected reason";
+        FAIL()
+            << "Element type of output_shape input validation check failed for unexpected reason";
     }
 }
 
@@ -899,9 +659,8 @@ TEST(type_prop, convolution_backprop_data_invalid_input_ranks)
     }
     catch (const NodeValidationFailure& error)
     {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            std::string("Spatial shape of output input must be of rank 1"));
+        EXPECT_HAS_SUBSTRING(error.what(),
+                             std::string("Spatial shape of output input must be of rank 1"));
     }
     catch (...)
     {
@@ -930,7 +689,9 @@ TEST(type_prop, convolution_backprop_data_invalid_input_channel_dims)
     }
     catch (const NodeValidationFailure& error)
     {
-        EXPECT_HAS_SUBSTRING(error.what(), std::string("Input channels dimension of data and filters inputs must be equal"));
+        EXPECT_HAS_SUBSTRING(
+            error.what(),
+            std::string("Input channels dimension of data and filters inputs must be equal"));
     }
     catch (...)
     {
@@ -1159,4 +920,4 @@ TEST(type_prop, convolution_backprop_data_invalid_conv_param_spatial_dims)
     {
         FAIL() << "Output padding spatial dimensions validation check failed for unexpected reason";
     }
-}
\ No newline at end of file
+}
diff --git a/ngraph/test/type_prop/cosh.cpp b/ngraph/test/type_prop/cosh.cpp
new file mode 100644
index 00000000000..6db9dababb3
--- /dev/null
+++ b/ngraph/test/type_prop/cosh.cpp
@@ -0,0 +1,9 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "unary_ops.hpp"
+
+using Type = ::testing::Types<ngraph::op::Cosh>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_cosh, UnaryOperator, Type);
diff --git a/ngraph/test/util/engine/ie_engines.cpp b/ngraph/test/util/engine/ie_engines.cpp
index 8e1a8f4e75e..ce5e4247b8f 100644
--- a/ngraph/test/util/engine/ie_engines.cpp
+++ b/ngraph/test/util/engine/ie_engines.cpp
@@ -6,7 +6,6 @@
 #include "ngraph/opsets/opset.hpp"
 #include "ngraph/pass/manager.hpp"
-#include "pass/opset1_upgrade.hpp"
 #include "shared_utils.hpp"
 
 using namespace ngraph;
@@ -178,7 +177,6 @@ namespace
 test::IE_Engine::IE_Engine(const std::shared_ptr<Function> function, const char* device)
     : m_function{function}
 {
-    upgrade_and_validate_function(m_function);
     const auto cnn_network = InferenceEngine::CNNNetwork(m_function);
     m_network_inputs = cnn_network.getInputsInfo();
     m_network_outputs = cnn_network.getOutputsInfo();
@@ -200,7 +198,7 @@ void test::IE_Engine::infer()
     if (m_network_inputs.size() != m_allocated_inputs)
     {
         IE_THROW() << "The tested graph has " << m_network_inputs.size() << " inputs, but "
-            << m_allocated_inputs << " were passed.";
+                   << m_allocated_inputs << " were passed.";
     }
     else
     {
@@ -294,26 +292,6 @@ testing::AssertionResult
     return comparison_result;
 }
 
-std::shared_ptr<Function>
-    test::IE_Engine::upgrade_and_validate_function(const std::shared_ptr<Function> function) const
-{
-    pass::Manager passes;
-    passes.register_pass<pass::Opset1Upgrade>();
-    passes.run_passes(function);
-
-    static std::set<NodeTypeInfo> ie_ops = get_ie_ops();
-    for (const auto& node : function->get_ops())
-    {
-        if (ie_ops.find(node->get_type_info()) == ie_ops.end())
-        {
-            IE_THROW() << "Unsupported operator detected in the graph: "
-                       << node->get_type_info().name;
-        }
-    }
-
-    return function;
-}
-
 std::set<NodeTypeInfo> test::IE_Engine::get_ie_ops() const
 {
     std::set<NodeTypeInfo> ie_ops = get_opset1().get_type_info_set();
@@ -341,8 +319,8 @@ void test::IE_Engine::reset()
 namespace InferenceEngine
 {
-// Without this section the linker is not able to find destructors for missing TBlob specializations
-// which are instantiated in the unit tests that use TestCase and this engine
+    // Without this section the linker is not able to find destructors for missing TBlob
+    // specializations which are instantiated in the unit tests that use TestCase and this engine
     template <typename T, typename U>
     TBlob<T, U>::~TBlob()
     {
diff --git a/ngraph/test/util/engine/ie_engines.hpp b/ngraph/test/util/engine/ie_engines.hpp
index 4bcc5de195e..5e219c6351a 100644
--- a/ngraph/test/util/engine/ie_engines.hpp
+++ b/ngraph/test/util/engine/ie_engines.hpp
@@ -113,11 +113,6 @@ namespace ngraph
             unsigned int m_allocated_inputs = 0;
             unsigned int m_allocated_expected_outputs = 0;
 
-            /// Upgrades functions containing legacy opset0 to opset1
-            /// and checks if the graph can be executed
-            std::shared_ptr<Function>
-                upgrade_and_validate_function(const std::shared_ptr<Function> function) const;
-
             /// Retrieves a set of all ops IE can execute
             std::set<NodeTypeInfo> get_ie_ops() const;
 
@@ -160,5 +155,5 @@ namespace ngraph
         {
             static constexpr bool value = true;
         };
-    }
-}
+    } // namespace test
+} // namespace ngraph
diff --git a/ngraph/test/visitors/op/asinh.cpp b/ngraph/test/visitors/op/asinh.cpp
new file mode 100644
index 00000000000..6e18ede91c8
--- /dev/null
+++ b/ngraph/test/visitors/op/asinh.cpp
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "unary_ops.hpp"
+using Type = ::testing::Types<UnaryOperatorType<ngraph::op::v3::Asinh, ngraph::element::f32>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_attribute,
+                               UnaryOperatorVisitor,
+                               Type,
+                               UnaryOperatorTypeName);
\ No newline at end of file
diff --git a/ngraph/test/visitors/op/batch_to_space.cpp b/ngraph/test/visitors/op/batch_to_space.cpp
index 7e200bacafc..60f653283bf 100644
--- a/ngraph/test/visitors/op/batch_to_space.cpp
+++ b/ngraph/test/visitors/op/batch_to_space.cpp
@@ -16,10 +16,10 @@ using ngraph::test::NodeBuilder;
 TEST(attributes, batch_to_space_op)
 {
     NodeBuilder::get_ops().register_factory<opset2::BatchToSpace>();
-    auto data = make_shared<op::Parameter>(element::f32, Shape{128, 4, 2, 2});
-    auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 2, 2, 2});
-    auto crops_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 2, 0, 1});
-    auto crops_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 0, 1, 0});
+    auto data = make_shared<op::Parameter>(element::f32, Shape{2, 128});
+    auto block_shape = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
+    auto crops_begin = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 2});
+    auto crops_end = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 0});
     auto batch2space = make_shared<opset2::BatchToSpace>(data, block_shape, crops_begin, crops_end);
 
     NodeBuilder builder(batch2space);
diff --git a/ngraph/test/visitors/op/cosh.cpp b/ngraph/test/visitors/op/cosh.cpp
new file mode 100644
index 00000000000..d44b7701eb3
--- /dev/null
+++ b/ngraph/test/visitors/op/cosh.cpp
@@ -0,0 +1,11 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "unary_ops.hpp"
+using Type = ::testing::Types<UnaryOperatorType<ngraph::op::v0::Cosh, ngraph::element::f32>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_attribute,
+                               UnaryOperatorVisitor,
+                               Type,
+                               UnaryOperatorTypeName);
diff --git a/scripts/demo/car_1.bmp b/scripts/demo/car_1.bmp
index 111cee4ff86..ffdb8bf2ec3 100644
Binary files a/scripts/demo/car_1.bmp and b/scripts/demo/car_1.bmp differ
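
A note on the new `cosh.cpp` and `asinh.cpp` test files added in this patch: they contain no test bodies of their own, only one-line instantiations of a shared typed-test harness declared in `unary_ops.hpp`. For readers unfamiliar with that GoogleTest pattern, below is a minimal sketch of how such a harness could be wired up. The suite name `UnaryOperator` matches the instantiations above, but the harness body shown here is an illustrative assumption, not the actual contents of `unary_ops.hpp`.

// Illustrative sketch only: an assumed shape for a unary_ops.hpp-style harness.
// The gtest macros and ngraph calls are real APIs; the test body is hypothetical.
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"

template <typename T>
class UnaryOperator : public ::testing::Test
{
};

TYPED_TEST_SUITE_P(UnaryOperator);

// A unary op is expected to propagate its input element type and shape unchanged.
TYPED_TEST_P(UnaryOperator, static_shape_propagation)
{
    auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32,
                                                         ngraph::Shape{2, 3});
    auto op = std::make_shared<TypeParam>(param);

    EXPECT_EQ(op->get_element_type(), ngraph::element::f32);
    EXPECT_EQ(op->get_shape(), (ngraph::Shape{2, 3}));
}

REGISTER_TYPED_TEST_SUITE_P(UnaryOperator, static_shape_propagation);

// Covering a new operator is then a one-liner, exactly as in the cosh.cpp diff above:
//     using Type = ::testing::Types<ngraph::op::Cosh>;
//     INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_cosh, UnaryOperator, Type);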