diff --git a/.ci/openvino-onnx/Jenkinsfile b/.ci/openvino-onnx/Jenkinsfile index ff2e8a45125..2849579dcdb 100644 --- a/.ci/openvino-onnx/Jenkinsfile +++ b/.ci/openvino-onnx/Jenkinsfile @@ -113,8 +113,8 @@ def buildDockerImage(Map configuration, String workdir) { --build-arg BUILD_TYPE=${configuration.build_type} \ --build-arg PROTOBUF_LITE=${configuration.protobuf_lite} \ --file=.ci/openvino-onnx/Dockerfile \ - --build-arg http_proxy=http://proxy-ir.intel.com:911/ \ - --build-arg https_proxy=http://proxy-ir.intel.com:911/ . + --build-arg http_proxy=${HTTP_PROXY} \ + --build-arg https_proxy=${HTTPS_PROXY} . """ } diff --git a/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md b/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md index d5383275ad6..fdda8f40a0b 100644 --- a/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md +++ b/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md @@ -10,8 +10,11 @@ Standard Caffe\* layers: | BN | No | | BatchNorm | No | | Bias | No | +| Binarization (Intel experimental) | No | | Concat | No | | Convolution | No | +| ConvolutionBinary | No | +| Crop | No | | Deconvolution | No | | DetectionOutput | No | | Dropout | Not needed for inference | @@ -21,14 +24,25 @@ Standard Caffe\* layers: | InnerProduct | No | | Input | No | | LRN | No | +| Normalize | No | +| Python | Supported only for the Python Proposal operation | | Permute | No | | Pooling | No | | Power | No | +| PReLU | No | +| PriorBox | No | +| PriorBoxClustered | No | +| Proposal | No | +| PSROIPooling | No | | ROIPooling | No | +| RegionYolo | No | +| ReorgYolo | No | | ReLU | No | +| Resample | No | | Reshape | No | | Scale | No | | ShuffleChannel | No | +| Sigmoid | No | | Slice | No | | Softmax | No | | Tile | No | @@ -41,31 +55,44 @@ Standard MXNet\* symbols: | Symbol Name in MXNet\*| Limitations| | :----------| :----------| | _Plus | No | +| _contrib_box_nms | No | +| _contrib_DeformableConvolution | No | +| _contrib_DeformablePSROIPooling | No | | _contrib_MultiBoxDetection | "force_suppress" = 1 is not supported, non-default variances are not supported | | _contrib_MultiBoxPrior | No | | _contrib_Proposal | No | | _copy | Not needed for inference | +| _div_scalar | No | +| _greater_scalar | No | | _minus_scalar | No | | _mul_scalar | No | +| _plus_scalar | No | +| _rnn_param_concat | No | | _arange | No | | _contrib_AdaptiveAvgPooling2D | Converted to the Average Pooling with fixed paddings | | _maximum | No | | _minimum | No | | _np_roll | No | +| _zeros | No | | add_n | No | | arccosh | No | | arcsinh | No | | arctanh | No | | broadcast_add | No | +| broadcast_div | No | | broadcast_mul | No | +| broadcast_sub | No | +| BlockGrad | No | | cumsum | No | | div_scalar | No | | elementwise_sub | No | | elemwise_add | No | | elemwise_mul | No | +| elemwise_sub | No | | exp | No | | expand_dims | No | | greater_scalar | No | +| max | No | | minus_scalar | No | | null | Not needed for inference | | repeat | No | @@ -74,9 +101,11 @@ Standard MXNet\* symbols: | round | No | | sigmoid | No | | slice | No | +| SliceChannel | No | | slice_axis | No | | slice_channel | No | | slice_like | No | +| softmax | No | | stack | No | | swapaxis | No | | tile | No | @@ -100,6 +129,7 @@ Standard MXNet\* symbols: | L2Normalization | only 4D input is supported | | LRN | No | | LeakyReLU | supported "act_type" = "prelu", "elu", "leaky", "gelu" | +| ones_like | No | | Pad | No | | Pooling | No | | ROIPooling | No | @@ -113,6 +143,7 @@ Standard MXNet\* symbols: | Tile | No | | UpSampling | No | | 
Where | No | +| zeros_like | No | ## TensorFlow\* Supported Operations @@ -123,18 +154,27 @@ Standard TensorFlow\* operations: | Operation Name in TensorFlow\* | Limitations| | :----------| :----------| +| Abs | No | | Acosh | No | | Add | No | | AddV2 | No | | AddN | No | +| All | No | | ArgMax | No | | ArgMin | No | | Asinh | No | +| Assert | Not needed for inference | +| Assign | Not needed for inference | +| AssignSub | Not needed for inference | | Atanh | No | | AvgPool | No | | AvgPoolV2 | Supported only for constant-foldable kernel_size and strides inputs | +| AvgPool3D | No | +| BatchMatMul | No | +| BatchMatMulV2 | No | | BatchToSpaceND | No | | BiasAdd | No | +| BlockLSTM | No | | Bucketize | CPU only | | BroadcastTo | No | | Cast | No | @@ -144,14 +184,21 @@ Standard TensorFlow\* operations: | Const | No | | Conv2D | No | | Conv2DBackpropInput | No | +| Conv3D | No | +| Conv3DBackpropInputV2 | No | | Cos | No | | Cosh | No | | CropAndResize | "method" = "bilinear" only | +| CTCGreedyDecoder | Supported only with decoded indices output in a dense format | +| CTCLoss | Supported only with decoded indices input in a dense format | | CumSum | No | | DepthToSpace| No | | DepthwiseConv2dNative| No | +| Einsum | Supported only with equation that does not contain repeated labels within a subscript | +| Elu | No | | Enter | Supported only when it is fused to the TensorIterator layer | | Equal | No | +| Erf | No | | Exit | Supported only when it is fused to the TensorIterator layer | | Exp | No | | ExpandDims | No | @@ -163,34 +210,43 @@ Standard TensorFlow\* operations: | FFT | Supported only when it is part of a sub-graph of the special form | | FFT2D | Supported only when it is part of a sub-graph of the special form | | FFT3D | Supported only when it is part of a sub-graph of the special form | +| FIFOQueueV2 | Supported only when it is part of a sub-graph of the special form | | Fill | No | | Floor | No | | FloorDiv | No | +| FloorMod | No | | FusedBatchNorm | No | | FusedBatchNormV2 | No | | FusedBatchNormV3 | No | | Gather | No | | GatherNd | No | +| GatherTree | No | | GatherV2 | No | | Greater | No | | GreaterEqual | No | | Identity | Not needed for shape inference | +| IdentityN | No | | IFFT | Supported only when it is part of a sub-graph of the special form | | IFFT2D | Supported only when it is part of a sub-graph of the special form | | IFFT3D | Supported only when it is part of a sub-graph of the special form | +| IteratorGetNext | Supported only when it is part of a sub-graph of the special form | | LRN | No | +| LeakyRelu | No | | Less | No | +| LessEqual | No | | Log | No | | Log1p | No | | LogicalAnd | No | | LogicalOr | No | | LogicalNot | No | | LogSoftmax | No | +| LookupTableInsertV2 | Supported only when it is part of a sub-graph of the special form | | LoopCond | Supported only when it is fused to the TensorIterator layer | | MatMul | No | | Max | No | | MaxPool | No | | MaxPoolV2 | Supported only for constant-foldable kernel_size and strides inputs | +| MaxPool3D | No | | Maximum | No | | Mean | No | | Merge | Supported only when it is fused to the TensorIterator layer | @@ -200,9 +256,11 @@ Standard TensorFlow\* operations: | Mul | No | | Neg | No | | NextIteration | Supported only when it is fused to the TensorIterator layer | +| NonMaxSuppressionV2 | No | | NonMaxSuppressionV3 | No | | NonMaxSuppressionV4 | No | | NonMaxSuppressionV5 | No | +| NotEqual | No | | NoOp | No | | OneHot | No | | Pack | No | @@ -211,9 +269,11 @@ Standard TensorFlow\* operations: | 
Placeholder | No | | PlaceholderWithDefault | No | | Prod | No | +| QueueDequeueUpToV2 | Supported only when it is part of a sub-graph of the special form | | Range | No | | Rank | No | | RealDiv | No | +| Reciprocal | No | | Relu | No | | Relu6 | No | | Reshape | No | @@ -221,9 +281,12 @@ Standard TensorFlow\* operations: | ResizeNearestNeighbor | No | | ResourceGather| No | | ReverseSequence | No | +| ReverseV2 | Supported only when can be converted to the ReverseSequence operation | | Roll | No | | Round | No | +| Pow | No | | Rsqrt | No | +| Select | No | | Shape | No | | Sigmoid | No | | Sin | No | @@ -234,6 +297,10 @@ Standard TensorFlow\* operations: | Softplus | No | | Softsign | No | | SpaceToBatchND | No | +| SpaceToDepth | No | +| SparseFillEmptyRows | Supported only when it is part of a sub-graph of the special form | +| SparseReshape | Supported only when it is part of a sub-graph of the special form | +| SparseSegmentSum | Supported only when it is part of a sub-graph of the special form | | SparseToDense | CPU only | | Split | No | | SplitV | No | @@ -242,11 +309,13 @@ Standard TensorFlow\* operations: | SquaredDifference | No | | Square| No | | Squeeze | The case when squeeze axis is not specified is not supported | +| StatelessWhile | No | | StopGradient | Not needed for shape inference | | StridedSlice | Supported only for constant-foldable begin, end, and strides inputs | | Sub | No | | Sum | No | | Swish | No | +| swish_f32 | No | | Switch | Control flow propagation | | Tan | No | | Tanh | No | @@ -260,7 +329,9 @@ Standard TensorFlow\* operations: | TopkV2 | No | | Transpose | No | | Unpack | No | -| Where | No | +| Variable | No | +| VariableV2 | No | +| Where | Supported only when it is part of a sub-graph of the special form | | ZerosLike | No | @@ -356,13 +427,15 @@ Standard Kaldi\* Layers: | :----------| :----------| | addshift | No | | affinecomponent | No | +| affinecomponentpreconditionedonline | No | | affinetransform | No | +| backproptruncationcomponent | No | +| batchnormcomponent | No | | clipgradientcomponent | Not needed for inference | | concat | No | | convolutional1dcomponent | No | | convolutionalcomponent | No | | copy | No | -| Crop | No | | elementwiseproductcomponent | No | | fixedaffinecomponent | No | | fixedbiascomponent | No | @@ -383,9 +456,9 @@ Standard Kaldi\* Layers: | rectifiedlinearcomponent | No | | rescale | No | | sigmoid | No | +| sigmoidcomponent | No | | softmax | No | | softmaxComponent | No | -| softsign | No | | specaugmenttimemaskcomponent | Not needed for inference | | splicecomponent | No | | tanhcomponent | No | @@ -404,12 +477,14 @@ Standard ONNX\* operators: | Acosh | No | | Add | No | | Affine | No | +| And | No | | ArgMax | No | | ArgMin | No | | Asin | No | | Asinh | No | | Atan | No | | Atanh | No | +| ATen | Supported only for the 'embedding_bag' operator | | AveragePool | No | | BatchMatMul | No | | BatchNormalization | No | @@ -426,6 +501,7 @@ Standard ONNX\* operators: | Cosh | No | | Crop | No | | CumSum | No | +| DepthToSpace | No | | DequantizeLinear | No | | DetectionOutput (Intel experimental) | No | | Div | No | @@ -433,7 +509,14 @@ Standard ONNX\* operators: | Elu | No | | Equal | No | | Erf | No | +| Exp | No | | Expand | No | +| ExperimentalDetectronDetectionOutput (Intel experimental) | No | +| ExperimentalDetectronGenerateProposalsSingleImage (Intel experimental) | No | +| ExperimentalDetectronGroupNorm (Intel experimental) | No | +| ExperimentalDetectronPriorGridGenerator (Intel experimental) | No | +| 
ExperimentalDetectronROIFeatureExtractor (Intel experimental) | No | +| ExperimentalDetectronTopKROIs (Intel experimental) | No | | FakeQuantize (Intel experimental) | No | | Fill | No | | Flatten | No | @@ -451,6 +534,7 @@ Standard ONNX\* operators: | HardSigmoid | No | | Identity | Not needed for inference | | ImageScaler | No | +| InstanceNormalization | No | | LRN | No | | LSTM | Peepholes are not supported | | LeakyRelu | No | @@ -461,7 +545,9 @@ Standard ONNX\* operators: | LogicalOr | No | | LogSoftmax | No | | Loop | No | +| LpNormalization | No | | MatMul | No | +| Max | No | | MaxPool | No | | MeanVarianceNormalization | Reduction over the batch dimension is not supported, reduction over all dimensions except batch and channel ones is obligatory | | Min | No | @@ -475,6 +561,7 @@ Standard ONNX\* operators: | Pad | No | | Pow | No | | PriorBox (Intel experimental) | No | +| PriorBoxClustered | No | | QuantizeLinear | No | | RNN | No | | ROIAlign | No | @@ -506,6 +593,7 @@ Standard ONNX\* operators: | Softplus | No | | Softsign | No | | SpaceToDepth | No | +| Split | No | | Sqrt | No | | Squeeze | The case when squeeze axis is not specified is not supported | | Sub | No | diff --git a/docs/ops/movement/ReverseSequence_1.md b/docs/ops/movement/ReverseSequence_1.md index e5f2c3ff632..63c4fa10e5c 100644 --- a/docs/ops/movement/ReverseSequence_1.md +++ b/docs/ops/movement/ReverseSequence_1.md @@ -2,35 +2,45 @@ **Versioned name**: *ReverseSequence-1* -**Category**: data movement operation +**Category**: *Data movement* **Short description**: *ReverseSequence* reverses variable length slices of data. -**Detailed description**: *ReverseSequence* slices input along the dimension specified in the *batch_axis*, and for each slice *i*, reverses the first *lengths[i]* (the second input) elements along the dimension specified in the *seq_axis*. +**Detailed description** + +*ReverseSequence* slices a given input tensor `data` along the dimension specified in the *batch_axis* attribute. For each slice `i`, it reverses the first `seq_lengths[i]` elements along the dimension specified in the *seq_axis* attribute. **Attributes** * *batch_axis* - * **Description**: *batch_axis* is the index of the batch dimension. - * **Range of values**: an integer. Can be negative. + * **Description**: *batch_axis* is the index of the batch dimension along which the `data` input tensor is sliced. + * **Range of values**: an integer within the range `[-rank(data), rank(data) - 1]` * **Type**: `int` - * **Default value**: 0 + * **Default value**: `0` * **Required**: *no* * *seq_axis* - * **Description**: *seq_axis* is the index of the sequence dimension. - * **Range of values**: an integer. Can be negative. + * **Description**: *seq_axis* is the index of the sequence dimension along which elements of the `data` input tensor are reversed. + * **Range of values**: an integer within the range `[-rank(data), rank(data) - 1]` * **Type**: `int` - * **Default value**: 1 + * **Default value**: `1` * **Required**: *no* -**Inputs**: +**Inputs** -* **1**: tensor with input data to reverse. **Required.** +* **1**: `data` - Input data to reverse. A tensor of type *T1* and rank greater than or equal to 2. **Required.** +* **2**: `seq_lengths` - Sequence lengths to reverse in the input tensor `data`. A 1D tensor comprising `data_shape[batch_axis]` elements of type *T2*. All element values must be integer values within the range `[1, data_shape[seq_axis]]`. A value of `1` means no elements are reversed.
**Required.** -* **2**: 1D tensor populated with integers with sequence lengths in the 1st input tensor. **Required.** +**Outputs** + +* **1**: The result of slicing and reversing the `data` input tensor. A tensor of type *T1* with the same shape as the `data` input tensor. + +**Types** + +* *T1*: any supported type. +* *T2*: any supported numerical type. **Example** @@ -38,19 +48,19 @@ - - 3 - 10 + + 4 + 10 100 200 - 3 + 4 - 3 + 4 10 100 200
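For quick orientation, the semantics the updated spec describes can be condensed into a few lines of NumPy. This is an illustrative sketch only; the helper name is hypothetical and is not part of the OpenVINO API:

```python
import numpy as np

def reverse_sequence(data, seq_lengths, batch_axis=0, seq_axis=1):
    """Hypothetical helper mirroring the spec: for each batch slice i,
    reverse its first seq_lengths[i] elements along seq_axis."""
    # Bring batch_axis/seq_axis to the front so slices are easy to address.
    out = np.moveaxis(np.array(data), (batch_axis, seq_axis), (0, 1))
    for i, length in enumerate(seq_lengths):
        out[i, :length] = out[i, :length][::-1].copy()
    return np.moveaxis(out, (0, 1), (batch_axis, seq_axis))

x = np.arange(8).reshape(2, 4)       # batch_axis = 0, seq_axis = 1
print(reverse_sequence(x, [2, 3]))
# [[1 0 2 3]
#  [6 5 4 7]]
```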
diff --git a/docs/template_plugin/tests/functional/op_reference/cum_sum.cpp b/docs/template_plugin/tests/functional/op_reference/cum_sum.cpp new file mode 100644 index 00000000000..1539a138c3f --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/cum_sum.cpp @@ -0,0 +1,193 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include +#include + +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ngraph; +using namespace InferenceEngine; + +namespace { +struct CumSumParams { + // Custom axis input and attributes + template + CumSumParams(const PartialShape& shape, const element::Type& iType, const std::vector& iValues, const std::vector& oValues, const bool exclusive, + const bool reverse, const element::Type& axisType, AT axisVal, const PartialShape& axisShape) + : exclusive(exclusive), + reverse(reverse), + axisValue(axisVal), + axisShape(axisShape), + inShape(shape), + axisType(axisType), + inType(iType), + outType(iType), + axisData(CreateBlob(axisType, std::vector {axisVal})), + inputData(CreateBlob(iType, iValues)), + refData(CreateBlob(iType, oValues)), + testDefaults(false) {} + + // Default axis input and attributes + template + CumSumParams(const PartialShape& shape, const element::Type& iType, const std::vector& iValues, const std::vector& oValues) + : inShape(shape), + axisType(element::i32), + inType(iType), + outType(iType), + inputData(CreateBlob(iType, iValues)), + refData(CreateBlob(iType, oValues)), + testDefaults(true) {} + + bool exclusive = false; + bool reverse = false; + int64_t axisValue = 0; + + PartialShape axisShape; + PartialShape inShape; + element::Type axisType; + element::Type inType; + element::Type outType; + Blob::Ptr axisData; + Blob::Ptr inputData; + Blob::Ptr refData; + + bool testDefaults = false; +}; + +class ReferenceCumSumLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + if (params.testDefaults) { + function = CreateFunction(params.inShape, params.inType); + inputData = {params.inputData}; + refOutData = {params.refData}; + } else { + function = CreateFunction(params.inShape, params.inType, params.axisShape, params.axisType, params.exclusive, params.reverse); + inputData = {params.inputData, params.axisData}; + refOutData = {params.refData}; + } + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "testDefaults=" << param.testDefaults << "_"; + result << "axisValue=" << param.axisValue << "_"; + result << "exclusive=" << param.exclusive << "_"; + result << "reverse=" << param.reverse << "_"; + result << "inShape=" << param.inShape << "_"; + result << "iType=" << param.inType << "_"; + result << "axisType=" << param.axisType << "_"; + result << "oType=" << param.outType; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& data_shape, const element::Type& data_type, const PartialShape& axis_shape, + const element::Type& axis_type, const bool exclusive, const bool reverse) { + const auto data_param = std::make_shared(data_type, data_shape); + const auto axis_param = std::make_shared(axis_type, axis_shape); + const auto cum_sum = std::make_shared(data_param, axis_param, exclusive, reverse); + return std::make_shared(NodeVector {cum_sum}, ParameterVector {data_param, axis_param}); + } + + static std::shared_ptr CreateFunction(const PartialShape& data_shape, const element::Type& data_type) { + const auto data_param = std::make_shared(data_type, data_shape); + const auto cum_sum = std::make_shared(data_param); + return std::make_shared(NodeVector {cum_sum}, ParameterVector {data_param}); + } +}; + +TEST_P(ReferenceCumSumLayerTest, CompareWithHardcodedRefs) { + Exec(); +} + +template +std::vector generateCumSumParams(const element::Type& type) { + using T = typename element_type_traits::value_type; + std::vector opParams { + // Default axis input and attributes + CumSumParams(PartialShape {1}, type, std::vector {3}, std::vector {3}), + CumSumParams(PartialShape {6}, type, std::vector {1, 2, 3, 4, 5, 6}, std::vector {1, 3, 6, 10, 15, 21}), + CumSumParams(PartialShape {2, 4}, type, std::vector {0, 1, 2, 3, 4, 5, 6, 7}, std::vector {0, 1, 2, 3, 4, 6, 8, 10}), + // Custom axis input and attributes + CumSumParams(PartialShape {6}, type, std::vector {1, 2, 3, 4, 5, 6}, std::vector {1, 3, 6, 10, 15, 21}, false, false, element::i32, int32_t(0), + PartialShape {}), // axis i32 + CumSumParams(PartialShape {6}, type, std::vector {1, 2, 3, 4, 5, 6}, std::vector {1, 3, 6, 10, 15, 21}, false, false, element::i64, int64_t(0), + PartialShape {}), // axis i64 + CumSumParams(PartialShape {6}, type, std::vector {1, 2, 3, 4, 5, 6}, std::vector {21, 20, 18, 15, 11, 6}, false, true, element::i64, int64_t(0), + PartialShape {}), + CumSumParams(PartialShape {6}, type, std::vector {1, 2, 3, 4, 5, 6}, std::vector {0, 1, 3, 6, 10, 15}, true, false, element::i64, int64_t(0), + PartialShape {}), + CumSumParams(PartialShape {6}, type, std::vector {1, 2, 3, 4, 5, 6}, std::vector {20, 18, 15, 11, 6, 0}, true, true, element::i64, int64_t(0), + PartialShape {}), + + CumSumParams(PartialShape {2, 4}, type, std::vector {0, 1, 2, 3, 4, 5, 6, 7}, std::vector {0, 1, 2, 3, 4, 6, 8, 10}, false, false, element::i32, + int32_t(0), PartialShape {}), + CumSumParams(PartialShape {2, 4}, type, std::vector {0, 1, 2, 3, 4, 5, 6, 7}, std::vector {4, 6, 8, 10, 4, 5, 6, 7}, false, true, element::i32, + int32_t(0), PartialShape {}), + CumSumParams(PartialShape {2, 4}, type, std::vector {0, 1, 2, 3, 4, 5, 6, 7}, std::vector {0, 0, 0, 0, 0, 1, 2, 3}, true, false, element::i32, + int32_t(0), PartialShape {}), + CumSumParams(PartialShape {2, 4}, type, std::vector {0, 1, 2, 3, 4, 5, 6, 7}, std::vector {4, 5, 6, 7, 0, 0, 0, 0}, true, true, element::i32, + int32_t(0), PartialShape {}), + CumSumParams(PartialShape {2, 4}, type, std::vector {0, 1, 2, 3, 4, 5, 6, 7}, std::vector {0, 1, 3, 6, 4, 9, 15, 22}, false, false, element::i32, + int32_t(1), PartialShape {}), + CumSumParams(PartialShape {2, 4}, type, std::vector {0, 1, 2, 3, 4, 5, 6, 7}, std::vector {0, 0, 1, 3, 0, 4, 9, 15}, true, false, element::i32, + int32_t(1), PartialShape {}), + CumSumParams(PartialShape {2, 4}, type, std::vector {0, 1, 2, 3, 4, 5, 6, 7}, std::vector {6, 6, 5, 3, 22, 18, 13, 7}, false, true, element::i32, + int32_t(1), PartialShape {}), + CumSumParams(PartialShape {2, 4}, type, std::vector {0, 1, 2, 3, 4, 5, 6, 7}, std::vector
{6, 5, 3, 0, 18, 13, 7, 0}, true, true, element::i32, + int32_t(1), PartialShape {}), + + CumSumParams(PartialShape {3, 2, 4}, type, + std::vector {0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23}, + std::vector {0, 1, 2, 3, 4, 5, 6, 7, + 8, 10, 12, 14, 16, 18, 20, 22, + 24, 27, 30, 33, 36, 39, 42, 45}, + false, false, element::i32, int32_t(0), PartialShape {}), + CumSumParams(PartialShape {3, 2, 4}, type, + std::vector {0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23}, + std::vector {0, 1, 2, 3, 4, 6, 8, 10, + 8, 9, 10, 11, 20, 22, 24, 26, + 16, 17, 18, 19, 36, 38, 40, 42}, + false, false, element::i32, int32_t(1), PartialShape {}), + CumSumParams(PartialShape {3, 2, 4}, type, + std::vector {0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23}, + std::vector {0, 1, 3, 6, 4, 9, 15, 22, + 8, 17, 27, 38, 12, 25, 39, 54, + 16, 33, 51, 70, 20, 41, 63, 86}, + false, false, element::i32, int32_t(2), PartialShape {}), + }; + return opParams; +} + +std::vector generateCumSumCombinedParams() { + const std::vector> opTypeParams { + generateCumSumParams(element::bf16), generateCumSumParams(element::f16), + generateCumSumParams(element::f32), generateCumSumParams(element::i32), + generateCumSumParams(element::i64), generateCumSumParams(element::u32), + generateCumSumParams(element::i8)}; + std::vector combinedParams; + std::for_each(opTypeParams.begin(), opTypeParams.end(), [&](std::vector params) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + }); + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_CumSum_With_Hardcoded_Refs, ReferenceCumSumLayerTest, ::testing::ValuesIn(generateCumSumCombinedParams()), + ReferenceCumSumLayerTest::getTestCaseName); +} // namespace diff --git a/inference-engine/src/low_precision_transformations/include/low_precision/concat.hpp b/inference-engine/src/low_precision_transformations/include/low_precision/concat.hpp index db16f572224..65cb9694eb8 100644 --- a/inference-engine/src/low_precision_transformations/include/low_precision/concat.hpp +++ b/inference-engine/src/low_precision_transformations/include/low_precision/concat.hpp @@ -26,6 +26,7 @@ public: bool transform(TransformationContext& context, ngraph::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + static bool isQuantizedStatic(const std::shared_ptr& layer) noexcept; protected: static bool isHandled( diff --git a/inference-engine/src/low_precision_transformations/src/concat.cpp b/inference-engine/src/low_precision_transformations/src/concat.cpp index 5c0da831b8e..b17846446a1 100644 --- a/inference-engine/src/low_precision_transformations/src/concat.cpp +++ b/inference-engine/src/low_precision_transformations/src/concat.cpp @@ -297,6 +297,22 @@ bool ConcatTransformation::isHandled(const TransformationContext& context, const return false; } +bool ConcatTransformation::isQuantizedStatic(const std::shared_ptr& layer) noexcept { + const auto concat = as_type_ptr(layer); + if (concat == nullptr) { + return false; + } + + const auto axis = concat->get_axis(); + const auto outputRank = concat->get_output_partial_shape(0).rank(); + if (axis < 0 && outputRank.is_dynamic()) { + return false; + } + + const size_t normalizedAxis = ngraph::normalize_axis(concat->get_friendly_name(), axis, outputRank); + return 
normalizedAxis == 1ul; +} + } // namespace low_precision } // namespace pass } // namespace ngraph diff --git a/inference-engine/src/low_precision_transformations/src/markup_can_be_quantized.cpp b/inference-engine/src/low_precision_transformations/src/markup_can_be_quantized.cpp index 3117efc2deb..10553e07fd7 100644 --- a/inference-engine/src/low_precision_transformations/src/markup_can_be_quantized.cpp +++ b/inference-engine/src/low_precision_transformations/src/markup_can_be_quantized.cpp @@ -7,6 +7,7 @@ #include #include +#include "low_precision/concat.hpp" #include "low_precision/convolution.hpp" #include "low_precision/convolution_backprop_data.hpp" #include "low_precision/group_convolution.hpp" @@ -54,6 +55,12 @@ bool ngraph::pass::low_precision::MarkupCanBeQuantized::run_on_function(std::sha } continue; } + if (const auto concat = std::dynamic_pointer_cast(node)) { + if (!ConcatTransformation::isQuantizedStatic(concat)) { + setEmptyPrecisions(concat); + } + continue; + } } return true; } diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_transformation.cpp index 9c539b7504a..b901d86203a 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_transformation.cpp @@ -230,16 +230,18 @@ TEST_P(ConcatTransformation, CompareFunctions) { auto res = compare_functions(referenceFunction, actualFunction, true, true, false, true, false); ASSERT_TRUE(res.first) << res.second; - const auto actualFakeQuantizes = LayerTransformation::get(actualFunction); - ASSERT_TRUE(checkIfOutputAttributesSharedValuesAreTheSame>(actualFakeQuantizes)) << - "PrecisionsAttribute are not the same"; - ConcatTransformationTestValues testValues = std::get<2>(GetParam()); - if (testValues.checkIntervalsAlignmentAttributes) { - auto operations = LayerTransformation::get(actualFunction); - operations.insert(operations.end(), actualFakeQuantizes.begin(), actualFakeQuantizes.end()); - ASSERT_TRUE(checkIfAttributesSharedValuesAreTheSame>(operations)) << - "IntervalsAlignmentAttribute are not the same"; + const auto actualFakeQuantizes = LayerTransformation::get(actualFunction); + if (testValues.axis == 1) { + ASSERT_TRUE(checkIfOutputAttributesSharedValuesAreTheSame>(actualFakeQuantizes)) << + "PrecisionsAttribute are not the same"; + + if (testValues.checkIntervalsAlignmentAttributes) { + auto operations = LayerTransformation::get(actualFunction); + operations.insert(operations.end(), actualFakeQuantizes.begin(), actualFakeQuantizes.end()); + ASSERT_TRUE(checkIfAttributesSharedValuesAreTheSame>(operations)) << + "IntervalsAlignmentAttribute are not the same"; + } } } diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_different_precision_on_children.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_different_precision_on_children.cpp index e781b8b258d..5bd1dd8d379 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_different_precision_on_children.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_different_precision_on_children.cpp @@ -11,11 +11,9 @@ #include #include -#include #include #include #include -#include #include "common_test_utils/ngraph_test_utils.hpp" #include 
"lpt_ngraph_functions/concat_function.hpp" @@ -45,7 +43,8 @@ public: ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize1; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize2; ngraph::element::Type precisionBeforeOp; - ngraph::builder::subgraph::DequantizationOperations dequantizationBefore; + ngraph::builder::subgraph::DequantizationOperations dequantizationBefore1; + ngraph::builder::subgraph::DequantizationOperations dequantizationBefore2; ngraph::element::Type precisionAfterOperation; ngraph::builder::subgraph::DequantizationOperations dequantizationAfter1; ngraph::builder::subgraph::DequantizationOperations dequantizationAfter2; @@ -63,6 +62,7 @@ class ConcatTransformationTestValues { public: TestTransformationParams params; bool multiChannels; + std::int64_t axis; ConcatTransformationActualValues actual; ConcatTransformationResultValues result; }; @@ -87,6 +87,7 @@ public: actualFunction = ngraph::builder::subgraph::ConcatFunction::getOriginalWithDifferentPrecisionOnChildren( precision, inputShape, + testValues.axis, testValues.actual.fakeQuantize1, testValues.actual.fakeQuantize2); @@ -100,17 +101,18 @@ public: transform.add(testValues.params); transform.add(testValues.params); transform.add(testValues.params); - transform.add(testValues.params); transform.transform(actualFunction); referenceFunction = ngraph::builder::subgraph::ConcatFunction::getReferenceWithDifferentPrecisionOnChildren( precision, inputShape, testValues.multiChannels, + testValues.axis, testValues.result.fakeQuantize1, testValues.result.fakeQuantize2, testValues.result.precisionBeforeOp, - testValues.result.dequantizationBefore, + testValues.result.dequantizationBefore1, + testValues.result.dequantizationBefore2, testValues.result.precisionAfterOperation, testValues.result.dequantizationAfter1, testValues.result.dequantizationAfter2); @@ -153,6 +155,7 @@ const std::vector testValues = { { LayerTransformation::createParamsU8I8(), false, + 1, { { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, {2.55f / 2.f} } @@ -162,15 +165,37 @@ const std::vector testValues = { { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, { 128.f} }, ngraph::element::u8, {{}, {}, {}}, + {{}, {}, {}}, ngraph::element::u8, { ngraph::element::f32, {}, { 0.01f } }, { ngraph::element::f32, {}, { 0.01f } } } }, + // U8 with unsupported axis + { + LayerTransformation::createParamsU8I8(), + false, + 2, + { + { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, {2.55f / 2.f} } + }, + { + { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {255.f} }, + { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, {255.f} }, + ngraph::element::u8, + {{ngraph::element::f32}, {}, {0.01f}}, + {{ngraph::element::f32}, {}, {0.005f}}, + ngraph::element::f32, + {{}, {}, {}}, + {{}, {}, {}} + } + }, // I8 { LayerTransformation::createParamsI8I8(), false, + 1, { { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, { 256ul, ngraph::Shape({}), {-1.28f / 2.f}, {1.27f / 2.f}, {-1.28f / 2.f}, {1.27f / 2.f} } @@ -180,6 +205,7 @@ const std::vector testValues = { { 256ul, ngraph::Shape({}), {-1.28f / 2.f}, {1.27f / 2.f}, {-64.f}, { 64.f} }, ngraph::element::i8, {{}, {}, {}}, + {{}, {}, {}}, ngraph::element::i8, { ngraph::element::f32, {}, { 0.01f } }, { ngraph::element::f32, {}, { 0.01f } } @@ -189,6 +215,7 @@ const std::vector testValues = { { LayerTransformation::createParamsU8I8(), true, + 1, { { 
256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, {2.55f / 2.f} } @@ -198,6 +225,7 @@ const std::vector testValues = { { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, { 255.f} }, ngraph::element::u8, {{}, {}, {}}, + {{}, {}, {}}, ngraph::element::u8, { ngraph::element::f32, {}, {{ 0.01f, 0.01f, 0.01f, 0.005f, 0.005f, 0.005f }} }, { ngraph::element::f32, {}, {{ 0.01f, 0.01f, 0.01f, 0.005f, 0.005f, 0.005f }} }, @@ -207,6 +235,7 @@ const std::vector testValues = { { LayerTransformation::createParamsI8I8(), true, + 1, { { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, { 256ul, ngraph::Shape({}), {-1.28f / 2.f}, {1.27f / 2.f}, {-1.28f / 2.f}, {1.27f / 2.f} } @@ -216,6 +245,7 @@ const std::vector testValues = { { 256ul, ngraph::Shape({}), {-1.28f / 2.f}, {1.27f / 2.f}, {-128.f}, {127.f} }, ngraph::element::i8, {{}, {}, {}}, + {{}, {}, {}}, ngraph::element::i8, { ngraph::element::f32, {}, {{ 0.01f, 0.01f, 0.01f, 0.005f, 0.005f, 0.005f }} }, { ngraph::element::f32, {}, {{ 0.01f, 0.01f, 0.01f, 0.005f, 0.005f, 0.005f }} }, @@ -225,6 +255,7 @@ const std::vector testValues = { { LayerTransformation::createParamsU8I8().setUpdatePrecisions(false), false, + 1, { { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, {2.55f / 2.f} } @@ -234,6 +265,7 @@ const std::vector testValues = { { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, { 128.f} }, ngraph::element::f32, {{}, {}, {}}, + {{}, {}, {}}, ngraph::element::f32, { {}, {}, { 0.01f } }, { {}, {}, { 0.01f } } diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_unsupported_axis_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_unsupported_axis_transformation.cpp new file mode 100644 index 00000000000..2ac33c4de30 --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_unsupported_axis_transformation.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "layer_transformation.hpp" + +#include + +#include "lpt_ngraph_functions/concat_function.hpp" +#include "simple_low_precision_transformer.hpp" + +using namespace ::testing; + +class smoke_LPT_ConcatWithUnsupportedAxis : public Test {}; + +TEST_F(smoke_LPT_ConcatWithUnsupportedAxis, rtInfoCheck) { + using namespace ngraph::builder::subgraph; + + const ngraph::element::Type precision = ngraph::element::f32; + const ngraph::PartialShape inputPShape = PartialShape{ 1, 3, 16, 16 }; + const std::int64_t unsupportedAxis = 2; + const auto fakeQuantize = FakeQuantizeOnData{ 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }; + + std::shared_ptr function = ConcatFunction::getOriginalWithDifferentPrecisionOnChildren( + precision, + inputPShape, + unsupportedAxis, + fakeQuantize, + fakeQuantize); + + SimpleLowPrecisionTransformer transformer; + transformer.transform(function); + + const auto actualConcat = LayerTransformation::get(function)[0]; + const auto& rtInfo = actualConcat->get_rt_info(); + ASSERT_TRUE(rtInfo.empty()) << "Unsupported concat mustn't contain LPT runtime attributes"; +} diff --git a/inference-engine/tests/functional/plugin/conformance/test_runner/skip_configs/skip_config_cpu.lst b/inference-engine/tests/functional/plugin/conformance/test_runner/skip_configs/skip_config_cpu.lst index 5065186480c..2bef674590d 100644 --- 
a/inference-engine/tests/functional/plugin/conformance/test_runner/skip_configs/skip_config_cpu.lst +++ b/inference-engine/tests/functional/plugin/conformance/test_runner/skip_configs/skip_config_cpu.lst @@ -1,9 +1,3 @@ -# OMZ: -# Hang: -.*AvgPool_1199829.* -.*AvgPool_1201153.* -.*ROIPooling_1199827.* - # DLB: # Hang: .*Convolution_1301086.* diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp index b3631fe57d7..72d30a9b019 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp @@ -16,42 +16,48 @@ const std::vector netPrecisions = { }; const std::vector trasformationParamValues = { - // LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsI8I8(), LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8() }; const std::vector testValues = { // U8 { + 1, + { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } + }, + // U8 and unsupported concat axis + { + 2, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } }, // I8 { + 1, { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} } }, // mixed: U8 + I8 { + 1, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, // mixed: I8 + U8 { + 1, { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } } }; -const std::vector multiChannel = { true/*, false*/ }; - INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithDifferentChildrenTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::Values(ngraph::PartialShape({ 1, 6, 10, 10 })), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::ValuesIn(testValues), - ::testing::ValuesIn(trasformationParamValues), - ::testing::ValuesIn(multiChannel)), + ::testing::ValuesIn(trasformationParamValues)), ConcatWithDifferentChildrenTransformation::getTestCaseName); } // namespace diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp index 731946ef016..0f09ea78560 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp @@ -16,42 +16,48 @@ const std::vector netPrecisions = { }; const std::vector trasformationParamValues = { - // LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsI8I8(), 
LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8() }; const std::vector testValues = { // U8 { + 1, + { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } + }, + // U8 and unsupported concat axis + { + 2, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } }, // I8 { - { 256ul, ngraph::Shape({}), {-128.f}, {127.f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {-128.f}, {127.f}, {-1.28f / 2}, {1.27f / 2} } + 1, + { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} } }, // mixed: U8 + I8 { + 1, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, // mixed: I8 + U8 { + 1, { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } } }; -const std::vector multiChannel = { true/*, false*/ }; - INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithDifferentChildrenTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })), ::testing::Values(CommonTestUtils::DEVICE_GPU), ::testing::ValuesIn(testValues), - ::testing::ValuesIn(trasformationParamValues), - ::testing::ValuesIn(multiChannel)), + ::testing::ValuesIn(trasformationParamValues)), ConcatWithDifferentChildrenTransformation::getTestCaseName); } // namespace diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_children.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_children.hpp index a92974bed4c..72a7c4b06e8 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_children.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_children.hpp @@ -13,6 +13,7 @@ namespace LayerTestsDefinitions { class ConcatWithDifferentChildrenTransformationParam { public: + std::int64_t axis; ngraph::builder::subgraph::FakeQuantizeOnData fqOnData1; ngraph::builder::subgraph::FakeQuantizeOnData fqOnData2; }; @@ -22,9 +23,8 @@ typedef std::tuple< ngraph::PartialShape, std::string, // target device: CPU, GPU ConcatWithDifferentChildrenTransformationParam, - ngraph::pass::low_precision::LayerTransformation::Params, // transformation parameters - // multichannel - bool> ConcatWithDifferentChildrenTransformationParams; + ngraph::pass::low_precision::LayerTransformation::Params // transformation parameters + > ConcatWithDifferentChildrenTransformationParams; class ConcatWithDifferentChildrenTransformation : public testing::WithParamInterface, diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_children.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_children.cpp index 6334b3d644f..1bca896058b 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_children.cpp +++ 
b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_children.cpp @@ -25,13 +25,12 @@ std::string ConcatWithDifferentChildrenTransformation::getTestCaseName(testing:: std::string targetDevice; ConcatWithDifferentChildrenTransformationParam param; ngraph::pass::low_precision::LayerTransformation::Params params; - bool multiChannel; - std::tie(netPrecision, inputShapes, targetDevice, param, params, multiChannel) = obj.param; + std::tie(netPrecision, inputShapes, targetDevice, param, params) = obj.param; std::ostringstream result; result << getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params) << - (multiChannel ? "_multichannel" : "") << param.fqOnData1 << param.fqOnData2; + "_axis_" << param.axis << "_" << param.fqOnData1 << param.fqOnData2; return result.str(); } @@ -42,8 +41,7 @@ InferenceEngine::Blob::Ptr ConcatWithDifferentChildrenTransformation::GenerateIn std::string targetDevice; ConcatWithDifferentChildrenTransformationParam param; ngraph::pass::low_precision::LayerTransformation::Params params; - bool multiChannel; - std::tie(netPrecision, inputShapes, targetDevice, param, params, multiChannel) = this->GetParam(); + std::tie(netPrecision, inputShapes, targetDevice, param, params) = this->GetParam(); const float k = (info.name() == "input1") ? 1.f : (info.name() == "input2" ? 2.f : 3.f); return LayerTransformation::GenerateInput(ngraph::element::u8, info.getTensorDesc(), k); @@ -54,11 +52,10 @@ void ConcatWithDifferentChildrenTransformation::SetUp() { ngraph::PartialShape inputShapes; ConcatWithDifferentChildrenTransformationParam param; ngraph::pass::low_precision::LayerTransformation::Params params; - bool multiChannel; - std::tie(netPrecision, inputShapes, targetDevice, param, params, multiChannel) = this->GetParam(); + std::tie(netPrecision, inputShapes, targetDevice, param, params) = this->GetParam(); function = ngraph::builder::subgraph::ConcatFunction::getOriginalWithDifferentPrecisionOnChildren( - netPrecision, inputShapes, param.fqOnData1, param.fqOnData2); + netPrecision, inputShapes, param.axis, param.fqOnData1, param.fqOnData2); } TEST_P(ConcatWithDifferentChildrenTransformation, CompareWithRefImpl) { diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp index ae371c7b229..3d0b62963a6 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp @@ -30,17 +30,12 @@ void CumSumLayerTest::SetUp() { bool exclusive, reverse; int64_t axis; std::tie(inputShapes, inputPrecision, axis, exclusive, reverse, targetDevice) = this->GetParam(); - auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - ngraph::ParameterVector paramVector; - auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes)); - paramVector.push_back(paramData); + const auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); + const auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes)); + const auto axisNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector{axis})->output(0); + const auto cumSum = std::make_shared(paramData, axisNode, exclusive, reverse); - auto axisNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector{axis})->output(0); - - auto 
paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(paramVector)); - auto cumSum = std::dynamic_pointer_cast(ngraph::builder::makeCumSum(paramOuts[0], axisNode, exclusive, reverse)); - - ngraph::ResultVector results{std::make_shared(cumSum)}; - function = std::make_shared(results, paramVector, "cumsum"); + ngraph::ResultVector results{std::make_shared(cumSum)}; + function = std::make_shared(results, ngraph::ParameterVector{paramData}, "cumsum"); } } // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/concat_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/concat_function.hpp index 241b250bb00..0aa75c2cfbc 100644 --- a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/concat_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/concat_function.hpp @@ -82,6 +82,7 @@ public: static std::shared_ptr getOriginalWithDifferentPrecisionOnChildren( const ngraph::element::Type precision, const ngraph::PartialShape& inputShape, + const std::int64_t axis, const FakeQuantizeOnData& fqOnData1, const FakeQuantizeOnData& fqOnData2); @@ -229,10 +230,12 @@ public: const ngraph::element::Type precision, const ngraph::PartialShape& inputShape, const bool multiChannel, + const std::int64_t axis, const FakeQuantizeOnData& fqOnData1, const FakeQuantizeOnData& fqOnData2, const ngraph::element::Type precisionBeforeOp, - const DequantizationOperations& dequantizationBefore, + const DequantizationOperations& dequantizationBefore1, + const DequantizationOperations& dequantizationBefore2, const ngraph::element::Type precisionAfterOperation, const DequantizationOperations& dequantizationAfter1, const DequantizationOperations& dequantizationAfter2); diff --git a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/concat_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/concat_function.cpp index d07034de213..28c04e0ed4d 100644 --- a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/concat_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/concat_function.cpp @@ -600,6 +600,7 @@ std::shared_ptr ConcatFunction::getOriginalWithStridedSlice( std::shared_ptr ConcatFunction::getOriginalWithDifferentPrecisionOnChildren( const ngraph::element::Type precision, const ngraph::PartialShape& inputShape, + const std::int64_t axis, const FakeQuantizeOnData& fqOnData1, const FakeQuantizeOnData& fqOnData2) { const auto input1 = std::make_shared(precision, inputShape); @@ -610,11 +611,7 @@ std::shared_ptr ConcatFunction::getOriginalWithDifferentPrecis input2->set_friendly_name("input2"); const auto fakeQuantize2 = makeFakeQuantize(input2, precision, fqOnData2); - const std::shared_ptr concat = std::make_shared( - ngraph::OutputVector{ fakeQuantize1->output(0), fakeQuantize2->output(0) }, 1); - - auto& rtInfo = concat->get_rt_info(); - rtInfo["Variant::std::string"] = std::make_shared>("concat"); + const auto concat = std::make_shared(OutputVector{ fakeQuantize1->output(0), fakeQuantize2->output(0) }, axis); const std::vector kernel = { 3, 3 }; const std::vector stride = { 1, 1 }; @@ -1687,10 +1684,12 @@ std::shared_ptr ConcatFunction::getReferenceWithDifferentPreci const ngraph::element::Type precision, const ngraph::PartialShape& inputShape, const bool multiChannel, + const std::int64_t axis, const 
FakeQuantizeOnData& fqOnData1, const FakeQuantizeOnData& fqOnData2, const ngraph::element::Type precisionBeforeOp, - const DequantizationOperations& dequantizationBefore, + const DequantizationOperations& dequantizationBefore1, + const DequantizationOperations& dequantizationBefore2, const ngraph::element::Type precisionAfterOperation, const DequantizationOperations& dequantizationAfter1, const DequantizationOperations& dequantizationAfter2) { @@ -1700,7 +1699,7 @@ std::shared_ptr ConcatFunction::getReferenceWithDifferentPreci const auto fakeQuantize1 = makeFakeQuantizeTypeRelaxed(input1, precision, fqOnData1); low_precision::NetworkHelper::setOutDataPrecisionForTypeRelaxed(fakeQuantize1, precisionBeforeOp); fakeQuantize1->set_friendly_name("fakeQuantize1"); - const auto deqBefore1 = makeDequantization(fakeQuantize1, dequantizationBefore); + const auto deqBefore1 = makeDequantization(fakeQuantize1, dequantizationBefore1); const auto input2 = std::make_shared(precision, inputShape); input2->set_friendly_name("input2"); @@ -1708,16 +1707,12 @@ std::shared_ptr ConcatFunction::getReferenceWithDifferentPreci const auto fakeQuantize2 = makeFakeQuantizeTypeRelaxed(input2, precision, fqOnData2); low_precision::NetworkHelper::setOutDataPrecisionForTypeRelaxed(fakeQuantize2, precisionBeforeOp); fakeQuantize2->set_friendly_name("fakeQuantize2"); - const auto deqBefore2 = makeDequantization(fakeQuantize2, dequantizationBefore); + const auto deqBefore2 = makeDequantization(fakeQuantize2, dequantizationBefore2); - const std::shared_ptr concat = std::make_shared( - ngraph::OutputVector{ deqBefore1, deqBefore2 }, 1); + const auto concat = std::make_shared(OutputVector{ deqBefore1, deqBefore2 }, axis); low_precision::NetworkHelper::setOutDataPrecision(concat, precisionAfterOperation); concat->set_friendly_name("concat"); - auto& rtInfo = concat->get_rt_info(); - rtInfo["Variant::std::string"] = std::make_shared>("concat"); - const auto lastDequantization1 = makeDequantization(concat->output(0), dequantizationAfter1); const std::vector kernel = { 3, 3 }; @@ -1741,20 +1736,18 @@ std::shared_ptr ConcatFunction::getReferenceWithDifferentPreci ngraph::ResultVector results; results.push_back(std::make_shared(avgPool)); - if (!dequantizationAfter2.empty()) { - const std::shared_ptr maxPool = std::make_shared( - concat->output(0), - stride, - padBegin, - padEnd, - kernel, - roundingType, - padType); + const std::shared_ptr maxPool = std::make_shared( + concat->output(0), + stride, + padBegin, + padEnd, + kernel, + roundingType, + padType); - const std::shared_ptr lastDequantization2 = makeDequantization(maxPool, dequantizationAfter2); - lastDequantization2->set_friendly_name("MaxPool"); - results.push_back(std::make_shared(lastDequantization2)); - } + const std::shared_ptr lastDequantization2 = makeDequantization(maxPool, dequantizationAfter2); + lastDequantization2->set_friendly_name("MaxPool"); + results.push_back(std::make_shared(lastDequantization2)); std::shared_ptr function = std::make_shared( results, diff --git a/model-optimizer/extensions/middle/MarkSubgraphsWithCorrectLayout.py b/model-optimizer/extensions/middle/MarkSubgraphsWithCorrectLayout.py index 5017d3e4660..129cd184361 100644 --- a/model-optimizer/extensions/middle/MarkSubgraphsWithCorrectLayout.py +++ b/model-optimizer/extensions/middle/MarkSubgraphsWithCorrectLayout.py @@ -22,10 +22,14 @@ class MarkSubGraphsWithCorrectLayout(MiddleReplacementPattern): 1. 
Prevents from adding Transpose operations before and after "reinterp_shape" like operations which change rank of the input and output tensors of this layout agnostic op. 2. Disable attributes permutation for all intermediate ops between these "reinterp_shape" nodes. - 3. Marks nodes along the weight path of convolutions as in correct layout to not permute them from NHWC to NCHW + 3. Marks nodes along the weight path of convolutions as in correct layout to not permute them from NHWC to NCHW. + The last point is needed for TF NCHW graphs as well. In the Conv/Deconv infer functions, "set_permutation()" + adds the "permutation" attribute to the weights data node even for NCHW graphs, because Conv weights must be + permuted from the original TF layout into the IE one regardless of the graph layout. Therefore, for TF models, + the weights path must be marked as having the correct layout even for NCHW graphs to prevent unwarranted permutations. """ enabled = True - graph_condition = [lambda graph: graph.graph['layout'] == 'NHWC'] + graph_condition = [lambda graph: graph.graph['fw'] == 'tf'] op_conditions = [lambda n: n.soft_get('op') == 'MatMul' and any([len(port.data.get_shape()) in (4, 5) for port in n.in_ports().values()]), ] diff --git a/model-optimizer/mo/ops/convolution.py b/model-optimizer/mo/ops/convolution.py index ee24fcd1370..3a7ff6cc288 100644 --- a/model-optimizer/mo/ops/convolution.py +++ b/model-optimizer/mo/ops/convolution.py @@ -256,6 +256,9 @@ class Convolution(Op): ('output_feature_channel', 'input:{}'.format(weights_index)), ]) + + # Needed to permute Conv weights from the original TF [H, W, C_IN, C_OUT] layout into the IE [C_OUT, C_IN, H, W] one. + # For other nodes in the weights subgraph, permutations must be turned off + # by marking them with MarkSubGraphsWithCorrectLayout, even if the graph layout is NCHW. PermuteAttrs.set_permutation(node.in_node(weights_index), node, node.soft_get('get_weights_permute', None)) PermuteInputs().set_input_permutation( node.in_node(weights_index), node, 'input:{}'.format(weights_index), 'transpose') diff --git a/model-optimizer/mo/ops/deconvolution.py b/model-optimizer/mo/ops/deconvolution.py index f338267c4db..ba080582d85 100644 --- a/model-optimizer/mo/ops/deconvolution.py +++ b/model-optimizer/mo/ops/deconvolution.py @@ -99,7 +99,10 @@ class Deconvolution(Op): ('input_feature_channel', 'input:1'), ('output_feature_channel', 'input:1'), ]) - + + # Needed to permute Deconv weights from the original TF [H, W, C_OUT, C_IN] layout into the IE [C_IN, C_OUT, H, W] one. + # For other nodes in the weights subgraph, permutations must be turned off + # by marking them with MarkSubGraphsWithCorrectLayout, even if the graph layout is NCHW. PermuteAttrs.set_permutation(node.in_node(1), node, node.soft_get('get_weights_permute', None)) PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:1', 'transpose') PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0', 'shape')
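The layout conversion described in the comments above amounts to a single transpose of the weight tensor. A minimal NumPy illustration (shapes are made up for the example; this is not MO code):

```python
import numpy as np

# TF stores 2D convolution weights as [H, W, C_IN, C_OUT], while IE expects
# [C_OUT, C_IN, H, W]; the "permutation" attribute set via set_permutation()
# applies the equivalent of this transpose.
weights_tf = np.zeros((3, 3, 16, 32))           # [H, W, C_IN, C_OUT]
weights_ie = weights_tf.transpose(3, 2, 0, 1)   # -> [C_OUT, C_IN, H, W]
assert weights_ie.shape == (32, 16, 3, 3)
```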
diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/cum_sum.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/cum_sum.hpp index cb304f03b61..699e0f863ad 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/cum_sum.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/cum_sum.hpp @@ -5,17 +5,11 @@ #pragma once #include -#include -#include -#include - -#include "ngraph/coordinate_transform.hpp" -#include "ngraph/type/bfloat16.hpp" -#include "ngraph/type/float16.hpp" namespace ngraph { namespace runtime { namespace reference { + template void cumsum(const T* arg, const P* axis_tensor, @@ -23,89 +17,29 @@ void cumsum(const T* arg, const Shape& tensor_shape, const bool exclusive, const bool reverse) { - NGRAPH_SUPPRESS_DEPRECATED_START - CoordinateTransform temp_transform(tensor_shape); - for (const Coordinate& output_coord : temp_transform) { - out[temp_transform.index(output_coord)] = 0; - } + // Normalize a possibly negative axis and view the tensor as + // [size_before_axis, axis_dim, size_after_axis]; running sums go along the middle dimension. + const auto rank = tensor_shape.size(); + const auto axis = axis_tensor[0] >= 0 ? axis_tensor[0] : rank + axis_tensor[0]; + const auto axis_dim = tensor_shape[axis]; - P axis = axis_tensor[0]; - P rank = tensor_shape.size(); + const auto size_before_axis = shape_size(Shape(tensor_shape.begin(), tensor_shape.begin() + axis)); + const auto size_after_axis = shape_size(Shape(tensor_shape.begin() + axis + 1, tensor_shape.end())); - if (axis < -rank || axis > rank) { - throw ngraph_error("axis must be in the range [-rank, rank]"); - } - axis = axis < 0 ? rank + axis : axis; + // reverse_shift selects the traversal direction; element_shift makes the + // exclusive mode read the input one step behind the output position. + const auto reverse_shift = reverse ? -1 : 1; + const auto element_shift = exclusive ? size_after_axis * reverse_shift : 0; - auto get_key = [&, axis](const Coordinate& coord) -> Coordinate { - Coordinate result(coord.size(), 0); - result[axis] = coord[axis]; - - for (size_t i = 0; i < coord.size(); i++) { - result[i] = coord[i] - result[i]; - } - return result; - }; - - auto update_output_buffer = - [&](size_t input_index, size_t output_index, T& prev, std::vector>& tensor_vec) -> void { - tensor_vec[input_index].second = prev + tensor_vec[input_index].second; - out[tensor_vec[output_index].first] = tensor_vec[input_index].second; - - // update prev to hold the last result value to compute ruuning sum for - // subsequent iter - prev = out[tensor_vec[output_index].first]; - }; - - auto cum_sum = [&, exclusive, reverse](std::vector>& tensor_vec) { - if (!reverse) { - T prev = 0; - for (size_t i = 0; i < tensor_vec.size(); i++) { - if (exclusive && i == 0) { - out[tensor_vec[i].first] = prev; - continue; - } - // we will compute running sum of j-1 elements if exlusive=1 or else - // for j elements if exclusive = 0 - size_t arg_index = exclusive == 1 ? i - 1 : i; - update_output_buffer(arg_index, i, prev, tensor_vec); - } - } else // reverse == true - { - T prev = 0; - for (size_t i = tensor_vec.size(); i-- > 0;) { - if (exclusive && i == tensor_vec.size() - 1) { - out[tensor_vec[i].first] = prev; - continue; - } - // we will compute running sum of j-1 elements if exlusive=1 or else - // for j elements if exclusive = 0 - size_t arg_index = exclusive == 1 ? i + 1 : i; - update_output_buffer(arg_index, i, prev, tensor_vec); + // Each (i, j) pair addresses one independent sequence of axis_dim elements along `axis`. + for (size_t i = 0; i < size_before_axis; ++i) { + const auto slice_idx = i * axis_dim * size_after_axis + reverse * size_after_axis * (axis_dim - 1); + for (size_t j = 0; j < size_after_axis; ++j) { + const auto sequence_start_idx = slice_idx + j; + out[sequence_start_idx] = exclusive ? T{0} : arg[sequence_start_idx]; + for (size_t k = 1; k < axis_dim; ++k) { + const auto element_idx = sequence_start_idx + (k * size_after_axis) * reverse_shift; + const auto in_idx = element_idx - element_shift; + const auto previous_sum_idx = element_idx - size_after_axis * reverse_shift; + out[element_idx] = out[previous_sum_idx] + arg[in_idx]; + } + } - }; - - // Map to collect tensor elements belonging to the same axis - std::map>> map_cooord_to_val; - CoordinateTransform input_transform(tensor_shape); - for (const Coordinate& input_coord : input_transform) { - // points to the current element in the input tensor - T current = arg[input_transform.index(input_coord)]; - auto key = get_key(input_coord); - auto index = input_transform.index(input_coord); - if (map_cooord_to_val.find(key) != map_cooord_to_val.end()) { - map_cooord_to_val[key].push_back(std::make_pair(index, current)); - } else { - map_cooord_to_val.insert({key, std::vector>()}); - map_cooord_to_val[key].push_back(std::make_pair(index, current)); - } } - // iterate the map and perform cumulative sum over the give axis - for (auto& it : map_cooord_to_val) { - cum_sum(it.second); - } - NGRAPH_SUPPRESS_DEPRECATED_END } } // namespace reference } // namespace runtime
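The rewritten kernel replaces the coordinate-map bookkeeping with plain index arithmetic over a [size_before_axis, axis_dim, size_after_axis] view. The same semantics can be cross-checked in a few lines of NumPy against the hardcoded vectors from the new reference test above; this is a verification sketch only, not shipped code:

```python
import numpy as np

def cumsum_ref(data, axis=0, exclusive=False, reverse=False):
    # Reverse mode accumulates from the end of the axis.
    x = np.flip(data, axis) if reverse else np.asarray(data)
    out = np.cumsum(x, axis=axis)
    if exclusive:
        # Shift the running sums by one step and seed each sequence with 0.
        out = np.roll(out, 1, axis=axis)
        first = [slice(None)] * out.ndim
        first[axis] = 0
        out[tuple(first)] = 0
    return np.flip(out, axis) if reverse else out

data = np.array([1, 2, 3, 4, 5, 6])
# Expected values taken from the CumSumParams test vectors above.
assert np.array_equal(cumsum_ref(data, exclusive=True), [0, 1, 3, 6, 10, 15])
assert np.array_equal(cumsum_ref(data, reverse=True), [21, 20, 18, 15, 11, 6])
assert np.array_equal(cumsum_ref(data, exclusive=True, reverse=True), [20, 18, 15, 11, 6, 0])
```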
diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt
index 8a7c7e1a0ba..28017824f22 100644
--- a/ngraph/test/CMakeLists.txt
+++ b/ngraph/test/CMakeLists.txt
@@ -428,7 +428,6 @@ set(MULTI_TEST_SRC
     backend/cosh.in.cpp
     backend/ctc_greedy_decoder.in.cpp
     backend/ctc_greedy_decoder_seq_len.in.cpp
-    backend/cum_sum.in.cpp
     backend/deformable_psroi_pooling.in.cpp
    backend/detection_output.in.cpp
    backend/dft.in.cpp
diff --git a/ngraph/test/backend/cum_sum.in.cpp b/ngraph/test/backend/cum_sum.in.cpp
deleted file mode 100644
index 9f8b1f91b30..00000000000
--- a/ngraph/test/backend/cum_sum.in.cpp
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <algorithm>
-#include <cinttypes>
-#include <cmath>
-#include <cstdlib>
-#include <random>
-#include <string>
-
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
-#include "ngraph/runtime/tensor.hpp"
-#include "runtime/backend.hpp"
-#include "util/all_close.hpp"
-#include "util/all_close_f.hpp"
-#include "util/ndarray.hpp"
-#include "util/random.hpp"
-#include "util/test_control.hpp"
-#include "util/test_tools.hpp"
-
-static std::mt19937_64 random_generator;
-
-using namespace std;
-using namespace ngraph;
-
-static string s_manifest = "${MANIFEST}";
-
-NGRAPH_TEST(${BACKEND_NAME}, cum_sum_default) {
-    Shape shape{1, 4};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto axis = make_shared<op::Parameter>(element::i32, Shape{1});
-    auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis), ParameterVector{A, axis});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
-    copy_data(a, vector<float>{1, 2, 3, 4});
-    auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
-    copy_data(axis_tensor, vector<int32_t>{1});
-    auto result = backend->create_tensor(element::f32, shape);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, axis_tensor});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 3, 6, 10}), read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim) {
-    Shape shape{2, 4};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto axis = make_shared<op::Parameter>(element::i64, Shape{1});
-    auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis), ParameterVector{A, axis});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
-    copy_data(a, vector<float>{0, 1, 2, 3, 4, 5, 6, 7});
-    auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
-    copy_data(axis_tensor, vector<int64_t>{0});
-    auto result = backend->create_tensor(element::f32, shape);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, axis_tensor});
-    EXPECT_TRUE(test::all_close_f((vector<float>{0, 1, 2, 3, 4, 6, 8, 10}), read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim_default_axis) {
-    Shape shape{2, 4};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto f = make_shared<Function>(make_shared<op::CumSum>(A), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
-    copy_data(a, vector<float>{0, 1, 2, 3, 4, 5, 6, 7});
-    auto result = backend->create_tensor(element::f32, shape);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{0, 1, 2, 3, 4, 6, 8, 10}), read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, cum_sum_3d) {
-    auto test_cumsum_3d = [](const int32_t axis_val) -> void {
-        Shape shape{3, 2, 4};
-        auto A = make_shared<op::Parameter>(element::f32, shape);
-        auto axis = make_shared<op::Parameter>(element::i32, Shape{1});
-        auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis), ParameterVector{A, axis});
-
-        auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-        // Create some tensors for input/output
-        auto a = backend->create_tensor(element::f32, shape);
-        copy_data(a,
-                  vector<float>{0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11,
-                                12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
-        auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
-        copy_data(axis_tensor, vector<int32_t>{axis_val});
-        auto result = backend->create_tensor(element::f32, shape);
-
-        auto handle = backend->compile(f);
-        handle->call_with_validate({result}, {a, axis_tensor});
-
-        if (axis_val == 0) {
-            EXPECT_TRUE(test::all_close_f(
-                (vector<float>{0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 20, 22, 24, 27, 30, 33, 36, 39, 42, 45}),
-                read_vector<float>(result)));
-        } else if (axis_val == 1) {
-            EXPECT_TRUE(test::all_close_f(
-                (vector<float>{0, 1, 2, 3, 4, 6, 8, 10, 8, 9, 10, 11, 20, 22, 24, 26, 16, 17, 18, 19, 36, 38, 40, 42}),
-                read_vector<float>(result)));
-        } else if (axis_val == 2) {
-            EXPECT_TRUE(test::all_close_f((vector<float>{0,  1,  3,  6,  4,  9,  15, 22, 8,  17, 27, 38,
-                                                         12, 25, 39, 54, 16, 33, 51, 70, 20, 41, 63, 86}),
-                                          read_vector<float>(result)));
-        }
-    };
-    test_cumsum_3d(0);
-    test_cumsum_3d(1);
-    test_cumsum_3d(2);
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim_allmodes) {
-    auto test_cum_sum_allmodes = [](const int64_t axis_val, int exclusive, int reverse) {
-        Shape shape{2, 4};
-        auto A = make_shared<op::Parameter>(element::f32, shape);
-        auto axis = make_shared<op::Parameter>(element::i64, Shape{1});
-        auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis, exclusive, reverse), ParameterVector{A, axis});
-
-        auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-        // Create some tensors for input/output
-        auto a = backend->create_tensor(element::f32, shape);
-        copy_data(a, vector<float>{0, 1, 2, 3, 4, 5, 6, 7});
-        auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
-        copy_data(axis_tensor, vector<int64_t>{axis_val});
-        auto result = backend->create_tensor(element::f32, shape);
-
-        auto handle = backend->compile(f);
-        handle->call_with_validate({result}, {a, axis_tensor});
-        if (axis_val == 1 && exclusive == 1 && reverse == 0) {
-            EXPECT_TRUE(test::all_close_f((vector<float>{0, 0, 1, 3, 0, 4, 9, 15}), read_vector<float>(result)));
-        } else if (axis_val == 1 && exclusive == 0 && reverse == 1) {
-            EXPECT_TRUE(test::all_close_f((vector<float>{6, 6, 5, 3, 22, 18, 13, 7}), read_vector<float>(result)));
-        } else if (axis_val == 1 && exclusive == 1 && reverse == 1) {
-            EXPECT_TRUE(test::all_close_f((vector<float>{6, 5, 3, 0, 18, 13, 7, 0}), read_vector<float>(result)));
-        } else if (axis_val == 0 && exclusive == 0 && reverse == 0) {
-            EXPECT_TRUE(test::all_close_f((vector<float>{0, 1, 2, 3, 4, 6, 8, 10}), read_vector<float>(result)));
-        } else if (axis_val == 0 && exclusive == 1 && reverse == 1) {
-            EXPECT_TRUE(test::all_close_f((vector<float>{4, 5, 6, 7, 0, 0, 0, 0}), read_vector<float>(result)));
-        } else if (axis_val == 0 && exclusive == 0 && reverse == 1) {
-            EXPECT_TRUE(test::all_close_f((vector<float>{4, 6, 8, 10, 4, 5, 6, 7}), read_vector<float>(result)));
-        }
-    };
-
-    test_cum_sum_allmodes(1, 1, 0);
-    test_cum_sum_allmodes(-1, 0, 1);
-    test_cum_sum_allmodes(-1, 1, 1);
-    test_cum_sum_allmodes(0, 0, 0);
-    test_cum_sum_allmodes(0, 1, 1);
-    test_cum_sum_allmodes(0, 0, 1);
-}
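For readers unfamiliar with the `exclusive`/`reverse` modes these deleted tests exercised, a quick NumPy check (illustration only, not from the patch) reproduces one of the expected vectors above. With `exclusive=1, reverse=1`, each output element is the sum of the elements strictly after it along the axis:

```python
import numpy as np

data = np.arange(8, dtype=np.float32).reshape(2, 4)

# reverse cumsum along axis 1: flip, cumsum, flip back
inclusive_reverse = np.flip(np.cumsum(np.flip(data, axis=1), axis=1), axis=1)

# exclusive variant: drop each element's own contribution
exclusive_reverse = inclusive_reverse - data

print(exclusive_reverse.reshape(-1))  # [ 6.  5.  3.  0. 18. 13.  7.  0.], as in cum_sum_2dim_allmodes
```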
diff --git a/tests/layer_tests/CMakeLists.txt b/tests/layer_tests/CMakeLists.txt
new file mode 100644
index 00000000000..4f5b64a7220
--- /dev/null
+++ b/tests/layer_tests/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright (C) 2018-2021 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+cmake_minimum_required(VERSION 3.13)
+
+project(layer_tests)
+
+install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL)
"30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 + # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3 diff --git a/tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_test_config.xml b/tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_test_config.xml index ebb16b7de2d..03ac32f1320 100644 --- a/tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_test_config.xml +++ b/tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_test_config.xml @@ -14,5 +14,10 @@ + + + + + \ No newline at end of file